@article{desender_dynamic_2022,
title = {Dynamic influences on static measures of metacognition},
volume = {13},
doi = {10.1038/s41467-022-31727-0},
abstract = {Humans differ in their capability to judge the accuracy of their own choices via confidence judgments. Signal detection theory has been used to quantify the extent to which confidence tracks accuracy via M-ratio, often referred to as metacognitive efficiency. This measure, however, is static in that it does not consider the dynamics of decision making. This could be problematic because humans may shift their level of response caution to alter the tradeoff between speed and accuracy. Such shifts could induce unaccounted-for sources of variation in the assessment of metacognition. Instead, evidence accumulation frameworks consider decision making, including the computation of confidence, as a dynamic process unfolding over time. We draw on evidence accumulation frameworks to examine the influence of response caution on metacognition. Simulation results demonstrate that response caution has an influence on M-ratio. We then tested and confirmed that this was also the case in human participants who were explicitly instructed to either focus on speed or accuracy. We next demonstrated that this association between M-ratio and response caution was also present in an experiment without any reference towards speed. The latter finding was replicated in an independent dataset. In contrast, when data were analyzed with a novel dynamic measure of metacognition, which we refer to as v-ratio, in all of the three studies there was no effect of speed-accuracy tradeoff. These findings have important implications for research on metacognition, such as its measurement, domain-generality, individual differences, and neural correlates.},
number = {1},
journal = {Nature Communications},
author = {Desender, Kobe and Vermeylen, Luc and Verguts, Tom},
year = {2022},
pages = {1--30},
}
@article{Desender2021,
title = {Dynamic expressions of confidence within an evidence accumulation framework},
volume = {207},
issn = {18737838},
url = {https://doi.org/10.1016/j.cognition.2020.104522},
doi = {10.1016/j.cognition.2020.104522},
abstract = {Human observers can reliably report their confidence in the choices they make. An influential framework conceptualizes decision confidence as the probability of a decision being correct, given the choice made and the evidence on which it was based. This framework accounts for three diagnostic signatures of human confidence reports, including an opposite dependence of confidence on evidence strength for correct and error trials. However, the framework does not account for the temporal evolution of these signatures, because it only describes the transformation of a static representation of evidence into choice and the associated confidence. Here, we combine this framework with another influential framework: dynamic accumulation of evidence over time, and build on the notion that confidence reflects the probability of being correct, given the choice and accumulated evidence up until that point. Critically, we show that such a dynamic model predicts that the diagnostic signatures of confidence depend on time; most critically, it predicts a stronger opposite dependence of confidence on evidence strength and choice correctness as a function of time. We tested, and confirmed, these predictions in human behaviour during random dot motion discrimination, in which confidence judgments were queried at different points in time. We conclude that human confidence reports reflect the dynamics of the probability of being correct given the accumulated evidence and choice.},
number = {104522},
journal = {Cognition},
author = {Desender, Kobe and Donner, Tobias H. and Verguts, Tom},
year = {2021},
pmid = {33256974},
keywords = {Confidence, Decision making, Drift diffusion model, Metacognition},
pages = {1--11},
}
@article{Rausch2020,
title = {Cognitive modelling reveals distinct electrophysiological markers of decision confidence and error monitoring},
volume = {218},
doi = {10.1016/j.neuroimage.2020.116963},
number = {116963},
journal = {NeuroImage},
author = {Rausch, Manuel and Zehetleitner, Michael and Steinhauser, Marco and Maier, Martin E.},
year = {2020},
pages = {1--14},
}
@article{rausch_modelling_2021,
title = {Modelling visibility judgments using models of decision confidence},
volume = {83},
doi = {10.3758/s13414-021-02284-3},
journal = {Attention, Perception, \& Psychophysics},
author = {Rausch, Manuel and Hellmann, Sebastian and Zehetleitner, Michael},
year = {2021},
keywords = {metacognition, consciousness, cognitive modeling, visibility, visual awareness},
pages = {3311--3336},
}
@article{boundy-singer_confidence_2022,
title = {Confidence reflects a noisy decision reliability estimate},
volume = {7},
issn = {2397-3374},
url = {https://www.nature.com/articles/s41562-022-01464-x},
doi = {10.1038/s41562-022-01464-x},
language = {en},
number = {1},
urldate = {2023-03-10},
journal = {Nature Human Behaviour},
author = {Boundy-Singer, Zoe M. and Ziemba, Corey M. and Goris, Robbe L. T.},
month = nov,
year = {2022},
pages = {142--154},
}
@article{Maniscalco2016,
title = {The signal processing architecture underlying subjective reports of sensory awareness},
volume = {1},
doi = {10.1093/nc/niw002},
journal = {Neuroscience of Consciousness},
author = {Maniscalco, Brian and Lau, Hakwan},
year = {2016},
keywords = {consciousness, awareness, contents of consciousness, perception, psychophysics, theories and models},
pages = {1--17},
}
@article{Shekhar2020a,
title = {The {Nature} of {Metacognitive} {Inefficiency} in {Perceptual} {Decision} {Making}},
volume = {128},
issn = {19391471},
doi = {10.1037/rev0000249},
abstract = {Humans have the metacognitive ability to judge the accuracy of their own decisions via confidence ratings. A substantial body of research has demonstrated that human metacognition is fallible but it remains unclear how metacognitive inefficiency should be incorporated into a mechanistic model of confidence generation. Here we show that, contrary to what is typically assumed, metacognitive inefficiency depends on the level of confidence. We found that, across 5 different data sets and 4 different measures of metacognition, metacognitive ability decreased with higher confidence ratings. To understand the nature of this effect, we collected a large dataset of 20 subjects completing 2,800 trials each and providing confidence ratings on a continuous scale. The results demonstrated a robustly nonlinear zROC curve with downward curvature, despite a decades-old assumption of linearity. This pattern of results was reproduced by a new mechanistic model of confidence generation, which assumes the existence of lognormally distributed metacognitive noise. The model outperformed competing models either lacking metacognitive noise altogether or featuring Gaussian metacognitive noise. Further, the model could generate a measure of metacognitive ability which was independent of confidence levels. These findings establish an empirically validated model of confidence generation, have significant implications about measures of metacognitive ability, and begin to reveal the underlying nature of metacognitive inefficiency.},
number = {1},
journal = {Psychological Review},
author = {Shekhar, Medha and Rahnev, Dobromir},
year = {2021},
pmid = {32673034},
keywords = {Confidence, Metacognition, Computational model, Metacognitive noise, Perceptual decision making},
pages = {45--70},
}
@article{Rausch2017,
title = {Should metacognition be measured by logistic regression?},
volume = {49},
issn = {10902376},
doi = {10.1016/j.concog.2017.02.007},
abstract = {Are logistic regression slopes suitable to quantify metacognitive sensitivity, i.e. the efficiency with which subjective reports differentiate between correct and incorrect task responses? We analytically show that logistic regression slopes are independent from rating criteria in one specific model of metacognition, which assumes (i) that rating decisions are based on sensory evidence generated independently of the sensory evidence used for primary task responses and (ii) that the distributions of evidence are logistic. Given a hierarchical model of metacognition, logistic regression slopes depend on rating criteria. According to all considered models, regression slopes depend on the primary task criterion. A reanalysis of previous data revealed that massive numbers of trials are required to distinguish between hierarchical and independent models with tolerable accuracy. It is argued that researchers who wish to use logistic regression as measure of metacognitive sensitivity need to control the primary task criterion and rating criteria.},
journal = {Consciousness and Cognition},
author = {Rausch, Manuel and Zehetleitner, Michael},
year = {2017},
keywords = {Metacognition, Signal detection theory, Logistic regression, Type 2 signal detection theory, Cognitive modeling, Generalized linear regression, Metacognitive sensitivity},
pages = {291--312},
}
@article{Rigoux2014,
title = {Bayesian model selection for group studies - {Revisited}},
volume = {84},
issn = {10959572},
url = {http://dx.doi.org/10.1016/j.neuroimage.2013.08.065},
doi = {10.1016/j.neuroimage.2013.08.065},
abstract = {In this paper, we revisit the problem of Bayesian model selection (BMS) at the group level. We originally addressed this issue in Stephan et al. (2009), where models are treated as random effects that could differ between subjects, with an unknown population distribution. Here, we extend this work, by (i) introducing the Bayesian omnibus risk (BOR) as a measure of the statistical risk incurred when performing group BMS, (ii) highlighting the difference between random effects BMS and classical random effects analyses of parameter estimates, and (iii) addressing the problem of between group or condition model comparisons. We address the first issue by quantifying the chance likelihood of apparent differences in model frequencies. This leads to the notion of protected exceedance probabilities. The second issue arises when people want to ask "whether a model parameter is zero or not" at the group level. Here, we provide guidance as to whether to use a classical second-level analysis of parameter estimates, or random effects BMS. The third issue rests on the evidence for a difference in model labels or frequencies across groups or conditions. Overall, we hope that the material presented in this paper finesses the problems of group-level BMS in the analysis of neuroimaging and behavioural data.},
journal = {NeuroImage},
author = {Rigoux, L. and Stephan, K. E. and Friston, K. J. and Daunizeau, J.},
year = {2014},
pmid = {24018303},
keywords = {Between-condition comparison, Between-group comparison, DCM, Exceedance probability, Mixed effects, Random effects, Statistical risk},
pages = {971--985},
}
@article{Rausch2018,
title = {Confidence in masked orientation judgments is informed by both evidence and visibility},
volume = {80},
issn = {1943393X},
doi = {10.3758/s13414-017-1431-5},
abstract = {How do human observers determine their degree of belief that they are correct in a decision about a visual stimulus—that is, their confidence? According to prominent theories of confidence, the quality of stimulation should be positively related to confidence in correct decisions, and negatively to confidence in incorrect decisions. However, in a backward-masked orientation task with a varying stimulus onset asynchrony (SOA), we observed that confidence in incorrect decisions also increased with stimulus quality. Model fitting to our decision and confidence data revealed that the best explanation for the present data was the new weighted evidence-and-visibility model, according to which confidence is determined by evidence about the orientation as well as by the general visibility of the stimulus. Signal detection models, postdecisional accumulation models, two-channel models, and decision-time-based models were all unable to explain the pattern of confidence as a function of SOA and decision correctness. We suggest that the metacognitive system combines several cues related to the correctness of a decision about a visual stimulus in order to calculate decision confidence.},
number = {1},
journal = {Attention, Perception, \& Psychophysics},
author = {Rausch, Manuel and Hellmann, Sebastian and Zehetleitner, Michael},
year = {2018},
keywords = {Confidence, Metacognition, Signal detection theory, Perceptual decision making, Masking, Visual perception, Cognitive modeling, Math modeling},
pages = {134--154},
}
@article{Rahnev2020,
title = {The {Confidence} {Database}},
volume = {4},
issn = {2397-3374},
url = {http://www.nature.com/articles/s41562-019-0813-1},
doi = {10.1038/s41562-019-0813-1},
abstract = {Understanding how people rate their confidence is critical for the characterization of a wide range of perceptual, memory, motor and cognitive processes. To enable the continued exploration of these processes, we created a large database of confidence studies spanning a broad set of paradigms, participant populations and fields of study. The data from each study are structured in a common, easy-to-use format that can be easily imported and analysed using multiple software packages. Each dataset is accompanied by an explanation regarding the nature of the collected data. At the time of publication, the Confidence Database (which is available at https://osf.io/s46pr/) contained 145 datasets with data from more than 8,700 participants and almost 4 million trials. The database will remain open for new submissions indefinitely and is expected to continue to grow. Here we show the usefulness of this large collection of datasets in four different analyses that provide precise estimations of several foundational confidence-related effects. This Resource introduces a new public database that enables researchers to re-analyse a large corpus of studies into meta-cognitive confidence judgements.},
journal = {Nature Human Behaviour},
author = {Rahnev, Dobromir and Desender, Kobe and Lee, Alan L. F. and Adler, William T. and Aguilar-Lleyda, David and Akdoğan, Başak and Arbuzova, Polina and Atlas, Lauren Y. and Balcı, Fuat and Bang, Ji Won and Bègue, Indrit and Birney, Damian P. and Brady, Timothy F. and Calder-Travis, Joshua and Chetverikov, Andrey and Clark, Torin K. and Davranche, Karen and Denison, Rachel N. and Dildine, Troy C. and Double, Kit S. and Duyan, Yalçın A. and Faivre, Nathan and Fallow, Kaitlyn and Filevich, Elisa and Gajdos, Thibault and Gallagher, Regan M. and de Gardelle, Vincent and Gherman, Sabina and Haddara, Nadia and Hainguerlot, Marine and Hsu, Tzu-Yu and Hu, Xiao and Iturrate, Iñaki and Jaquiery, Matt and Kantner, Justin and Koculak, Marcin and Konishi, Mahiko and Koß, Christina and Kvam, Peter D. and Kwok, Sze Chai and Lebreton, Maël and Lempert, Karolina M. and Ming Lo, Chien and Luo, Liang and Maniscalco, Brian and Martin, Antonio and Massoni, Sébastien and Matthews, Julian and Mazancieux, Audrey and Merfeld, Daniel M. and O’Hora, Denis and Palser, Eleanor R. and Paulewicz, Borysław and Pereira, Michael and Peters, Caroline and Philiastides, Marios G. and Pfuhl, Gerit and Prieto, Fernanda and Rausch, Manuel and Recht, Samuel and Reyes, Gabriel and Rouault, Marion and Sackur, Jérôme and Sadeghi, Saeedeh and Samaha, Jason and Seow, Tricia X. F. and Shekhar, Medha and Sherman, Maxine T. and Siedlecka, Marta and Skóra, Zuzanna and Song, Chen and Soto, David and Sun, Sai and van Boxtel, Jeroen J. A. and Wang, Shuo and Weidemann, Christoph T. and Weindel, Gabriel and Wierzchoń, Michał and Xu, Xinming and Ye, Qun and Yeon, Jiwon and Zou, Futing and Zylberberg, Ariel},
year = {2020},
pages = {317--325},
}
@book{Green1966,
address = {New York},
title = {Signal detection theory and psychophysics},
publisher = {Wiley},
author = {Green, D. M. and Swets, J. A.},
year = {1966},
}
@article{Pleskac2010,
title = {Two-{Stage} {Dynamic} {Signal} {Detection}: {A} {Theory} of {Choice}, {Decision} {Time}, and {Confidence}},
volume = {117},
doi = {10.1037/a0019737},
number = {3},
journal = {Psychological Review},
author = {Pleskac, Timothy J. and Busemeyer, Jerome R.},
year = {2010},
keywords = {confidence, diffusion model, optimal solution, subjective probability, time pressure},
pages = {864--901},
}
@article{pereira_evidence_2021,
title = {Evidence accumulation relates to perceptual consciousness and monitoring},
volume = {12},
issn = {2041-1723},
url = {http://www.nature.com/articles/s41467-021-23540-y},
doi = {10.1038/s41467-021-23540-y},
abstract = {A fundamental scientific question concerns the neural basis of perceptual consciousness and perceptual monitoring resulting from the processing of sensory events. Although recent studies identified neurons reflecting stimulus visibility, their functional role remains unknown. Here, we show that perceptual consciousness and monitoring involve evidence accumulation. We recorded single-neuron activity in a participant with a microelectrode in the posterior parietal cortex, while they detected vibrotactile stimuli around detection threshold and provided confidence estimates. We find that detected stimuli elicited neuronal responses resembling evidence accumulation during decision-making, irrespective of motor confounds or task demands. We generalize these findings in healthy volunteers using electroencephalography. Behavioral and neural responses are reproduced with a computational model considering a stimulus as detected if accumulated evidence reaches a bound, and confidence as the distance between maximal evidence and that bound. We conclude that gradual changes in neuronal dynamics during evidence accumulation relates to perceptual consciousness and perceptual monitoring in humans.},
language = {en},
number = {1},
urldate = {2022-09-21},
journal = {Nature Communications},
author = {Pereira, Michael and Megevand, Pierre and Tan, Mi Xue and Chang, Wenwen and Wang, Shuo and Rezai, Ali and Seeck, Margitta and Corniola, Marco and Momjian, Shahan and Bernasconi, Fosco and Blanke, Olaf and Faivre, Nathan},
month = dec,
year = {2021},
pages = {3261},
}
@article{guggenmos_reverse_2022,
title = {Reverse engineering of metacognition},
volume = {11},
doi = {10.7554/eLife.75420},
abstract = {The human ability to introspect on thoughts, perceptions or actions − metacognitive ability − has become a focal topic of both cognitive basic and clinical research. At the same time it has become increasingly clear that currently available quantitative tools are limited in their ability to make unconfounded inferences about metacognition. As a step forward, the present work introduces a comprehensive modeling framework of metacognition that allows for inferences about metacognitive noise and metacognitive biases during the readout of decision values or at the confidence reporting stage. The model assumes that confidence results from a continuous but noisy and potentially biased transformation of decision values, described by a confidence link function. A canonical set of metacognitive noise distributions is introduced which differ, amongst others, in their predictions about metacognitive sign flips of decision values. Successful recovery of model parameters is demonstrated, and the model is validated on an empirical data set. In particular, it is shown that metacognitive noise and bias parameters correlate with conventional behavioral measures. Crucially, in contrast to these conventional measures, metacognitive noise parameters inferred from the model are shown to be independent of performance. This work is accompanied by a toolbox (ReMeta) that allows researchers to estimate key parameters of metacognition in confidence datasets.},
language = {en},
journal = {eLife},
author = {Guggenmos, Matthias},
year = {2022},
pages = {1--29},
}
@article{Fleming2017a,
title = {{HMeta}-d: hierarchical {Bayesian} estimation of metacognitive efficiency from confidence ratings},
volume = {1},
doi = {10.1093/nc/nix007},
journal = {Neuroscience of Consciousness},
author = {Fleming, Stephen M.},
year = {2017},
keywords = {metacognition, confidence, signal detection theory, bayes},
pages = {1--14},
}
@article{dayan_metacognitive_2023,
title = {Metacognitive {Information} {Theory}},
volume = {7},
issn = {2470-2986},
url = {https://direct.mit.edu/opmi/article/doi/10.1162/opmi_a_00091/116663/Metacognitive-Information-Theory},
doi = {10.1162/opmi_a_00091},
abstract = {The capacity that subjects have to rate confidence in their choices is a form of metacognition, and can be assessed according to bias, sensitivity and efficiency. Rich networks of domain-specific and domain-general regions of the brain are involved in the rating, and are associated with its quality and its use for regulating the processes of thinking and acting. Sensitivity and efficiency are often measured by quantities called meta-d′ and the M-ratio that are based on reverse engineering the potential accuracy of the original, primary, choice that is implied by the quality of the confidence judgements. Here, we advocate a straightforward measure of sensitivity, called meta-I, which assesses the mutual information between the accuracy of the subject’s choices and the confidence reports, and two normalized versions of this measure that quantify efficiency in different regimes. Unlike most other measures, meta-I-based quantities increase with the number of correctly assessed bins with which confidence is reported. We illustrate meta-I on data from a perceptual decision-making task, and via a simple form of simulated second-order metacognitive observer.},
language = {en},
urldate = {2023-09-04},
journal = {Open Mind},
author = {Dayan, Peter},
month = jul,
year = {2023},
pages = {392--411},
}
@article{shekhar_how_2024,
title = {How {Do} {Humans} {Give} {Confidence}? {A} {Comprehensive} {Comparison} of {Process} {Models} of {Perceptual} {Metacognition}},
volume = {153},
doi = {10.1037/xge0001524},
abstract = {Several process models have attempted to describe the computations that underlie metacognition in humans. However, due to lack of systematic, widespread comparisons between these models, there is no consensus on what mechanisms best characterize the process of confidence generation. In this study, we tested 14 popular models of metacognition on three large data sets from basic perceptual tasks, using multiple quantitative as well as qualitative metrics. Our results highlight two mechanisms as the most plausible, generalizable features of confidence—the selective corruption of confidence by signal-dependent metacognitive noise and a heuristic strategy that uses stimulus visibility to estimate confidence. Analyzing the qualitative patterns of confidence generated by the models provides additional insights into each model’s success or failure. Our results also help to establish a comprehensive framework for model comparisons that can guide future efforts.},
language = {en},
number = {3},
journal = {Journal of Experimental Psychology: General},
author = {Shekhar, Medha and Rahnev, Dobromir},
year = {2024},
pages = {656--688},
}
@article{hellmann_confidence_2024,
title = {Confidence {Is} {Influenced} by {Evidence} {Accumulation} {Time} in {Dynamical} {Decision} {Models}},
issn = {2522-0861, 2522-087X},
url = {https://link.springer.com/10.1007/s42113-024-00205-9},
doi = {10.1007/s42113-024-00205-9},
abstract = {Confidence judgments are closely correlated with response times across a wide range of decision tasks. Sequential sampling models offer two competing explanations for the relationship between confidence and response time: According to some models, decision time directly influences confidence. Other models explain the correlation by linking subjective confidence computation to the decision process dynamics. In previous model comparisons, drift diffusion-based confidence models that do not explicitly consider decision time in the computation of confidence provided superior model fits compared to race models that directly included decision time in the internal computation of confidence. In the present study, we present support for the assumption that confidence explicitly takes decision time and post-decisional accumulation time into account. We propose the dynamical visibility, time, and evidence (dynaViTE) model, an extension of the dynamical weighted evidence and visibility (dynWEV) model. DynaViTE assumes that confidence is not solely based on the final amount of accumulated evidence but explicitly includes time in the computation of confidence. Model comparisons using four previously published data sets with different perceptual decision tasks showed a good model fit of dynaViTE, indicating that the relationship between confidence and response time is not only due to the close link in the accumulation process but also to an explicit inclusion of time in the computation of confidence.},
language = {en},
urldate = {2024-07-24},
journal = {Computational Brain \& Behavior},
author = {Hellmann, Sebastian and Zehetleitner, Michael and Rausch, Manuel},
month = jul,
year = {2024},
}
@article{Maniscalco2012,
title = {A signal detection theoretic method for estimating metacognitive sensitivity from confidence ratings},
volume = {21},
doi = {10.1016/j.concog.2011.09.021},
number = {1},
journal = {Consciousness and Cognition},
author = {Maniscalco, Brian and Lau, Hakwan},
year = {2012},
pages = {422--430},
}
@article{egan_operating_1959,
title = {Operating {Characteristics} {Determined} by {Binary} {Decisions} and by {Ratings}},
volume = {31},
doi = {10.1121/1.1907783},
language = {en},
number = {6},
journal = {Journal of the Acoustical Society of America},
author = {Egan, James P. and Schulman, Arthur I. and Greenberg, Gordon Z.},
year = {1959},
pages = {768--773},
}
@article{rahnev_consensus_2022,
title = {Consensus {Goals} in the {Field} of {Visual} {Metacognition}},
volume = {17},
doi = {10.1177/17456916221075615},
abstract = {Despite the tangible progress in psychological and cognitive sciences over the last several years, these disciplines still trail other more mature sciences in identifying the most important questions that need to be solved. Reaching such consensus could lead to greater synergy across different laboratories, faster progress, and increased focus on solving important problems rather than pursuing isolated, niche efforts. Here, 26 researchers from the field of visual metacognition reached consensus on four long-term and two medium-term common goals. We describe the process that we followed, the goals themselves, and our plans for accomplishing these goals. If this effort proves successful within the next few years, such consensus building around common goals could be adopted more widely in psychological science.},
language = {en},
number = {6},
journal = {Perspectives on Psychological Science},
author = {Rahnev, Dobromir and Balsdon, Tarryn and Charles, Lucie and de Gardelle, Vincent and Denison, Rachel and Desender, Kobe and Faivre, Nathan and Filevich, Elisa and Fleming, Stephen M. and Jehee, Janneke and Lau, Hakwan and Lee, Alan L. F. and Locke, Shannon M. and Mamassian, Pascal and Odegaard, Brian and Peters, Megan A K and Reyes, Gabriel and Rouault, Marion and Sackur, Jérôme and Samaha, Jason and Sergent, Claire and Sherman, Maxine T. and Siedlecka, Marta and Soto, David and Vlassova, Alexandra and Zylberberg, Ariel},
year = {2022},
pages = {1746--1765},
}
@incollection{Maniscalco2014,
address = {Berlin Heidelberg},
title = {Signal {Detection} {Theory} {Analysis} of {Type} 1 and {Type} 2 {Data}: {Meta}-d, {Response}-{Specific} {Meta}-d, and the {Unequal} {Variance} {SDT} {Model}},
booktitle = {The {Cognitive} {Neuroscience} of {Metacognition}},
publisher = {Springer},
author = {Maniscalco, Brian and Lau, Hakwan C.},
editor = {Fleming, Stephen M. and Frith, C. D.},
year = {2014},
doi = {10.1007/978-3-642-45190-4_3},
pages = {25--66},
}
@article{rausch_measures_2023,
title = {Measures of metacognitive efficiency across cognitive models of decision confidence},
issn = {1939-1463, 1082-989X},
url = {https://doi.apa.org/doi/10.1037/met0000634},
doi = {10.1037/met0000634},
abstract = {Meta-d′/d′ has become the quasi-gold standard to quantify metacognitive efficiency because meta-d′/d′ was developed to control for discrimination performance, discrimination criteria, and confidence criteria even without the assumption of a specific generative model underlying confidence judgments. Using simulations, we demonstrate that meta-d′/d′ is not free from assumptions about confidence models: Only when we simulated data using a generative model of confidence according to which the evidence underlying confidence judgments is sampled independently from the evidence utilized in the choice process from a truncated Gaussian distribution, meta-d′/d′ was unaffected by discrimination performance, discrimination task criteria, and confidence criteria. According to five alternative generative models of confidence, there exist at least some combination of parameters where meta-d′/d′ is affected by discrimination performance, discrimination criteria, and confidence criteria. A simulation using empirically fitted parameter sets showed that the magnitude of the correlation between meta-d′/d′ and discrimination performance, discrimination task criteria, and confidence criteria depends heavily on the generative model and the specific parameter set and varies between negligibly small and very large. These simulations imply that a difference in meta-d′/d′ between conditions does not necessarily reflect a difference in metacognitive efficiency but might as well be caused by a difference in discrimination performance, discrimination task criterion, or confidence criteria.},
language = {en},
urldate = {2024-03-25},
journal = {Psychological Methods},
author = {Rausch, Manuel and Hellmann, Sebastian and Zehetleitner, Michael},
month = dec,
year = {2023},
}
@article{hellmann_simultaneous_2023,
title = {Simultaneous modeling of choice, confidence, and response time in visual perception},
volume = {130},
doi = {10.1037/rev0000411},
abstract = {How can choice, confidence, and response times be modeled simultaneously? Here, we propose the new dynamical weighted evidence and visibility model (dynWEV), an extension of the drift diffusion model of decision making, to account for choices, reaction times, and confidence simultaneously. The decision process in a binary perceptual task is described as a Wiener process accumulating sensory evidence about the choice options bounded by two constant thresholds. To account for confidence judgments, we assume a period of postdecisional accumulation of sensory evidence and parallel accumulation of information about the reliability of the present stimulus. We examined model fits in two experiments, a motion discrimination task with random dot kinematograms and a post-masked orientation discrimination task. A comparison between the dynamical weighted evidence and visibility model, two-stage dynamical signal detection theory, and several versions of race models of decision making showed that only dynWEV produced acceptable fits of choices, confidence, and reaction time. This finding suggests that confidence judgments depend not only on choice evidence but also on a parallel estimate of stimulus discriminability and postdecisional accumulation of evidence.},
language = {en},
number = {6},
journal = {Psychological Review},
author = {Hellmann, Sebastian and Zehetleitner, Michael and Rausch, Manuel},
year = {2023},
pages = {1521--1543},
}
@book{hautus_detection_2021,
address = {New York},
edition = {3},
title = {Detection theory: {A} user's guide},
publisher = {Routledge},
author = {Hautus, Michael J. and Macmillan, Neil A. and Creelman, C. Douglas},
year = {2021},
}
@article{daunizeau_vba_2014,
title = {{VBA}: {A} {Probabilistic} {Treatment} of {Nonlinear} {Models} for {Neurobiological} and {Behavioural} {Data}},
volume = {10},
issn = {1553-7358},
shorttitle = {{VBA}},
url = {https://dx.plos.org/10.1371/journal.pcbi.1003441},
doi = {10.1371/journal.pcbi.1003441},
abstract = {This work is in line with an on-going effort tending toward a computational (quantitative and refutable) understanding of human neuro-cognitive processes. Many sophisticated models for behavioural and neurobiological data have flourished during the past decade. Most of these models are partly unspecified (i.e. they have unknown parameters) and nonlinear. This makes them difficult to peer with a formal statistical data analysis framework. In turn, this compromises the reproducibility of model-based empirical studies. This work exposes a software toolbox that provides generic, efficient and robust probabilistic solutions to the three problems of model-based analysis of empirical data: (i) data simulation, (ii) parameter estimation/model selection, and (iii) experimental design optimization.},
language = {en},
number = {1},
urldate = {2024-12-02},
journal = {PLoS Computational Biology},
author = {Daunizeau, Jean and Adam, Vincent and Rigoux, Lionel},
month = jan,
year = {2014},
pages = {e1003441},
}