Commit: Adding plotting capability
Rausch authored and Rausch committed Nov 30, 2024
1 parent b5fe057 commit 0513393
Showing 7 changed files with 116 additions and 68 deletions.
4 changes: 4 additions & 0 deletions NAMESPACE
@@ -4,10 +4,14 @@ export(estimateMetaI)
export(fitConf)
export(fitConfModels)
export(fitMetaDprime)
export(plotConfModelFit)
export(simConf)
import(ggplot2)
import(parallel)
importFrom(Rmisc,summarySEwithin)
importFrom(plyr,ddply)
importFrom(plyr,mdply)
importFrom(plyr,summarise)
importFrom(stats,dnorm)
importFrom(stats,integrate)
importFrom(stats,optim)
33 changes: 24 additions & 9 deletions R/plotConfModelFit.R
@@ -32,7 +32,7 @@
#' one column for each estimated model parameter (parameters
#' not present in a specific model are filled with NAs)
#'
#' #' @examples
#' @examples
#' # 1. Select two subjects from the masked orientation discrimination experiment
#' data <- subset(MaskOri, participant %in% c(1:2))
#' head(data)
@@ -58,7 +58,7 @@
#' myPlottedFit
#' }
#' @import ggplot2
#' @importFrom plyr ddply transform summarise
#' @importFrom plyr ddply summarise
#' @importFrom Rmisc summarySEwithin
#'
#' @export
@@ -90,17 +90,29 @@ plotConfModelFit <- function(data, fitted_pars, model = NULL){
}
if(!all(data$correct %in% c(0,1))) stop("correct should be 1 or 0")

myColor <- switch(model, 'GN' = 1, 'IG' = 2, 'ITGc' = 3, 'ITGcm' = 4, 'logN' = 5,
'logWEV' = 6,'PDA' = 7, 'WEV' = 8, 'SDT' = 9) # models are color coded
PlotName <-
switch(model,
'GN' = "Gaussian noise model",
'IG' = "Independent Gaussian model",
'ITGc' = "Independent truncated Gaussian model: HMetad-Version",
'ITGcm' = "Independent truncated Gaussian model: Meta-d'-Version",
'logN' = "Logistic noise model",
'logWEV' = "Logistic weighted evidence and visibility model",
'PDA' = "Post-decisional accumulation model",
'WEV' = "Weighted evidence and visibility model",
'SDT' = "Signal detection rating model") # models are color coded

# 1. First aggregate on the level of subjects

AggDist <-
plyr::ddply(data,
~ diffCond * rating * stimulus * correct * participant, #,
plyr::summarise, p = length(rating), .drop=FALSE)
~ diffCond * rating *
stimulus * correct * participant,
plyr::summarise,
p = length(rating), .drop=FALSE)

AggDist <- plyr::ddply(AggDist, ~ diffCond * stimulus,
AggDist <- plyr::ddply(AggDist, ~
diffCond * stimulus * participant,
transform, N = sum(p))
AggDist$p <- AggDist$p / AggDist$N
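# p now holds relative frequencies: the count of each response-confidence
# combination divided by the total number N of trials in the same
# participant x difficulty x stimulus cell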

@@ -119,8 +131,10 @@ plotConfModelFit <- function(data, fitted_pars, model = NULL){
na.rm = TRUE, .drop = TRUE)
AggDist$rating <- as.numeric(AggDist$rating)
levels(AggDist$stimulus) <- c("S = -1", "S = 1")
AggDist$diffCond <- factor(as.numeric(AggDist$diffCond)) # diffCond should code the order of difficulty levels
levels(AggDist$diffCond) <- paste("K =", as.numeric(levels(AggDist$diffCond)))
AggDist$diffCond <-
factor(as.numeric(AggDist$diffCond)) # diffCond should code the order of difficulty levels
levels(AggDist$diffCond) <-
paste("K =", as.numeric(levels(AggDist$diffCond)))

# 4) create the prediction from model fit

Expand Down Expand Up @@ -160,6 +174,7 @@ plotConfModelFit <- function(data, fitted_pars, model = NULL){
xlab("Confidence rating") +
ylab("probability") +
ylim(c(0,1))+
ggtitle(PlotName) +
theme(strip.text.y = element_text(angle=0)) +
theme_minimal()

64 changes: 11 additions & 53 deletions README.rmd
@@ -157,9 +157,7 @@ features in the confidence judgment. The parameters $w$ and $\sigma$ are free pa
The conceptual idea of meta-d′ is to quantify metacognition in terms of sensitivity
in a hypothetical signal detection rating model describing the primary task,
under the assumption that participants had perfect access to the sensory evidence
and were perfectly consistent in placing their confidence criteria (Maniscalco & Lau, 2012, 2014). Using a signal detection model describing the primary task to quantify metacognition
allows a direct comparison between metacognitive accuracy and discrimination performance
because both are measured on the same scale. Meta-d′ can be compared against the estimate of the distance between the two stimulus distributions estimated from discrimination responses, which is referred to as d′: If meta-d′ equals d′, it means that metacognitive accuracy is exactly as good as expected from discrimination performance. If meta-d′ is lower than d′, it means that metacognitive accuracy is not optimal. It can be shown that the implicit model of confidence underlying the meta-d'/d' method is identical to different versions of the independent truncated Gaussian model (Rausch et al., 2023), depending on whether the original model specification by Maniscalco and Lau (2012) or alternatively the specification by Fleming (2017) is used. We strongly recommend to test whether the independent truncated Gaussian models are adequate descriptions of the data before quantifying metacognitive efficiency with meta-d′/d′.
and were perfectly consistent in placing their confidence criteria (Maniscalco & Lau, 2012, 2014). Using a signal detection model describing the primary task to quantify metacognition allows a direct comparison between metacognitive accuracy and discrimination performance because both are measured on the same scale. Meta-d′ can be compared against the estimate of the distance between the two stimulus distributions estimated from discrimination responses, which is referred to as d′: If meta-d′ equals d′, metacognitive accuracy is exactly as good as expected from discrimination performance. If meta-d′ is lower than d′, metacognitive accuracy is suboptimal. It can be shown that the implicit model of confidence underlying the meta-d′/d′ method is identical to different versions of the independent truncated Gaussian model (Rausch et al., 2023), depending on whether the original model specification by Maniscalco and Lau (2012) or the alternative specification by Fleming (2017) is used. We strongly recommend testing whether the independent truncated Gaussian models are adequate descriptions of the data before quantifying metacognitive efficiency with meta-d′/d′.
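The following minimal sketch (not part of this commit) illustrates how this comparison could look with the exported function `fitMetaDprime`; the `model = "ML"` argument and the output columns `dprime` and `metaD` are assumptions based on the two model specifications discussed above, while the `Ratio` column is the quantity used in `TestScript.R` below.

```r
library(statConfR)

# Hedged sketch: fit meta-d'/d' for one subject of the MaskOri example data.
# model = "ML" is assumed to select the Maniscalco & Lau specification
# ("F" for Fleming's); the column names below are assumed as well.
MetaDs <- fitMetaDprime(subset(MaskOri, participant == 1), model = "ML")

# Ratio = meta-d'/d': a value of 1 means metacognitive accuracy is exactly
# as good as expected from discrimination performance; values below 1
# indicate suboptimal metacognitive accuracy.
MetaDs[, c("dprime", "metaD", "Ratio")]
```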

### Information-theoretic measures of metacognition

@@ -182,18 +180,15 @@ $$meta-I_{2}^{r} = meta-I / H(Y = \hat{Y})$$
Notably, Dayan (2023) pointed out that a liberal or conservative use of the confidence levels will affect the mutual information and thus all information-theoretic measures of metacognition.

In addition to Dayan's measures, Meyen et al. (submitted) suggested an
additional measure that normalizes the Meta-I by the range of possible values
it can take. This required deriving lower and upper bounds of the transmitted
information given a participant's accuracy.
additional measure that normalizes the Meta-I by the range of possible values it can take. This required deriving lower and upper bounds of the transmitted information given a participant's accuracy.

$$RMI = \frac{meta-I}{\max_{\text{accuracy}}\{meta-I\}}$$

As these measures are prone to estimation bias, the package offers a simple
bias reduction mechanism in which the observed frequencies of
stimulus-response combinations are taken as the underlying probability
distribution. From this, Monte-Carlo simulations are conducted to estimate
and subtract the bias from these measures. Note that there is probably no way
to remove this bias completely.
and subtract the bias from these measures. Note that there is probably no way to remove this bias completely.
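
A hedged usage sketch for the exported function `estimateMetaI` follows; the `bias_reduction` argument is an assumption based on the Monte-Carlo bias-reduction mechanism just described.

```r
library(statConfR)

# Hypothetical sketch: estimateMetaI is exported in the NAMESPACE above.
# bias_reduction = TRUE is assumed to apply the Monte-Carlo bias
# correction described in the text.
metaIMeasures <- estimateMetaI(data = MaskOri, bias_reduction = TRUE)
head(metaIMeasures)
```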

## Installation

@@ -262,60 +257,23 @@ It can be seen that the independent truncated Gaussian model is consistently out

### Visualization

After obtaining model fits, it is strongly recommended to visualize the prediction implied by the best fitting sets of parameters and to compare the prediction with the actual data (Palminteri et al., 2017). The best way to visualize the data is highly specific to the data set and research question, which is why `statConfR` does not come with its own visualization tools. This being said, here is an example for how a visualization of model fit could look like:
After obtaining the model fit, it is strongly recommended to visualise the predictions implied by the best-fitting set of parameters and to compare the predictions with the actual data (Palminteri et al., 2017). The `statConfR` package provides the function `plotConfModelFit`, which plots the empirically observed distribution of responses and confidence ratings (on the x-axis) as a function of discriminability (in the rows) and stimulus (in the columns) as bars. Superimposed on the empirical data, the plot also shows the prediction of a selected model as dots. The parameters of the model are passed to `plotConfModelFit` via the argument `fitted_pars`.

<!-- Code chunk: show and execute the code, but suppress R console output -->

```{r, echo=TRUE, results="hide", message=FALSE, warning=FALSE}
library(tidyverse)
AggregatedData <- MaskOri %>%
mutate(ratings = as.numeric(rating), diffCond = as.numeric(diffCond)) %>%
group_by(participant, diffCond, correct ) %>%
dplyr::summarise(ratings=mean(ratings,na.rm=T)) %>%
Rmisc::summarySEwithin(measurevar = "ratings",
withinvars = c("diffCond", "correct"),
idvar = "participant",
na.rm = TRUE, .drop = TRUE) %>%
mutate(diffCond = as.numeric(diffCond))
AggregatedPrediction <-
rbind(fitted_pars %>%
filter(model=="ITGcm") %>%
group_by(participant) %>%
simConf(model="ITGcm") %>%
mutate(model="ITGcm"),
fitted_pars %>%
filter(model=="WEV") %>%
group_by(participant) %>%
simConf(model="WEV") %>%
mutate(model="WEV")) %>%
mutate(ratings = as.numeric(rating) ) %>%
group_by(participant, diffCond, correct, model ) %>%
dplyr::summarise(ratings=mean(ratings,na.rm=T)) %>%
Rmisc::summarySEwithin(measurevar = "ratings",
withinvars = c("diffCond", "correct", "model"),
idvar = "participant",
na.rm = TRUE, .drop = TRUE) %>%
mutate(diffCond = as.numeric(diffCond))
PlotMeans <-
ggplot(AggregatedPrediction,
aes(x = diffCond, y = ratings, color = correct)) + facet_grid(~ model) +
ylim(c(1,5)) +
geom_line() + ylab("confidence rating") + xlab("difficulty condition") +
scale_color_manual(values = c("darkorange", "navy"),
labels = c("Error", "Correct response"), name = "model prediction") +
geom_errorbar(data = AggregatedData,
aes(ymin = ratings-se, ymax = ratings+se), color="black") +
geom_point(data = AggregatedData, aes(shape=correct), color="black") +
scale_shape_manual(values = c(15, 16),
labels = c("Error", "Correct response"), name = "observed data") +
theme_bw()
PlotFitWEV <- plotConfModelFit(MaskOri, fitted_pars, model="WEV")
PlotFitITGcm <- plotConfModelFit(MaskOri, fitted_pars, model="ITGcm")
```
<!-- Show both the code and the output Figure! -->

```{r, echo=TRUE, fig.cap = "Predicted vs. observed confidence as a function of discriminability and correctness"}
PlotMeans
```{r, echo=TRUE, fig.cap = "Observed distribution of accuracy and responses as a function of discriminability and stimulus vs. prediction by the weighted evidence and visibility model"}
PlotFitWEV
```

```{r, echo=TRUE, fig.cap = "Observed distribution of accuracy and responses as a function of discriminability and stimulus vs. prediction by the Independent truncated Gaussian model: HMetad-Version (ITGc)"}
PlotFitITGcm
```

### Measuring metacognition

Binary file modified TestResults.RData
Binary file not shown.
5 changes: 4 additions & 1 deletion TestScript.R
@@ -371,7 +371,8 @@ merge(MetaDs %>% select(participant, Ratio),

# 5) Plotting fits

PlotFitSDT <- plotConfModelFit(MaskOri, fitted_pars, model="SDT")
PlotFitSDT <- plotConfModelFit(data=MaskOri, fitted_pars=fitted_pars, model="SDT")

PlotFitGN <- plotConfModelFit(MaskOri, fitted_pars, model="GN")
PlotFitLogN <- plotConfModelFit(MaskOri, fitted_pars, model="logN")
PlotFitWEV <- plotConfModelFit(MaskOri, fitted_pars, model="WEV")
@@ -381,6 +382,8 @@ PlotFitITGc <- plotConfModelFit(MaskOri, fitted_pars, model="ITGc")
PlotFitIG <- plotConfModelFit(MaskOri, fitted_pars, model="IG")
PlotFitPDA <- plotConfModelFit(MaskOri, fitted_pars, model="PDA")

test <- group_BMS_fits(fitted_pars)
group_BMS(fitted_pars)

save(fitted_pars, PlotFitsBICWeights,
recov_pars_SDT, Plot_recov_SDT,
9 changes: 4 additions & 5 deletions man/fitConfModels.Rd

Some generated files are not rendered by default.

69 changes: 69 additions & 0 deletions man/plotConfModelFit.Rd

Some generated files are not rendered by default.
