Note that here we simply load in the workspace to create the figure and to run the extra effect-size statistics reported in the JoVE manuscript. For the actual analyses, in which all models are created, see ‘Analysis.Rmd’ on https://osf.io/bbf3c/; this script starts where that analysis ended, loading everything produced there in order to make new figures and run the effect-size analyses.
# See also the accompanying file 'poweranalysisJoVE.R' for the power analysis.
# Cohen's-d-style effect size for mixed models, following Westfall, Judd &
# Kenny (2014) as cited in Brysbaert & Stevens (see comment below): the fixed-
# effect mean difference is standardized by the square root of the summed
# variance components.
#
# meandiff    : fixed-effect mean difference between conditions
# varres      : residual variance
# varintsub   : by-subject random-intercept variance (default 0)
# varslopesub : by-subject random-slope variance (default 0)
# varintit    : by-item random-intercept variance (default 0)
# varslopeit  : by-item random-slope variance (default 0)
#
# Returns d. (The previous version print()ed d instead of returning it
# visibly; returning the last expression keeps the top-level display
# identical while making the value reusable programmatically.)
effectSize <- function(meandiff, varres, varintsub = 0, varslopesub = 0,
                       varintit = 0, varslopeit = 0) {
  meandiff / sqrt(varres + varintsub + varslopesub + varintit + varslopeit)
}
# function for effect sizes based on Westfall, Judd & Kenny, 2014 as cited in Brysbaert & Steyvers, 2019
# figure 6
# p values:
# using Wald's z test:
# (but this is anti-conservative, so only done as a first step)
summary(mFCACA)  # full FCACA accuracy model; Wald z-tests (anti-conservative, first pass only)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: correctanswer ~ vocabCovariateC + learningCondition * snItemtype +
## (1 + snItemtype | subjectnr)
## Data: dataFCACA
##
## AIC BIC logLik deviance df.resid
## 1300.1 1346.6 -642.0 1284.1 2488
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -5.5213 0.1309 0.1809 0.2362 1.1246
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subjectnr (Intercept) 0.9374 0.9682
## snItemtype 2.1394 1.4627 -0.22
## Number of obs: 2496, groups: subjectnr, 104
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 3.1405 0.1811 17.341 < 2e-16 ***
## vocabCovariateC 4.4885 1.1868 3.782 0.000155 ***
## learningCondition 0.5702 0.2757 2.069 0.038591 *
## snItemtype 0.9997 0.3315 3.016 0.002564 **
## learningCondition:snItemtype -0.4722 0.4868 -0.970 0.332012
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC lrnngC snItmt
## vocabCovrtC 0.182
## lernngCndtn 0.086 -0.056
## snItemtype 0.257 -0.027 -0.016
## lrnngCndt:I -0.032 -0.043 0.074 0.112
summary(mEMACA2)  # full EMACA accuracy model; Wald z-tests (anti-conservative, first pass only)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula:
## correctanswer ~ vocabCovariateC + learningCondition * adjacencyItemtype *
## snItemtype + (0 + adjacencyItemtype:snItemtype | subjectnr)
## Data: dataEMACA
##
## AIC BIC logLik deviance df.resid
## 5566.8 5631.9 -2773.4 5546.8 4982
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.4789 -0.9917 0.4388 0.6443 2.1216
##
## Random effects:
## Groups Name Variance Std.Dev.
## subjectnr adjacencyItemtype:snItemtype 1.35e-16 1.162e-08
## Number of obs: 4992, groups: subjectnr, 104
##
## Fixed effects:
## Estimate Std. Error z value
## (Intercept) 0.93542 0.03442 27.175
## vocabCovariateC 3.65461 0.30022 12.173
## learningCondition 0.75857 0.06850 11.074
## adjacencyItemtype -1.07910 0.06876 -15.694
## snItemtype 0.31726 0.06852 4.630
## learningCondition:adjacencyItemtype 0.05507 0.13701 0.402
## learningCondition:snItemtype -0.07397 0.13700 -0.540
## adjacencyItemtype:snItemtype -0.73969 0.13706 -5.397
## learningCondition:adjacencyItemtype:snItemtype 0.20362 0.27405 0.743
## Pr(>|z|)
## (Intercept) < 2e-16 ***
## vocabCovariateC < 2e-16 ***
## learningCondition < 2e-16 ***
## adjacencyItemtype < 2e-16 ***
## snItemtype 3.65e-06 ***
## learningCondition:adjacencyItemtype 0.688
## learningCondition:snItemtype 0.589
## adjacencyItemtype:snItemtype 6.78e-08 ***
## learningCondition:adjacencyItemtype:snItemtype 0.457
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC lrnngC adjcnI snItmt lrnngCndtn:dI lrnngCndtn:sI
## vocabCovrtC 0.100
## lernngCndtn 0.162 -0.011
## adjcncyItmt -0.240 -0.087 -0.105
## snItemtype 0.119 0.026 0.034 -0.122
## lrnngCndtn:dI -0.104 0.017 -0.234 0.162 -0.036
## lrnngCndtn:sI 0.032 -0.012 0.117 -0.035 0.164 -0.120
## adjcncyIt:I -0.123 -0.030 -0.036 0.119 -0.234 0.033 -0.106
## lrnngCn:I:I -0.035 0.013 -0.120 0.033 -0.106 0.117 -0.234
## adjI:I
## vocabCovrtC
## lernngCndtn
## adjcncyItmt
## snItemtype
## lrnngCndtn:dI
## lrnngCndtn:sI
## adjcncyIt:I
## lrnngCn:I:I 0.164
# using likelihood ratio:
# which is better according to: http://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#testing-hypotheses
# these are reported in JoVE manuscript
# Null model for the likelihood-ratio test of the learningCondition main
# effect: identical to mFCACA except that the learningCondition main effect
# is dropped (the learningCondition:snItemtype interaction is retained).
mFCACA0 <- glmer(
  correctanswer ~ vocabCovariateC + learningCondition:snItemtype +
    snItemtype + (1 + snItemtype | subjectnr),
  data = dataFCACA, family = "binomial"
)
summary(mFCACA0)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: correctanswer ~ vocabCovariateC + learningCondition:snItemtype +
## snItemtype + (1 + snItemtype | subjectnr)
## Data: dataFCACA
##
## AIC BIC logLik deviance df.resid
## 1302.3 1343.0 -644.1 1288.3 2489
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -6.0171 0.1285 0.1786 0.2497 1.1147
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subjectnr (Intercept) 1.027 1.014
## snItemtype 2.196 1.482 -0.19
## Number of obs: 2496, groups: subjectnr, 104
##
## Fixed effects:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) 3.1558 0.1858 16.983 < 2e-16 ***
## vocabCovariateC 4.7155 1.2247 3.850 0.000118 ***
## snItemtype 1.0445 0.3384 3.086 0.002026 **
## learningCondition:snItemtype -0.5652 0.5051 -1.119 0.263152
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC snItmt
## vocabCovrtC 0.181
## snItemtype 0.280 -0.040
## lrnngCndt:I -0.121 0.007 -0.153
anova(mFCACA, mFCACA0)  # likelihood-ratio test for the learningCondition main effect
## Data: dataFCACA
## Models:
## mFCACA0: correctanswer ~ vocabCovariateC + learningCondition:snItemtype +
## mFCACA0: snItemtype + (1 + snItemtype | subjectnr)
## mFCACA: correctanswer ~ vocabCovariateC + learningCondition * snItemtype +
## mFCACA: (1 + snItemtype | subjectnr)
## Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
## mFCACA0 7 1302.3 1343.0 -644.13 1288.3
## mFCACA 8 1300.1 1346.6 -642.03 1284.1 4.1995 1 0.04044 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# Null model for the likelihood-ratio test of the learningCondition main
# effect in the EMACA data: all other fixed effects and interactions from
# mEMACA2 are retained; only the learningCondition main effect is dropped.
mEMACA0 <- glmer(
  correctanswer ~ vocabCovariateC + learningCondition:adjacencyItemtype +
    learningCondition:snItemtype +
    learningCondition:adjacencyItemtype:snItemtype +
    adjacencyItemtype * snItemtype +
    (0 + adjacencyItemtype:snItemtype | subjectnr),
  data = dataEMACA, family = "binomial"
)
## boundary (singular) fit: see ?isSingular
summary(mEMACA0)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula:
## correctanswer ~ vocabCovariateC + learningCondition:adjacencyItemtype +
## learningCondition:snItemtype + learningCondition:adjacencyItemtype:snItemtype +
## adjacencyItemtype * snItemtype + (0 + adjacencyItemtype:snItemtype |
## subjectnr)
## Data: dataEMACA
##
## AIC BIC logLik deviance df.resid
## 5691.2 5749.8 -2836.6 5673.2 4983
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.3040 -1.0838 0.4744 0.7101 1.8943
##
## Random effects:
## Groups Name Variance Std.Dev.
## subjectnr adjacencyItemtype:snItemtype 0 0
## Number of obs: 4992, groups: subjectnr, 104
##
## Fixed effects:
## Estimate Std. Error z value
## (Intercept) 0.91262 0.03358 27.180
## vocabCovariateC 3.78911 0.29207 12.973
## adjacencyItemtype -1.06099 0.06703 -15.828
## snItemtype 0.32600 0.06674 4.884
## learningCondition:adjacencyItemtype 0.39049 0.13025 2.998
## learningCondition:snItemtype -0.24643 0.13263 -1.858
## adjacencyItemtype:snItemtype -0.75527 0.13349 -5.658
## learningCondition:adjacencyItemtype:snItemtype 0.55808 0.26512 2.105
## Pr(>|z|)
## (Intercept) < 2e-16 ***
## vocabCovariateC < 2e-16 ***
## adjacencyItemtype < 2e-16 ***
## snItemtype 1.04e-06 ***
## learningCondition:adjacencyItemtype 0.00272 **
## learningCondition:snItemtype 0.06316 .
## adjacencyItemtype:snItemtype 1.53e-08 ***
## learningCondition:adjacencyItemtype:snItemtype 0.03529 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC adjcnI snItmt lrnngCndtn:dI lrnngCndtn:sI adjI:I
## vocabCovrtC 0.106
## adjcncyItmt -0.230 -0.095
## snItemtype 0.123 0.028 -0.125
## lrnngCndtn:dI 0.045 0.016 -0.026 0.044
## lrnngCndtn:sI -0.051 -0.009 0.050 -0.028 -0.102
## adjcncyIt:I -0.127 -0.033 0.122 -0.222 -0.044 0.044
## lrnngCn:I:I 0.051 0.011 -0.049 0.044 0.097 -0.212 -0.028
## convergence code: 0
## boundary (singular) fit: see ?isSingular
anova(mEMACA2, mEMACA0)  # likelihood-ratio test for the learningCondition main effect
## Data: dataEMACA
## Models:
## mEMACA0: correctanswer ~ vocabCovariateC + learningCondition:adjacencyItemtype +
## mEMACA0: learningCondition:snItemtype + learningCondition:adjacencyItemtype:snItemtype +
## mEMACA0: adjacencyItemtype * snItemtype + (0 + adjacencyItemtype:snItemtype |
## mEMACA0: subjectnr)
## mEMACA2: correctanswer ~ vocabCovariateC + learningCondition * adjacencyItemtype *
## mEMACA2: snItemtype + (0 + adjacencyItemtype:snItemtype | subjectnr)
## Df AIC BIC logLik deviance Chisq Chi Df Pr(>Chisq)
## mEMACA0 9 5691.2 5749.8 -2836.6 5673.2
## mEMACA2 10 5566.8 5631.9 -2773.4 5546.8 126.39 1 < 2.2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# effect sizes (with help from Cassandra L. Jacobs)):
# note that neither Brysbaert & Stevens nor Westfall, Kenny & Judd explicitly talk about glmer's, only lmer's
# thus, we do this only for the RT lmer's, not for accuracy glmer's
# Figure 7
# figure 7A
modelSummary(mFCRTV)  # RT model summary with Kenward-Roger F-tests/p-values
## lmer(formula = reactionTime ~ vocabCovariateC + learningCondition +
## (1 | subjectnr), data = dataFCRTV)
## Observations: 1621; Groups: subjectnr, 104
##
## Linear mixed model fit by REML
##
## Fixed Effects:
## Estimate SE F error df Pr(>F)
## (Intercept) 1.48507 0.03909 1443.169 102.0 < 2e-16 ***
## vocabCovariateC -1.08318 0.39502 7.512 124.2 0.00703 **
## learningCondition -0.17769 0.07814 5.170 100.3 0.02511 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## NOTE: F, error df, and p-values from Kenward-Roger approximation
##
## Random Effects:
## Groups Name Std.Dev.
## subjectnr (Intercept) 0.32990
## Residual 0.86495
## Warning in deviance.merMod(Model): deviance() is deprecated for REML fits;
## use REMLcrit for the REML criterion or deviance(.,REML=FALSE) for deviance
## calculated at the REML fit
##
## AIC: 4267.1; BIC: 4294.1; logLik: -2128.6; Deviance: 4257.1
# significant effect of learning condition
# manually read out the relevant variances
summary(mFCRTV)  # lme4 summary; variance components read off below for the effect size
## Linear mixed model fit by REML ['lmerMod']
## Formula: reactionTime ~ vocabCovariateC + learningCondition + (1 | subjectnr)
## Data: dataFCRTV
##
## REML criterion at convergence: 4257.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.1936 -0.6132 -0.2835 0.3389 6.5156
##
## Random effects:
## Groups Name Variance Std.Dev.
## subjectnr (Intercept) 0.1088 0.3299
## Residual 0.7481 0.8650
## Number of obs: 1621, groups: subjectnr, 104
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 1.48507 0.03909 37.990
## vocabCovariateC -1.08318 0.39502 -2.742
## learningCondition -0.17769 0.07814 -2.274
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC
## vocabCovrtC -0.071
## lernngCndtn -0.003 -0.063
# Variance components transcribed from the summary above: residual variance
# and by-subject intercept variance of mFCRTV.
mFCRTVresvar <- 0.7481
mFCRTVintvar <- 0.1088
# retrieve the mean difference from the model predictions
mFCRTVmeandiff <- pFCRTV$Predicted[2] - pFCRTV$Predicted[1]
# and as a sanity check, yes, that's identical to the learningCondition coefficient
effectSize(mFCRTVmeandiff, mFCRTVresvar, mFCRTVintvar)
## [1] -0.1919543
# -0.19 (negative bc prod faster)
# figure 7B
modelSummary(mFCRTA)  # RT model summary with Kenward-Roger F-tests/p-values
## lmer(formula = reactionTime ~ vocabCovariateC + learningCondition *
## snItemtype + (1 + snItemtype | subjectnr), data = dataFCRTA)
## Observations: 2229; Groups: subjectnr, 104
##
## Linear mixed model fit by REML
##
## Fixed Effects:
## Estimate SE F error df Pr(>F)
## (Intercept) 2.01461 0.07471 727.209 101.0 < 2e-16 ***
## vocabCovariateC -0.86123 0.68480 1.551 102.0 0.215795
## learningCondition -0.59653 0.14992 15.830 101.1 0.000131 ***
## snItemtype 0.08893 0.07764 1.312 101.9 0.254715
## learningCondition:snItemtype -0.21308 0.15527 1.883 101.9 0.172992
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## NOTE: F, error df, and p-values from Kenward-Roger approximation
##
## Random Effects:
## Groups Name Std.Dev. Corr
## subjectnr (Intercept) 0.74167
## snItemtype 0.71101 -0.233
## Residual 0.79310
## Warning in deviance.merMod(Model): deviance() is deprecated for REML fits;
## use REMLcrit for the REML criterion or deviance(.,REML=FALSE) for deviance
## calculated at the REML fit
##
## AIC: 5791.2; BIC: 5842.6; logLik: -2886.6; Deviance: 5773.2
# significant effect of learning condition
# manually read out the relevant variances
summary(mFCRTA)  # lme4 summary; variance components read off below for the effect size
## Linear mixed model fit by REML ['lmerMod']
## Formula: reactionTime ~ vocabCovariateC + learningCondition * snItemtype +
## (1 + snItemtype | subjectnr)
## Data: dataFCRTA
##
## REML criterion at convergence: 5773.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.0484 -0.5491 -0.1702 0.3951 8.0781
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subjectnr (Intercept) 0.5501 0.7417
## snItemtype 0.5055 0.7110 -0.23
## Residual 0.6290 0.7931
## Number of obs: 2229, groups: subjectnr, 104
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 2.01461 0.07471 26.967
## vocabCovariateC -0.86123 0.68480 -1.258
## learningCondition -0.59653 0.14992 -3.979
## snItemtype 0.08893 0.07764 1.145
## learningCondition:snItemtype -0.21308 0.15527 -1.372
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC lrnngC snItmt
## vocabCovrtC -0.002
## lernngCndtn -0.002 -0.082
## snItemtype -0.210 -0.002 0.003
## lrnngCndt:I 0.003 0.003 -0.210 -0.008
# Variance components transcribed from the summary above: residual variance
# and by-subject intercept variance of mFCRTA.
mFCRTAresvar <- 0.6290
mFCRTAintvar <- 0.5501
# retrieve the mean difference from the model predictions
mFCRTAmeandiff <- pFCRTA$Predicted[2] - pFCRTA$Predicted[1]
# and as a sanity check, yes, that's identical to the learningCondition coefficient
effectSize(mFCRTAmeandiff, mFCRTAresvar, mFCRTAintvar)
## [1] -0.5493622
# -0.54 (negative bc prod faster)
# Figure 7 C
modelSummary(mFCRTP)  # RT model summary with Kenward-Roger F-tests/p-values
## lmer(formula = reactionTime ~ vocabCovariateC + learningCondition *
## probItemtype + (1 + probItemtype | subjectnr), data = dataFCRTP)
## Observations: 2317; Groups: subjectnr, 104
##
## Linear mixed model fit by REML
##
## Fixed Effects:
## Estimate SE F error df Pr(>F)
## (Intercept) 1.20489 0.02931 1689.6918 101.2 < 2e-16
## vocabCovariateC -1.33630 0.28015 22.3837 116.1 6.34e-06
## learningCondition -0.13333 0.05875 5.1489 100.8 0.0254
## probItemtype -0.03540 0.02441 2.1024 100.3 0.1502
## learningCondition:probItemtype -0.04682 0.04881 0.9195 100.3 0.3399
##
## (Intercept) ***
## vocabCovariateC ***
## learningCondition *
## probItemtype
## learningCondition:probItemtype
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## NOTE: F, error df, and p-values from Kenward-Roger approximation
##
## Random Effects:
## Groups Name Std.Dev. Corr
## subjectnr (Intercept) 0.272568
## probItemtype 0.057354 -1.000
## Residual 0.570844
## Warning in deviance.merMod(Model): deviance() is deprecated for REML fits;
## use REMLcrit for the REML criterion or deviance(.,REML=FALSE) for deviance
## calculated at the REML fit
##
## AIC: 4197.8; BIC: 4249.5; logLik: -2089.9; Deviance: 4179.8
# learningCondition is significant
# manually read out variances
summary(mFCRTP)  # lme4 summary; variance components read off below for the effect size
## Linear mixed model fit by REML ['lmerMod']
## Formula: reactionTime ~ vocabCovariateC + learningCondition * probItemtype +
## (1 + probItemtype | subjectnr)
## Data: dataFCRTP
##
## REML criterion at convergence: 4179.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.0676 -0.5705 -0.2116 0.2881 8.7507
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subjectnr (Intercept) 0.074293 0.27257
## probItemtype 0.003289 0.05735 -1.00
## Residual 0.325863 0.57084
## Number of obs: 2317, groups: subjectnr, 104
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 1.20489 0.02931 41.107
## vocabCovariateC -1.33630 0.28015 -4.770
## learningCondition -0.13333 0.05875 -2.269
## probItemtype -0.03540 0.02441 -1.450
## learningCondition:probItemtype -0.04682 0.04881 -0.959
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC lrnngC prbItm
## vocabCovrtC -0.027
## lernngCndtn -0.001 -0.072
## probItemtyp -0.211 -0.017 -0.001
## lrnngCndt:I -0.002 0.008 -0.212 -0.007
# Variance components transcribed from the summary above: residual variance
# and by-subject intercept variance of mFCRTP.
mFCRTPresvar <- 0.325863
mFCRTPintvar <- 0.074293
# retrieve the mean difference from the model predictions
mFCRTPmeandiff <- pFCRTPJoVE$Predicted[2] - pFCRTPJoVE$Predicted[1]
# note that the one titled pFCRTP has other factors,
# the one called pFCRTPJoVE is appropriate because it only has the learningCondition factor
# and as a sanity check, yes, that's identical to the learningCondition coefficient
effectSize(mFCRTPmeandiff, mFCRTPresvar, mFCRTPintvar)
## [1] -0.2107689
# -0.21
# figure 7D
modelSummary(mEMRTW)  # RT model summary with Kenward-Roger F-tests/p-values
## lmer(formula = reactionTime ~ vocabCovariateC + learningCondition +
## (1 | subjectnr), data = dataEMRTW)
## Observations: 2133; Groups: subjectnr, 103
##
## Linear mixed model fit by REML
##
## Fixed Effects:
## Estimate SE F error df Pr(>F)
## (Intercept) 2.39647 0.07611 991.266 100.26 <2e-16 ***
## vocabCovariateC -1.09770 0.77534 2.004 107.02 0.1598
## learningCondition -0.39361 0.15206 6.700 99.58 0.0111 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## NOTE: F, error df, and p-values from Kenward-Roger approximation
##
## Random Effects:
## Groups Name Std.Dev.
## subjectnr (Intercept) 0.71923
## Residual 1.20415
## Warning in deviance.merMod(Model): deviance() is deprecated for REML fits;
## use REMLcrit for the REML criterion or deviance(.,REML=FALSE) for deviance
## calculated at the REML fit
##
## AIC: 7072.2; BIC: 7100.5; logLik: -3531.1; Deviance: 7062.2
# learningCondition is significant
# read out variances by hand
summary(mEMRTW)  # lme4 summary; variance components read off below for the effect size
## Linear mixed model fit by REML ['lmerMod']
## Formula: reactionTime ~ vocabCovariateC + learningCondition + (1 | subjectnr)
## Data: dataEMRTW
##
## REML criterion at convergence: 7062.2
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.5173 -0.6477 -0.2049 0.5082 5.9442
##
## Random effects:
## Groups Name Variance Std.Dev.
## subjectnr (Intercept) 0.5173 0.7192
## Residual 1.4500 1.2042
## Number of obs: 2133, groups: subjectnr, 103
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 2.39647 0.07611 31.486
## vocabCovariateC -1.09770 0.77534 -1.416
## learningCondition -0.39361 0.15206 -2.589
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC
## vocabCovrtC -0.064
## lernngCndtn -0.009 -0.045
# Variance components transcribed from the summary above: residual variance
# and by-subject intercept variance of mEMRTW.
mEMRTWresvar <- 1.4500
mEMRTWintvar <- 0.5173
# retrieve the mean difference from the model predictions
mEMRTWmeandiff <- pEMRTW$Predicted[2] - pEMRTW$Predicted[1]
# and as a sanity check, yes, that's identical to the learningCondition coefficient
effectSize(mEMRTWmeandiff, mEMRTWresvar, mEMRTWintvar)
## [1] -0.2806246
# -0.28
# Figure 7E
modelSummary(mEMRTA)  # RT model summary with Kenward-Roger F-tests/p-values
## lmer(formula = reactionTime ~ vocabCovariateC + learningCondition *
## adjacencyItemtype * snItemtype + (1 + adjacencyItemtype *
## snItemtype | subjectnr), data = dataEMRTA)
## Observations: 3270; Groups: subjectnr, 103
##
## Linear mixed model fit by REML
##
## Fixed Effects:
## Estimate SE F
## (Intercept) 1.94459 0.06399 922.5135
## vocabCovariateC -1.05411 0.63951 2.5968
## learningCondition -0.33119 0.12788 6.7011
## adjacencyItemtype 0.06822 0.04862 1.9561
## snItemtype -0.24184 0.03355 51.2614
## learningCondition:adjacencyItemtype -0.08268 0.09724 0.7181
## learningCondition:snItemtype 0.04724 0.06710 0.4888
## adjacencyItemtype:snItemtype 0.23775 0.06398 13.6090
## learningCondition:adjacencyItemtype:snItemtype -0.09139 0.12793 0.5026
## error df Pr(>F)
## (Intercept) 100.15 < 2e-16 ***
## vocabCovariateC 106.60 0.110036
## learningCondition 99.58 0.011076 *
## adjacencyItemtype 93.04 0.165260
## snItemtype 90.70 2.05e-10 ***
## learningCondition:adjacencyItemtype 93.06 0.398933
## learningCondition:snItemtype 90.69 0.486234
## adjacencyItemtype:snItemtype 89.94 0.000385 ***
## learningCondition:adjacencyItemtype:snItemtype 89.92 0.480187
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## NOTE: F, error df, and p-values from Kenward-Roger approximation
##
## Random Effects:
## Groups Name Std.Dev. Corr
## subjectnr (Intercept) 0.62364
## adjacencyItemtype 0.36630 0.146
## snItemtype 0.16023 -0.094 0.465
## adjacencyItemtype:snItemtype 0.25872 -0.193 -0.456 -0.536
## Residual 0.78990
## Warning in deviance.merMod(Model): deviance() is deprecated for REML fits;
## use REMLcrit for the REML criterion or deviance(.,REML=FALSE) for deviance
## calculated at the REML fit
##
## AIC: 8228.0; BIC: 8349.9; logLik: -4094.0; Deviance: 8188.0
# learningCondition is significant
# read out variances by hand
summary(mEMRTA)  # lme4 summary; variance components read off below for the effect size
## Linear mixed model fit by REML ['lmerMod']
## Formula:
## reactionTime ~ vocabCovariateC + learningCondition * adjacencyItemtype *
## snItemtype + (1 + adjacencyItemtype * snItemtype | subjectnr)
## Data: dataEMRTA
##
## REML criterion at convergence: 8188
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -4.4251 -0.4877 -0.1930 0.2278 7.0715
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subjectnr (Intercept) 0.38892 0.6236
## adjacencyItemtype 0.13418 0.3663 0.15
## snItemtype 0.02567 0.1602 -0.09 0.47
## adjacencyItemtype:snItemtype 0.06694 0.2587 -0.19 -0.46 -0.54
## Residual 0.62395 0.7899
## Number of obs: 3270, groups: subjectnr, 103
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 1.94459 0.06399 30.388
## vocabCovariateC -1.05411 0.63951 -1.648
## learningCondition -0.33119 0.12788 -2.590
## adjacencyItemtype 0.06822 0.04862 1.403
## snItemtype -0.24184 0.03355 -7.208
## learningCondition:adjacencyItemtype -0.08268 0.09724 -0.850
## learningCondition:snItemtype 0.04724 0.06710 0.704
## adjacencyItemtype:snItemtype 0.23775 0.06398 3.716
## learningCondition:adjacencyItemtype:snItemtype -0.09139 0.12793 -0.714
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC lrnngC adjcnI snItmt lrnngCndtn:dI lrnngCndtn:sI
## vocabCovrtC -0.059
## lernngCndtn -0.026 -0.043
## adjcncyItmt 0.162 -0.002 -0.022
## snItemtype -0.048 -0.010 0.006 0.208
## lrnngCndtn:dI -0.021 -0.010 0.162 -0.098 -0.020
## lrnngCndtn:sI 0.006 -0.005 -0.048 -0.020 -0.146 0.208
## adjcncyIt:I -0.057 -0.020 -0.005 -0.153 0.054 0.014 -0.070
## lrnngCn:I:I -0.006 -0.003 -0.058 0.013 -0.070 -0.153 0.054
## adjI:I
## vocabCovrtC
## lernngCndtn
## adjcncyItmt
## snItemtype
## lrnngCndtn:dI
## lrnngCndtn:sI
## adjcncyIt:I
## lrnngCn:I:I -0.152
# Variance components transcribed from the summary above: residual variance
# and by-subject intercept variance of mEMRTA.
mEMRTAresvar <- 0.62395
mEMRTAintvar <- 0.38892
# retrieve the mean difference from the model predictions
mEMRTAmeandiff <- pEMRTA$Predicted[2] - pEMRTA$Predicted[1]
# and as a sanity check, yes, that's identical to the learningCondition coefficient
effectSize(mEMRTAmeandiff, mEMRTAresvar, mEMRTAintvar)
## [1] -0.3290795
# -0.33
# Figure 7F
modelSummary(mEMRTP)  # RT model summary with Kenward-Roger F-tests/p-values
## lmer(formula = reactionTime ~ vocabCovariateC + learningCondition *
## probItemtype + (1 + probItemtype | subjectnr), data = dataEMRTP)
## Observations: 3788; Groups: subjectnr, 102
##
## Linear mixed model fit by REML
##
## Fixed Effects:
## Estimate SE F error df Pr(>F)
## (Intercept) 1.26405 0.04608 752.3014 99.56 <2e-16 ***
## vocabCovariateC 0.18358 0.47645 0.1462 106.65 0.7029
## learningCondition -0.22081 0.09187 5.7756 98.94 0.0181 *
## probItemtype 0.02969 0.02456 1.4576 94.39 0.2303
## learningCondition:probItemtype -0.01664 0.04912 0.1146 94.39 0.7358
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## NOTE: F, error df, and p-values from Kenward-Roger approximation
##
## Random Effects:
## Groups Name Std.Dev. Corr
## subjectnr (Intercept) 0.445730
## probItemtype 0.037009 1.000
## Residual 0.741418
## Warning in deviance.merMod(Model): deviance() is deprecated for REML fits;
## use REMLcrit for the REML criterion or deviance(.,REML=FALSE) for deviance
## calculated at the REML fit
##
## AIC: 8782.5; BIC: 8838.7; logLik: -4382.3; Deviance: 8764.5
# learningCondition is significant
# read out variances by hand
summary(mEMRTP)  # lme4 summary; variance components read off below for the effect size
## Linear mixed model fit by REML ['lmerMod']
## Formula: reactionTime ~ vocabCovariateC + learningCondition * probItemtype +
## (1 + probItemtype | subjectnr)
## Data: dataEMRTP
##
## REML criterion at convergence: 8764.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.0080 -0.4868 -0.1572 0.2152 8.0748
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## subjectnr (Intercept) 0.19868 0.44573
## probItemtype 0.00137 0.03701 1.00
## Residual 0.54970 0.74142
## Number of obs: 3788, groups: subjectnr, 102
##
## Fixed effects:
## Estimate Std. Error t value
## (Intercept) 1.26405 0.04608 27.430
## vocabCovariateC 0.18358 0.47645 0.385
## learningCondition -0.22081 0.09187 -2.403
## probItemtype 0.02969 0.02456 1.209
## learningCondition:probItemtype -0.01664 0.04912 -0.339
##
## Correlation of Fixed Effects:
## (Intr) vcbCvC lrnngC prbItm
## vocabCovrtC -0.083
## lernngCndtn -0.018 -0.026
## probItemtyp 0.117 0.024 0.000
## lrnngCndt:I 0.001 -0.009 0.119 -0.029
# Variance components transcribed from the summary above: residual variance
# and by-subject intercept variance of mEMRTP.
mEMRTPresvar <- 0.54970
mEMRTPintvar <- 0.19868
# retrieve the mean difference from the model predictions
mEMRTPmeandiff <- pEMRTPJoVE$Predicted[2] - pEMRTPJoVE$Predicted[1]
# note that the one titled pEMRTP has other factors,
# the one called pEMRTPJoVE is appropriate because it only has the learningCondition factor
# and as a sanity check, yes, that's identical to the learningCondition coefficient
effectSize(mEMRTPmeandiff, mEMRTPresvar, mEMRTPintvar)
## [1] -0.2552408
# -0.25
# Save the full workspace, including the JoVE-specific objects created above.
save.image("WorkspaceInclJoVE.Rdata")