-
Notifications
You must be signed in to change notification settings - Fork 0
/
Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.R
81 lines (70 loc) · 4.29 KB
/
Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
#######################################
# Judgment Accuracy: Causal Functions #
#######################################

# Load libraries.
# lmerTest loads lme4, which supplies glmer() used for the model below.
# tidyverse supplies as_tibble() used for the import.
# NOTE: double quotes used consistently (tidyverse style guide).
library("lmerTest")
library("tidyverse")

# Import data.
# The CSV is expected in the current working directory; read.csv() will
# error with a clear message if the file is absent.
Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal <-
  as_tibble(read.csv("Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.csv"))

# Preview data to confirm the import looks as expected.
head(Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal)
# Data dictionary:
## "Participant" = participant number
## "Function_Exposure_Treatment" = denotes whether or not participant received function exposure treatment (see Study 3 method section for details), 0=no, 1=yes
## "Function" = function number (causal functions only; range 1-4; refer to payoff function figure)
## "Congruence" = denotes preference-congruence for a given policy/function
## "Ambiguity" = denotes ambiguity of function (low vs high)
## "Judgment_Accuracy" = values are the distance a final judgment was from the correct assessment value (3 possible values: 0-2); 0=correct judgment; 1=incorrect (policy A = policy B); 2=incorrect (e.g., Policy A < Policy B)

# Recode judgment accuracy as a binary outcome: 1 = correct (distance 0),
# 0 = incorrect (distance 1 or 2). The logical comparison coerced to numeric
# yields exactly the same 1/0 (and NA) values as the original ifelse().
Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Judgment_Accuracy_Coded <-
  as.numeric(Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Judgment_Accuracy == 0)
# Descriptives: mean judgment accuracy in each Ambiguity x Congruence cell.
# with() lets the columns be referenced directly instead of repeating the
# long data-frame name; the tapply() call itself is unchanged.
with(Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal,
     tapply(Judgment_Accuracy_Coded,
            list(Ambiguity, Congruence),
            mean,
            na.rm = TRUE))
# Effects coding for the model: each two-level predictor is mapped to
# +0.5 / -0.5 so that main effects are estimated at the grand mean.
## Ambiguity: Low = +0.5, High = -0.5
Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Ambiguity2 <-
  with(Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal,
       ifelse(Ambiguity == "Low", 0.5, -0.5))
## Function exposure treatment: treated (1) = +0.5, untreated (0) = -0.5
Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Function_Exposure_Treatment2 <-
  with(Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal,
       ifelse(Function_Exposure_Treatment == 1, 0.5, -0.5))
## Congruence: Congruent = +0.5, otherwise -0.5
Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Congruence2 <-
  with(Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal,
       ifelse(Congruence == "Congruent", 0.5, -0.5))
#############################
# logistic regression model #
#############################
# Mixed-effects logistic regression of binary judgment accuracy on the
# three effects-coded predictors and all their interactions, with a random
# intercept per participant. (There were model convergence issues before
# settling on this specification.)
# binomial(link = "logit") is the family object equivalent of the
# string shorthand "binomial".
m <- glmer(
  Judgment_Accuracy_Coded ~
    Congruence2 * Ambiguity2 * Function_Exposure_Treatment2 +
    (1 | Participant),
  data = Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal,
  family = binomial(link = "logit")
)
summary(m)
# Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
# Family: binomial ( logit )
# Formula: Judgment_Accuracy_Coded ~ Congruence2 * Ambiguity2 * Function_Exposure_Treatment2 + (1 | Participant)
# Data: Study3_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal
#
# AIC BIC logLik deviance df.resid
# 297.2 330.5 -139.6 279.2 291
#
# Scaled residuals:
# Min 1Q Median 3Q Max
# -3.5590 -0.6939 0.2810 0.5547 4.2426
#
# Random effects:
# Groups Name Variance Std.Dev.
# Participant (Intercept) 4e-14 2e-07
# Number of obs: 300, groups: Participant, 78
#
# Fixed effects:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 0.02477 0.17240 0.144 0.886
# Congruence2 1.65521 0.34481 4.800 1.58e-06 ***
# Ambiguity2 3.01146 0.34481 8.734 < 2e-16 ***
# Function_Exposure_Treatment2 0.30077 0.34481 0.872 0.383
# Congruence2:Ambiguity2 -0.70537 0.68961 -1.023 0.306
# Congruence2:Function_Exposure_Treatment2 -0.80822 0.68961 -1.172 0.241
# Ambiguity2:Function_Exposure_Treatment2 -0.71713 0.68961 -1.040 0.298
# Congruence2:Ambiguity2:Function_Exposure_Treatment2 -1.01007 1.37920 -0.732 0.464
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1