-
Notifications
You must be signed in to change notification settings - Fork 0
/
Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.R
160 lines (134 loc) · 7.1 KB
/
Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
#######################################
# Judgment Accuracy: Causal Functions #
#######################################

# Load libraries:
# - lmerTest (attaches lme4, the source of glmer/glmerControl used below)
# - tidyverse (tibble/dplyr verbs used throughout)
library("lmerTest")
library("tidyverse")

# Import data and store it as a tibble
Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal <-
  read.csv("Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.csv") %>%
  as_tibble()

# Preview the first rows of the imported data
head(Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal)
# Data dictionary:
## "Participant" = participant number
## "Function" = function number (causal functions only; range 1-4; refer to payoff function figure)
## "Congruence" = denotes preference-congruence for a given policy/function
## "Ambiguity" = denotes ambiguity of function (low vs high)
## "Judgment_Accuracy" = distance of a final judgment from the correct assessment
##   value (3 possible values: 0-2); 0 = correct judgment;
##   1 = incorrect (policy A = policy B); 2 = incorrect (e.g., Policy A < Policy B)

# Re-code judgment accuracy as binary: 1 = correct (distance 0), 0 = incorrect
Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Judgment_Accuracy_Coded <-
  as.numeric(Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Judgment_Accuracy == 0)

# Descriptives: mean judgment accuracy in each Ambiguity x Congruence cell
with(
  Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal,
  tapply(Judgment_Accuracy_Coded, list(Ambiguity, Congruence), mean, na.rm = TRUE)
)
# Effects coding for the models: Low ambiguity = +0.5, High ambiguity = -0.5
Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal <-
  Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal %>%
  mutate(Ambiguity2 = ifelse(Ambiguity == "Low", 0.5, -0.5))

##################################################
# Preference-Congruence vs Incongruence Analysis #
##################################################
# Subset to congruent/incongruent trials (drop "Neutral"; %in% is used rather
# than != so any NA rows are retained, as in the original subsetting) and
# effects-code congruence: Congruent = +0.5, Incongruent = -0.5
jpe_causal_preference_only <-
  Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal %>%
  filter(!Congruence %in% "Neutral") %>%
  mutate(Congruence2 = ifelse(Congruence == "Congruent", 0.5, -0.5))
#------------------------------------------
# Logistic regression: Congruent vs Incongruent analysis
#
# Maximal model (kept for the record, commented out): the full random-effects
# structure is unidentifiable here -- more random effects (352) than
# observations (176); see the captured error below.
# m1 <- glmer(Judgment_Accuracy_Coded ~ Congruence2*Ambiguity2 +
# (1+Congruence2*Ambiguity2|Participant),
# data=jpe_causal_preference_only, family="binomial",
# control=glmerControl(optimizer="bobyqa"))
# summary(m1)
# Error: number of observations (=176) < number of random effects (=352) for term (1 + Congruence2 * Ambiguity2 | Participant); the random-effects parameters are probably unidentifiable
# Reduced model: "||" drops the random-effect correlations, making the model
# identifiable. The response Judgment_Accuracy_Coded is already 0/1, so it is
# passed directly (consistent with the Strong-vs-Neutral analysis in this
# script); for a binomial glmer this is equivalent to as.factor() on a 0/1
# variable, since the first factor level is treated as the failure category.
m2 <- glmer(Judgment_Accuracy_Coded ~ Congruence2*Ambiguity2 +
(1+Congruence2*Ambiguity2||Participant),
data=jpe_causal_preference_only, family="binomial",
control=glmerControl(optimizer="bobyqa"))
summary(m2)
# Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
# Family: binomial ( logit )
# Formula: Judgment_Accuracy_Coded ~ Congruence2 * Ambiguity2 + (1 + Congruence2 * Ambiguity2 || Participant)
# Data: jpe_causal_preference_only
# Control: glmerControl(optimizer = "bobyqa")
#
# AIC BIC logLik deviance df.resid
# 196.4 221.8 -90.2 180.4 168
#
# Scaled residuals:
# Min 1Q Median 3Q Max
# -2.5142 -0.3844 0.2693 0.5053 2.2317
#
# Random effects:
# Groups Name Variance Std.Dev.
# Participant (Intercept) 0.0000 0.0000
# Participant.1 Congruence2 0.6963 0.8344
# Participant.2 Ambiguity2 3.5666 1.8885
# Participant.3 Congruence2:Ambiguity2 0.0000 0.0000
# Number of obs: 176, groups: Participant, 88
#
# Fixed effects:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 0.3226 0.2208 1.461 0.144035
# Congruence2 1.6782 0.5511 3.045 0.002323 **
# Ambiguity2 2.8455 0.7431 3.829 0.000129 ***
# Congruence2:Ambiguity2 -0.7199 0.9121 -0.789 0.429983
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#-----------------------------------------------------------------------------------------
#########################################
# Strong vs Neutral Preference Analysis #
#########################################
# Create new predictor variable with effects coding that compares strong vs neutral preference
# Coding: Neutral = +0.5; Congruent or Incongruent (i.e., a strong preference) = -0.5
Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Preference_Strength <-
ifelse(Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal$Congruence=="Neutral",.5,-.5)
## Maximal Model (kept for the record, commented out; see convergence note below)
#m1 <- glmer(Judgment_Accuracy_Coded ~ Preference_Strength*Ambiguity2 +
# (1+Preference_Strength*Ambiguity2|Participant),
# data=Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal, family="binomial",
# control=glmerControl(optimizer="bobyqa"))
#summary(m1)
## Model failed to converge: degenerate Hessian with 2 negative eigenvalues
# Reduced model: "||" removes the random-effect correlations, which resolved
# the convergence failure of the maximal model above. Uses the full data set
# (all three Congruence levels), unlike the congruence-only analysis earlier.
m2 <- glmer(Judgment_Accuracy_Coded ~ Preference_Strength*Ambiguity2 +
(1+Preference_Strength*Ambiguity2||Participant),
data=Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal, family="binomial",
control=glmerControl(optimizer="bobyqa"))
summary(m2)
# Model output recorded below for reference (352 obs, 88 participants).
# Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
# Family: binomial ( logit )
# Formula: Judgment_Accuracy_Coded ~ Preference_Strength * Ambiguity2 + (1 + Preference_Strength * Ambiguity2 || Participant)
# Data: Study2A_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal
# Control: glmerControl(optimizer = "bobyqa")
#
# AIC BIC logLik deviance df.resid
# 385.4 416.3 -184.7 369.4 344
#
# Scaled residuals:
# Min 1Q Median 3Q Max
# -1.8033 -0.4537 0.3041 0.3738 1.8710
#
# Random effects:
# Groups Name Variance Std.Dev.
# Participant (Intercept) 5.477e-01 7.401e-01
# Participant.1 Preference_Strength 9.806e-14 3.131e-07
# Participant.2 Ambiguity2 4.940e+00 2.223e+00
# Participant.3 Preference_Strength:Ambiguity2 0.000e+00 0.000e+00
# Number of obs: 352, groups: Participant, 88
#
# Fixed effects:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 0.2252 0.1709 1.318 0.188
# Preference_Strength -0.4372 0.3170 -1.379 0.168
# Ambiguity2 3.2412 0.5516 5.876 4.19e-09 ***
# Preference_Strength:Ambiguity2 0.6985 0.6193 1.128 0.259
# ---
# Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
#
# Correlation of Fixed Effects:
# (Intr) Prfr_S Ambgt2
# Prfrnc_Strn 0.011
# Ambiguity2 0.156 -0.130
# Prfrnc_S:A2 -0.007 0.057 0.113