-
Notifications
You must be signed in to change notification settings - Fork 0
/
Study1_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.R
96 lines (69 loc) · 3.37 KB
/
Study1_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
#######################################
# Judgment Accuracy: Causal Functions #
#######################################

# Libraries ----
library("rcompanion") # wilcoxonR(): rank-based effect size used below
library("tidyverse")  # %>%, group_by/summarise/filter, as_tibble

# Import data ----
Study1_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal <-
  as_tibble(read.csv("Study1_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal.csv"))

# Preview data ----
head(Study1_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal)

# Data dictionary:
## "Participant"       = participant number
## "Function"          = function number (causal functions only; range 1-4;
##                       refer to payoff function figure)
## "Congruence"        = preference-congruence for a given policy/function,
##                       1 = yes, -1 = no (i.e., is the preferred policy
##                       actually the best policy)
## "Ambiguity"         = ambiguity of function (low vs high)
## "Judgment_Accuracy" = distance of a final judgment from the correct
##                       assessment value; lower = more accurate (range 0-10)
# Note: As explained in the paper, two separate analyses were conducted for
# each predictor variable (congruence and ambiguity). They are divided below.
#######################
# Congruence Analysis #
#######################

# Per-participant mean judgment accuracy within each congruence condition.
# ungroup() added so the result carries no residual grouping downstream.
jpe_causal_congruence <- Study1_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal %>%
  group_by(Participant, Congruence) %>%
  summarise(Mean_Judgment_Accuracy = mean(Judgment_Accuracy, na.rm = TRUE)) %>%
  ungroup()

# Split by condition (Congruence is coded 1 = congruent, -1 = incongruent).
jpe_causal_preference_congruent <- jpe_causal_congruence %>%
  filter(Congruence == 1)
jpe_causal_preference_incongruent <- jpe_causal_congruence %>%
  filter(Congruence == -1)

# Descriptives
jpe_causal_preference_congruent %>%
  summary() # Mean_Judgment_Accuracy median = 3
jpe_causal_preference_incongruent %>%
  summary() # Mean_Judgment_Accuracy median = 6

# Wilcoxon rank-sum (Mann-Whitney) test: paired = FALSE makes this an
# UNPAIRED comparison, not the signed-rank test the original header implied.
# NOTE(review): the design looks within-subjects (each participant can appear
# in both conditions), so a paired test may have been intended -- confirm
# against the paper; the reported W/p below come from this exact call, so the
# call itself is left unchanged.
wilcox.test(x = jpe_causal_preference_congruent$Mean_Judgment_Accuracy,
            y = jpe_causal_preference_incongruent$Mean_Judgment_Accuracy,
            paired = FALSE,
            exact = TRUE
)
# W = 354.50, p < .001

# Effect size: r
# NOTE(review): rcompanion::wilcoxonR() is the effect size for the UNPAIRED
# rank-sum test and documents no 'paired' argument ('paired = TRUE' is
# absorbed by '...'). The paired analogue is wilcoxonPairedR(). Left as-is so
# the reported effect size stays reproducible -- verify intent.
wilcoxonR(x = jpe_causal_congruence$Mean_Judgment_Accuracy,
          g = jpe_causal_congruence$Congruence,
          ci = TRUE, paired = TRUE) # r = .44, CI = .249 - .620 (Note: CI resamples every time it is run)
######################
# Ambiguity Analysis #
######################

# Per-participant mean judgment accuracy within each ambiguity condition.
# ungroup() added so the result carries no residual grouping downstream.
jpe_causal_ambiguity <- Study1_Judgments_of_Policy_Efficacy_after_the_Learning_Task_Causal %>%
  group_by(Participant, Ambiguity) %>%
  summarise(Mean_Judgment_Accuracy = mean(Judgment_Accuracy, na.rm = TRUE)) %>%
  ungroup()

# Split by condition ("Low" vs "High" ambiguity).
jpe_causal_ambiguity_low <- jpe_causal_ambiguity %>%
  filter(Ambiguity == "Low")
jpe_causal_ambiguity_high <- jpe_causal_ambiguity %>%
  filter(Ambiguity == "High")

# Descriptives
jpe_causal_ambiguity_low %>%
  summary() # Mean_Judgment_Accuracy median = 2
jpe_causal_ambiguity_high %>%
  summary() # Mean_Judgment_Accuracy median = 5

# Wilcoxon signed-rank test (paired).
# NOTE(review): paired = TRUE pairs observations by row position of the two
# filtered vectors; this is only valid if both contain the same participants
# in the same order (and no participant is missing a condition) -- verify.
# The call is left unchanged so the reported W/p remain reproducible.
wilcox.test(x = jpe_causal_ambiguity_low$Mean_Judgment_Accuracy,
            y = jpe_causal_ambiguity_high$Mean_Judgment_Accuracy,
            paired = TRUE,
            exact = TRUE
)
# W = 94.50, p < .001

# Effect size: r
# NOTE(review): rcompanion::wilcoxonR() is the effect size for the UNPAIRED
# rank-sum test and documents no 'paired' argument ('paired = TRUE' is
# absorbed by '...'). Since the test above is paired, wilcoxonPairedR() is
# likely the intended function. Left as-is so the reported effect size stays
# reproducible -- verify intent.
wilcoxonR(x = jpe_causal_ambiguity$Mean_Judgment_Accuracy,
          g = jpe_causal_ambiguity$Ambiguity,
          ci = TRUE, paired = TRUE) # r = .49, CI = .32 - .65 (Note: CI resamples every time it is run)