/
experiment1.Rmd
537 lines (424 loc) · 22.4 KB
/
experiment1.Rmd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
---
title: "Analysis of Experiment 1"
editor_options:
chunk_output_type: console
output:
workflowr::wflow_html:
code_folding: hide
---
```{r, include=FALSE}
# Widen console output in the rendered report
options(width = 120)
# Wrap knitr's default output hook so chunks can set a `max.height` chunk
# option, which is injected as a CSS max-height style on the output block
# (used below to cap the height of long model summaries).
local({
hook_output <- knitr::knit_hooks$get('output')
knitr::knit_hooks$set(output = function(x, options) {
options$attr.output <- c(
options$attr.output,
sprintf('style="max-height: %s;"', options$max.height)
)
hook_output(x, options)
})
})
```
## Load data and R packages
```{r results='hide', message=F, warning=F}
# All but 1 of these packages can be easily installed from CRAN.
# However it was harder to install the showtext package. On Mac, I did this:
# installed 'homebrew' using Terminal: ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# installed 'libpng' using Terminal: brew install libpng
# installed 'showtext' in R using: devtools::install_github("yixuan/showtext")
library(showtext)   # Google-font rendering in figures
library(brms)       # Bayesian multinomial (categorical) model
library(lme4)       # frequentist binomial GLMM
library(bayesplot)  # posterior predictive checks (pp_check)
library(tidyverse)
library(gridExtra)
library(kableExtra) # HTML tables
library(bayestestR) # p_direction() for 'Bayesian p-values'
library(tidybayes)  # dot plots / point-interval geoms for posteriors
library(cowplot)    # multi-panel figure assembly
library(car)        # Anova() on the GLMM
# Presumably provides get_fixed_effects_with_p_values(), used for Table S1 below — confirm
source("code/helper_functions.R")
# set up nice font for figure
nice_font <- "Lora"
font_add_google(name = nice_font, family = nice_font, regular.wt = 400, bold.wt = 700)
showtext_auto()
# The three treatment levels for Experiment 1 (after merging the two LPS treatments; see below)
exp1_treatments <- c("Intact control", "Ringers", "LPS")
# Observation time (minutes) per hive, for experiment 1 only
durations <- read_csv("data/data_collection_sheets/experiment_durations.csv") %>%
filter(experiment == 1) %>% select(-experiment)
# Add the SkyLab hive's 90-minute observation time manually (presumably absent from the CSV — confirm)
durations <- bind_rows(durations, tibble(hive = "SkyLab", observation_time_minutes = 90))
# Note that we here merge the heat-treated LPS data with the LPS data.
# This change was made because there is no difference in the response variables between the two LPS treatments (heated and standard LPS), and because all evidence suggests that heat treatment does not do anything to the LPS; see the earlier version of this paper on bioRxiv for complete results without merging these treatments and discussion of why we included two LPS treatments (short version: immunologists often have heat-treated LPS as a control, even though this does not make any sense because LPS is notoriously heat-stable, and an immunologist colleague told us to include this control before we knew about this).
# The original manuscript was rejected by 2 journals, in part because the reviewers were baffled by the inclusion of two different LPS treatments, and so we merged the LPS for simplicity in this version. The results are not changed in any way (they're just simpler as there are now 3 treatments instead of 4 in experiment 1) - see the bioRxiv version for comparison.
# One row per hive x treatment x outcome, with the count of bees in `n`
outcome_tally <- read_csv(file = "data/clean_data/experiment_1_outcome_tally.csv") %>%
mutate(treatment = replace(treatment, treatment == "Heat-treated LPS", "LPS")) %>% # merge the 2 LPS treatments
group_by(hive, treatment, outcome) %>%
summarise(n = sum(n), .groups = "drop") %>%
mutate(outcome = replace(outcome, outcome == "Left of own volition", "Left voluntarily")) %>%
mutate(outcome = factor(outcome, levels = c("Stayed inside the hive", "Left voluntarily", "Forced out")),
treatment = factor(treatment, levels = exp1_treatments)) %>%
arrange(hive, treatment, outcome)
# Re-formatted version of the same data, where each row is an individual bee.
# We need this long format to run the brms categorical model.
# Improvement: the original built this with split(.$id) / map() / do.call("rbind"),
# which is convoluted and relies on the lexicographic ordering of split() names;
# tidyr::uncount(n) replicates each row n times directly and, like the original
# code, simply drops rows where n == 0.
data_for_categorical_model <- outcome_tally %>%
  uncount(n) %>%
  select(treatment, hive, outcome) %>%   # keep the original column order
  arrange(hive, treatment) %>%
  mutate(outcome_numeric = as.numeric(outcome),  # 1 = stayed, 2 = left, 3 = forced out
         hive = as.character(hive),
         treatment = factor(treatment, levels = exp1_treatments)) %>%
  left_join(durations, by = "hive")
```
## Inspect the raw data {.tabset .tabset-fade}
Click the three tabs to see each table.
### Sample sizes by treatment
```{r}
# Number of bees per treatment, pooled across hives
sample_sizes <- data_for_categorical_model %>%
  count(treatment)

sample_sizes %>%
  kable() %>% kable_styling(full_width = FALSE)
```
### Sample sizes by treatment and hive
```{r}
# Cross-tabulation of bees by hive (rows) and treatment (columns)
data_for_categorical_model %>%
  count(hive, treatment) %>%
  spread(treatment, n) %>%
  kable() %>% kable_styling(full_width = FALSE)
```
### Observed outcomes
```{r}
# Observed counts: one row per hive x treatment, one column per outcome
outcome_tally %>%
spread(outcome, n) %>%
kable(digits = 3) %>% kable_styling(full_width = FALSE)
```
## Plot showing raw means and 95% CI of the mean
```{r fig.width=9, fig.height=5, fig.showtext=TRUE}
# NOTE(review): `all_hives` is computed but never used below or elsewhere in
# this file — presumably a leftover from an earlier figure version; confirm
# before deleting.
all_hives <- outcome_tally %>%
  group_by(treatment, outcome) %>%
  summarise(n = sum(n), .groups = "drop") %>%
  ungroup() %>% mutate(hive = "All hives")

pd <- position_dodge(.3)

# Raw percentages of bees per treatment/outcome with exact binomial 95% CIs.
# Fixes: removed a no-op empty mutate() from the original pipeline, and dropped
# geom_point's redundant `stat = "identity"` (identity is its default stat).
outcome_tally %>%
  group_by(treatment, outcome) %>%
  summarise(n = sum(n), .groups = "drop") %>%
  group_by(treatment) %>%
  mutate(total_n = sum(n),
         percent = 100 * n / sum(n),
         # SD of the outcome count; computed but not plotted (the CIs below are used)
         SE = sqrt(total_n * (percent/100) * (1-(percent/100)))) %>%
  ungroup() %>%
  # Exact binomial confidence limits for each outcome proportion, as percentages
  mutate(lowerCI = map_dbl(1:n(), ~ 100 * binom.test(n[.x], total_n[.x])$conf.int[1]),
         upperCI = map_dbl(1:n(), ~ 100 * binom.test(n[.x], total_n[.x])$conf.int[2])) %>%
  filter(outcome != "Stayed inside the hive") %>%
  ggplot(aes(treatment, percent, fill = outcome)) +
  geom_errorbar(aes(ymin=lowerCI, ymax=upperCI), position = pd, width = 0) +
  geom_point(position = pd, colour = "grey15", pch = 21, size = 4) +
  scale_fill_brewer(palette = "Pastel2", name = "Outcome", direction = -1) +
  xlab("Treatment") + ylab("% bees (\u00B1 95% CIs)") +
  theme_bw(20) +
  theme(legend.position = "top",
        text = element_text(family = nice_font)) +
  coord_flip()
```
## Preliminary GLMM
The multinomial model below is not commonly used, though we believe it is the right choice for this particular experiment (e.g. because it can model a three-item categorical response variable, and it can incorporate priors). However during peer review, we were asked whether the results were similar when using standard statistical methods. To address this question, we here present a frequentist Generalised Linear Mixed Model (GLMM; using `lme4::glmer`), which tests the null hypothesis that the proportion of bees exiting the hive (i.e. the proportion leaving voluntarily plus those that were forced out) is equal between treatment groups and hives.
The model's qualitative results are similar to those from the multinomial model: bees treated with LPS left the hive more often than intact controls (and there was a non-significant trend for more LPS bees to leave than Ringers bees), and there was variation between hives in the proportion of bees leaving.
### Parameter estimates from the GLMM {.tabset}
#### With `Intact control` as the reference level
Note that the intact and Ringers controls are not different, but the intact control differs from the LPS treatment.
```{r message=FALSE, warning=FALSE}
# Collapse the three outcomes into a binary response: did the bee leave the
# hive (voluntarily or forced out) or stay inside?
# Fix: both summarise() calls now pass `.groups = "drop"` explicitly, matching
# the style used elsewhere in this file, instead of relying on dplyr's default
# regrouping (which also emits messages).
glmm_data <- outcome_tally %>%
  group_by(hive, treatment, outcome) %>%
  summarise(n = sum(n), .groups = "drop") %>%
  mutate(left = ifelse(outcome == "Stayed inside the hive", "stayed_inside", "left_hive")) %>%
  group_by(hive, treatment, left) %>%
  summarise(n = sum(n), .groups = "drop") %>%
  spread(left, n)

# Binomial GLMM: proportion of bees leaving ~ treatment, hive as random intercept
simple_model <- glmer(
  cbind(left_hive, stayed_inside) ~ treatment + (1 | hive),
  data = glmm_data,
  family = "binomial")
summary(simple_model)
```
#### With `Ringers` as the reference level
Note that the intact and Ringers controls are not different as before. Moreover the Ringers and LPS treatment do not differ significantly.
```{r}
# Refit the binomial GLMM with `Ringers` as the reference level of treatment,
# so the Ringers-vs-LPS contrast appears directly in the coefficient table.
simple_model <- glmer(
  cbind(left_hive, stayed_inside) ~ treatment + (1 | hive),
  family = "binomial",
  data = mutate(glmm_data, treatment = relevel(treatment, ref = "Ringers")))
summary(simple_model)
```
### Inspect Type II Anova table
```{r}
# Type II Wald chi-square test for the treatment effect.
# Fix: car::Anova's argument is `type` (lower-case); the original `Type = "II"`
# was silently absorbed by `...` and ignored (harmless here only because
# type II is the default).
Anova(simple_model, test = "Chisq", type = "II")
```
## Bayesian multinomial model
### Fit the model
Fit the multinomial logistic models, with a 3-item response variable describing what happened to each bee introduced to the hive: stayed inside, left voluntarily, or forced out by the other workers.
```{r}
# Fit the Bayesian multinomial model once and cache it to disk; later knits
# just load the cached .rds file.
if(!file.exists("output/exp1_model.rds")){
# Weakly regularising priors. In brms's categorical family, mu2 and mu3 are the
# linear predictors for outcome categories 2 (left voluntarily) and 3 (forced
# out), relative to the reference category 1 (stayed inside).
prior <- c(set_prior("normal(0, 3)", class = "b", dpar = "mu2"),
set_prior("normal(0, 3)", class = "b", dpar = "mu3"),
set_prior("normal(0, 1)", class = "sd", dpar = "mu2", group = "hive"),
set_prior("normal(0, 1)", class = "sd", dpar = "mu3", group = "hive"))
# Three-category outcome ~ treatment, with hive as a random intercept
exp1_model <- brm(
outcome_numeric ~ treatment + (1 | hive),
data = data_for_categorical_model,
prior = prior,
family = "categorical",
chains = 4, cores = 1, iter = 5000, seed = 1)
saveRDS(exp1_model, "output/exp1_model.rds")
}
exp1_model <- readRDS("output/exp1_model.rds")
```
### Posterior predictive check
This plot shows eight predictions from the posterior (pale blue) as well as the original data (dark blue), for the three categorical outcomes (1: stayed inside, 2: left voluntarily, 3: forced out). The predicted number of bees in each outcome category is similar to the real data, illustrating that the model is able to recapitulate the original data fairly closely (a necessary requirement for making inferences from the model).
```{r message=F, warning=F}
# Posterior predictive check: overlay histograms of 8 posterior draws on the
# observed outcome counts.
# NOTE(review): `nsamples` is deprecated in recent brms releases in favour of
# `ndraws` — confirm which version this project pins.
pp_check(exp1_model, type = "hist", nsamples = 8)
```
### Parameter estimates from the model
#### Output of the Bayesian multinomial logistic model
```{r max.height='300px', max.width='400px'}
# Full posterior summary; the chunk's max.height option caps the displayed height
summary(exp1_model)
```
#### Formatted `brms` output for Table S1
The code chunk below wrangles the output of the `summary()` function for `brms` models into a more readable table of results, and also adds 'Bayesian p-values' (i.e. the posterior probability that the true effect size has the same sign as the reported effect).
******
***Table S1:*** Table summarising the posterior estimates of each fixed effect in the Bayesian multinomial logistic model of Experiment 1. Because there were three possible outcomes for each bee (Stayed inside, Left voluntarily, or Forced out), there are two parameter estimates for each predictor in the model. 'Treatment' is a fixed factor with three levels, and the effects shown here are expressed relative to the 'Intact control' group. The $p$ column gives the posterior probability that the true effect size is opposite in sign to what is reported in the Estimate column, similarly to a $p$-value.
```{r}
# Wrangle the brms fixed-effect summary into Table S1: split parameter names
# into the outcome linear predictor (mu2/mu3) and the treatment label, sort by
# outcome, and drop MCMC diagnostics.
tableS1 <- get_fixed_effects_with_p_values(exp1_model) %>%
mutate(mu = map_chr(str_extract_all(Parameter, "mu[:digit:]"), ~ .x[1]),
Parameter = str_remove_all(Parameter, "mu[:digit:]_"),
Parameter = str_replace_all(Parameter, "treatment", "Treatment: ")) %>%
arrange(mu) %>%
select(-mu, -Rhat, -Bulk_ESS, -Tail_ESS) %>%
mutate(PP = format(round(PP, 4), nsmall = 4))
# Rename the interval columns for display
names(tableS1)[3:5] <- c("Est. Error", "Lower 95% CI", "Upper 95% CI")
saveRDS(tableS1, file = "figures/tableS1.rds")
# Rows 1-3 are the mu2 (left voluntarily) effects, rows 4-6 the mu3 (forced out) effects
tableS1 %>%
kable(digits = 3) %>%
kable_styling(full_width = FALSE) %>%
pack_rows("% bees leaving voluntarily", 1, 3) %>%
pack_rows("% bees forced out", 4, 6)
```
## Plotting estimates from the model
### Derive prediction from the posterior
```{r}
# Posterior predicted probability of each of the three outcomes, for every
# treatment, in the given hive. Returns one row per posterior draw x treatment
# x outcome, with the predicted proportion in `prop`.
get_posterior_preds <- function(focal_hive){
  newdata <- expand.grid(
    treatment = levels(data_for_categorical_model$treatment),
    hive = focal_hive)
  preds <- fitted(exp1_model, newdata = newdata, summary = FALSE)
  dimnames(preds) <- list(NULL, newdata[,1], NULL)

  # The third array dimension indexes the outcome category (1..3)
  outcome_names <- c("Stayed inside the hive", "Left voluntarily", "Forced out")
  lapply(1:3, function(i){
    as.data.frame(preds[,, i]) %>%
      mutate(outcome = outcome_names[i], posterior_sample = 1:n())
  }) %>%
    bind_rows() %>%
    gather(treatment, prop, `Intact control`, Ringers, LPS) %>%
    mutate(outcome = factor(outcome, outcome_names),
           treatment = factor(treatment, c("Intact control", "Ringers", "LPS"))) %>%
    as_tibble() %>% arrange(treatment, outcome)
}
# plotting data for panel A: posterior predictions for one specific hive
plotting_data <- get_posterior_preds(focal_hive = "Zoology")
# stats data for panel B and the contrast tables: hive = NA, which presumably
# yields predictions that do not condition on a specific hive's random effect —
# confirm how fitted() treats NA grouping levels in this brms version.
stats_data <- get_posterior_preds(focal_hive = NA)
```
### Make Figure 1
```{r fig.height=3.4, fig.width=9, fig.showtext = TRUE, warning=FALSE}
# Panel A: quantile dot plot of the posterior % bees per outcome and treatment
cols <- RColorBrewer::brewer.pal(3, "Set2")
# NOTE(review): stat_dotsh()/stat_pointintervalh() are the old horizontal
# tidybayes geoms, deprecated in favour of stat_dots()/stat_pointinterval()
# (now in ggdist) — confirm the pinned tidybayes version still exports them.
dot_plot <- plotting_data %>%
left_join(sample_sizes, by = "treatment") %>%
arrange(treatment) %>%
mutate(outcome = str_replace_all(outcome, "Stayed inside the hive", "Stayed inside"),
outcome = factor(outcome, c("Stayed inside", "Left voluntarily", "Forced out"))) %>%
ggplot(aes(100 * prop, treatment)) +
# 100 dots = 100 approximately equally likely posterior estimates
stat_dotsh(quantiles = 100, fill = "grey40", colour = "grey40") +
# Median plus 50% and 95% credible intervals, nudged below the dots
stat_pointintervalh(aes(colour = outcome, fill = outcome),
.width = c(0.5, 0.95),
position = position_nudge(y = -0.07),
point_colour = "grey26", pch = 21, stroke = 0.4) +
scale_colour_manual(values = cols) +
scale_fill_manual(values = cols) +
facet_wrap( ~ outcome, scales = "free_x") +
xlab("% bees (posterior estimate)") + ylab("Treatment") +
theme_bw() +
coord_cartesian(ylim=c(1.4, 3)) +
theme(
text = element_text(family = nice_font),
strip.background = element_rect(fill = "#eff0f1"),
panel.grid.major.y = element_blank(),
legend.position = "none"
)
# Log odds ratio between two proportions. A positive value means the odds of
# this outcome are higher in trt2 than in trt1, so pass the control as trt1.
get_log_odds <- function(trt1, trt2){
  odds1 <- trt1 / (1 - trt1)
  odds2 <- trt2 / (1 - trt2)
  log(odds2 / odds1)
}
# Pairwise log odds ratios between treatments, per posterior draw and outcome.
# (Confusing naming, preserved from the original: after gather(), the column
# called `LOR` holds the comparison LABEL and `comparison` holds the numeric
# log-odds-ratio VALUE; the plot below uses them accordingly.)
LOR <- stats_data %>%
spread(treatment, prop) %>%
mutate(LOR_intact_Ringers = get_log_odds(`Intact control`, Ringers),
LOR_intact_LPS = get_log_odds(`Intact control`, LPS),
LOR_Ringers_LPS = get_log_odds(Ringers, LPS)) %>%
select(posterior_sample, outcome, starts_with("LOR")) %>%
gather(LOR, comparison, starts_with("LOR")) %>%
mutate(LOR = str_remove_all(LOR, "LOR_"),
LOR = str_replace_all(LOR, "Ringers_LPS", "LPS\n(Ringers)"),
LOR = str_replace_all(LOR, "intact_Ringers", "Ringers\n(Intact control)"),
LOR = str_replace_all(LOR, "intact_LPS", "LPS\n(Intact control)"))
# Order the comparison labels for plotting.
# NOTE(review): `levs` has only 3 elements (3 pairwise comparisons among 3
# treatments) but the index vector c(3,5,4,2,1,6) runs to 6 — apparently left
# over from the 4-treatment version of this experiment. Out-of-range indices
# give NAs, which factor() drops from the levels — confirm the intended order.
levs <- LOR$LOR %>% unique() %>% sort()
LOR$LOR <- factor(LOR$LOR, rev(levs[c(3,5,4,2,1,6)]))
# Panel B: posterior log odds ratios for each pairwise treatment comparison
LOR_plot <- LOR %>%
mutate(outcome = str_replace_all(outcome, "Stayed inside the hive", "Stayed inside"),
outcome = factor(outcome, levels = rev(c("Forced out", "Left voluntarily", "Stayed inside")))) %>%
ggplot(aes(y = comparison, x = LOR, colour = outcome)) +
# Reference lines: no effect (0) and a two-fold difference in odds (+/- log 2)
geom_hline(yintercept = 0, size = 0.3, colour = "grey20") +
geom_hline(yintercept = log(2), linetype = 2, size = 0.6, colour = "grey") +
geom_hline(yintercept = -log(2), linetype = 2, size = 0.6, colour = "grey") +
# Median and 50%/95% credible intervals, dodged by outcome
stat_pointinterval(aes(colour = outcome, fill = outcome),
.width = c(0.5, 0.95),
position = position_dodge(0.6),
point_colour = "grey26", pch = 21, stroke = 0.4) +
scale_colour_manual(values = cols) +
scale_fill_manual(values = cols) +
ylab("Effect size (log odds ratio)") +
xlab("Treatment (reference)") +
theme_bw() +
coord_flip() +
theme(
text = element_text(family = nice_font),
panel.grid.major.y = element_blank(),
legend.position = "none"
)
# Assemble Figure 1 (panels A and B side by side) and save to PDF.
# NOTE(review): with nrow = 1 there is only one row, so `rel_heights` has no
# effect — `rel_widths = c(1.4, 1)` was probably intended; likewise
# align = 'v' aligns vertically-stacked plots, so 'h' may be meant. Confirm
# before changing, as either would alter the published figure layout.
p <- cowplot::plot_grid(
plotlist = list(dot_plot, LOR_plot),
labels = c("A", "B"),
nrow = 1, align = 'v', axis = 'l',
rel_heights = c(1.4, 1))
ggsave(plot = p, filename = "figures/fig1.pdf", height = 3.4, width = 9)
p
```
<br></br>
***Figure 1***: Results of Experiment 1 (n = 842 bees). Panel A shows the posterior estimate of the mean % bees staying inside the hive (left), leaving voluntarily (middle), or being forced out (right), for each of the three treatments. The quantile dot plot shows 100 approximately equally likely estimates of the true % bees, and the horizontal bars show the median and the 50% and 95% credible intervals of the posterior distribution. Panel B gives the posterior estimates of the effect size of each treatment, relative to one of the other treatments (the name of which appears in parentheses), expressed as a log odds ratio (LOR). Positive LOR indicates that the % bees showing this particular outcome is higher in the treatment than the control; for example, more bees left voluntarily (orange) or were forced out (blue) in the LPS treatment than in the intact control. The dashed lines mark $LOR = 0$, indicating no effect, and $LOR = \pm log(2)$, i.e. the point at which the odds are twice as high in one treatment as the other.
## Hypothesis testing and effect sizes
This section calculates the posterior difference in treatment group means, in order to perform some null hypothesis testing, calculate effect size (as a log odds ratio), and calculate the 95% credible intervals on the effect size.
The following code chunks perform planned contrasts between pairs of treatments that we consider important to the biological hypotheses under test. For example the contrast between the LPS treatment and the Ringers treatment provides information about the effect of immune stimulation, while the Ringers - Intact Control contrast provides information about the effect of wounding in the absence of LPS.
#### Calculate contrasts: % bees staying inside the hive
```{r}
# Helper function to summarise one or more columns of posterior draws.
# For each column name in `columns` (strings), returns the posterior mean,
# error and 95% quantiles (brms::posterior_summary), plus PP: the posterior
# probability that the true effect has the opposite sign to the point estimate
# (1 - bayestestR::p_direction), interpreted like a one-tailed p-value.
# A display column ` ` flags PP < 0.1 (~), < 0.05 (*), < 0.01 (**), < 0.001 (***).
# Fix: removed the dead `replace(` `, PP == " ", "")` step from the original —
# PP is numeric, so comparing it to the string " " was always FALSE (a no-op).
my_summary <- function(df, columns) {
  lapply(columns, function(x){
    # 1 - p_direction = probability the effect's sign differs from its estimate
    p <- 1 - (df %>% pull(!! x) %>%
      bayestestR::p_direction() %>% as.numeric())
    df %>% pull(!! x) %>% posterior_summary() %>% as_tibble() %>%
      mutate(PP = p) %>% mutate(Metric = x) %>% select(Metric, everything()) %>%
      mutate(` ` = ifelse(PP < 0.1, "~", ""),
             ` ` = replace(` `, PP < 0.05, "\\*"),
             ` ` = replace(` `, PP < 0.01, "**"),
             ` ` = replace(` `, PP < 0.001, "***"))
  }) %>% do.call("rbind", .)
}
# Helper to make one unit of the big stats table
# Build one unit (two rows) of the big contrasts table: the absolute difference
# in % bees and the log odds ratio for groupB relative to groupA.
# groupA/groupB are unquoted treatment column names (captured with enquo);
# `comparison` and `metric` are display labels for the output rows.
make_stats_table <- function(
dat, groupA, groupB, comparison, metric){
output <- dat %>%
spread(treatment, prop) %>%
mutate(
# absolute effect size: difference in % bees, per posterior draw
metric_here = 100 * (!! enquo(groupB) - !! enquo(groupA)),
`Log odds ratio` = get_log_odds(!! enquo(groupA), !! enquo(groupB))) %>%
my_summary(c("metric_here", "Log odds ratio")) %>%
# show PP and the significance flag only on the log-odds-ratio row
mutate(PP = c(" ", format(round(PP[2], 4), nsmall = 4)),
` ` = c(" ", ` `[2]),
Comparison = comparison) %>%
select(Comparison, everything()) %>%
mutate(Metric = replace(Metric, Metric == "metric_here", metric))
# NOTE(review): my_summary() stores metric names in the `Metric` column, not
# as a column named "metric_here", so this rename appears to be a no-op
# safeguard left over from an earlier structure.
names(output)[names(output) == "metric_here"] <- metric
output
}
# Planned contrasts for the 'Stayed inside' outcome: each make_stats_table()
# call compares a focal treatment with a reference (named in parentheses).
stayed_inside_stats_table <- rbind(
stats_data %>%
filter(outcome == "Stayed inside the hive") %>%
make_stats_table(`Ringers`, `LPS`, "LPS (Ringers)",
metric = "Difference in % bees staying inside"),
stats_data %>%
filter(outcome == "Stayed inside the hive") %>%
make_stats_table(`Intact control`, `LPS`, "LPS (Intact control)",
metric = "Difference in % bees staying inside"),
stats_data %>%
filter(outcome == "Stayed inside the hive") %>%
make_stats_table(`Intact control`, `Ringers`, "Ringers (Intact control)",
metric = "Difference in % bees staying inside")
) %>% as_tibble()
# Blank the duplicated comparison label on every second row, for display
stayed_inside_stats_table[c(2,4,6), 1] <- " "
```
#### Calculate contrasts: % bees that left voluntarily
```{r}
# Planned contrasts for the 'Left voluntarily' outcome (same structure as the
# 'stayed inside' table above).
voluntary_stats_table <- rbind(
stats_data %>%
filter(outcome == "Left voluntarily") %>%
make_stats_table(`Ringers`, `LPS`,
"LPS (Ringers)",
metric = "Difference in % bees leaving voluntarily"),
stats_data %>%
filter(outcome == "Left voluntarily") %>%
make_stats_table(`Intact control`, `LPS`,
"LPS (Intact control)",
metric = "Difference in % bees leaving voluntarily"),
stats_data %>%
filter(outcome == "Left voluntarily") %>%
make_stats_table(`Intact control`, `Ringers`,
"Ringers (Intact control)",
metric = "Difference in % bees leaving voluntarily")
) %>% as_tibble()
# Blank the duplicated comparison label on every second row, for display
voluntary_stats_table[c(2,4,6), 1] <- " "
```
#### Calculate contrasts: % bees that were forced out
```{r}
# Planned contrasts for the 'Forced out' outcome.
forced_out_stats_table <- rbind(
  stats_data %>%
    filter(outcome == "Forced out") %>%
    make_stats_table(`Ringers`, `LPS`,
                     "LPS (Ringers)",
                     metric = "Difference in % bees forced out"),
  stats_data %>%
    filter(outcome == "Forced out") %>%
    make_stats_table(`Intact control`, `LPS`,
                     "LPS (Intact control)",
                     metric = "Difference in % bees forced out"),
  stats_data %>%
    # BUG FIX: this third contrast previously filtered on
    # outcome == "Left voluntarily" (a copy-paste slip), so the
    # Ringers vs Intact control row reported the wrong outcome.
    filter(outcome == "Forced out") %>%
    make_stats_table(`Intact control`, `Ringers`, "Ringers (Intact control)",
                     metric = "Difference in % bees forced out")
) %>% as_tibble()
# Blank the duplicated comparison label on every second row, for display
forced_out_stats_table[c(2,4,6), 1] <- " "
```
#### Present all contrasts in one table:
***Table S2***: This table gives statistics associated with each of the contrasts plotted in Figure 1B. Each pair of rows gives the absolute effect size (i.e. the difference in % bees) and standardised effect size (as log odds ratio; LOR) for the focal treatment, relative to the treatment shown in parentheses, for one of the three possible outcomes (Stayed inside, Left voluntarily, or Forced out). A LOR of $|log(x)|$ indicates that the outcome is $x$ times more frequent in one treatment compared to the other, e.g. $log(2) = 0.69$ and $log(0.5) = -0.69$ correspond to a two-fold difference in frequency. The $PP$ column gives the posterior probability that the true effect size has the same sign as is shown in the Estimate column; this metric has a similar interpretation to a one-tailed $p$ value.
```{r}
# Combine the three contrast tables (6 rows each: 3 comparisons x 2 metrics)
# into Table S2, save it, and render with section headers per outcome.
tableS2 <- bind_rows(
stayed_inside_stats_table,
voluntary_stats_table,
forced_out_stats_table)
saveRDS(tableS2, file = "figures/tableS2.rds")
tableS2 %>%
kable(digits = 2) %>% kable_styling(full_width = FALSE) %>%
row_spec(seq(2,18,by=2), extra_css = "border-bottom: solid;") %>%
pack_rows("% bees staying inside", 1, 6) %>%
pack_rows("% bees leaving voluntarily", 7, 12) %>%
pack_rows("% bees forced out", 13, 18)
```