diff --git a/..Rcheck/00check.log b/..Rcheck/00check.log deleted file mode 100644 index 78d4bc4..0000000 --- a/..Rcheck/00check.log +++ /dev/null @@ -1,14 +0,0 @@ -* using log directory ‘/Users/Anderkea/Documents/GitHub/wpgsd/..Rcheck’ -* using R version 4.5.0 (2025-04-11) -* using platform: aarch64-apple-darwin20 -* R was compiled by - Apple clang version 14.0.0 (clang-1400.0.29.202) - GNU Fortran (GCC) 14.2.0 -* running under: macOS Sequoia 15.6.1 -* using session charset: UTF-8 -* using option ‘--no-manual’ -* checking for file ‘./DESCRIPTION’ ... ERROR -Required fields missing or empty: - ‘Author’ ‘Maintainer’ -* DONE -Status: 1 ERROR diff --git a/.Rbuildignore b/.Rbuildignore index bfc2061..b0d641c 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -10,3 +10,5 @@ ^CITATION\.cff$ ^doc$ ^Meta$ +^examples$ +^inst/aiprompts$ diff --git a/.gitignore b/.gitignore index b1e2565..afb2531 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,5 @@ docs /doc/ /Meta/ +*.tar.gz +*.Rcheck/ diff --git a/R/generate_event_cc.R b/R/generate_event_cc.R index 12c95d3..fd531f6 100644 --- a/R/generate_event_cc.R +++ b/R/generate_event_cc.R @@ -18,7 +18,8 @@ #' #------------------------Example of IA and FA #' event <- data.frame( #' Population = c("Experimental 1", "Experimental 2", "Experimental 3", "Control"), -#' IA = c(70, 75, 80, 85), # Interim Analysis values indicating the number of events observed in each group +#' # Interim Analysis values indicating the number of events observed in each group +#' IA = c(70, 75, 80, 85), #' FA = c(135, 150, 165, 170) #' ) #' @@ -33,8 +34,10 @@ #' #----------------------Example of two IAs and FA #' event <- data.frame( #' Population = c("Experimental 1", "Experimental 2", "Experimental 3", "Control"), -#' IA1 = c(70, 75, 80, 85), # First Interim Analysis values indicating the number of events observed in each group -#' IA2 = c(90, 95, 100, 105), # Second Interim Analysis values indicating the number of events observed in each group +#' # First Interim Analysis values indicating the number of events observed in each group +#' IA1 = c(70, 75, 80, 85), +#' # Second Interim Analysis values indicating the number of events observed in each group +#' IA2 = c(90, 95, 100, 105), #' FA = c(135, 150, 165, 170) #' ) #' diff --git a/R/generate_event_ol.R b/R/generate_event_ol.R index cf60af2..243f29b 100644 --- a/R/generate_event_ol.R +++ b/R/generate_event_ol.R @@ -22,8 +22,10 @@ #' @examples #' #------------------------Example of IA and FA #' event <- data.frame( -#' Population = c("Population 1", "Population 2", "Population 1 Intersection 2", "Overall population"), -#' IA = c(100, 110, 80, 225), # Interim Analysis values indicating the number of events observed in each group +#' Population = c("Population 1", "Population 2", "Population 1 Intersection 2", +#' "Overall population"), +#' # Interim Analysis values indicating the number of events observed in each group +#' IA = c(100, 110, 80, 225), #' FA = c(200, 220, 160, 450) #' ) #' @@ -37,9 +39,12 @@ #' #' #----------------------Example of two IAs and FA #' event <- data.frame( -#' Population = c("Population 1", "Population 2", "Population 1 Intersection 2", "Overall population"), -#' IA1 = c(100, 110, 80, 225), # First Interim Analysis values indicating the number of events observed in each group -#' IA2 = c(120, 130, 90, 240), # Second Interim Analysis values indicating the number of events observed in each group +#' Population = c("Population 1", "Population 2", "Population 1 Intersection 2", +#' "Overall population"), +#' # 
First Interim Analysis values indicating the number of events observed in each group +#' IA1 = c(100, 110, 80, 225), +#' # Second Interim Analysis values indicating the number of events observed in each group +#' IA2 = c(120, 130, 90, 240), #' FA = c(200, 220, 160, 450) #' ) #' diff --git a/R/generate_event_table_.R b/R/generate_event_table_.R index 8644095..a6aafda 100644 --- a/R/generate_event_table_.R +++ b/R/generate_event_table_.R @@ -27,29 +27,32 @@ #' # ----------------------- Example of common control #' event <- data.frame( #' Population = c("Experimental 1", "Experimental 2", "Experimental 3", "Control"), -#' IA = c(70, 75, 80, 85), # Interim analysis values indicating the number of events observed in each experimental group. -#' FA = c(135, 150, 165, 170) # Final analysis values indicating the cumulative number of events observed in each group. +#' # Interim analysis values indicating the number of events observed in each experimental group. +#' IA = c(70, 75, 80, 85), +#' # Final analysis values indicating the cumulative number of events observed in each group. +#' FA = c(135, 150, 165, 170) #' ) #' #' hypothesis <- list( -#' H1 = "Experimental 1 vs. Control", # Hypothesis comparing Experimental 1 with Control. -#' H2 = "Experimental 2 vs. Control", # Hypothesis comparing Experimental 2 with Control. -#' H3 = "Experimental 1 vs. Experimental 2" # Hypothesis comparing Experimental 1 and Experimental 2. +#' H1 = "Experimental 1 vs. Control", +#' H2 = "Experimental 2 vs. Control", +#' H3 = "Experimental 1 vs. Experimental 2" #' ) #' #' generate_event_table_(event, hypothesis, type = "common_control") #' #' # ------------------------ Example of overall population #' event <- data.frame( -#' Population = c("Population 1", "Population 2", "Population 1 Intersection 2", "Overall population"), +#' Population = c("Population 1", "Population 2", "Population 1 Intersection 2", +#' "Overall population"), #' IA = c(100, 110, 80, 225), # Interim analysis values for the overall population. #' FA = c(200, 220, 160, 450) # Final analysis values for the overall population. #' ) #' #' hypothesis <- list( -#' H1 = "Efficacy in Population 1", # Hypothesis assessing efficacy in Population 1. -#' H2 = "Efficacy in Population 2", # Hypothesis assessing efficacy in Population 2. -#' H3 = "Efficacy in Overall population" # Hypothesis assessing efficacy in the overall population. +#' H1 = "Efficacy in Population 1", +#' H2 = "Efficacy in Population 2", +#' H3 = "Efficacy in Overall population" #' ) #' #' generate_event_table_(event, hypothesis, type = "overlap_population") diff --git a/EventTable_README.md b/inst/aiprompts/EventTable_README.md similarity index 100% rename from EventTable_README.md rename to inst/aiprompts/EventTable_README.md diff --git a/man/generate_event_table_.Rd b/man/generate_event_table_.Rd index b088395..c72acfb 100644 --- a/man/generate_event_table_.Rd +++ b/man/generate_event_table_.Rd @@ -44,29 +44,32 @@ It can handle two types of analysis: one comparing experimental groups to a comm # ----------------------- Example of common control event <- data.frame( Population = c("Experimental 1", "Experimental 2", "Experimental 3", "Control"), - IA = c(70, 75, 80, 85), # Interim analysis values indicating the number of events observed in each experimental group. - FA = c(135, 150, 165, 170) # Final analysis values indicating the cumulative number of events observed in each group. + # Interim analysis values indicating the number of events observed in each experimental group. 
+ IA = c(70, 75, 80, 85), + # Final analysis values indicating the cumulative number of events observed in each group. + FA = c(135, 150, 165, 170) ) hypothesis <- list( - H1 = "Experimental 1 vs. Control", # Hypothesis comparing Experimental 1 with Control. - H2 = "Experimental 2 vs. Control", # Hypothesis comparing Experimental 2 with Control. - H3 = "Experimental 1 vs. Experimental 2" # Hypothesis comparing Experimental 1 and Experimental 2. + H1 = "Experimental 1 vs. Control", + H2 = "Experimental 2 vs. Control", + H3 = "Experimental 1 vs. Experimental 2" ) generate_event_table_(event, hypothesis, type = "common_control") # ------------------------ Example of overall population event <- data.frame( - Population = c("Population 1", "Population 2", "Population 1 Intersection 2", "Overall population"), + Population = c("Population 1", "Population 2", "Population 1 Intersection 2", + "Overall population"), IA = c(100, 110, 80, 225), # Interim analysis values for the overall population. FA = c(200, 220, 160, 450) # Final analysis values for the overall population. ) hypothesis <- list( - H1 = "Efficacy in Population 1", # Hypothesis assessing efficacy in Population 1. - H2 = "Efficacy in Population 2", # Hypothesis assessing efficacy in Population 2. - H3 = "Efficacy in Overall population" # Hypothesis assessing efficacy in the overall population. + H1 = "Efficacy in Population 1", + H2 = "Efficacy in Population 2", + H3 = "Efficacy in Overall population" ) generate_event_table_(event, hypothesis, type = "overlap_population") diff --git a/man/generate_event_table_cc.Rd b/man/generate_event_table_cc.Rd index d6963f9..11480c4 100644 --- a/man/generate_event_table_cc.Rd +++ b/man/generate_event_table_cc.Rd @@ -34,7 +34,8 @@ This function generates a table of events for given experimental arms and a cont #------------------------Example of IA and FA event <- data.frame( Population = c("Experimental 1", "Experimental 2", "Experimental 3", "Control"), - IA = c(70, 75, 80, 85), # Interim Analysis values indicating the number of events observed in each group + # Interim Analysis values indicating the number of events observed in each group + IA = c(70, 75, 80, 85), FA = c(135, 150, 165, 170) ) @@ -49,8 +50,10 @@ generate_event_table_cc(event, hypothesis) #----------------------Example of two IAs and FA event <- data.frame( Population = c("Experimental 1", "Experimental 2", "Experimental 3", "Control"), - IA1 = c(70, 75, 80, 85), # First Interim Analysis values indicating the number of events observed in each group - IA2 = c(90, 95, 100, 105), # Second Interim Analysis values indicating the number of events observed in each group + # First Interim Analysis values indicating the number of events observed in each group + IA1 = c(70, 75, 80, 85), + # Second Interim Analysis values indicating the number of events observed in each group + IA2 = c(90, 95, 100, 105), FA = c(135, 150, 165, 170) ) diff --git a/man/generate_event_table_ol.Rd b/man/generate_event_table_ol.Rd index ff45b6f..70f47df 100644 --- a/man/generate_event_table_ol.Rd +++ b/man/generate_event_table_ol.Rd @@ -36,8 +36,10 @@ This function generates a table of events for specified populations based on the \examples{ #------------------------Example of IA and FA event <- data.frame( - Population = c("Population 1", "Population 2", "Population 1 Intersection 2", "Overall population"), - IA = c(100, 110, 80, 225), # Interim Analysis values indicating the number of events observed in each group + Population = c("Population 1", "Population 2", 
"Population 1 Intersection 2", + "Overall population"), + # Interim Analysis values indicating the number of events observed in each group + IA = c(100, 110, 80, 225), FA = c(200, 220, 160, 450) ) @@ -51,9 +53,12 @@ generate_event_table_ol(event, hypothesis) #----------------------Example of two IAs and FA event <- data.frame( - Population = c("Population 1", "Population 2", "Population 1 Intersection 2", "Overall population"), - IA1 = c(100, 110, 80, 225), # First Interim Analysis values indicating the number of events observed in each group - IA2 = c(120, 130, 90, 240), # Second Interim Analysis values indicating the number of events observed in each group + Population = c("Population 1", "Population 2", "Population 1 Intersection 2", + "Overall population"), + # First Interim Analysis values indicating the number of events observed in each group + IA1 = c(100, 110, 80, 225), + # Second Interim Analysis values indicating the number of events observed in each group + IA2 = c(120, 130, 90, 240), FA = c(200, 220, 160, 450) ) diff --git a/vignettes/.gitignore b/vignettes/.gitignore new file mode 100644 index 0000000..2d19fc7 --- /dev/null +++ b/vignettes/.gitignore @@ -0,0 +1 @@ +*.html diff --git a/vignettes/adj-seq-p.html b/vignettes/adj-seq-p.html deleted file mode 100644 index 98a667a..0000000 --- a/vignettes/adj-seq-p.html +++ /dev/null @@ -1,2960 +0,0 @@ - - - - -
This vignette demonstrates the calculation of adjusted sequential p-values for multiple populations in a group sequential trial design. We'll show a streamlined approach using helper functions to reduce code repetition while maintaining technical accuracy. The methods implemented in this vignette are based on the work by Zhao et al. (2025). The end result is an adjusted p-value at both interim and final analysis for each hypothesis tested. In all cases, this adjusted p-value can be compared to the family-wise error rate (FWER) for the trial, simplifying interpretation by adjusting for the multiplicity created by testing multiple hypotheses at group sequential analyses.
In a 2-arm controlled clinical trial with one primary endpoint, there are 3 null hypotheses based on populations defined by biomarker status: H1 for the biomarker A positive population, H2 for the biomarker B positive population, and H3 for the overall population. In each case, the null hypothesis assumes no difference between the treatment and control groups in the distribution of the time until the primary endpoint is reached.
We will use a graphical approach to visualize the multiplicity strategy.
-# Transition matrix and initial weights
-m <- matrix(c(
- 0, 3/7, 4/7,
- 3/7, 0, 4/7,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-
-w <- c(0.3, 0.3, 0.4) # Initial weights
-
-# Visualize strategy
-name_hypotheses <- c("H1: Biomarker A positive", "H2: Biomarker B positive", "H3: Overall Population")
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot

We assume 2 analyses: an interim analysis (IA) and a final analysis (FA). For the multiplicity adjustments, we need the number of events in the treatment and control groups combined that are available for testing each hypothesis at both analyses, for each population and the intersection of populations. In the following, "AB positive" means positive for both biomarker A and biomarker B.
-# Create event data systematically
-create_event_data <- function() {
- populations <- rep(c("A positive", "B positive", "AB positive", "overall"), 2)
- analyses <- rep(c(1, 2), each = 4)
- events <- c(100, 110, 80, 225, 200, 220, 160, 450) # IA, then FA
-
- tibble(
- population = populations,
- analysis = analyses,
- event = events
- )
-}
-
-event_tbl <- create_event_data()
-event_tbl %>%
- gt() %>%
- tab_header(title = "Event Count by Population and Analysis")

Event Count by Population and Analysis:

| population  | analysis | event |
|-------------|----------|-------|
| A positive  | 1        | 100   |
| B positive  | 1        | 110   |
| AB positive | 1        | 80    |
| overall     | 1        | 225   |
| A positive  | 2        | 200   |
| B positive  | 2        | 220   |
| AB positive | 2        | 160   |
| overall     | 2        | 450   |
We assume the following unadjusted p-values at each analysis for each hypothesis.
-# Observed p-values
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.012,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.010
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")

Nominal p-values:

| hypothesis | analysis | obs_p | obs_Z    |
|------------|----------|-------|----------|
| H1         | 1        | 0.020 | 2.053749 |
| H2         | 1        | 0.010 | 2.326348 |
| H3         | 1        | 0.012 | 2.257129 |
| H1         | 2        | 0.015 | 2.170090 |
| H2         | 2        | 0.012 | 2.257129 |
| H3         | 2        | 0.010 | 2.326348 |
-p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p

We now have all the information we need to perform testing and adjust the p-values.
Next we calculate information fractions at the interim and final analyses. The event count at the final analysis is assumed to be the planned final count for each population.
-# Helper function to extract events
-get_events <- function(analysis_num, population_name) {
- event_tbl %>%
- filter(analysis == analysis_num, population == population_name) %>%
- pull(event)
-}
-
-# Extract event counts
-events_IA <- event_tbl %>% filter(analysis == 1)
-events_FA <- event_tbl %>% filter(analysis == 2)
-
-a_pos_IA <- get_events(1, "A positive")
-b_pos_IA <- get_events(1, "B positive")
-ab_pos_IA <- get_events(1, "AB positive")
-overall_IA <- get_events(1, "overall")
-
-a_pos_FA <- get_events(2, "A positive")
-b_pos_FA <- get_events(2, "B positive")
-ab_pos_FA <- get_events(2, "AB positive")
-overall_FA <- get_events(2, "overall")
-
-# Calculate information fractions
-IF_IA <- c(
- (a_pos_IA + overall_IA) / (a_pos_FA + overall_FA), # H1
- (b_pos_IA + overall_IA) / (b_pos_FA + overall_FA), # H2
- (ab_pos_IA + overall_IA) / (ab_pos_FA + overall_FA) # H3
-)
-
-tibble(
- Hypothesis = c("H1", "H2", "H3"),
- Information_Fraction = IF_IA
-) %>%
- gt() %>%
- tab_header(title = "Information Fractions at Interim Analysis") %>%
- fmt_number(columns = 2, decimals = 3)

Information Fractions at Interim Analysis:

| Hypothesis | Information_Fraction |
|------------|----------------------|
| H1         | 0.500                |
| H2         | 0.500                |
| H3         | 0.500                |
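As a quick hand check (our arithmetic, not part of the vignette code), the H1 information fraction combines the biomarker A positive and overall population event counts at the interim analysis, divided by the corresponding final analysis counts:

```r
# Hand check (ours): information fraction for H1
(100 + 225) / (200 + 450) # = 0.5, matching the table above
```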
Now we can create a correlation matrix for all tests performed, based on the methods of Anderson et al. (2022) (or Chen et al. (2021)).
-# Create correlation matrix using event intersections
-event_intersections <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- # Analysis 1 - Interim
- 1, 1, 1, a_pos_IA,
- 2, 2, 1, b_pos_IA,
- 3, 3, 1, overall_IA,
- 1, 2, 1, ab_pos_IA,
- 1, 3, 1, a_pos_IA,
- 2, 3, 1, b_pos_IA,
- # Analysis 2 - Final
- 1, 1, 2, a_pos_FA,
- 2, 2, 2, b_pos_FA,
- 3, 3, 2, overall_FA,
- 1, 2, 2, ab_pos_FA,
- 1, 3, 2, a_pos_FA,
- 2, 3, 2, b_pos_FA
-)
-
-# Generate correlation from events
-correlation_matrix <- generate_corr(event_intersections)
-
-correlation_matrix %>%
- round(3) %>%
- knitr::kable(caption = "Correlation Matrix (6x6)")

Correlation Matrix (6x6):

|       | H1_A1 | H2_A1 | H3_A1 | H1_A2 | H2_A2 | H3_A2 |
|-------|-------|-------|-------|-------|-------|-------|
| H1_A1 | 1.000 | 0.763 | 0.667 | 0.707 | 0.539 | 0.471 |
| H2_A1 | 0.763 | 1.000 | 0.699 | 0.539 | 0.707 | 0.494 |
| H3_A1 | 0.667 | 0.699 | 1.000 | 0.471 | 0.494 | 0.707 |
| H1_A2 | 0.707 | 0.539 | 0.471 | 1.000 | 0.763 | 0.667 |
| H2_A2 | 0.539 | 0.707 | 0.494 | 0.763 | 1.000 | 0.699 |
| H3_A2 | 0.471 | 0.494 | 0.707 | 0.667 | 0.699 | 1.000 |
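Two of these entries can be verified by hand (our arithmetic, not part of the vignette code): a correlation is the shared event count divided by the square root of the product of the two individual event counts.

```r
# Hand checks (ours) against the matrix above
80 / sqrt(100 * 110)  # corr(H1 at IA, H2 at IA): 80 shared AB positive events, ~0.763
100 / sqrt(100 * 200) # corr(H1 at IA, H1 at FA): IA events are a subset of FA events, ~0.707
```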
# Helper function for systematic calculations
-calculate_seq_p_systematic <- function(test_analysis, p_obs_IA, p_obs_FA, w, m, correlation_matrix, IF_IA) {
-
- combinations <- c("H1, H2, H3", "H1, H2", "H1, H3", "H2, H3", "H1", "H2", "H3")
-
- results <- map_dfr(combinations, ~ {
- seq_p <- calc_seq_p(
- test_analysis = test_analysis,
- test_hypothesis = .x,
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = correlation_matrix,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
- )
-
- tibble(
- combination = .x,
- sequential_p = seq_p
- )
- })
-
- return(results)
-}
-
-# Calculate for both interim and final analyses
-ia_results <- calculate_seq_p_systematic(1, p_obs_IA, p_obs_FA, w, m, correlation_matrix, IF_IA) %>%
- mutate(analysis = "Interim")
-
-fa_results <- calculate_seq_p_systematic(2, p_obs_IA, p_obs_FA, w, m, correlation_matrix, IF_IA) %>%
- mutate(analysis = "Final")

# Combined results table
-combined_results <- bind_rows(ia_results, fa_results)
-
-combined_results %>%
- gt() %>%
- tab_header(title = "Sequential p-values - Comprehensive Results") %>%
- fmt_number(columns = "sequential_p", decimals = 4) %>%
- tab_style(
- style = cell_fill(color = "lightblue"),
- locations = cells_body(rows = analysis == "Interim")
- ) %>%
- tab_style(
- style = cell_fill(color = "lightgreen"),
- locations = cells_body(rows = analysis == "Final")
- )

Sequential p-values - Comprehensive Results:

| combination | sequential_p | analysis |
|-------------|--------------|----------|
| H1, H2, H3  | 0.1943       | Interim  |
| H1, H2      | 0.1400       | Interim  |
| H1, H3      | 0.1553       | Interim  |
| H2, H3      | 0.1529       | Interim  |
| H1          | 0.1678       | Interim  |
| H2          | 0.0839       | Interim  |
| H3          | 0.1007       | Interim  |
| H1, H2, H3  | 0.0206       | Final    |
| H1, H2      | 0.0210       | Final    |
| H1, H3      | 0.0165       | Final    |
| H2, H3      | 0.0162       | Final    |
| H1          | 0.0159       | Final    |
| H2          | 0.0127       | Final    |
| H3          | 0.0106       | Final    |
# Calculate adjusted sequential p-values (max over relevant combinations)
-calculate_adjusted <- function(results_df) {
- h1_adj <- max(
- results_df$sequential_p[results_df$combination == "H1, H2, H3"],
- results_df$sequential_p[results_df$combination == "H1, H2"],
- results_df$sequential_p[results_df$combination == "H1, H3"],
- results_df$sequential_p[results_df$combination == "H1"]
- )
-
- h2_adj <- max(
- results_df$sequential_p[results_df$combination == "H1, H2, H3"],
- results_df$sequential_p[results_df$combination == "H1, H2"],
- results_df$sequential_p[results_df$combination == "H2, H3"],
- results_df$sequential_p[results_df$combination == "H2"]
- )
-
- h3_adj <- max(
- results_df$sequential_p[results_df$combination == "H1, H2, H3"],
- results_df$sequential_p[results_df$combination == "H1, H3"],
- results_df$sequential_p[results_df$combination == "H2, H3"],
- results_df$sequential_p[results_df$combination == "H3"]
- )
-
- tibble(
- hypothesis = c("H1", "H2", "H3"),
- adjusted_sequential_p = c(h1_adj, h2_adj, h3_adj)
- )
-}
-
-# Calculate for both analyses
-ia_adjusted <- calculate_adjusted(ia_results) %>% mutate(analysis = "Interim")
-fa_adjusted <- calculate_adjusted(fa_results) %>% mutate(analysis = "Final")
-
-adjusted_results <- bind_rows(ia_adjusted, fa_adjusted)
-
-adjusted_results %>%
- gt() %>%
- tab_header(title = "Adjusted Sequential p-values") %>%
- fmt_number(columns = "adjusted_sequential_p", decimals = 4) %>%
- tab_style(
- style = cell_fill(color = "pink"),
- locations = cells_body(rows = adjusted_sequential_p <= 0.025)
- )

Adjusted Sequential p-values:

| hypothesis | adjusted_sequential_p | analysis |
|------------|-----------------------|----------|
| H1         | 0.1943                | Interim  |
| H2         | 0.1943                | Interim  |
| H3         | 0.1943                | Interim  |
| H1         | 0.0210                | Final    |
| H2         | 0.0210                | Final    |
| H3         | 0.0206                | Final    |
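Each adjusted value is the maximum sequential p-value over the intersection hypotheses that contain the hypothesis of interest. As a hand check (our arithmetic, not additional vignette output), for H1 at the final analysis:

```r
# Hand check (ours): combinations containing H1 at the final analysis
max(0.0206, 0.0210, 0.0165, 0.0159) # = 0.021, matching the H1 row above
```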
The systematic approach demonstrates how helper functions reduce code repetition while maintaining technical accuracy.

The adjusted sequential p-values account for both:

- Multiple comparisons (across populations)
- Sequential testing (interim and final analyses)

Results highlighted in pink in the rendered table (adjusted sequential p-value ≤ 0.025) indicate rejection at the α = 0.025 level.
-
-
-[](https://github.com/Merck/wpgsd/actions/workflows/R-CMD-check.yaml)
-[](https://app.codecov.io/gh/Merck/wpgsd?branch=main)
-
-
-Weighted parametric group sequential design (WPGSD) allows one to take advantage
-of the known correlation structure in constructing efficacy bounds to control
-family-wise error rate (FWER) for a group sequential design. Here correlation
-may be due to common observations in nested populations, due to common
-observations in overlapping populations, or due to common observations
-in the control arm.
-
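As a rough illustration of the input the package works from (a sketch adapted from the package vignette, not an official README example), correlations are derived from a table of event counts shared between hypothesis tests:

```r
library(wpgsd)
library(tibble)

# Events shared by each pair of hypotheses at each analysis;
# rows with H1 == H2 give the event count for a single hypothesis
event <- tribble(
  ~H1, ~H2, ~Analysis, ~Event,
  1, 1, 1, 100,
  2, 2, 1, 110,
  1, 2, 1, 80,
  1, 1, 2, 200,
  2, 2, 2, 220,
  1, 2, 2, 160
)

# Correlation matrix of the group sequential test statistics
generate_corr(event)
```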
-## Installation
-
-The easiest way to get wpgsd is to install from CRAN:
-
-```r
-install.packages("wpgsd")
-```
-
-Alternatively, to use a new feature or get a bug fix,
-you can install the development version of wpgsd from GitHub:
-
-```r
-# install.packages("remotes")
-remotes::install_github("Merck/wpgsd")
-```
-
-## Current limitations
-
-There are some limitations that are currently being addressed.
-Please use the package with caution in production environments.
-
-- The current implementation may have limitations when handling more
- complex scenarios beyond what has been demonstrated.
-- The API is subject to potential breaking changes as it is currently
- being reviewed and refactored.
-- Further validation is needed to ensure the reliability of the package.
-- More documentation is expected in future releases.
-
-## References
-
-Anderson, K. M., Guo, Z., Zhao, J., & Sun, L. Z. (2022).
-A unified framework for weighted parametric group sequential design.
-_Biometrical Journal_, 64(7), 1219--1239.
-
-BibTeX entry:
-
-```
-@article{anderson2022unified,
- title = {A unified framework for weighted parametric group sequential design},
- author = {Anderson, Keaven M and Guo, Zifang and Zhao, Jing and Sun, Linda Z},
- journal = {Biometrical Journal},
- volume = {64},
- number = {7},
- pages = {1219--1239},
- year = {2022},
- publisher = {Wiley Online Library}
-}
-```
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/build/partial.rdb b/wpgsd.Rcheck/00_pkg_src/wpgsd/build/partial.rdb
deleted file mode 100644
index c78d6ee..0000000
Binary files a/wpgsd.Rcheck/00_pkg_src/wpgsd/build/partial.rdb and /dev/null differ
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/build/vignette.rds b/wpgsd.Rcheck/00_pkg_src/wpgsd/build/vignette.rds
deleted file mode 100644
index 5ec4d87..0000000
Binary files a/wpgsd.Rcheck/00_pkg_src/wpgsd/build/vignette.rds and /dev/null differ
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/examples/event_table_integration.R b/wpgsd.Rcheck/00_pkg_src/wpgsd/examples/event_table_integration.R
deleted file mode 100644
index 06fb2b6..0000000
--- a/wpgsd.Rcheck/00_pkg_src/wpgsd/examples/event_table_integration.R
+++ /dev/null
@@ -1,71 +0,0 @@
-# Example integration of EventTable S7 class with existing wpgsd functions
-# This shows how EventTable can work with generate_corr()
-
-library(wpgsd)
-library(tibble)
-
-# Example 1: Using EventTable with generate_corr()
-# Create event data using the EventTable class
-event_data_raw <- tibble::tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, 155,
- 2, 2, 1, 160,
- 3, 3, 1, 165,
- 1, 2, 1, 85,
- 1, 3, 1, 85,
- 2, 3, 1, 85,
- 1, 1, 2, 305,
- 2, 2, 2, 320,
- 3, 3, 2, 335,
- 1, 2, 2, 170,
- 1, 3, 2, 170,
- 2, 3, 2, 170
-)
-
-# Create EventTable object
-event_table <- EventTable(data = event_data_raw)
-print("Created EventTable:")
-print(event_table)
-
-# Use with generate_corr (currently expects tibble/data.frame)
-# The @data slot extracts the underlying tibble
-correlation_matrix <- generate_corr(event_table@data)
-print("Generated correlation matrix:")
-print(correlation_matrix)
-
-# Example 2: Data validation before processing
-cat("\nExample 2: Data validation\n")
-cat("==========================\n")
-
-# Try to create EventTable with invalid data
-tryCatch({
- invalid_data <- tibble::tibble(
- H1 = c(1, 2, -1), # Invalid: negative hypothesis index
- H2 = c(1, 2, 3),
- Analysis = c(1, 1, 1),
- Event = c(100, 200, 150)
- )
-
- invalid_table <- EventTable(data = invalid_data)
-}, error = function(e) {
- cat("Validation caught error:", e$message, "\n")
-})
-
-# Example 3: Subsetting capabilities
-cat("\nExample 3: Subsetting\n")
-cat("=====================\n")
-
-# Get only first analysis
-analysis_1 <- subset_event_table(event_table, analysis = 1)
-cat("Analysis 1 events:\n")
-print(analysis_1@data)
-
-# Get only H1-H2 hypothesis pairs
-h1_h2_events <- subset_event_table(event_table, hypotheses = c(1, 2))
-cat("\nH1-H2 hypothesis events:\n")
-print(h1_h2_events@data)
-
-# Example 4: Summary information
-cat("\nExample 4: Summary\n")
-cat("==================\n")
-summary(event_table)
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/examples/test_event_table.R b/wpgsd.Rcheck/00_pkg_src/wpgsd/examples/test_event_table.R
deleted file mode 100644
index d55ecbf..0000000
--- a/wpgsd.Rcheck/00_pkg_src/wpgsd/examples/test_event_table.R
+++ /dev/null
@@ -1,50 +0,0 @@
-# Test script for EventTable S7 class
-# This demonstrates how to use the new EventTable class
-
-library(wpgsd)
-library(tibble)
-
-# Create sample event data as used in the package
-event_data <- tibble::tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, 155,
- 2, 2, 1, 160,
- 3, 3, 1, 165,
- 1, 2, 1, 85,
- 1, 3, 1, 85,
- 2, 3, 1, 85,
- 1, 1, 2, 305,
- 2, 2, 2, 320,
- 3, 3, 2, 335,
- 1, 2, 2, 170,
- 1, 3, 2, 170,
- 2, 3, 2, 170
-)
-
-# Create EventTable object
-event_table <- EventTable(data = event_data)
-
-# Print the object
-print(event_table)
-
-# Get summary
-summary(event_table)
-
-# Subset by analysis
-subset_analysis_1 <- subset_event_table(event_table, analysis = 1)
-print("Analysis 1 subset:")
-print(subset_analysis_1)
-
-# Subset by hypotheses
-subset_h1_h2 <- subset_event_table(event_table, hypotheses = c(1, 2))
-print("H1-H2 subset:")
-print(subset_h1_h2)
-
-# Convert existing tibble to EventTable
-converted_table <- as_event_table(event_data)
-print("Converted table:")
-print(converted_table)
-
-# Validate data
-validate_event_table_data(event_data)
-print("Data validation passed!")
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/CITATION b/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/CITATION
deleted file mode 100644
index 4cf24a4..0000000
--- a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/CITATION
+++ /dev/null
@@ -1,12 +0,0 @@
-bibentry(
- "Article",
- title = "A unified framework for weighted parametric group sequential design",
- author = "Keaven M Anderson and Zifang Guo and Jing Zhao and Linda Z Sun",
- journal = "Biometrical Journal",
- volume = "64",
- number = "7",
- pages = "1219--1239",
- year = "2022",
- publisher = "Wiley Online Library",
- doi = "10.1002/bimj.202100085",
-)
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/aiprompts/compute_correlations_prompts.txt b/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/aiprompts/compute_correlations_prompts.txt
deleted file mode 100644
index 6c9d599..0000000
--- a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/aiprompts/compute_correlations_prompts.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-Three functions are defined here, one for checking input data (`check_event_data()`) and
-another for computing correlations (`compute_correlations()`) and one (`gen_corr()`) for transforming a data frame where correlations are computed
-to a correlation matrix.
-All three should be exported.
-When users enter the `event` input to `compute_correlations()`, it is advised to call `check_event_data()` first by
-specifying check = TRUE (the default).
-Output will be either a data frame or a correlation matrix, as specified below.
-An exported function `gen_corr()` will do a transformation from the data frame output to a symmetric matrix.
-
-Let M > 1 be the integer number of hypotheses to be tested.
-Assume there are integer K > 1 analyses.
-Let real-valued Z[i, k] be the standardized normal test statistic for
-hypothesis i, 1 <= i <= M, analysis k, 1 <= k <= K.
-We wish to compute and return correlations for all hypothesis tests
-performed across analyses.
-For tests Z[i1, k1], Z[i2, k2], 1 <= i1 <= i2 <= M, 1 <= k1 <= k2 <= K that are
-performed we will provide the count of events (for time-to-event outcomes) or
-sample size (for other outcomes) that are common between the two tests.
-This is input in a data frame called `event`
-with the following columns:
-
-`H1` - hypothesis i1 = H1
-`H2` - hypothesis i2 = H2
-`Analysis` - value in 1,2,...,K
-`Event` - event count for time-to-event endpoints, otherwise sample size which
-is included in both tests Z[H1, Analysis] and Z[H2, Analysis].
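For illustration (our example, not part of the original prompt), an `event` data frame for M = 2 hypotheses and K = 2 analyses could look like:

# Example `event` input (ours): 2 hypotheses, 2 analyses
event <- data.frame(
  H1       = c(1, 2, 1, 1, 2, 1),
  H2       = c(1, 2, 2, 1, 2, 2),
  Analysis = c(1, 1, 1, 2, 2, 2),
  Event    = c(100, 110, 80, 200, 220, 160)
)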
-
-Other arguments for `compute_correlations()` are
-
-`check` - logical indicating whether to perform input checks using `check_event_data()` (default is TRUE)
-`return_matrix` - logical indicating whether to return a correlation matrix (default is TRUE)
-
-The function `check_event_data()` will have the argument `event` as specified above.
-Data checks in `check_event_data()` :
-- Check that `event` is a data frame and return an informative error message if not.
-- Check that columns specified above exist and return informative error message, if not.
-- Check that H1 <= H2 for each row and provide informative error message, if not.
-- Check that the combination of H1, H2, Analysis is unique in the input;
-this should be evaluated and appropriate error message returned if not unique.
-Check that Event is an integer >= 0 for all rows and provide an
-appropriate error message, if not.
-- Check that unique values of Analysis are sequential positive integers with
-minimum value 1 and maximum value > 1; K from above will be the maximum value.
-Return an informative error message if any of these checks fail.
-- Check that unique values of H1 are sequential integers with minimum value 1
-and maximum value > 1; M from above will be the maximum value.
-Same for H2.
-Return an informative error message if any of these checks fail.
-- Check that for k=1,...,K that if there is a row with H1 < H2, then there is also a row with H1 == H2.
-Return an informative error message if not.
-- Check that for any value of Analysis, and H1 = i1 < H2 = i2 specified that there is also a row with H1 = i1, H2 = i1 and another with
-H1 = i2, H2 = i2. Return an informative error message if not.
-
-
-Returned value from `compute_correlations()`.
-The returned value is either a data frame (if `return_matrix = FALSE`) or a correlation matrix otherwise.
-The data frame has the following columns.
- H1 - hypothesis i1
- H2 - hypothesis i2 >= i1
- Analysis1 - k1.
- Analysis2 - k2 >= k1.
- Correlation - correlation between Z[i1, k1] and Z[i2, k2].
-
-Correlation is computed as
-Event for i1, i2 and k1 divided by square root of the product of
-Event for H1 = i1, H2 = i1, Analysis = min(k1, k2) times
-Event for H1 = i2, H2 = i2, Analysis = k2.
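Equivalently, restating the prose above as a formula (notation ours):

Correlation(Z[i1, k1], Z[i2, k2]) = Event(H1 = i1, H2 = i2, Analysis = k1) /
  sqrt(Event(H1 = i1, H2 = i1, Analysis = min(k1, k2)) * Event(H1 = i2, H2 = i2, Analysis = k2))

for 1 <= i1 <= i2 <= M and 1 <= k1 <= k2 <= K.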
-
-If `return_matrix = TRUE` (default) then a symmetric matrix should be returned as follows.
-Columns should be named `H1_A1`, `H1_A2`, ..., `H1_AK`, `H2_A1`, `H2_A2`, ..., `H2_AK`, ..., `HM_A1`, `HM_A2`, ..., `HM_AK` where
-M and K are as computed from `event`.
-Rows should be labeled the same way.
-Diagonal elements are all 1.
-Off-diagonal elements are filled with the computed correlations.
-Only cells included should be where `Event` value was provided for the combination of Analysis, H1 and H2.
-
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/aiprompts/s7 prompts.md b/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/aiprompts/s7 prompts.md
deleted file mode 100644
index 69bae54..0000000
--- a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/aiprompts/s7 prompts.md
+++ /dev/null
@@ -1,101 +0,0 @@
-## Prompt for s7 Class conversion
-
-We want to convert the wpgsd R package to operate with s7 classes throughout.
-Please check for exported functions in the R directory and suggest what s7 classes need to be developed.
-
-## Suggested S7 Classes for wpgsd Package Conversion
-
-Based on the exported functions and their data structures, here are the key S7 classes that should be developed:
-
-### 1. **EventTable Class**
-- **Purpose**: Represents event count data structure used in `generate_corr()` and `generate_event_table()`
-- **Key Fields**:
- - `H1`, `H2`: Hypothesis indices
- - `Analysis`: Analysis number
- - `Event`: Event count
-- **Methods**: Validation, printing, subsetting
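A minimal sketch of such a class declaration (our illustration assuming the S7 package API; not code from the repository):

```r
library(S7)

# Sketch (ours): EventTable wraps a data frame with the required columns
EventTable <- new_class(
  "EventTable",
  properties = list(data = class_data.frame),
  validator = function(self) {
    required <- c("H1", "H2", "Analysis", "Event")
    if (!all(required %in% names(self@data))) {
      "@data must have columns H1, H2, Analysis, Event"
    } else if (any(self@data$Event < 0)) {
      "@data$Event must be non-negative"
    }
  }
)
```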
-
-### 2. **CorrelationMatrix Class**
-- **Purpose**: Represents correlation matrices returned by `generate_corr()`
-- **Key Fields**:
- - `matrix`: The correlation matrix
- - `n_hypotheses`: Number of hypotheses
- - `n_analyses`: Number of analyses
- - `column_names`: Named columns (e.g., "H1_A1", "H2_A1", etc.)
-- **Methods**: Validation (positive definite), printing, subsetting
-
-### 3. **TransitionMatrix Class**
-- **Purpose**: Represents transition matrices used in graphical procedures
-- **Key Fields**:
- - `matrix`: The transition matrix
- - `n_hypotheses`: Number of hypotheses
-- **Methods**: Validation (row sums ≤ 1), printing
-
-### 4. **Bounds Class**
-- **Purpose**: Represents p-value boundaries from `generate_bounds()`
-- **Key Fields**:
- - `data`: Tibble with Analysis, Hypotheses, H1, H2, etc., xi columns
- - `type`: Boundary type (0, 1, 2, or 3)
- - `alpha`: Overall alpha level
- - `n_hypotheses`: Number of hypotheses
- - `n_analyses`: Number of analyses
-- **Methods**: Validation, printing, plotting
-
-### 5. **ObservedPValues Class**
-- **Purpose**: Represents observed p-values for `closed_test()`
-- **Key Fields**:
- - `data`: Tibble with Analysis, H1, H2, etc. columns
- - `n_hypotheses`: Number of hypotheses
- - `n_analyses`: Number of analyses
-- **Methods**: Validation, printing
-
-### 6. **ClosedTestResult Class**
-- **Purpose**: Represents results from `closed_test()`
-- **Key Fields**:
- - `outcome_matrix`: Matrix summarizing testing results
- - `bounds`: Associated bounds used
- - `p_obs`: Observed p-values used
- - `rejected_hypotheses`: List of rejected hypotheses by analysis
-- **Methods**: Validation, printing, summary
-
-### 7. **StudyDesign Class**
-- **Purpose**: High-level class to encapsulate entire study design
-- **Key Fields**:
- - `initial_weights`: Vector of initial hypothesis weights
- - `transition_matrix`: TransitionMatrix object
- - `n_hypotheses`: Number of hypotheses
- - `n_analyses`: Number of planned analyses
- - `alpha`: Overall alpha level
- - `spending_functions`: List of spending functions
-- **Methods**: Validation, printing, summary
-
-### 8. **SequentialPValue Class**
-- **Purpose**: Represents sequential p-values from `calc_seq_p()`
-- **Key Fields**:
- - `value`: The sequential p-value
- - `test_analysis`: Analysis index
- - `test_hypothesis`: Hypothesis being tested
- - `method_type`: Alpha spending type used
-- **Methods**: Validation, printing
-
-## Implementation Benefits
-
-1. **Type Safety**: S7 classes provide strong typing and validation
-2. **Method Dispatch**: Can implement specialized methods for each class
-3. **Consistency**: Standardized data structures across the package
-4. **User Experience**: Better error messages and documentation
-5. **Extensibility**: Easy to add new methods and functionality
-
-## Migration Strategy
-
-1. Start with core data structures (`EventTable`, `CorrelationMatrix`)
-2. Update `generate_corr()` to return `CorrelationMatrix` objects
-3. Update `generate_bounds()` to return `Bounds` objects
-4. Progressively convert other functions
-5. Ensure backward compatibility during transition
-6. Update documentation and vignettes
-
-This S7 class structure would provide a solid foundation for the wpgsd package while maintaining the existing functionality and improving the user experience with better type safety and method dispatch.
-
-Claude Sonnet 4 running under VSCode returned the following:
-
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.R b/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.R
deleted file mode 100644
index 63d7cc2..0000000
--- a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.R
+++ /dev/null
@@ -1,739 +0,0 @@
-## ----message = FALSE, warning = FALSE-----------------------------------------
-library(wpgsd)
-library(gsDesign)
-library(tibble)
-library(gt)
-library(dplyr)
-
-## -----------------------------------------------------------------------------
-event_tbl <- tribble(
- ~population, ~analysis, ~event,
- "A positive", 1, 100,
- "B positive", 1, 110,
- "AB positive", 1, 80,
- "overall", 1, 225,
- "A positive", 2, 200,
- "B positive", 2, 220,
- "AB positive", 2, 160,
- "overall", 2, 450,
-)
-
-## -----------------------------------------------------------------------------
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.012,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.010
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")
-
-p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p
-
-## -----------------------------------------------------------------------------
-IF_IA <- c(
- ((event_tbl %>% filter(analysis == 1, population == "A positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "A positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "B positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "B positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event)
-)
-
-IF_IA
-
-## -----------------------------------------------------------------------------
-# Transition matrix in Figure A1
-m <- matrix(c(
- 0, 3 / 7, 4 / 7,
- 3 / 7, 0, 4 / 7,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-# Initial weights
-w <- c(0.3, 0.3, 0.4)
-
-## -----------------------------------------------------------------------------
-name_hypotheses <- c("H1: Biomarker A positive", "H2: Biomarker B positive", "H3: Overall Population")
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot
-
-## -----------------------------------------------------------------------------
-# Event count of intersection of paired hypotheses - Table 2
-# H1, H2: Hypotheses intersected.
-# (1, 1) represents counts for hypothesis 1
-# (1, 2) for counts for the intersection of hypotheses 1 and 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 1, event_tbl %>% filter(analysis == 1, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 1, event_tbl %>% filter(analysis == 1, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 1, 1, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 2, event_tbl %>% filter(analysis == 2, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 2, event_tbl %>% filter(analysis == 2, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric()
-)
-event
-
-# Generate correlation from events
-gs_corr <- wpgsd::generate_corr(event)
-gs_corr %>% round(2)
-
-## ----wpgsdSeqIA, message=FALSE------------------------------------------------
-seq_p_IA_H123 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H12 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H23 <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H1 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H2 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H3 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-## ----weightedBonferroniSeqIA, message=FALSE-----------------------------------
-seq_p_IA_H123_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H12_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H23_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H1_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H2_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H3_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-## ----wpgsdSeqFA, message=FALSE------------------------------------------------
-seq_p_FA_H123 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H12 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H13 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H23 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H1 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-## ----weightedBonferroniSeqFA, message=FALSE-----------------------------------
-seq_p_FA_H123_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H12_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H13_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H23_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H1_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-## ----wpgsdAdjSeqIA------------------------------------------------------------
-adj_seq_IA_H1 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H13, seq_p_IA_H1)
-adj_seq_IA_H2 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H23, seq_p_IA_H2)
-adj_seq_IA_H3 <- max(seq_p_IA_H123, seq_p_IA_H13, seq_p_IA_H23, seq_p_IA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via WPGSD is", adj_seq_IA_H1, adj_seq_IA_H2, adj_seq_IA_H3, "\n")
-
-## ----weightedBonferroniAdjSeqIA-----------------------------------------------
-adj_seq_IA_H1_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H13_B, seq_p_IA_H1_B)
-adj_seq_IA_H2_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H23_B, seq_p_IA_H2_B)
-adj_seq_IA_H3_B <- max(seq_p_IA_H123_B, seq_p_IA_H13_B, seq_p_IA_H23_B, seq_p_IA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via weighted Bonferroni is", adj_seq_IA_H1_B, adj_seq_IA_H2_B, adj_seq_IA_H3_B, "\n")
-
-## ----wpgsdAdjSeqFA------------------------------------------------------------
-adj_seq_FA_H1 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H13, seq_p_FA_H1)
-adj_seq_FA_H2 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H23, seq_p_FA_H2)
-adj_seq_FA_H3 <- max(seq_p_FA_H123, seq_p_FA_H13, seq_p_FA_H23, seq_p_FA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via WPGSD is", adj_seq_FA_H1, adj_seq_FA_H2, adj_seq_FA_H3, "\n")
-
-## ----weightedBonferroniAdjSeqFA-----------------------------------------------
-adj_seq_FA_H1_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H13_B, seq_p_FA_H1_B)
-adj_seq_FA_H2_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H23_B, seq_p_FA_H2_B)
-adj_seq_FA_H3_B <- max(seq_p_FA_H123_B, seq_p_FA_H13_B, seq_p_FA_H23_B, seq_p_FA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_FA_H1_B, adj_seq_FA_H2_B, adj_seq_FA_H3_B, "\n")
-
-## -----------------------------------------------------------------------------
-ans <- tribble(
- ~Analysis, ~Hypothesis, ~`Sequential p-values of WPGSD`, ~`Sequential p-values of Weighted Bonferroni`, ~`Adjusted-sequential p-values of WPGSD`, ~`Adjusted-sequential p-values of Weighted Bonferroni`,
- "IA", "H123", seq_p_IA_H123, seq_p_IA_H123_B, NA, NA,
- "IA", "H12", seq_p_IA_H12, seq_p_IA_H12_B, NA, NA,
- "IA", "H13", seq_p_IA_H13, seq_p_IA_H13_B, NA, NA,
- "IA", "H23", seq_p_IA_H23, seq_p_IA_H23_B, NA, NA,
- "IA", "H1", seq_p_IA_H1, seq_p_IA_H1_B, adj_seq_IA_H1, adj_seq_IA_H1_B,
- "IA", "H2", seq_p_IA_H2, seq_p_IA_H2_B, adj_seq_IA_H2, adj_seq_IA_H2_B,
- "IA", "H3", seq_p_IA_H3, seq_p_IA_H3_B, adj_seq_IA_H3, adj_seq_IA_H3_B,
- "FA", "H123", seq_p_FA_H123, seq_p_FA_H123_B, NA, NA,
- "FA", "H12", seq_p_FA_H12, seq_p_FA_H12_B, NA, NA,
- "FA", "H13", seq_p_FA_H13, seq_p_FA_H13_B, NA, NA,
- "FA", "H23", seq_p_FA_H23, seq_p_FA_H23_B, NA, NA,
- "FA", "H1", seq_p_FA_H1, seq_p_FA_H1_B, adj_seq_FA_H1, adj_seq_FA_H1_B,
- "FA", "H2", seq_p_FA_H2, seq_p_FA_H2_B, adj_seq_FA_H2, adj_seq_FA_H2_B,
- "FA", "H3", seq_p_FA_H3, seq_p_FA_H3_B, adj_seq_FA_H3, adj_seq_FA_H3_B
-)
-
-ans %>%
- select(
- Analysis, Hypothesis,
- `Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`,
- `Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`
- ) %>%
- gt() %>%
- tab_spanner(
- label = "Weighted Bonferroni",
- columns = c(`Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`)
- ) %>%
- tab_spanner(
- label = "WPGSD",
- columns = c(`Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`)
- ) %>%
- tab_style_body(
- columns = where(is.numeric),
- style = cell_fill(color = "pink"),
- fn = function(x) x <= 0.025
- ) %>%
- fmt_number(columns = 3:6, decimals = 4) %>%
- tab_header(
- title = "(Adjusted-) sequential p-values",
- subtitle = "Multiple populations"
- ) # %>% as_latex()
-
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.Rmd b/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.Rmd
deleted file mode 100644
index df94c1c..0000000
--- a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.Rmd
+++ /dev/null
@@ -1,810 +0,0 @@
----
-title: "Adjusted sequential p-values"
-author: "Yujie Zhao, Qi Liu, Linda Sun, Keaven Anderson"
-output:
- rmarkdown::html_document:
- toc: true
- toc_float: true
- toc_depth: 2
- number_sections: true
- highlight: "textmate"
- css: "custom.css"
-# bibliography: "example.bib"
-vignette: >
- %\VignetteEngine{knitr::rmarkdown}
- %\VignetteIndexEntry{Adjusted sequential p-values}
----
-
-```{r, message = FALSE, warning = FALSE}
-library(wpgsd)
-library(gsDesign)
-library(tibble)
-library(gt)
-library(dplyr)
-```
-
-# Example overview
-
-In a 2-arm controlled clinical trial example with one primary endpoint, there are 3 patient populations defined by the status of two biomarkers A and B:
-
-- biomarker A positive,
-- biomarker B positive,
-- overall population.
-
-The 3 primary elementary hypotheses are:
-
-- $H_1$: the experimental treatment is superior to the control in the biomarker A positive population;
-- $H_2$: the experimental treatment is superior to the control in the biomarker B positive population;
-- $H_3$: the experimental treatment is superior to the control in the overall population.
-
-Assume an interim analysis (IA) and a final analysis (FA) are planned for the study, with the numbers of events listed as
-
-```{r}
-event_tbl <- tribble(
- ~population, ~analysis, ~event,
- "A positive", 1, 100,
- "B positive", 1, 110,
- "AB positive", 1, 80,
- "overall", 1, 225,
- "A positive", 2, 200,
- "B positive", 2, 220,
- "AB positive", 2, 160,
- "overall", 2, 450,
-)
-```
-
-The observed p-values are
-
-```{r}
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.012,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.010
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")
-
-p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p
-```
-
-The information fractions of $H_1, H_2, H_3$ at IA are
-
-```{r}
-IF_IA <- c(
- ((event_tbl %>% filter(analysis == 1, population == "A positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "A positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "B positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "B positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event)
-)
-
-IF_IA
-```
-
-We assign the initial weights of $H_1, H_2, H_3$ as $\left(w_1(I), w_2(I), w_3(I)\right) = (0.3, 0.3, 0.4)$, and the multiplicity strategy is visualized below. If $H_1$ is rejected, then $3/7$ of its local significance level $\alpha_1$ is propagated to $H_2$ and $4/7$ goes to $H_3$. If $H_3$ is rejected, then half of $\alpha_3$ goes to $H_1$ and half goes to $H_2$. A minimal sketch of this weight update appears after the transition matrix chunk below.
-
-```{r}
-# Transition matrix in Figure A1
-m <- matrix(c(
- 0, 3 / 7, 4 / 7,
- 3 / 7, 0, 4 / 7,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-# Initial weights
-w <- c(0.3, 0.3, 0.4)
-```
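-
-As a minimal sketch (assuming the standard graph-based weight update rule of Maurer and Bretz rather than a `wpgsd` function; the helper name is illustrative), the weights after a single rejection can be computed directly from `w` and `m`:
-
-```{r}
-# Illustration only: propagate the weight of a rejected hypothesis to the
-# remaining hypotheses according to the transition matrix `m`.
-update_after_rejection <- function(w, m, rejected) {
-  keep <- setdiff(seq_along(w), rejected)
-  w_new <- w
-  w_new[keep] <- w[keep] + w[rejected] * m[rejected, keep]
-  w_new[rejected] <- 0
-  w_new
-}
-# After rejecting H1: H2 receives 0.3 + 0.3 * 3/7, H3 receives 0.4 + 0.3 * 4/7
-update_after_rejection(w, m, rejected = 1)
-```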
-
-```{r}
-name_hypotheses <- c("H1: Biomarker A positive", "H2: Biomarker B positive", "H3: Overall Population")
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot
-```
-
-The correlations of the 6 test statistics (2 analyses $\times$ 3 hypotheses) are computed from the event counts below; a quick hand check of two entries follows the chunk.
-
-```{r}
-# Event count of intersection of paired hypotheses - Table 2
-# H1, H2: Hypotheses intersected.
-# (1, 1) represents counts for hypothesis 1
-# (1, 2) for counts for the intersection of hypotheses 1 and 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 1, event_tbl %>% filter(analysis == 1, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 1, event_tbl %>% filter(analysis == 1, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 1, 1, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 2, event_tbl %>% filter(analysis == 2, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 2, event_tbl %>% filter(analysis == 2, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric()
-)
-event
-
-# Generate correlation from events
-gs_corr <- wpgsd::generate_corr(event)
-gs_corr %>% round(2)
-```
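-
-As a quick hand check (an illustration based on the event counts above; each correlation is the shared event count divided by the square root of the product of the two event counts), two entries of `gs_corr` can be reproduced directly:
-
-```{r}
-# H1 vs H2 at IA: 80 shared events out of 100 and 110 events
-80 / sqrt(100 * 110) # compare with gs_corr[1, 2], about 0.76
-# H1 at IA vs H1 at FA: 100 shared events out of 100 and 200 events
-100 / sqrt(100 * 200) # compare with gs_corr[1, 4], about 0.71
-```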
-
-# Sequential p-value
-
-## IA
-
-```{r, wpgsdSeqIA, message=FALSE}
-seq_p_IA_H123 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H12 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H23 <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H1 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H2 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H3 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-```
-
-```{r, weightedBonferroniSeqIA, message=FALSE}
-seq_p_IA_H123_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H12_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H23_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H1_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H2_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H3_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-```
-
-## FA
-
-```{r, wpgsdSeqFA, message=FALSE}
-seq_p_FA_H123 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H12 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H13 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H23 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H1 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-```
-
-```{r, weightedBonferroniSeqFA, message=FALSE}
-seq_p_FA_H123_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H12_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H13_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H23_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H1_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-```
-
-# Adjusted-Sequential p-value
-
-Following the closed testing principle, the adjusted sequential p-value of each elementary hypothesis is the maximum of the sequential p-values over all intersection hypotheses that contain it, as computed below.
-
-## IA
-
-```{r, wpgsdAdjSeqIA}
-adj_seq_IA_H1 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H13, seq_p_IA_H1)
-adj_seq_IA_H2 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H23, seq_p_IA_H2)
-adj_seq_IA_H3 <- max(seq_p_IA_H123, seq_p_IA_H13, seq_p_IA_H23, seq_p_IA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via WPGSD is", adj_seq_IA_H1, adj_seq_IA_H2, adj_seq_IA_H3, "\n")
-```
-
-```{r, weightedBonferroniAdjSeqIA}
-adj_seq_IA_H1_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H13_B, seq_p_IA_H1_B)
-adj_seq_IA_H2_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H23_B, seq_p_IA_H2_B)
-adj_seq_IA_H3_B <- max(seq_p_IA_H123_B, seq_p_IA_H13_B, seq_p_IA_H23_B, seq_p_IA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via weighted Bonferroni is", adj_seq_IA_H1_B, adj_seq_IA_H2_B, adj_seq_IA_H3_B, "\n")
-```
-
-## FA
-
-### WPGSD
-```{r, wpgsdAdjSeqFA}
-adj_seq_FA_H1 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H13, seq_p_FA_H1)
-adj_seq_FA_H2 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H23, seq_p_FA_H2)
-adj_seq_FA_H3 <- max(seq_p_FA_H123, seq_p_FA_H13, seq_p_FA_H23, seq_p_FA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via WPGSD is", adj_seq_FA_H1, adj_seq_FA_H2, adj_seq_FA_H3, "\n")
-```
-
-```{r, weightedBonferroniAdjSeqFA}
-adj_seq_FA_H1_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H13_B, seq_p_FA_H1_B)
-adj_seq_FA_H2_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H23_B, seq_p_FA_H2_B)
-adj_seq_FA_H3_B <- max(seq_p_FA_H123_B, seq_p_FA_H13_B, seq_p_FA_H23_B, seq_p_FA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_FA_H1_B, adj_seq_FA_H2_B, adj_seq_FA_H3_B, "\n")
-```
-
-# Summary
-
-```{r}
-ans <- tribble(
- ~Analysis, ~Hypothesis, ~`Sequential p-values of WPGSD`, ~`Sequential p-values of Weighted Bonferroni`, ~`Adjusted-sequential p-values of WPGSD`, ~`Adjusted-sequential p-values of Weighted Bonferroni`,
- "IA", "H123", seq_p_IA_H123, seq_p_IA_H123_B, NA, NA,
- "IA", "H12", seq_p_IA_H12, seq_p_IA_H12_B, NA, NA,
- "IA", "H13", seq_p_IA_H13, seq_p_IA_H13_B, NA, NA,
- "IA", "H23", seq_p_IA_H23, seq_p_IA_H23_B, NA, NA,
- "IA", "H1", seq_p_IA_H1, seq_p_IA_H1_B, adj_seq_IA_H1, adj_seq_IA_H1_B,
- "IA", "H2", seq_p_IA_H2, seq_p_IA_H2_B, adj_seq_IA_H2, adj_seq_IA_H2_B,
- "IA", "H3", seq_p_IA_H3, seq_p_IA_H3_B, adj_seq_IA_H3, adj_seq_IA_H3_B,
- "FA", "H123", seq_p_FA_H123, seq_p_FA_H123_B, NA, NA,
- "FA", "H12", seq_p_FA_H12, seq_p_FA_H12_B, NA, NA,
- "FA", "H13", seq_p_FA_H13, seq_p_FA_H13_B, NA, NA,
- "FA", "H23", seq_p_FA_H23, seq_p_FA_H23_B, NA, NA,
- "FA", "H1", seq_p_FA_H1, seq_p_FA_H1_B, adj_seq_FA_H1, adj_seq_FA_H1_B,
- "FA", "H2", seq_p_FA_H2, seq_p_FA_H2_B, adj_seq_FA_H2, adj_seq_FA_H2_B,
- "FA", "H3", seq_p_FA_H3, seq_p_FA_H3_B, adj_seq_FA_H3, adj_seq_FA_H3_B
-)
-
-ans %>%
- select(
- Analysis, Hypothesis,
- `Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`,
- `Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`
- ) %>%
- gt() %>%
- tab_spanner(
- label = "Weighted Bonferroni",
- columns = c(`Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`)
- ) %>%
- tab_spanner(
- label = "WPGSD",
- columns = c(`Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`)
- ) %>%
- tab_style_body(
- columns = where(is.numeric),
- style = cell_fill(color = "pink"),
- fn = function(x) x <= 0.025
- ) %>%
- fmt_number(columns = 3:6, decimals = 4) %>%
- tab_header(
- title = "(Adjusted-) sequential p-values",
- subtitle = "Multiple populations"
- ) # %>% as_latex()
-```
diff --git a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.html b/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.html
deleted file mode 100644
index 0236e45..0000000
--- a/wpgsd.Rcheck/00_pkg_src/wpgsd/inst/doc/adj-seq-p.html
+++ /dev/null
@@ -1,3399 +0,0 @@
-library(wpgsd)
-library(gsDesign)
-library(tibble)
-library(gt)
-library(dplyr)
-In a 2-arm controlled clinical trial example with one primary endpoint, there are 3 patient populations defined by the status of two biomarkers A and B: biomarker A positive, biomarker B positive, and the overall population.
-The 3 primary elementary hypotheses are \(H_1\), \(H_2\), and \(H_3\): the experimental treatment is superior to the control in the biomarker A positive population, the biomarker B positive population, and the overall population, respectively.
-Assume an interim analysis and a final analysis are planned for the study, with the numbers of events listed as
-event_tbl <- tribble(
- ~population, ~analysis, ~event,
- "A positive", 1, 100,
- "B positive", 1, 110,
- "AB positive", 1, 80,
- "overall", 1, 225,
- "A positive", 2, 200,
- "B positive", 2, 220,
- "AB positive", 2, 160,
- "overall", 2, 450,
-)
-The observed p-values are
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.012,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.010
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")
-Nominal p-values
-
-| hypothesis | analysis | obs_p | obs_Z |
-|---|---|---|---|
-| H1 | 1 | 0.020 | 2.053749 |
-| H2 | 1 | 0.010 | 2.326348 |
-| H3 | 1 | 0.012 | 2.257129 |
-| H1 | 2 | 0.015 | 2.170090 |
-| H2 | 2 | 0.012 | 2.257129 |
-| H3 | 2 | 0.010 | 2.326348 |
p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p
-The information fractions of \(H_1, H_2, H_3\) at IA are
-IF_IA <- c(
- ((event_tbl %>% filter(analysis == 1, population == "A positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "A positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "B positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "B positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event)
-)
-
-IF_IA
-## [1] 0.5 0.5 0.5
-We assign the initial weights of \(H_1, H_2, H_3\) as \(\left(w_1(I), w_2(I), w_3(I)\right) = (0.3, 0.3, 0.4)\), and the multiplicity strategy is visualized below. If \(H_1\) is rejected, then \(3/7\) of its local significance level \(\alpha_1\) is propagated to \(H_2\) and \(4/7\) goes to \(H_3\). If \(H_3\) is rejected, then half of \(\alpha_3\) goes to \(H_1\) and half goes to \(H_2\).
-# Transition matrix in Figure A1
-m <- matrix(c(
- 0, 3 / 7, 4 / 7,
- 3 / 7, 0, 4 / 7,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-# Initial weights
-w <- c(0.3, 0.3, 0.4)
-name_hypotheses <- c("H1: Biomarker A positive", "H2: Biomarker B positive", "H3: Overall Population")
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot
-The correlations of the 6 test statistics (2 analyses \(\times\) 3 hypotheses) are
-# Event count of intersection of paired hypotheses - Table 2
-# H1, H2: Hypotheses intersected.
-# (1, 1) represents counts for hypothesis 1
-# (1, 2) for counts for the intersection of hypotheses 1 and 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 1, event_tbl %>% filter(analysis == 1, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 1, event_tbl %>% filter(analysis == 1, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 1, 1, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 2, event_tbl %>% filter(analysis == 2, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 2, event_tbl %>% filter(analysis == 2, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric()
-)
-event
-## # A tibble: 12 × 4
-## H1 H2 Analysis Event
-## <dbl> <dbl> <dbl> <dbl>
-## 1 1 1 1 100
-## 2 2 2 1 110
-## 3 3 3 1 225
-## 4 1 2 1 80
-## 5 1 3 1 100
-## 6 2 3 1 110
-## 7 1 1 2 200
-## 8 2 2 2 220
-## 9 3 3 2 450
-## 10 1 2 2 160
-## 11 1 3 2 200
-## 12 2 3 2 220
-# Generate correlation from events
-gs_corr <- wpgsd::generate_corr(event)
-gs_corr %>% round(2)
-## H1_A1 H2_A1 H3_A1 H1_A2 H2_A2 H3_A2
-## [1,] 1.00 0.76 0.67 0.71 0.54 0.47
-## [2,] 0.76 1.00 0.70 0.54 0.71 0.49
-## [3,] 0.67 0.70 1.00 0.47 0.49 0.71
-## [4,] 0.71 0.54 0.47 1.00 0.76 0.67
-## [5,] 0.54 0.71 0.49 0.76 1.00 0.70
-## [6,] 0.47 0.49 0.71 0.67 0.70 1.00
-seq_p_IA_H123 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H12 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H23 <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H1 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H2 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H3 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-seq_p_IA_H123_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H12_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H23_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H1_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H2_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H3_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-seq_p_FA_H123 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H12 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H13 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H23 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H1 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-seq_p_FA_H123_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H12_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H13_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H23_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H1_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-adj_seq_IA_H1 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H13, seq_p_IA_H1)
-adj_seq_IA_H2 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H23, seq_p_IA_H2)
-adj_seq_IA_H3 <- max(seq_p_IA_H123, seq_p_IA_H13, seq_p_IA_H23, seq_p_IA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via WPGSD is", adj_seq_IA_H1, adj_seq_IA_H2, adj_seq_IA_H3, "\n")
-## The adjusted-sequential p-value of H1, H2, H3 in IA via WPGSD is 0.1942751 0.1942751 0.1942751
-adj_seq_IA_H1_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H13_B, seq_p_IA_H1_B)
-adj_seq_IA_H2_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H23_B, seq_p_IA_H2_B)
-adj_seq_IA_H3_B <- max(seq_p_IA_H123_B, seq_p_IA_H13_B, seq_p_IA_H23_B, seq_p_IA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via weighted Bonferroni is", adj_seq_IA_H1_B, adj_seq_IA_H2_B, adj_seq_IA_H3_B, "\n")
-## The adjusted-sequential p-value of H1, H2, H3 in IA via weighted Bonferroni is 0.2516717 0.2516717 0.2516717
-adj_seq_FA_H1 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H13, seq_p_FA_H1)
-adj_seq_FA_H2 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H23, seq_p_FA_H2)
-adj_seq_FA_H3 <- max(seq_p_FA_H123, seq_p_FA_H13, seq_p_FA_H23, seq_p_FA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via WPGSD is", adj_seq_FA_H1, adj_seq_FA_H2, adj_seq_FA_H3, "\n")
-## The adjusted-sequential p-value of H1, H2, H3 in FA via WPGSD is 0.02097944 0.02097944 0.02063497
-adj_seq_FA_H1_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H13_B, seq_p_FA_H1_B)
-adj_seq_FA_H2_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H23_B, seq_p_FA_H2_B)
-adj_seq_FA_H3_B <- max(seq_p_FA_H123_B, seq_p_FA_H13_B, seq_p_FA_H23_B, seq_p_FA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_FA_H1_B, adj_seq_FA_H2_B, adj_seq_FA_H3_B, "\n")
-## The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is 0.0265823 0.0265823 0.0265823
-ans <- tribble(
- ~Analysis, ~Hypothesis, ~`Sequential p-values of WPGSD`, ~`Sequential p-values of Weighted Bonferroni`, ~`Adjusted-sequential p-values of WPGSD`, ~`Adjusted-sequential p-values of Weighted Bonferroni`,
- "IA", "H123", seq_p_IA_H123, seq_p_IA_H123_B, NA, NA,
- "IA", "H12", seq_p_IA_H12, seq_p_IA_H12_B, NA, NA,
- "IA", "H13", seq_p_IA_H13, seq_p_IA_H13_B, NA, NA,
- "IA", "H23", seq_p_IA_H23, seq_p_IA_H23_B, NA, NA,
- "IA", "H1", seq_p_IA_H1, seq_p_IA_H1_B, adj_seq_IA_H1, adj_seq_IA_H1_B,
- "IA", "H2", seq_p_IA_H2, seq_p_IA_H2_B, adj_seq_IA_H2, adj_seq_IA_H2_B,
- "IA", "H3", seq_p_IA_H3, seq_p_IA_H3_B, adj_seq_IA_H3, adj_seq_IA_H3_B,
- "FA", "H123", seq_p_FA_H123, seq_p_FA_H123_B, NA, NA,
- "FA", "H12", seq_p_FA_H12, seq_p_FA_H12_B, NA, NA,
- "FA", "H13", seq_p_FA_H13, seq_p_FA_H13_B, NA, NA,
- "FA", "H23", seq_p_FA_H23, seq_p_FA_H23_B, NA, NA,
- "FA", "H1", seq_p_FA_H1, seq_p_FA_H1_B, adj_seq_FA_H1, adj_seq_FA_H1_B,
- "FA", "H2", seq_p_FA_H2, seq_p_FA_H2_B, adj_seq_FA_H2, adj_seq_FA_H2_B,
- "FA", "H3", seq_p_FA_H3, seq_p_FA_H3_B, adj_seq_FA_H3, adj_seq_FA_H3_B
-)
-
-ans %>%
- select(
- Analysis, Hypothesis,
- `Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`,
- `Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`
- ) %>%
- gt() %>%
- tab_spanner(
- label = "Weighted Bonferroni",
- columns = c(`Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`)
- ) %>%
- tab_spanner(
- label = "WPGSD",
- columns = c(`Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`)
- ) %>%
- tab_style_body(
- columns = where(is.numeric),
- style = cell_fill(color = "pink"),
- fn = function(x) x <= 0.025
- ) %>%
- fmt_number(columns = 3:6, decimals = 4) %>%
- tab_header(
- title = "(Adjusted-) sequential p-values",
- subtitle = "Multiple populations"
- ) # %>% as_latex()
-(Adjusted-) sequential p-values
-Multiple populations
-
-| Analysis | Hypothesis | Sequential p-values of Weighted Bonferroni | Adjusted-sequential p-values of Weighted Bonferroni | Sequential p-values of WPGSD | Adjusted-sequential p-values of WPGSD |
-|---|---|---|---|---|---|
-| IA | H123 | 0.2517 | NA | 0.1943 | NA |
-| IA | H12 | 0.1678 | NA | 0.1400 | NA |
-| IA | H13 | 0.1762 | NA | 0.1553 | NA |
-| IA | H23 | 0.1762 | NA | 0.1529 | NA |
-| IA | H1 | 0.1678 | 0.2517 | 0.1678 | 0.1943 |
-| IA | H2 | 0.0839 | 0.2517 | 0.0839 | 0.1943 |
-| IA | H3 | 0.1007 | 0.2517 | 0.1007 | 0.1943 |
-| FA | H123 | 0.0266 | NA | 0.0206 | NA |
-| FA | H12 | 0.0255 | NA | 0.0210 | NA |
-| FA | H13 | 0.0186 | NA | 0.0165 | NA |
-| FA | H23 | 0.0186 | NA | 0.0162 | NA |
-| FA | H1 | 0.0159 | 0.0266 | 0.0159 | 0.0210 |
-| FA | H2 | 0.0127 | 0.0266 | 0.0127 | 0.0210 |
-| FA | H3 | 0.0106 | 0.0266 | 0.0106 | 0.0206 |
The weighted parametric group sequential design (WPGSD) (Anderson et al. (2022)) approach allows one to take advantage of the known correlation structure in constructing efficacy bounds to control family-wise error rate (FWER) for a group sequential design. Here correlation may be due to common observations in nested populations, due to common observations in overlapping populations, or due to common observations in the control arm.
-Suppose that in a group sequential trial there are \(m\) elementary null hypotheses \(H_i\), \(i \in I=\{1,\ldots,m\}\), and there are \(K\) analyses. Let \(k\) be the index for the interim analyses and final analyses, \(k=1,2,\ldots,K\). For any nonempty set \(J \subseteq I\), we denote the intersection hypothesis \(H_J=\cap_{j \in J}H_j\). We note that \(H_I\) is the global null hypothesis.
-We assume the plan is for all hypotheses to be tested at each of the \(K\) planned analyses if the trial continues to the end for all hypotheses. We further assume that the distribution of the \(m \times K\) tests of the \(m\) individual hypotheses at all \(K\) analyses is multivariate normal with a completely known correlation matrix.
Let \(Z_{ik}\) be the standardized normal test statistic for hypothesis \(i \in I\) at analysis \(k\), \(1 \le k \le K\). Let \(n_{ik}\) be the number of events collected cumulatively through stage \(k\) for hypothesis \(i\). Then \(n_{i \wedge i',k \wedge k'}\) is the number of events included in both \(Z_{ik}\) and \(Z_{i'k'}\), for \(i, i' \in I\) and \(1 \le k, k' \le K\). The key of the parametric tests is to utilize the correlation among the test statistics. The correlation between \(Z_{ik}\) and \(Z_{i'k'}\) is \[\text{Corr}(Z_{ik},Z_{i'k'})=\frac{n_{i \wedge i',k \wedge k'}}{\sqrt{n_{ik}*n_{i'k'}}}.\]
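As a minimal illustration of this formula (a sketch of our own, not a wpgsd function), the correlation can be computed directly from the shared and per-test event counts:

```r
# Sketch: correlation between two standardized test statistics, given the
# number of events common to both tests and the events contributing to each.
# (Illustrative helper; the function name and arguments are ours.)
corr_z <- function(n_common, n_1, n_2) {
  n_common / sqrt(n_1 * n_2)
}
```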
We borrow Example 1 from Anderson et al. (2022), introduced in Section 2 (Motivating Examples) of that paper, as the basis here. The setting is as follows:
In a two-arm controlled clinical trial with one primary endpoint, there are three patient populations defined by the status of two biomarkers, A and B: population 1 (biomarker A positive), population 2 (biomarker B positive), and the overall population.
The 3 primary elementary hypotheses are \(H_1\): the experimental treatment is superior to control in population 1; \(H_2\): the experimental treatment is superior to control in population 2; and \(H_3\): the experimental treatment is superior to control in the overall population.
Assume an interim analysis and a final analysis are planned for the study. The numbers of events are listed as
-library(dplyr)
-library(tibble)
-library(gt)
-event_tb <- tribble(
- ~Population, ~"Number of Event in IA", ~"Number of Event in FA",
- "Population 1", 100, 200,
- "Population 2", 110, 220,
- "Overlap of Population 1 and 2", 80, 160,
- "Overall Population", 225, 450
-)
-event_tb %>%
- gt() %>%
- tab_header(title = "Number of events at each population")
Number of events at each population

| Population | Number of Event in IA | Number of Event in FA |
|---|---|---|
| Population 1 | 100 | 200 |
| Population 2 | 110 | 220 |
| Overlap of Population 1 and 2 | 80 | 160 |
| Overall Population | 225 | 450 |
Let’s consider a simple situation: we want to compare population 1 and population 2 at the interim analysis only. Then \(k=1\), and to compare \(H_{1}\) and \(H_{2}\), the indices are \(i=1\) and \(i=2\). The correlation is \[\text{Corr}(Z_{11},Z_{21})=\frac{n_{1 \wedge 2,1 \wedge 1}}{\sqrt{n_{11}*n_{21}}}.\] The numbers of events are listed as
-event_tbl <- tribble(
- ~Population, ~"Number of Event in IA",
- "Population 1", 100,
- "Population 2", 110,
- "Overlap in population 1 and 2", 80
-)
-event_tbl %>%
- gt() %>%
- tab_header(title = "Number of events at each population in example 1")
Number of events at each population in example 1

| Population | Number of Event in IA |
|---|---|
| Population 1 | 100 |
| Population 2 | 110 |
| Overlap in population 1 and 2 | 80 |
The correlation can then be calculated simply as \[\text{Corr}(Z_{11},Z_{21})=\frac{80}{\sqrt{100*110}}=0.76\]
-Corr1 <- 80 / sqrt(100 * 110)
-round(Corr1, 2)
-## [1] 0.76
Let’s consider another simple situation: we want to compare a single population, for example population 1, across different analyses, namely the interim and the final analysis. Then \(i=1\), and to compare IA and FA, the indices are \(k=1\) and \(k=2\). The correlation is \[\text{Corr}(Z_{11},Z_{12})=\frac{n_{1 \wedge 1,1 \wedge 2}}{\sqrt{n_{11}*n_{12}}}.\] The numbers of events are listed as
-event_tb2 <- tribble(
- ~Population, ~"Number of Event in IA", ~"Number of Event in FA",
- "Population 1", 100, 200
-)
-event_tb2 %>%
- gt() %>%
- tab_header(title = "Number of events at each analyses in example 2")
Number of events at each analyses in example 2

| Population | Number of Event in IA | Number of Event in FA |
|---|---|---|
| Population 1 | 100 | 200 |
The correlation can then be calculated simply as \[\text{Corr}(Z_{11},Z_{12})=\frac{100}{\sqrt{100*200}}=0.71\] The 100 in the numerator is the number of events shared between the interim and final analyses in population 1.
-Corr1 <- 100 / sqrt(100 * 200)
-round(Corr1, 2)
-## [1] 0.71
Let’s consider the situation where we want to compare population 1 at the interim analysis with population 2 at the final analysis. Then the populations differ, \(i=1\) and \(i'=2\), and the analyses differ, \(k=1\) and \(k'=2\). The correlation is \[\text{Corr}(Z_{11},Z_{22})=\frac{n_{1 \wedge 2,1 \wedge 2}}{\sqrt{n_{11}*n_{22}}}.\] The numbers of events are listed as
-event_tb3 <- tribble(
- ~Population, ~"Number of Event in IA", ~"Number of Event in FA",
- "Population 1", 100, 200,
- "Population 2", 110, 220,
- "Overlap in population 1 and 2", 80, 160
-)
-event_tb3 %>%
- gt() %>%
- tab_header(title = "Number of events at each population & analyses in example 3")
-| Number of events at each population & analyses in example 3 | -||
| Population | -Number of Event in IA | -Number of Event in FA | -
|---|---|---|
| Population 1 | -100 | -200 |
| Population 2 | -110 | -220 |
| Overlap in population 1 and 2 | -80 | -160 |
The correlation can then be calculated simply as \[\text{Corr}(Z_{11},Z_{22})=\frac{80}{\sqrt{100*220}}=0.54\] The 80 in the numerator is the number of events shared between population 1 at the interim analysis and population 2 at the final analysis.
-Corr1 <- 80 / sqrt(100 * 220)
-round(Corr1, 2)
-## [1] 0.54
generate_corr()
Now that we know how to calculate the correlation values in these different situations, we can use the generate_corr() function, which is built on the same logic, to compute the results for every combination of hypotheses and analyses directly.
First, we need an event table containing the relevant information about the study.
H1 and H2 index the pair of hypotheses whose tests share events; both hypotheses are part of the multiplicity testing. For example, H1 may state that the experimental treatment is superior to control in population 1 (or for experimental arm 1), and H2 that it is superior to control in population 2 (or for experimental arm 2). Analysis indexes the analysis stage, for example 1 for the interim analysis and 2 for the final analysis. Event is the number of events common to the tests of H1 and H2.
For example, H1=1, H2=1, Analysis=1, Event=100 indicates that 100 events contribute to the test of hypothesis 1 at the interim analysis.
Another example: H1=1, H2=2, Analysis=2, Event=160 indicates that 160 events are shared by the tests of hypotheses 1 and 2 at the final analysis.
Note that the column names expected by this function are fixed: H1, H2, Analysis, Event.
library(wpgsd)
-# The event table
-event <- tibble::tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, 100,
- 2, 2, 1, 110,
- 3, 3, 1, 225,
- 1, 2, 1, 80,
- 1, 3, 1, 100,
- 2, 3, 1, 110,
- 1, 1, 2, 200,
- 2, 2, 2, 220,
- 3, 3, 2, 450,
- 1, 2, 2, 160,
- 1, 3, 2, 200,
- 2, 3, 2, 220
-)
-
-event %>%
- gt() %>%
- tab_header(title = "Number of events at each population & analyses")
-| Number of events at each population & analyses | -|||
| H1 | -H2 | -Analysis | -Event | -
|---|---|---|---|
| 1 | -1 | -1 | -100 |
| 2 | -2 | -1 | -110 |
| 3 | -3 | -1 | -225 |
| 1 | -2 | -1 | -80 |
| 1 | -3 | -1 | -100 |
| 2 | -3 | -1 | -110 |
| 1 | -1 | -2 | -200 |
| 2 | -2 | -2 | -220 |
| 3 | -3 | -2 | -450 |
| 1 | -2 | -2 | -160 |
| 1 | -3 | -2 | -200 |
| 2 | -3 | -2 | -220 |
Then we pass the event table above to generate_corr() and obtain the correlation matrix as follows.
generate_corr(event)
-## H1_A1 H2_A1 H3_A1 H1_A2 H2_A2 H3_A2
-## [1,] 1.0000000 0.7627701 0.6666667 0.7071068 0.5393599 0.4714045
-## [2,] 0.7627701 1.0000000 0.6992059 0.5393599 0.7071068 0.4944132
-## [3,] 0.6666667 0.6992059 1.0000000 0.4714045 0.4944132 0.7071068
-## [4,] 0.7071068 0.5393599 0.4714045 1.0000000 0.7627701 0.6666667
-## [5,] 0.5393599 0.7071068 0.4944132 0.7627701 1.0000000 0.6992059
-## [6,] 0.4714045 0.4944132 0.7071068 0.6666667 0.6992059 1.0000000
-library(tibble)
-library(gt)
-library(gsDesign)
-library(dplyr)
-library(wpgsd)
In a 2-arm controlled clinical trial example with one primary endpoint, there are 3 patient populations defined by the status of two biomarkers A and B: the biomarker A positive population, the biomarker B positive population, and the overall population.
The 3 primary elementary hypotheses are \(H_1\) (biomarker A positive), \(H_2\) (biomarker B positive), and \(H_3\) (overall population), each testing superiority of the experimental treatment over control.
Assume an interim analysis and a final analysis are planned for the study; the observed nominal p-values and the numbers of events at each analysis are set up below.
-k <- 2 # Number of total analysis
-n_hypotheses <- 3 # Number of hypotheses
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.006,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.004
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")
Nominal p-values

| hypothesis | analysis | obs_p | obs_Z |
|---|---|---|---|
| H1 | 1 | 0.020 | 2.053749 |
| H2 | 1 | 0.010 | 2.326348 |
| H3 | 1 | 0.006 | 2.512144 |
| H1 | 2 | 0.015 | 2.170090 |
| H2 | 2 | 0.012 | 2.257129 |
| H3 | 2 | 0.004 | 2.652070 |
p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p
-alpha <- 0.025
-event_tbl <- tribble(
- ~population, ~analysis, ~event,
- "A positive", 1, 80,
- "B positive", 1, 88,
- "AB positive", 1, 64,
- "overall", 1, 180,
- "A positive", 2, 160,
- "B positive", 2, 176,
- "AB positive", 2, 128,
- "overall", 2, 360,
-)
The information fractions of \(H_1\), \(H_2\), \(H_3\) at IA are
-IF_IA <- c(
- ((event_tbl %>% filter(analysis == 1, population == "A positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "A positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "B positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "B positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event)
-)
-
-IF_IA
-## [1] 0.5 0.5 0.5
We assign the initial weights of \(H_1\), \(H_2\), \(H_3\) as \(\left(w_1(I), w_2(I), w_3(I)\right) = (0.3, 0.3, 0.4)\). The multiplicity strategy is visualized below. If \(H_1\) is rejected, then \(3/7\) of its local significance level \(\alpha_1\) is propagated to \(H_2\) and \(4/7\) to \(H_3\); similarly, if \(H_2\) is rejected, \(3/7\) of \(\alpha_2\) goes to \(H_1\) and \(4/7\) to \(H_3\). If \(H_3\) is rejected, then half of \(\alpha_3\) goes to \(H_1\) and half goes to \(H_2\).
-m <- matrix(c( # Transition matrix
- 0, 3 / 7, 4 / 7,
- 3 / 7, 0, 4 / 7,
- 1 / 2, 1 / 2, 0
-), nrow = 3, byrow = TRUE)
-
-w <- c(0.3, 0.3, 0.4) # Initial weights
-name_hypotheses <- c(
- "H1: Biomarker A positive",
- "H2: Biomarker B positive",
- "H3: Overall Population"
-)
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot
-# Get weights for all intersection hypotheses
-graph <- gMCPLite::matrix2graph(m)
-graph <- gMCPLite::setWeights(graph, w)
-# Set up hypothetical p-values (0 or 1) to obtain all combinations
-pvals <- NULL
-for (i in 1:n_hypotheses) {
- if (i == 1) {
- pvals <- data.frame(x = c(0, 1))
- names(pvals) <- paste("pval_H", i, sep = "")
- } else {
- tmp <- data.frame(x = c(0, 1))
- names(tmp) <- paste("pval_H", i, sep = "")
- pvals <- merge(pvals, tmp)
- }
-}
-# Get the weights for each intersection hypothesis
-inter_weight <- NULL # Create an empty table to store the weight of interaction hypotheses
-for (i in seq_len(nrow(pvals))) { # Each row in `pvals` is 1 possible interaction hypothesis
- pval_tmp <- as.numeric(pvals[i, ])
- graph_tmp <- gMCPLite::gMCP(graph = graph, pvalues = pval_tmp, alpha = alpha)
- weight_tmp <- gMCPLite::getWeights(graph_tmp)
- inter_weight <- dplyr::bind_rows(inter_weight, weight_tmp)
-}
-
-inter_weight <- replace(inter_weight, pvals == 0, NA) # Replace the empty hypothesis as NA
-inter_weight <- inter_weight[-1, ] # Delete the first row since it is empty set
-
-inter_weight %>%
- gt() %>%
- tab_header("Weight of all possible interaction hypothesis")
Weight of all possible interaction hypothesis

| H1 | H2 | H3 |
|---|---|---|
| 1.0000000 | NA | NA |
| NA | 1.0000000 | NA |
| 0.5000000 | 0.5000000 | NA |
| NA | NA | 1.0000000 |
| 0.4285714 | NA | 0.5714286 |
| NA | 0.4285714 | 0.5714286 |
| 0.3000000 | 0.3000000 | 0.4000000 |
The correlations of the 6 test statistics (2 analyses \(\times\) 3 hypotheses) are
-# Event count of intersection of paired hypotheses - Table 2
-# H1, H2: Hypotheses intersected.
-# (1, 1) represents counts for hypothesis 1
-# (1, 2) for counts for the intersection of hypotheses 1 and 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 1, event_tbl %>% filter(analysis == 1, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 1, event_tbl %>% filter(analysis == 1, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 1, 1, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 2, event_tbl %>% filter(analysis == 2, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 2, event_tbl %>% filter(analysis == 2, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric()
-)
-event
-## # A tibble: 12 × 4
-## H1 H2 Analysis Event
-## <dbl> <dbl> <dbl> <dbl>
-## 1 1 1 1 80
-## 2 2 2 1 88
-## 3 3 3 1 180
-## 4 1 2 1 64
-## 5 1 3 1 80
-## 6 2 3 1 88
-## 7 1 1 2 160
-## 8 2 2 2 176
-## 9 3 3 2 360
-## 10 1 2 2 128
-## 11 1 3 2 160
-## 12 2 3 2 176
-# Generate correlation from events
-corr <- wpgsd::generate_corr(event)
-corr %>% round(2)
-## H1_A1 H2_A1 H3_A1 H1_A2 H2_A2 H3_A2
-## [1,] 1.00 0.76 0.67 0.71 0.54 0.47
-## [2,] 0.76 1.00 0.70 0.54 0.71 0.49
-## [3,] 0.67 0.70 1.00 0.47 0.49 0.71
-## [4,] 0.71 0.54 0.47 1.00 0.76 0.67
-## [5,] 0.54 0.71 0.49 0.76 1.00 0.70
-## [6,] 0.47 0.49 0.71 0.67 0.70 1.00
-For the elementary hypothesis \(H_1\), its weight is 1, namely,
-w_H1 <- 1
-
-# Index to select from the correlation matrix
-indx <- grep("H1", colnames(corr))
-corr_H1 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H1 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[1],
- n.I = corr_H1[, ncol(corr_H1)]^2,
- alpha = alpha * w_H1[1],
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H1",
- `H1 p-value boundary` = pval_H1,
- `H2 p-value boundary` = NA,
- `H3 p-value boundary` = NA
-)
-ans %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H1 | 0.002980073 | NA | NA |
| 2 | H1 | 0.023788266 | NA | NA |
For the elementary hypothesis \(H_2\), its weight is 1, namely,
-w_H2 <- 1
-
-# Index to select from the correlation matrix
-indx <- grep("H2", colnames(corr))
-corr_H2 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H2 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[2],
- n.I = corr_H2[, ncol(corr_H2)]^2,
- alpha = alpha * w_H2[1],
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H2",
- `H1 p-value boundary` = NA,
- `H2 p-value boundary` = pval_H2,
- `H3 p-value boundary` = NA
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H2 | NA | 0.002980073 | NA |
| 2 | H2 | NA | 0.023788266 | NA |
ans <- rbind(ans, ans_new)
-For the elementary hypothesis \(H_3\), its weight is 1, namely,
-w_H3 <- 1
-
-# Index to select from the correlation matrix
-indx <- grep("H3", colnames(corr))
-corr_H3 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H3 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[3],
- n.I = corr_H3[, ncol(corr_H3)]^2,
- alpha = alpha * w_H3[1],
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H3",
- `H1 p-value boundary` = NA,
- `H2 p-value boundary` = NA,
- `H3 p-value boundary` = pval_H3
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H3 | NA | NA | 0.002980073 |
| 2 | H3 | NA | NA | 0.023788266 |
ans <- rbind(ans, ans_new)
For the intersection hypothesis \(H_1 \cap H_2\), its weight is
-w_H12 <- inter_weight %>% filter(!is.na(H1), !is.na(H2), is.na(H3))
-w_H12 <- w_H12[(!is.na(w_H12))] # Remove NA from weight
-w_H12
-## [1] 0.5 0.5
And the boundaries for \(H_1\) and \(H_2\) are
-# -------------#
-# H1 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H1", colnames(corr))
-corr_H1 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H1 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[1],
- n.I = corr_H1[, ncol(corr_H1)]^2,
- alpha = alpha * w_H12[1], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H2 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H2", colnames(corr))
-corr_H2 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H2 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[2],
- n.I = corr_H2[, ncol(corr_H2)]^2,
- alpha = alpha * w_H12[2], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H1, H2",
- `H1 p-value boundary` = pval_H1,
- `H2 p-value boundary` = pval_H2,
- `H3 p-value boundary` = NA
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H1, H2 | 0.001490037 | 0.001490037 | NA |
| 2 | H1, H2 | 0.011782800 | 0.011782800 | NA |
ans <- rbind(ans, ans_new)
For the intersection hypothesis \(H_1 \cap H_3\), its weight is
-w_H13 <- inter_weight %>% filter(!is.na(H1), is.na(H2), !is.na(H3))
-w_H13 <- w_H13[(!is.na(w_H13))] # Remove NA from weight
-w_H13
-## [1] 0.4285714 0.5714286
And the boundaries for \(H_1\) and \(H_3\) are
-# -------------#
-# H1 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H1", colnames(corr))
-corr_H1 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H1 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[1],
- n.I = corr_H1[, ncol(corr_H1)]^2,
- alpha = alpha * w_H13[1], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H3 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H3", colnames(corr))
-corr_H3 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H3 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[3],
- n.I = corr_H3[, ncol(corr_H3)]^2,
- alpha = alpha * w_H13[2], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H1, H3",
- `H1 p-value boundary` = pval_H1,
- `H2 p-value boundary` = NA,
- `H3 p-value boundary` = pval_H3
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H1, H3 | 0.001277174 | NA | 0.001702899 |
| 2 | H1, H3 | 0.010079863 | NA | 0.013489389 |
ans <- rbind(ans, ans_new)
For the intersection hypothesis \(H_2 \cap H_3\), its weight is
-w_H23 <- inter_weight %>% filter(is.na(H1), !is.na(H2), !is.na(H3))
-w_H23 <- w_H23[(!is.na(w_H23))] # Remove NA from weight
-w_H23
-## [1] 0.4285714 0.5714286
And the boundaries for \(H_2\) and \(H_3\) are
-# -------------#
-# H2 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H2", colnames(corr))
-corr_H2 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H2 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[2],
- n.I = corr_H2[, ncol(corr_H2)]^2,
- alpha = alpha * w_H23[1], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H3 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H3", colnames(corr))
-corr_H3 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H3 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[3],
- n.I = corr_H3[, ncol(corr_H3)]^2,
- alpha = alpha * w_H23[2], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H2, H3",
- `H1 p-value boundary` = NA,
- `H2 p-value boundary` = pval_H2,
- `H3 p-value boundary` = pval_H3
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H2, H3 | NA | 0.001277174 | 0.001702899 |
| 2 | H2, H3 | NA | 0.010079863 | 0.013489389 |
ans <- rbind(ans, ans_new)
For the intersection hypothesis \(H_1 \cap H_2 \cap H_3\), its weight is
-w_H123 <- inter_weight %>% filter(!is.na(H1), !is.na(H2), !is.na(H3))
-w_H123 <- w_H123[(!is.na(w_H123))] # Remove NA from weight
-w_H123
-## [1] 0.3 0.3 0.4
And the boundaries for \(H_1\), \(H_2\), and \(H_3\) are
-# -------------#
-# H1 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H1", colnames(corr))
-corr_H1 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H1 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[1],
- n.I = corr_H1[, ncol(corr_H1)]^2,
- alpha = alpha * w_H123[1], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H2 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H2", colnames(corr))
-corr_H2 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H2 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[2],
- n.I = corr_H2[, ncol(corr_H2)]^2,
- alpha = alpha * w_H123[2], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H3 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H3", colnames(corr))
-corr_H3 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H3 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[3],
- n.I = corr_H3[, ncol(corr_H3)]^2,
- alpha = alpha * w_H123[3], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H1, H2, H3",
- `H1 p-value boundary` = pval_H1,
- `H2 p-value boundary` = pval_H2,
- `H3 p-value boundary` = pval_H3
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H1, H2, H3 | 0.0008940219 | 0.0008940219 | 0.001192029 |
| 2 | H1, H2, H3 | 0.0070254979 | 0.0070254979 | 0.009399818 |
ans <- rbind(ans, ans_new)
-With the p-value boundaries, one can get the Z-statistics boundaries
-by qnorm().
ans %>%
- mutate(
- `H1 Z-statistics boundary` = -qnorm(`H1 p-value boundary`),
- `H2 Z-statistics boundary` = -qnorm(`H2 p-value boundary`),
- `H3 Z-statistics boundary` = -qnorm(`H3 p-value boundary`)
- ) %>%
- arrange(Analysis, `Interaction/Elementary hypotheses`) %>%
- gt() %>%
- tab_header("p-values/Z-statistics boundaries of weighted Bonferroni")
p-values/Z-statistics boundaries of weighted Bonferroni

| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary | H3 Z-statistics boundary |
|---|---|---|---|---|---|
| 1 | H1 | 0.0029800731 | NA | NA | NA |
| 1 | H1, H2 | 0.0014900365 | 0.0014900365 | NA | NA |
| 1 | H1, H2, H3 | 0.0008940219 | 0.0008940219 | 0.001192029 | 3.037681 |
| 1 | H1, H3 | 0.0012771742 | NA | 0.001702899 | 2.928520 |
| 1 | H2 | NA | 0.0029800731 | NA | NA |
| 1 | H2, H3 | NA | 0.0012771742 | 0.001702899 | 2.928520 |
| 1 | H3 | NA | NA | 0.002980073 | 2.749966 |
| 2 | H1 | 0.0237882657 | NA | NA | NA |
| 2 | H1, H2 | 0.0117828003 | 0.0117828003 | NA | NA |
| 2 | H1, H2, H3 | 0.0070254979 | 0.0070254979 | 0.009399818 | 2.349480 |
| 2 | H1, H3 | 0.0100798631 | NA | 0.013489389 | 2.211825 |
| 2 | H2 | NA | 0.0237882657 | NA | NA |
| 2 | H2, H3 | NA | 0.0100798631 | 0.013489389 | 2.211825 |
| 2 | H3 | NA | NA | 0.023788266 | 1.981131 |
The above results can be computed in one function call in wpgsd by
-using the generate_bounds() function as
generate_bounds(
- type = 0,
- k = 2,
- w = w,
- m = m,
- corr = corr,
- alpha = 0.025,
- sf = list(sfHSD, sfHSD, sfHSD),
- sfparm = list(-4, -4, -4),
- t = list(c(0.5, 1), c(0.5, 1), c(0.5, 1))
-) %>% gt()
| Analysis | Hypotheses | H1 | H2 | H3 |
|---|---|---|---|---|
| 1 | H1 | 0.0029800731 | NA | NA |
| 1 | H1, H2 | 0.0014900365 | 0.0014900365 | NA |
| 1 | H1, H2, H3 | 0.0008940219 | 0.0008940219 | 0.001192029 |
| 1 | H1, H3 | 0.0012771742 | NA | 0.001702899 |
| 1 | H2 | NA | 0.0029800731 | NA |
| 1 | H2, H3 | NA | 0.0012771742 | 0.001702899 |
| 1 | H3 | NA | NA | 0.002980073 |
| 2 | H1 | 0.0237882657 | NA | NA |
| 2 | H1, H2 | 0.0117828003 | 0.0117828003 | NA |
| 2 | H1, H2, H3 | 0.0070254979 | 0.0070254979 | 0.009399818 |
| 2 | H1, H3 | 0.0100798631 | NA | 0.013489389 |
| 2 | H2 | NA | 0.0237882657 | NA |
| 2 | H2, H3 | NA | 0.0100798631 | 0.013489389 |
| 2 | H3 | NA | NA | 0.023788266 |
library(wpgsd)
-library(gsDesign)
-library(gMCPLite)
-library(haven)
-library(dplyr)
-library(tidyr)
-library(reshape2)
-library(gt)
The weighted parametric group sequential design (WPGSD) (Anderson et al. (2022)) approach allows one to take advantage of the known correlation structure in constructing efficacy bounds to control the family-wise error rate (FWER) for a group sequential design. Here correlation may be due to common observations in nested populations, common observations in overlapping populations, or common observations in the control arm. This document illustrates the use of the R package wpgsd to implement this approach.
We aim to control the familywise error rate (FWER) at level \(\alpha\). Let \(J \subseteq I\). The intersection hypothesis \(H_J\) assumes the null hypothesis for all individual hypotheses \(H_i\) with \(i \in J\). The closed testing principle is as follows: if for all sets \(J \subseteq I\) with \(j \in J\), \(H_J\) can be rejected at level \(\alpha\), then \(H_j\) can be rejected. Weighted parametric tests can be used for this: Bretz et al. (2011) and Xi et al. (2017) for fixed designs, or Maurer and Bretz (2013) for group sequential designs.
A closed procedure is called consonant if the rejection of the complete intersection null hypothesis \(H_I\) further implies that at least one elementary hypothesis \(H_i\), \(i \in I\), is rejected. Consonance is a desirable property leading to short-cut procedures that give the same rejection decisions as the original closed procedure but with fewer operations. For WPGSD, consonance does not always hold, and in general the closed testing procedure is required.
The following illustrates the first example, which has overlapping populations (e.g., due to biomarkers); this is also Example 1 in Anderson et al. (2022).
Ex1: Populations
-The multiplicity strategy is defined as follows.
-# Transition matrix
-m <- matrix(c(
- 0, 0, 1,
- 0, 0, 1,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-# Weight matrix
-w <- c(0.3, 0.3, 0.4)
-
-# Multiplicity graph
-cbPalette <- c("#999999", "#E69F00", "#56B4E9")
-
-nameHypotheses <- c(
- "H1: Population 1",
- "H2: Population 2",
- "H3: Overall Population"
-)
-
-hplot <- hGraph(3,
- alphaHypotheses = w,
- m = m,
- nameHypotheses = nameHypotheses,
- trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1,
- halfHgt = 0.5, offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = cbPalette,
- wchar = "w"
-)
-hplot
The event count of each hypothesis at each analysis is shown below.
| Population | Number of Events at IA | Number of Events at FA |
|---|---|---|
| Population 1 | 100 | 200 |
| Population 2 | 110 | 220 |
| Population 1 \(\cap\) 2 | 80 | 160 |
| Overall Population | 225 | 450 |
The correlation matrix among test statistics is as follows.
| \(i,k\) | 1,1 | 2,1 | 3,1 | 1,2 | 2,2 | 3,2 |
|---|---|---|---|---|---|---|
| 1,1 | 1 | \(\frac{80}{\sqrt{100\cdot 110}}\) | \(\frac{100}{\sqrt{100\cdot 225}}\) | \(\frac{100}{\sqrt{100\cdot 200}}\) | \(\frac{80}{\sqrt{100\cdot 220}}\) | \(\frac{100}{\sqrt{100\cdot 450}}\) |
| 2,1 | 0.76 | 1 | \(\frac{110}{\sqrt{110\cdot 225}}\) | \(\frac{80}{\sqrt{110\cdot 200}}\) | \(\frac{110}{\sqrt{110\cdot 220}}\) | \(\frac{110}{\sqrt{110\cdot 450}}\) |
| 3,1 | 0.67 | 0.70 | 1 | \(\frac{100}{\sqrt{225\cdot 200}}\) | \(\frac{110}{\sqrt{225\cdot 220}}\) | \(\frac{225}{\sqrt{225\cdot 450}}\) |
| 1,2 | 0.71 | 0.54 | 0.47 | 1 | \(\frac{160}{\sqrt{200\cdot 220}}\) | \(\frac{200}{\sqrt{200\cdot 450}}\) |
| 2,2 | 0.54 | 0.71 | 0.49 | 0.76 | 1 | \(\frac{220}{\sqrt{220\cdot 450}}\) |
| 3,2 | 0.47 | 0.49 | 0.71 | 0.67 | 0.70 | 1 |
The following illustrates the second example, in which correlation comes from a common control arm. This is also Example 2 in Anderson et al. (2022).
| Treatment Arm | Number of Events at IA | Number of Events at FA |
|---|---|---|
| Experimental 1 | 70 | 135 |
| Experimental 2 | 75 | 150 |
| Experimental 3 | 80 | 165 |
| Control | 85 | 170 |
| \(i,k\) | 1,1 | 2,1 | 3,1 | 1,2 | 2,2 | 3,2 |
|---|---|---|---|---|---|---|
| 1,1 | 1 | \(\frac{85}{\sqrt{155\cdot 160}}\) | \(\frac{85}{\sqrt{155\cdot 165}}\) | \(\frac{155}{\sqrt{155\cdot 305}}\) | \(\frac{85}{\sqrt{155\cdot 320}}\) | \(\frac{85}{\sqrt{155\cdot 335}}\) |
| 2,1 | 0.54 | 1 | \(\frac{85}{\sqrt{160\cdot 165}}\) | \(\frac{85}{\sqrt{160\cdot 305}}\) | \(\frac{160}{\sqrt{160\cdot 320}}\) | \(\frac{85}{\sqrt{160\cdot 335}}\) |
| 3,1 | 0.53 | 0.52 | 1 | \(\frac{85}{\sqrt{165\cdot 305}}\) | \(\frac{85}{\sqrt{165\cdot 320}}\) | \(\frac{165}{\sqrt{165\cdot 335}}\) |
| 1,2 | 0.71 | 0.38 | 0.38 | 1 | \(\frac{170}{\sqrt{305\cdot 320}}\) | \(\frac{170}{\sqrt{305\cdot 335}}\) |
| 2,2 | 0.38 | 0.71 | 0.37 | 0.54 | 1 | \(\frac{170}{\sqrt{320\cdot 335}}\) |
| 3,2 | 0.37 | 0.37 | 0.70 | 0.53 | 0.52 | 1 |
These 2 examples each have 7 intersection hypotheses; the corresponding weighting strategies are illustrated below.
Example 1:

| \(H_J\) | \(w_1(J)\) | \(w_2(J)\) | \(w_3(J)\) |
|---|---|---|---|
| \(H_1 \cap H_2 \cap H_3\) | 0.3 | 0.3 | 0.4 |
| \(H_1 \cap H_2\) | 0.5 | 0.5 | - |
| \(H_1 \cap H_3\) | 0.3 | - | 0.7 |
| \(H_2 \cap H_3\) | - | 0.3 | 0.7 |
| \(H_1\) | 1 | - | - |
| \(H_2\) | - | 1 | - |
| \(H_3\) | - | - | 1 |
Example 2:

| \(H_J\) | \(w_1(J)\) | \(w_2(J)\) | \(w_3(J)\) |
|---|---|---|---|
| \(H_1 \cap H_2 \cap H_3\) | 1/3 | 1/3 | 1/3 |
| \(H_1 \cap H_2\) | 1/2 | 1/2 | - |
| \(H_1 \cap H_3\) | 1/2 | - | 1/2 |
| \(H_2 \cap H_3\) | - | 1/2 | 1/2 |
| \(H_1\) | 1 | - | - |
| \(H_2\) | - | 1 | - |
| \(H_3\) | - | - | 1 |
The WPGSD approach uses all known correlations between tests in the study. This relaxes the bounds and allows increased power or a smaller sample size. Three spending approaches have been proposed:
Fixed spending (Fleming-Harrington-O’Brien (FHO) approach). Specify \(0 < \alpha_1(J) < \alpha_2(J) < \ldots < \alpha_K(J) = \alpha(J) \leq \alpha\) for all \(J\subseteq I\), where \(\alpha(J)\) is the total alpha for intersection hypothesis \(H_J\) according to the graphical approach.
\(\alpha\)-spending approach 1. We choose a spending function family \(f(t,\alpha)\) and set \(\alpha_k(J)=f(t_k(J),\alpha(J))\) for \(1\le k\le K\) for all intersection hypotheses \(J\subseteq I\).
\(\alpha\)-spending approach 2. For each elementary hypothesis \(i\) (\(i = 1, 2, \ldots, m\)), specify the \(\alpha\)-spending function family \(f_i(t,\gamma)\), where \(\gamma\) is the \(\alpha\) level for the hypothesis and \(f_i(t_{ik},\gamma)\) determines how much \(\alpha\) to spend up to analysis \(k\) for hypothesis \(i\) when level \(\gamma\) is allocated to the hypothesis. Then \(\alpha_k(J) = \sum_{i \in J} f_i(t_{ik}, w_i(J)\alpha)\).
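As a rough sketch of how \(\alpha_k(J)\) is assembled under spending approach 2 (the weights, information fractions, and HSD(-4) choice below are illustrative assumptions of ours; `sfHSD()` is the Hwang-Shih-DeCani spending function from gsDesign):

```r
library(gsDesign)

alpha <- 0.025
w_J <- c(0.5, 0.5)  # weights w_i(J) for i in J = {1, 2} (illustrative)
t_k <- c(0.5, 0.5)  # information fractions t_{ik} at analysis k (illustrative)

# alpha_k(J) = sum over i in J of f_i(t_{ik}, w_i(J) * alpha), with f_i = HSD(-4)
alpha_kJ <- sum(mapply(function(t, w) sfHSD(w * alpha, t, -4)$spend, t_k, w_J))
alpha_kJ
```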
Assume \((Z_1,Z_2)\) is bivariate normal with known correlation.
Find the \(\alpha\)-inflation factor \(c_J\) such that \[ \alpha = P[\cup_{i \in J} \{p_i \leq c_J w_{J,i}\alpha \}] = P[\cup_{i \in J} \{Z_i \geq \Phi^{-1}(1-c_J w_{J,i}\alpha) \}].\]
The basic algorithm code is given in Bretz et al. (2011).
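A minimal sketch of that computation for a single analysis with two hypotheses, using `mvtnorm::pmvnorm()` and `uniroot()` (the weights and correlation below are illustrative, not taken from a specific example):

```r
library(mvtnorm)

alpha <- 0.025
w <- c(0.5, 0.5)                   # weights w_{J,i} (illustrative)
rho <- 0.76                        # known correlation between Z_1 and Z_2 (illustrative)
sigma <- matrix(c(1, rho, rho, 1), nrow = 2)

# Type I error of the weighted parametric test as a function of c_J
type1 <- function(c_J) {
  b <- qnorm(1 - c_J * w * alpha)  # bounds Phi^{-1}(1 - c_J * w_{J,i} * alpha)
  1 - pmvnorm(upper = b, corr = sigma)[1]
}

# Solve for the inflation factor so the union probability equals alpha
c_J <- uniroot(function(c) type1(c) - alpha,
               interval = c(1, 1 / max(w)), extendInt = "upX")$root
c_J
```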
Assume for \(j < k\) that -bounds \(c_{ij} (J), i \in J, j < -k\), have already been set and remain unchanged.
At analysis \(k\), compute the -correlation matrix of \(Z_{ij}\), \(i \in J\), \(j = -1, \ldots, k\).
-i Initialize \(\alpha_{k}^{*}(J) = -\alpha_{k}(J) - \alpha_{k-1}(J)\).
-ii Set \(b_{ik} = \Phi^{-1}(1 - -w_{i}(J)\alpha_{k}^{*} (J))\), \(i\in -J\).
-iii Compute type I error rate up to analysis \(k\) \[ 1 - -Pr(\cap_{i \in J} \{ Z_{ik} < b_{ik} \} \cap_{i \in J, j < k} \{ -Z_{ij} < c_{ij}(J) \} ). \]
-iv Update \(\alpha_{k}^{*}(J)\) -using root-finding with steps ii - iii until the type I error rate -through analysis \(k\) is controlled at -\(\alpha_{k}(J)\) for \(H_J\). That is, \[ 1 - Pr(\cap_{i \in J} \{ Z_{ik} < b_{ik} \} -\cap_{i \in J, j < k} \{ Z_{ij} < c_{ij}(J) \} ) = \alpha_{k}. -\]
-v Set \(c_{ik}(J) = b_{ik}\) from -the previous step. The corresponding nominal \(p\)-value boundary is \(p_{ik}(J)= 1-\Phi(c_{ik}(J)) -= w_i(J)\alpha_k^*(J)\).
Note: interim bound does not depend on future analyses. Solution -only requires root finding for a single \(\alpha_{k}^{*}(J)\) at a time, \(k = 1, \ldots, K\). Requires multivariate -normal computation from mvtnorm R package Genz et al. (2020).
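A minimal sketch of steps i-v at analysis \(k = 2\) for \(J = \{1, 2\}\), using mvtnorm and `uniroot()`. The correlation values mirror the Example 1 matrix shown later in this document; the cumulative levels \(\alpha_1(J)\) and \(\alpha_2(J)\) are illustrative placeholders, and for brevity the interim bounds are taken as unadjusted weighted Bonferroni bounds rather than the output of the same procedure at \(k = 1\):

```r
library(mvtnorm)

alpha_1J <- 0.0015; alpha_2J <- 0.0125  # cumulative alpha_1(J), alpha_2(J) (illustrative)
w_J <- c(0.5, 0.5)                       # weights w_i(J), i in J
c_1 <- qnorm(1 - w_J * alpha_1J)         # IA bounds c_{i1}(J), treated as already fixed

# Correlation of (Z_11, Z_21, Z_12, Z_22), pattern as in Example 1
corr4 <- matrix(c(
  1.00, 0.76, 0.71, 0.54,
  0.76, 1.00, 0.54, 0.71,
  0.71, 0.54, 1.00, 0.76,
  0.54, 0.71, 0.76, 1.00
), nrow = 4)

# Steps ii-iii: cumulative type I error through analysis 2 for a given alpha*_2(J)
cum_error <- function(alpha_star) {
  b_2 <- qnorm(1 - w_J * alpha_star)
  1 - pmvnorm(upper = c(c_1, b_2), corr = corr4)[1]
}

# Step iv: root-finding so the cumulative error equals alpha_2(J)
alpha_star <- uniroot(function(a) cum_error(a) - alpha_2J,
                      interval = c(alpha_2J - alpha_1J, alpha_2J),
                      extendInt = "upX")$root

# Step v: nominal p-value boundaries p_{i2}(J) = w_i(J) * alpha*_2(J)
w_J * alpha_star
```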
Assume for \(j < k\) that -bounds \(c_{ij} (J), i \in J, j < -k\), have already been set and remain unchanged.
At analysis \(k\), compute the -correlation matrix of \(Z_{ij}\), \(i \in J\), \(j = -1, \ldots, k\).
-i Determine what the nominal \(p\)-value boundary would be for each -elementary hypothesis in \(J\) for a -weighted Bonferroni test in a group sequential design as described in -Maurer and Bretz (2013). Let these nominal -\(p\)-value boundaries be \(\alpha^\prime_{ik}(J)\).
-ii Choose an inflation factor \(\xi_{k}(J) -> 1\) and set \[b_{ik} = -\Phi^{-1}(1 - \xi_k(J) \alpha^\prime_{ik}(J)).\]
-iii Update \(\xi_k(J)\) until this -type I error rate up to analysis \(k\) -is controlled at \(\alpha_{k}(J)\) for -\(H_J\). That is, \[ 1 - Pr(\cap_{i \in J} \{ Z_{ik} < b_{ik} \} -\cap_{i \in J, j < k} \{ Z_{ij} < c_{ij}(J) \} ) = -\alpha_{k}(J).\]
-iv After the appropriate \(\xi_k(J)\) has been derived, the nominal -\(p\)-value boundaries are \(p_{ik}(J)=\xi_k(J) \alpha^\prime_{ik}(J)\), -and \(b_{ik}\) is computed as in step -ii, we set \(c_{ik}(J) = -b_{ik}\).
Note: interim bound does not depend on future analyses. Solution -only requires root finding for a single \(\xi_k(J)\) at a time, \(k = 1, \ldots, K\). Requires multivariate -normal computation from the mvtnorm R package Genz et -al. (2020).
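The \(\xi_k(J)\) update has the same structure as the previous sketch; only the parameterization of the bounds changes. A compact self-contained sketch (all numeric inputs below are illustrative placeholders of ours, with the correlation pattern again borrowed from Example 1; in practice \(\alpha^\prime_{i2}(J)\) comes from step i and the interim bounds from the \(k = 1\) step):

```r
library(mvtnorm)

corr4 <- matrix(c(
  1.00, 0.76, 0.71, 0.54,
  0.76, 1.00, 0.54, 0.71,
  0.71, 0.54, 1.00, 0.76,
  0.54, 0.71, 0.76, 1.00
), nrow = 4)

c_1 <- qnorm(1 - c(0.0015, 0.0015))  # fixed IA bounds c_{i1}(J) (illustrative)
alpha_prime_2 <- c(0.0118, 0.0118)   # weighted Bonferroni GSD bounds alpha'_{i2}(J) from step i
alpha_2J <- 0.025                    # target cumulative level alpha_2(J) (full alpha spent at FA)

cum_error <- function(xi) {
  b_2 <- qnorm(1 - xi * alpha_prime_2)  # step ii
  1 - pmvnorm(upper = c(c_1, b_2), corr = corr4)[1]
}

# Step iii: root-finding for the inflation factor xi_2(J)
xi_2 <- uniroot(function(x) cum_error(x) - alpha_2J,
                interval = c(1, 1.5), extendInt = "upX")$root
xi_2 * alpha_prime_2                 # step iv: nominal p-value boundaries p_{i2}(J)
```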
We first define the transition matrix and weights as shown above in Section 2.5. Next we set up the event count table as follows:
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, 100,
- 2, 2, 1, 110,
- 3, 3, 1, 225,
- 1, 2, 1, 80,
- 1, 3, 1, 100,
- 2, 3, 1, 110,
- 1, 1, 2, 200,
- 2, 2, 2, 220,
- 3, 3, 2, 450,
- 1, 2, 2, 160,
- 1, 3, 2, 200,
- 2, 3, 2, 220
-)
-event %>%
- gt() %>%
- tab_header(title = "Event Count")
Event Count

| H1 | H2 | Analysis | Event |
|---|---|---|---|
| 1 | 1 | 1 | 100 |
| 2 | 2 | 1 | 110 |
| 3 | 3 | 1 | 225 |
| 1 | 2 | 1 | 80 |
| 1 | 3 | 1 | 100 |
| 2 | 3 | 1 | 110 |
| 1 | 1 | 2 | 200 |
| 2 | 2 | 2 | 220 |
| 3 | 3 | 2 | 450 |
| 1 | 2 | 2 | 160 |
| 1 | 3 | 2 | 200 |
| 2 | 3 | 2 | 220 |
# Alternatively, one can manually enter paths for analysis datasets,
-# example below uses an example dataset assuming currently we are at IA1.
-paths <- system.file("extdata/", package = "wpgsd")
-
-### Generate event count table from ADSL and ADTTE datasets
-# Selection criteria for each hypothesis
-h_select <- tribble(
- ~Hypothesis, ~Crit,
- 1, "PARAMCD=='OS' & TRT01P %in% c('Xanomeline High Dose', 'Placebo')",
- 2, "PARAMCD=='OS' & TRT01P %in% c('Xanomeline Low Dose', 'Placebo')"
-)
-
-event2 <- generate_event_table(paths, h_select,
- adsl_name = "adsl", adtte_name = "adtte",
- key_var = "USUBJID", cnsr_var = "CNSR"
-)$event
-
-event2 %>%
- gt() %>%
- tab_header(title = "Event Count - Computed from SAS Datasets Example")
Event Count - Computed from SAS Datasets Example

| H1 | H2 | Analysis | Event |
|---|---|---|---|
| 1 | 1 | 1 | 66 |
| 2 | 2 | 1 | 59 |
| 1 | 2 | 1 | 45 |
Then we compute the correlation matrix from the event count table using generate_corr(). We see that the correlations not accounted for by the Bonferroni approach are substantial, and thus we might expect a non-trivial impact on the bounds for the hypothesis tests.
## Generate correlation from events
-corr <- generate_corr(event)
-
-corr %>%
- as_tibble() %>%
- gt() %>%
- fmt_number(columns = everything(), decimals = 2) %>%
- tab_header(title = "Correlation Matrix")
Correlation Matrix

| H1_A1 | H2_A1 | H3_A1 | H1_A2 | H2_A2 | H3_A2 |
|---|---|---|---|---|---|
| 1.00 | 0.76 | 0.67 | 0.71 | 0.54 | 0.47 |
| 0.76 | 1.00 | 0.70 | 0.54 | 0.71 | 0.49 |
| 0.67 | 0.70 | 1.00 | 0.47 | 0.49 | 0.71 |
| 0.71 | 0.54 | 0.47 | 1.00 | 0.76 | 0.67 |
| 0.54 | 0.71 | 0.49 | 0.76 | 1.00 | 0.70 |
| 0.47 | 0.49 | 0.71 | 0.67 | 0.70 | 1.00 |
Bonferroni and WPGSD bounds can then be computed via generate_bounds(). In this example, we use HSD(-4) as the \(\alpha\)-spending function for all hypotheses. Of note, the generate_bounds() argument type specifies the boundary type.
Compute Bonferroni bounds.
-# Bonferroni bounds
-bound_Bonf <- generate_bounds(
- type = 0, k = 2, w = w, m = m,
- corr = corr, alpha = 0.025,
- sf = list(sfHSD, sfHSD, sfHSD),
- sfparm = list(-4, -4, -4),
- t = list(c(0.5, 1), c(0.5, 1), c(0.5, 1))
-)
-
-bound_Bonf %>%
- gt() %>%
- fmt_number(columns = 3:5, decimals = 4) %>%
- tab_header(title = "Bonferroni bounds")
Bonferroni bounds

| Analysis | Hypotheses | H1 | H2 | H3 |
|---|---|---|---|---|
| 1 | H1 | 0.0030 | NA | NA |
| 1 | H1, H2 | 0.0015 | 0.0015 | NA |
| 1 | H1, H2, H3 | 0.0009 | 0.0009 | 0.0012 |
| 1 | H1, H3 | 0.0013 | NA | 0.0016 |
| 1 | H2 | NA | 0.0030 | NA |
| 1 | H2, H3 | NA | 0.0013 | 0.0016 |
| 1 | H3 | NA | NA | 0.0030 |
| 2 | H1 | 0.0238 | NA | NA |
| 2 | H1, H2 | 0.0118 | 0.0118 | NA |
| 2 | H1, H2, H3 | 0.0070 | 0.0070 | 0.0094 |
| 2 | H1, H3 | 0.0106 | NA | 0.0130 |
| 2 | H2 | NA | 0.0238 | NA |
| 2 | H2, H3 | NA | 0.0106 | 0.0130 |
| 2 | H3 | NA | NA | 0.0238 |
Compute WPGSD bounds using \(\alpha\)-spending approach 1 with HSD(-4) spending. Here the spending time is defined as the minimum of the 3 observed information fractions.
-set.seed(1234)
-# WPGSD bounds, spending approach 1
-bound_WPGSD <- generate_bounds(
- type = 2, k = 2, w = w, m = m,
- corr = corr, alpha = 0.025,
- sf = sfHSD,
- sfparm = -4,
- t = c(min(100 / 200, 110 / 220, 225 / 450), 1)
-)
-
-bound_WPGSD %>%
- gt() %>%
- fmt_number(columns = 3:5, decimals = 4) %>%
- tab_header(title = "WPGSD bounds")
WPGSD bounds

| Analysis | Hypotheses | H1 | H2 | H3 |
|---|---|---|---|---|
| 1 | H1 | 0.0030 | NA | NA |
| 1 | H1, H2 | 0.0017 | 0.0017 | NA |
| 1 | H1, H2, H3 | 0.0011 | 0.0011 | 0.0014 |
| 1 | H1, H3 | 0.0014 | NA | 0.0018 |
| 1 | H2 | NA | 0.0030 | NA |
| 1 | H2, H3 | NA | 0.0015 | 0.0018 |
| 1 | H3 | NA | NA | 0.0030 |
| 2 | H1 | 0.0238 | NA | NA |
| 2 | H1, H2 | 0.0144 | 0.0144 | NA |
| 2 | H1, H2, H3 | 0.0092 | 0.0092 | 0.0123 |
| 2 | H1, H3 | 0.0122 | NA | 0.0149 |
| 2 | H2 | NA | 0.0238 | NA |
| 2 | H2, H3 | NA | 0.0124 | 0.0152 |
| 2 | H3 | NA | NA | 0.0238 |
The comparison between the Bonferroni and WPGSD bounds is shown below. Nominal levels at the final analysis obtained with the WPGSD method increase by up to 1.3× over those obtained via the Bonferroni approach.
Bonferroni and WPGSD Bounds

| Analysis | Hypotheses | H1.B | H2.B | H3.B | H1.W | H2.W | H3.W | xi |
|---|---|---|---|---|---|---|---|---|
| 1 | H1, H2, H3 | 0.0009 | 0.0009 | 0.0012 | 0.0011 | 0.0011 | 0.0014 | 1.1770 |
| 1 | H1, H2 | 0.0015 | 0.0015 | NA | 0.0017 | 0.0017 | NA | 1.1363 |
| 1 | H1, H3 | 0.0013 | NA | 0.0016 | 0.0014 | NA | 0.0018 | 1.0810 |
| 1 | H2, H3 | NA | 0.0013 | 0.0016 | NA | 0.0015 | 0.0018 | 1.0962 |
| 1 | H1 | 0.0030 | NA | NA | 0.0030 | NA | NA | 1.0000 |
| 1 | H2 | NA | 0.0030 | NA | NA | 0.0030 | NA | 1.0000 |
| 1 | H3 | NA | NA | 0.0030 | NA | NA | 0.0030 | 1.0000 |
| 2 | H1, H2, H3 | 0.0070 | 0.0070 | 0.0094 | 0.0092 | 0.0092 | 0.0123 | 1.3092 |
| 2 | H1, H2 | 0.0118 | 0.0118 | NA | 0.0144 | 0.0144 | NA | 1.2250 |
| 2 | H1, H3 | 0.0106 | NA | 0.0130 | 0.0122 | NA | 0.0149 | 1.1532 |
| 2 | H2, H3 | NA | 0.0106 | 0.0130 | NA | 0.0124 | 0.0152 | 1.1735 |
| 2 | H1 | 0.0238 | NA | NA | 0.0238 | NA | NA | 1.0000 |
| 2 | H2 | NA | 0.0238 | NA | NA | 0.0238 | NA | 1.0000 |
| 2 | H3 | NA | NA | 0.0238 | NA | NA | 0.0238 | 1.0000 |
Closed testing procedure can then be performed using
-closed_test().
## Observed p-values.
-## The tibble must contain columns Analysis, H1, H2 etc for all hypotheses
-p_obs <- tribble(
- ~Analysis, ~H1, ~H2, ~H3,
- 1, 0.01, 0.0004, 0.03,
- 2, 0.05, 0.002, 0.015
-)
-
-## Closed testing ##
-test_result <- closed_test(bound_WPGSD, p_obs)
-
-p_obs %>%
- gt() %>%
- fmt_number(columns = 2:4, decimals = 8, drop_trailing_zeros = TRUE) %>%
- tab_header("Observed Nominal p-Values")
Observed Nominal p-Values

| Analysis | H1 | H2 | H3 |
|---|---|---|---|
| 1 | 0.01 | 0.0004 | 0.03 |
| 2 | 0.05 | 0.002 | 0.015 |
test_result %>%
- gt() %>%
- tab_header(title = "Closed Testing Results")
Closed Testing Results

| H1 | H2 | H3 | Analysis |
|---|---|---|---|
| Fail | Success | Fail | Analysis 1 |
| Fail | Success | Fail | Analysis 2 |
Similarly, the code below reproduces the results of Example 2 of Anderson et al. (2022), which uses spending method 3c as specified in the paper.
-set.seed(1234)
-
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ex2 BH ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
-# Transition matrix in Figure A2
-m <- matrix(c(
- 0, 0.5, 0.5,
- 0.5, 0, 0.5,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-# Initial weights
-w <- c(1 / 3, 1 / 3, 1 / 3)
-
-# Event count of intersection of paired hypotheses - Table 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, 155,
- 2, 2, 1, 160,
- 3, 3, 1, 165,
- 1, 2, 1, 85,
- 1, 3, 1, 85,
- 2, 3, 1, 85,
- 1, 1, 2, 305,
- 2, 2, 2, 320,
- 3, 3, 2, 335,
- 1, 2, 2, 170,
- 1, 3, 2, 170,
- 2, 3, 2, 170
-)
-
-event %>%
- gt() %>%
- tab_header(title = "Event Count")
Event Count

| H1 | H2 | Analysis | Event |
|---|---|---|---|
| 1 | 1 | 1 | 155 |
| 2 | 2 | 1 | 160 |
| 3 | 3 | 1 | 165 |
| 1 | 2 | 1 | 85 |
| 1 | 3 | 1 | 85 |
| 2 | 3 | 1 | 85 |
| 1 | 1 | 2 | 305 |
| 2 | 2 | 2 | 320 |
| 3 | 3 | 2 | 335 |
| 1 | 2 | 2 | 170 |
| 1 | 3 | 2 | 170 |
| 2 | 3 | 2 | 170 |
# Generate correlation from events
-corr <- generate_corr(event)
-
-# Correlation matrix in Table 4
-corr %>%
- as_tibble() %>%
- gt() %>%
- fmt_number(columns = everything(), decimals = 2) %>%
- tab_header(title = "Correlation Matrix")
Correlation Matrix

| H1_A1 | H2_A1 | H3_A1 | H1_A2 | H2_A2 | H3_A2 |
|---|---|---|---|---|---|
| 1.00 | 0.54 | 0.53 | 0.71 | 0.38 | 0.37 |
| 0.54 | 1.00 | 0.52 | 0.38 | 0.71 | 0.37 |
| 0.53 | 0.52 | 1.00 | 0.38 | 0.37 | 0.70 |
| 0.71 | 0.38 | 0.38 | 1.00 | 0.54 | 0.53 |
| 0.38 | 0.71 | 0.37 | 0.54 | 1.00 | 0.52 |
| 0.37 | 0.37 | 0.70 | 0.53 | 0.52 | 1.00 |
# WPGSD bounds, spending method 3c
-bound_WPGSD <- generate_bounds(
- type = 3, k = 2, w = w, m = m, corr = corr, alpha = 0.025,
- sf = list(sfLDOF, sfLDOF, sfLDOF),
- sfparm = list(0, 0, 0),
- t = list(c(155 / 305, 1), c(160 / 320, 1), c(165 / 335, 1))
-)
-
-# Bonferroni bounds
-bound_Bonf <- generate_bounds(
- type = 0, k = 2, w = w, m = m, corr = corr, alpha = 0.025,
- sf = list(sfLDOF, sfLDOF, sfLDOF),
- sfparm = list(0, 0, 0),
- t = list(c(155 / 305, 1), c(160 / 320, 1), c(165 / 335, 1))
-)
-
-bounds <- left_join(bound_Bonf, bound_WPGSD,
- by = c("Hypotheses", "Analysis"),
- suffix = c(".B", ".W")
-)
-
-# Reorder for output
-bounds$order <- rep(c(5, 2, 1, 3, 6, 4, 7), 2)
-bounds <- bounds %>%
- arrange(Analysis, order) %>%
- select(-order)
-
-# Table A6
-bounds %>%
- gt() %>%
- fmt_number(columns = 3:9, decimals = 4) %>%
- tab_header(title = "Bonferroni and WPGSD Bounds")
Bonferroni and WPGSD Bounds

| Analysis | Hypotheses | H1.B | H2.B | H3.B | H1.W | H2.W | H3.W | xi |
|---|---|---|---|---|---|---|---|---|
| 1 | H1, H2, H3 | 0.0002 | 0.0002 | 0.0002 | 0.0002 | 0.0002 | 0.0002 | 1.0421 |
| 1 | H1, H2 | 0.0005 | 0.0004 | NA | 0.0005 | 0.0004 | NA | 1.0266 |
| 1 | H1, H3 | 0.0005 | NA | 0.0004 | 0.0005 | NA | 0.0004 | 1.0247 |
| 1 | H2, H3 | NA | 0.0004 | 0.0004 | NA | 0.0004 | 0.0004 | 1.0230 |
| 1 | H1 | 0.0017 | NA | NA | 0.0017 | NA | NA | 1.0000 |
| 1 | H2 | NA | 0.0015 | NA | NA | 0.0015 | NA | 1.0000 |
| 1 | H3 | NA | NA | 0.0014 | NA | NA | 0.0014 | 1.0000 |
| 2 | H1, H2, H3 | 0.0083 | 0.0083 | 0.0083 | 0.0095 | 0.0095 | 0.0095 | 1.1493 |
| 2 | H1, H2 | 0.0123 | 0.0124 | NA | 0.0135 | 0.0135 | NA | 1.0942 |
| 2 | H1, H3 | 0.0123 | NA | 0.0124 | 0.0135 | NA | 0.0135 | 1.0898 |
| 2 | H2, H3 | NA | 0.0124 | 0.0124 | NA | 0.0134 | 0.0134 | 1.0855 |
| 2 | H1 | 0.0245 | NA | NA | 0.0245 | NA | NA | 1.0000 |
| 2 | H2 | NA | 0.0245 | NA | NA | 0.0245 | NA | 1.0000 |
| 2 | H3 | NA | NA | 0.0245 | NA | NA | 0.0245 | 1.0000 |
The above illustrates the use of the WPGSD approach to compute bounds at the analysis stage. At the design stage, one can take one of the following 2 options: 1) the trial can first be designed as if the testing would be done with a weighted Bonferroni procedure, giving a conservative sample size estimate; at the analysis stage, the correlation can then be taken into consideration with the WPGSD approach for the bound calculation; or 2) to adjust the sample size downward using the WPGSD approach at the design stage, one can power the study by taking the minimum \(p\)-value bound for a given individual hypothesis from the WPGSD table (with an assumed correlation structure). For example, for \(H_2\) of Example 1, this is \(\hbox{min}(0.0011,0.0017,0.0010,0.0030)=0.0010\) for \(k=1\) and \(\hbox{min}(0.0092,0.0144,0.0081,0.0238)=0.0081\) for \(k=2\). Then the \(H_2\) bounds of 0.0010 (\(k=1\)) and 0.0081 (\(k=2\)) can be used to power \(H_2\). An R function for the 2nd option is in development.
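A small sketch of the bookkeeping for the second option, assuming `bound_WPGSD` is the WPGSD bounds table computed above (with the Analysis, Hypotheses, H1, H2, H3 columns produced by `generate_bounds()`): the per-analysis minimum bound for a given hypothesis can be extracted with dplyr.

```r
library(dplyr)

# For each analysis, take the smallest H2 bound across all intersection
# hypotheses containing H2; these minima can then be used to power H2.
h2_power_bounds <- bound_WPGSD %>%
  filter(grepl("H2", Hypotheses)) %>%
  group_by(Analysis) %>%
  summarize(min_bound = min(H2, na.rm = TRUE))

h2_power_bounds
```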
The WPGSD approach provides a unification of previous work on parametric testing in group sequential design. It enables more complex scenarios but requires attention to consonance and intersection hypotheses. Although detailed closed testing is required, this should not be a deterrent. The approach accommodates various spending approaches and provides more relaxed bounds and improved power compared to the Bonferroni approach.
-
-
-
-[](https://github.com/Merck/wpgsd/actions/workflows/R-CMD-check.yaml)
-[](https://app.codecov.io/gh/Merck/wpgsd?branch=main)
-
-
-Weighted parametric group sequential design (WPGSD) allows one to take advantage
-of the known correlation structure in constructing efficacy bounds to control
-family-wise error rate (FWER) for a group sequential design. Here correlation
-may be due to common observations in nested populations, due to common
-observations in overlapping populations, or due to common observations
-in the control arm.
-
-## Installation
-
-The easiest way to get wpgsd is to install from CRAN:
-
-```r
-install.packages("wpgsd")
-```
-
-Alternatively, to use a new feature or get a bug fix,
-you can install the development version of wpgsd from GitHub:
-
-```r
-# install.packages("remotes")
-remotes::install_github("Merck/wpgsd")
-```
-
-## Current limitations
-
-There are some limitations that are currently being addressed.
-Please use the package with caution in production environments.
-
-- The current implementation may have limitations when handling more
- complex scenarios beyond what has been demonstrated.
-- The API is subject to potential breaking changes as it is currently
- being reviewed and refactored.
-- Further validation is needed to ensure the reliability of the package.
-- More documentation is expected in future releases.
-
-## References
-
-Anderson, K. M., Guo, Z., Zhao, J., & Sun, L. Z. (2022).
-A unified framework for weighted parametric group sequential design.
-_Biometrical Journal_, 64(7), 1219--1239.
-
-BibTeX entry:
-
-```
-@article{anderson2022unified,
- title = {A unified framework for weighted parametric group sequential design},
- author = {Anderson, Keaven M and Guo, Zifang and Zhao, Jing and Sun, Linda Z},
- journal = {Biometrical Journal},
- volume = {64},
- number = {7},
- pages = {1219--1239},
- year = {2022},
- publisher = {Wiley Online Library}
-}
-```
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/build/partial.rdb b/wpgsd.Rcheck/vign_test/wpgsd/build/partial.rdb
deleted file mode 100644
index c78d6ee..0000000
Binary files a/wpgsd.Rcheck/vign_test/wpgsd/build/partial.rdb and /dev/null differ
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/build/vignette.rds b/wpgsd.Rcheck/vign_test/wpgsd/build/vignette.rds
deleted file mode 100644
index 5ec4d87..0000000
Binary files a/wpgsd.Rcheck/vign_test/wpgsd/build/vignette.rds and /dev/null differ
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/examples/event_table_integration.R b/wpgsd.Rcheck/vign_test/wpgsd/examples/event_table_integration.R
deleted file mode 100644
index 06fb2b6..0000000
--- a/wpgsd.Rcheck/vign_test/wpgsd/examples/event_table_integration.R
+++ /dev/null
@@ -1,71 +0,0 @@
-# Example integration of EventTable S7 class with existing wpgsd functions
-# This shows how EventTable can work with generate_corr()
-
-library(wpgsd)
-library(tibble)
-
-# Example 1: Using EventTable with generate_corr()
-# Create event data using the EventTable class
-event_data_raw <- tibble::tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, 155,
- 2, 2, 1, 160,
- 3, 3, 1, 165,
- 1, 2, 1, 85,
- 1, 3, 1, 85,
- 2, 3, 1, 85,
- 1, 1, 2, 305,
- 2, 2, 2, 320,
- 3, 3, 2, 335,
- 1, 2, 2, 170,
- 1, 3, 2, 170,
- 2, 3, 2, 170
-)
-
-# Create EventTable object
-event_table <- EventTable(data = event_data_raw)
-print("Created EventTable:")
-print(event_table)
-
-# Use with generate_corr (currently expects tibble/data.frame)
-# The @data slot extracts the underlying tibble
-correlation_matrix <- generate_corr(event_table@data)
-print("Generated correlation matrix:")
-print(correlation_matrix)
-
-# Example 2: Data validation before processing
-cat("\nExample 2: Data validation\n")
-cat("==========================\n")
-
-# Try to create EventTable with invalid data
-tryCatch({
- invalid_data <- tibble::tibble(
- H1 = c(1, 2, -1), # Invalid: negative hypothesis index
- H2 = c(1, 2, 3),
- Analysis = c(1, 1, 1),
- Event = c(100, 200, 150)
- )
-
- invalid_table <- EventTable(data = invalid_data)
-}, error = function(e) {
- cat("Validation caught error:", e$message, "\n")
-})
-
-# Example 3: Subsetting capabilities
-cat("\nExample 3: Subsetting\n")
-cat("=====================\n")
-
-# Get only first analysis
-analysis_1 <- subset_event_table(event_table, analysis = 1)
-cat("Analysis 1 events:\n")
-print(analysis_1@data)
-
-# Get only H1-H2 hypothesis pairs
-h1_h2_events <- subset_event_table(event_table, hypotheses = c(1, 2))
-cat("\nH1-H2 hypothesis events:\n")
-print(h1_h2_events@data)
-
-# Example 4: Summary information
-cat("\nExample 4: Summary\n")
-cat("==================\n")
-summary(event_table)
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/examples/test_event_table.R b/wpgsd.Rcheck/vign_test/wpgsd/examples/test_event_table.R
deleted file mode 100644
index d55ecbf..0000000
--- a/wpgsd.Rcheck/vign_test/wpgsd/examples/test_event_table.R
+++ /dev/null
@@ -1,50 +0,0 @@
-# Test script for EventTable S7 class
-# This demonstrates how to use the new EventTable class
-
-library(wpgsd)
-library(tibble)
-
-# Create sample event data as used in the package
-event_data <- tibble::tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, 155,
- 2, 2, 1, 160,
- 3, 3, 1, 165,
- 1, 2, 1, 85,
- 1, 3, 1, 85,
- 2, 3, 1, 85,
- 1, 1, 2, 305,
- 2, 2, 2, 320,
- 3, 3, 2, 335,
- 1, 2, 2, 170,
- 1, 3, 2, 170,
- 2, 3, 2, 170
-)
-
-# Create EventTable object
-event_table <- EventTable(data = event_data)
-
-# Print the object
-print(event_table)
-
-# Get summary
-summary(event_table)
-
-# Subset by analysis
-subset_analysis_1 <- subset_event_table(event_table, analysis = 1)
-print("Analysis 1 subset:")
-print(subset_analysis_1)
-
-# Subset by hypotheses
-subset_h1_h2 <- subset_event_table(event_table, hypotheses = c(1, 2))
-print("H1-H2 subset:")
-print(subset_h1_h2)
-
-# Convert existing tibble to EventTable
-converted_table <- as_event_table(event_data)
-print("Converted table:")
-print(converted_table)
-
-# Validate data
-validate_event_table_data(event_data)
-print("Data validation passed!")
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/inst/CITATION b/wpgsd.Rcheck/vign_test/wpgsd/inst/CITATION
deleted file mode 100644
index 4cf24a4..0000000
--- a/wpgsd.Rcheck/vign_test/wpgsd/inst/CITATION
+++ /dev/null
@@ -1,12 +0,0 @@
-bibentry(
- "Article",
- title = "A unified framework for weighted parametric group sequential design",
- author = "Keaven M Anderson and Zifang Guo and Jing Zhao and Linda Z Sun",
- journal = "Biometrical Journal",
- volume = "64",
- number = "7",
- pages = "1219--1239",
- year = "2022",
- publisher = "Wiley Online Library",
- doi = "10.1002/bimj.202100085",
-)
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/inst/aiprompts/compute_correlations_prompts.txt b/wpgsd.Rcheck/vign_test/wpgsd/inst/aiprompts/compute_correlations_prompts.txt
deleted file mode 100644
index 6c9d599..0000000
--- a/wpgsd.Rcheck/vign_test/wpgsd/inst/aiprompts/compute_correlations_prompts.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-Three functions are defined here: one for checking input data (`check_event_data()`), one for computing
-correlations (`compute_correlations()`), and one (`gen_corr()`) for transforming a data frame of computed
-correlations into a correlation matrix.
-All three should be exported.
-When users pass the `event` input to `compute_correlations()`, it is advised to run `check_event_data()` first
-by specifying check = TRUE (the default).
-The output is either a data frame or a correlation matrix, as specified below.
-The exported function `gen_corr()` transforms the data frame output into a symmetric matrix.
-
-Let M > 1 be the integer number of hypotheses to be tested.
-Assume there are integer K > 1 analyses.
-Let real-valued Z[i, k] be the standardized normal test statistic for
-hypothesis i, 1 <= i <= M, analysis k, 1 <= k <= K.
-We wish to compute and return correlations for all hypothesis tests
-performed across analyses.
-For tests Z[i1, k1], Z[i2, k2], 1 <= i1 <= i2 <= M, 1 <= k1 <= k2 <= K that are
-performed we will provide the count of events (for time-to-event outcomes) or
-sample size (for other outcomes) that are common between the two tests.
-This is input in a data frame called `event`
-with the following columns:
-
-`H1` - hypothesis i1 = H1
-`H2` - hypothesis i2 = H2
-`Analysis` - value in 1,2,...,K
-`Event` - event count for time-to-event endpoints, otherwise sample size, which
-is included in both tests Z[H1, Analysis] and Z[H2, Analysis].
-
-Other arguments for `compute_correlations()` are
-
-`check` - logical indicating whether to perform input checks using `check_event_data()` (default is TRUE)
-`return_matrix` - logical indicating whether to return a correlation matrix (default is TRUE)
-
-The function `check_event_data()` will have the argument `event` as specified above.
-Data checks in `check_event_data()` :
-- Check that `event` is a data frame and return an informative error message if not.
-- Check that columns specified above exist and return informative error message, if not.
-- Check that H1 <= H2 for each row and provide informative error message, if not.
-- Check that the combination of H1, H2, Analysis is unique in the input;
-return an appropriate error message if not.
-- Check that Event is an integer >= 0 for all rows and provide an
-appropriate error message, if not.
-- Check that unique values of Analysis are sequential positive integers with
-minimum value 1 and maximum value > 1; K from above will be the maximum value.
-Return an informative error message if any of these checks fail.
-- Check that unique values of H1 are sequential integers with minimum value 1
-and maximum value > 1; M from above will be the maximum value.
-Same for H2.
-Return an informative error message if any of these checks fail.
-- Check that for k = 1, ..., K, if there is a row with H1 < H2, then there is also a row with H1 == H2.
-Return an informative error message if not.
-- Check that for any value of Analysis where a row with H1 = i1 < H2 = i2 is specified, there is also a row with
-H1 = i1, H2 = i1 and another with H1 = i2, H2 = i2. Return an informative error message if not.
-
-
-Returned value from `compute_correlations()`.
-The returned value is a data frame if `return_matrix = FALSE`, or a correlation matrix otherwise.
-The data frame has the following columns.
- H1 - hypothesis i1
- H2 - hypothesis i2 >= i1
- Analysis1 - k1.
- Analysis2 - k2 >= k1.
- Correlation - correlation between Z[i1, k1] and Z[i2, k2].
-
-Correlation is computed as
-the Event count for the row (H1 = i1, H2 = i2, Analysis = k1) divided by the square root of the product of
-the Event count for (H1 = i1, H2 = i1, Analysis = min(k1, k2)) and
-the Event count for (H1 = i2, H2 = i2, Analysis = k2).
-
-If `return_matrix = TRUE` (the default), then a symmetric matrix should be returned as follows.
-Columns should be named `H1_A1`, `H1_A2`, ..., `H1_AK`, `H2_A1`, `H2_A2`, ..., `H2_AK`, ..., `HM_A1`, `HM_A2`, ..., `HM_AK` where
-M and K are as computed from `event`.
-Rows should be labeled the same way.
-Diagonal elements are all 1.
-Off-diagonal elements are filled with the computed correlations.
-Only cells for which an `Event` value was provided for the combination of Analysis, H1, and H2 should be filled.
-
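An editorial sketch (not part of the deleted prompt file): a minimal base-R illustration of the correlation rule described above, assuming the `event` layout with H1 <= H2 on each row. `pairwise_corr()` is a made-up helper name, not the `compute_correlations()` API the prompt asks for.

```r
# Illustrative only: event-count correlation rule from the prompt above.
pairwise_corr <- function(event, i1, i2, k1, k2) {
  ev <- function(h1, h2, k) {
    event$Event[event$H1 == h1 & event$H2 == h2 & event$Analysis == k]
  }
  # Shared events at the earlier analysis over the geometric mean of the
  # "diagonal" counts: E(i1, i2, k1) / sqrt(E(i1, i1, k1) * E(i2, i2, k2))
  ev(i1, i2, min(k1, k2)) / sqrt(ev(i1, i1, min(k1, k2)) * ev(i2, i2, max(k1, k2)))
}

# Small example in the column layout described above
event <- data.frame(
  H1       = c(1, 2, 1, 1, 2, 1),
  H2       = c(1, 2, 2, 1, 2, 2),
  Analysis = c(1, 1, 1, 2, 2, 2),
  Event    = c(100, 110, 80, 200, 220, 160)
)

pairwise_corr(event, i1 = 1, i2 = 1, k1 = 1, k2 = 2) # 100 / sqrt(100 * 200) ~ 0.71
pairwise_corr(event, i1 = 1, i2 = 2, k1 = 1, k2 = 1) # 80  / sqrt(100 * 110) ~ 0.76
```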
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/inst/aiprompts/s7 prompts.md b/wpgsd.Rcheck/vign_test/wpgsd/inst/aiprompts/s7 prompts.md
deleted file mode 100644
index 69bae54..0000000
--- a/wpgsd.Rcheck/vign_test/wpgsd/inst/aiprompts/s7 prompts.md
+++ /dev/null
@@ -1,101 +0,0 @@
-## Prompt for s7 Class conversion
-
-We want to convert the wpgsd R package to operate with s7 classes throughout.
-Please check for exported functions in the R directory suggest what s7 classes need to be developed.
-
-## Suggested S7 Classes for wpgsd Package Conversion
-
-Based on the exported functions and their data structures, here are the key S7 classes that should be developed:
-
-### 1. **EventTable Class**
-- **Purpose**: Represents event count data structure used in `generate_corr()` and `generate_event_table()`
-- **Key Fields**:
- - `H1`, `H2`: Hypothesis indices
- - `Analysis`: Analysis number
- - `Event`: Event count
-- **Methods**: Validation, printing, subsetting
-
-### 2. **CorrelationMatrix Class**
-- **Purpose**: Represents correlation matrices returned by `generate_corr()`
-- **Key Fields**:
- - `matrix`: The correlation matrix
- - `n_hypotheses`: Number of hypotheses
- - `n_analyses`: Number of analyses
- - `column_names`: Named columns (e.g., "H1_A1", "H2_A1", etc.)
-- **Methods**: Validation (positive definite), printing, subsetting
-
-### 3. **TransitionMatrix Class**
-- **Purpose**: Represents transition matrices used in graphical procedures
-- **Key Fields**:
- - `matrix`: The transition matrix
- - `n_hypotheses`: Number of hypotheses
-- **Methods**: Validation (row sums ≤ 1), printing
-
-### 4. **Bounds Class**
-- **Purpose**: Represents p-value boundaries from `generate_bounds()`
-- **Key Fields**:
- - `data`: Tibble with Analysis, Hypotheses, H1, H2, etc., xi columns
- - `type`: Boundary type (0, 1, 2, or 3)
- - `alpha`: Overall alpha level
- - `n_hypotheses`: Number of hypotheses
- - `n_analyses`: Number of analyses
-- **Methods**: Validation, printing, plotting
-
-### 5. **ObservedPValues Class**
-- **Purpose**: Represents observed p-values for `closed_test()`
-- **Key Fields**:
- - `data`: Tibble with Analysis, H1, H2, etc. columns
- - `n_hypotheses`: Number of hypotheses
- - `n_analyses`: Number of analyses
-- **Methods**: Validation, printing
-
-### 6. **ClosedTestResult Class**
-- **Purpose**: Represents results from `closed_test()`
-- **Key Fields**:
- - `outcome_matrix`: Matrix summarizing testing results
- - `bounds`: Associated bounds used
- - `p_obs`: Observed p-values used
- - `rejected_hypotheses`: List of rejected hypotheses by analysis
-- **Methods**: Validation, printing, summary
-
-### 7. **StudyDesign Class**
-- **Purpose**: High-level class to encapsulate entire study design
-- **Key Fields**:
- - `initial_weights`: Vector of initial hypothesis weights
- - `transition_matrix`: TransitionMatrix object
- - `n_hypotheses`: Number of hypotheses
- - `n_analyses`: Number of planned analyses
- - `alpha`: Overall alpha level
- - `spending_functions`: List of spending functions
-- **Methods**: Validation, printing, summary
-
-### 8. **SequentialPValue Class**
-- **Purpose**: Represents sequential p-values from `calc_seq_p()`
-- **Key Fields**:
- - `value`: The sequential p-value
- - `test_analysis`: Analysis index
- - `test_hypothesis`: Hypothesis being tested
- - `method_type`: Alpha spending type used
-- **Methods**: Validation, printing
-
-## Implementation Benefits
-
-1. **Type Safety**: S7 classes provide strong typing and validation
-2. **Method Dispatch**: Can implement specialized methods for each class
-3. **Consistency**: Standardized data structures across the package
-4. **User Experience**: Better error messages and documentation
-5. **Extensibility**: Easy to add new methods and functionality
-
-## Migration Strategy
-
-1. Start with core data structures (`EventTable`, `CorrelationMatrix`)
-2. Update `generate_corr()` to return `CorrelationMatrix` objects
-3. Update `generate_bounds()` to return `Bounds` objects
-4. Progressively convert other functions
-5. Ensure backward compatibility during transition
-6. Update documentation and vignettes
-
-This S7 class structure would provide a solid foundation for the wpgsd package while maintaining the existing functionality and improving the user experience with better type safety and method dispatch.
-
-(The "Suggested S7 Classes" section above is the response returned by Claude Sonnet 4 running under VS Code.)
-
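As another editorial aside (not part of the deleted prompt file, and not the wpgsd implementation): a minimal sketch of how the first suggested class could be declared with the S7 package. The single `data` property and the validator shown are illustrative assumptions.

```r
library(S7)

# Hypothetical EventTable class along the lines suggested above; the property
# and validator are illustrative assumptions, not the package's actual code.
EventTable <- new_class(
  "EventTable",
  properties = list(data = class_data.frame),
  validator = function(self) {
    required <- c("H1", "H2", "Analysis", "Event")
    if (!all(required %in% names(self@data))) {
      "@data must contain columns H1, H2, Analysis, Event"
    } else if (any(self@data$Event < 0)) {
      "@data$Event must be non-negative"
    }
  }
)

# Construction runs the validator automatically
et <- EventTable(data = data.frame(H1 = 1, H2 = 1, Analysis = 1, Event = 155))
```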
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.R b/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.R
deleted file mode 100644
index 63d7cc2..0000000
--- a/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.R
+++ /dev/null
@@ -1,739 +0,0 @@
-## ----message = FALSE, warning = FALSE-----------------------------------------
-library(wpgsd)
-library(gsDesign)
-library(tibble)
-library(gt)
-library(dplyr)
-
-## -----------------------------------------------------------------------------
-event_tbl <- tribble(
- ~population, ~analysis, ~event,
- "A positive", 1, 100,
- "B positive", 1, 110,
- "AB positive", 1, 80,
- "overall", 1, 225,
- "A positive", 2, 200,
- "B positive", 2, 220,
- "AB positive", 2, 160,
- "overall", 2, 450,
-)
-
-## -----------------------------------------------------------------------------
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.012,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.010
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")
-
-p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p
-
-## -----------------------------------------------------------------------------
-IF_IA <- c(
- ((event_tbl %>% filter(analysis == 1, population == "A positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "A positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "B positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "B positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event)
-)
-
-IF_IA
-
-## -----------------------------------------------------------------------------
-# Transition matrix in Figure A1
-m <- matrix(c(
- 0, 3 / 7, 4 / 7,
- 3 / 7, 0, 4 / 7,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-# Initial weights
-w <- c(0.3, 0.3, 0.4)
-
-## -----------------------------------------------------------------------------
-name_hypotheses <- c("H1: Biomarker A positive", "H2: Biomarker B positive", "H3: Overall Population")
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot
-
-## -----------------------------------------------------------------------------
-# Event count of intersection of paired hypotheses - Table 2
-# H1, H2: Hypotheses intersected.
-# (1, 1) represents counts for hypothesis 1
-# (1, 2) for counts for the intersection of hypotheses 1 and 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 1, event_tbl %>% filter(analysis == 1, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 1, event_tbl %>% filter(analysis == 1, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 1, 1, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 2, event_tbl %>% filter(analysis == 2, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 2, event_tbl %>% filter(analysis == 2, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric()
-)
-event
-
-# Generate correlation from events
-gs_corr <- wpgsd::generate_corr(event)
-gs_corr %>% round(2)
-
-## ----wpgsdSeqIA, message=FALSE------------------------------------------------
-seq_p_IA_H123 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H12 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H23 <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H1 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H2 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H3 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-## ----weightedBonferroniSeqIA, message=FALSE-----------------------------------
-seq_p_IA_H123_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H12_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H23_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H1_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H2_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H3_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-## ----wpgsdSeqFA, message=FALSE------------------------------------------------
-seq_p_FA_H123 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H12 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H13 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H23 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H1 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-## ----weightedBonferroniSeqFA, message=FALSE-----------------------------------
-seq_p_FA_H123_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H12_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H13_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H23_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H1_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-## ----wpgsdAdjSeqIA------------------------------------------------------------
-adj_seq_IA_H1 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H13, seq_p_IA_H1)
-adj_seq_IA_H2 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H23, seq_p_IA_H2)
-adj_seq_IA_H3 <- max(seq_p_IA_H123, seq_p_IA_H13, seq_p_IA_H23, seq_p_IA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via WPGSD is", adj_seq_IA_H1, adj_seq_IA_H2, adj_seq_IA_H3, "\n")
-
-## ----weightedBonferroniAdjSeqIA-----------------------------------------------
-adj_seq_IA_H1_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H13_B, seq_p_IA_H1_B)
-adj_seq_IA_H2_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H23_B, seq_p_IA_H2_B)
-adj_seq_IA_H3_B <- max(seq_p_IA_H123_B, seq_p_IA_H13_B, seq_p_IA_H23_B, seq_p_IA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_IA_H1_B, adj_seq_IA_H2_B, adj_seq_IA_H3_B, "\n")
-
-## ----wpgsdAdjSeqFA------------------------------------------------------------
-adj_seq_FA_H1 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H13, seq_p_FA_H1)
-adj_seq_FA_H2 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H23, seq_p_FA_H2)
-adj_seq_FA_H3 <- max(seq_p_FA_H123, seq_p_FA_H13, seq_p_FA_H23, seq_p_FA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via WPGSD is", adj_seq_FA_H1, adj_seq_FA_H2, adj_seq_FA_H3, "\n")
-
-## ----weightedBonferroniAdjSeqFA-----------------------------------------------
-adj_seq_FA_H1_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H13_B, seq_p_FA_H1_B)
-adj_seq_FA_H2_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H23_B, seq_p_FA_H2_B)
-adj_seq_FA_H3_B <- max(seq_p_FA_H123_B, seq_p_FA_H13_B, seq_p_FA_H23_B, seq_p_FA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_FA_H1_B, adj_seq_FA_H2_B, adj_seq_FA_H3_B, "\n")
-
-## -----------------------------------------------------------------------------
-ans <- tribble(
- ~Analysis, ~Hypothesis, ~`Sequential p-values of WPGSD`, ~`Sequential p-values of Weighted Bonferroni`, ~`Adjusted-sequential p-values of WPGSD`, ~`Adjusted-sequential p-values of Weighted Bonferroni`,
- "IA", "H123", seq_p_IA_H123, seq_p_IA_H123_B, NA, NA,
- "IA", "H12", seq_p_IA_H12, seq_p_IA_H12_B, NA, NA,
- "IA", "H13", seq_p_IA_H13, seq_p_IA_H13_B, NA, NA,
- "IA", "H23", seq_p_IA_H23, seq_p_IA_H23_B, NA, NA,
- "IA", "H1", seq_p_IA_H1, seq_p_IA_H1_B, adj_seq_IA_H1, adj_seq_IA_H1_B,
- "IA", "H2", seq_p_IA_H2, seq_p_IA_H2_B, adj_seq_IA_H2, adj_seq_IA_H2_B,
- "IA", "H3", seq_p_IA_H3, seq_p_IA_H3_B, adj_seq_IA_H3, adj_seq_IA_H3_B,
- "FA", "H123", seq_p_FA_H123, seq_p_FA_H123_B, NA, NA,
- "FA", "H12", seq_p_FA_H12, seq_p_FA_H12_B, NA, NA,
- "FA", "H13", seq_p_FA_H13, seq_p_FA_H13_B, NA, NA,
- "FA", "H23", seq_p_FA_H23, seq_p_FA_H23_B, NA, NA,
- "FA", "H1", seq_p_FA_H1, seq_p_FA_H1_B, adj_seq_FA_H1, adj_seq_FA_H1_B,
- "FA", "H2", seq_p_FA_H2, seq_p_FA_H2_B, adj_seq_FA_H2, adj_seq_FA_H2_B,
- "FA", "H3", seq_p_FA_H3, seq_p_FA_H3_B, adj_seq_FA_H3, adj_seq_FA_H3_B
-)
-
-ans %>%
- select(
- Analysis, Hypothesis,
- `Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`,
- `Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`
- ) %>%
- gt() %>%
- tab_spanner(
- label = "Weighted Bonferroni",
- columns = c(`Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`)
- ) %>%
- tab_spanner(
- label = "WPGSD",
- columns = c(`Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`)
- ) %>%
- tab_style_body(
- columns = where(is.numeric),
- style = cell_fill(color = "pink"),
- fn = function(x) x <= 0.025
- ) %>%
- fmt_number(columns = 3:6, decimals = 4) %>%
- tab_header(
- title = "(Adjusted-) sequential p-values",
- subtitle = "Multiple populations"
- ) # %>% as_latex()
-
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.Rmd b/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.Rmd
deleted file mode 100644
index df94c1c..0000000
--- a/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.Rmd
+++ /dev/null
@@ -1,810 +0,0 @@
----
-title: "Adjusted sequential p-values"
-author: "Yujie Zhao, Qi Liu, Linda Sun, Keaven Anderson"
-output:
- rmarkdown::html_document:
- toc: true
- toc_float: true
- toc_depth: 2
- number_sections: true
- highlight: "textmate"
- css: "custom.css"
-# bibliography: "example.bib"
-vignette: >
- %\VignetteEngine{knitr::rmarkdown}
- %\VignetteIndexEntry{Adjusted sequential p-values}
----
-
-```{r, message = FALSE, warning = FALSE}
-library(wpgsd)
-library(gsDesign)
-library(tibble)
-library(gt)
-library(dplyr)
-```
-
-# Example overview
-
-In a 2-arm controlled clinical trial example with one primary endpoint, there are 3 patient populations defined by the status of two biomarkers A and B:
-
-- biomarker A positive,
-- biomarker B positive,
-- overall population.
-
-The 3 primary elementary hypotheses are:
-
-- $H_1$: the experimental treatment is superior to the control in the biomarker A positive population;
-- $H_2$: the experimental treatment is superior to the control in the biomarker B positive population;
-- $H_3$: the experimental treatment is superior to the control in the overall population.
-
-Assume an interim analysis and a final analysis are planned for the study, and the numbers of events are listed as
-
-```{r}
-event_tbl <- tribble(
- ~population, ~analysis, ~event,
- "A positive", 1, 100,
- "B positive", 1, 110,
- "AB positive", 1, 80,
- "overall", 1, 225,
- "A positive", 2, 200,
- "B positive", 2, 220,
- "AB positive", 2, 160,
- "overall", 2, 450,
-)
-```
-
-The observed p-values are
-
-```{r}
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.012,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.010
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")
-
-p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p
-```
-
-The information fraction of $H_1, H_2, H_3$ at IA is
-
-```{r}
-IF_IA <- c(
- ((event_tbl %>% filter(analysis == 1, population == "A positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "A positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "B positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "B positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event)
-)
-
-IF_IA
-```
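As a hand check of the first entry (an editorial note, not part of the original vignette): for $H_1$ the combined event count at IA over the combined count at FA is $(100 + 225)/(200 + 450) = 325/650 = 0.5$, matching the printed information fraction.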
-
-We assign the initial weights of $H_1, H_2, H_3$ as $\left(w_1(I), w_2(I), w_3(I)\right) = (0.3, 0.3, 0.4)$. The multiplicity strategy is visualized below. If $H_1$ is rejected, then $3/7$ of its local significance level $\alpha_1$ is propagated to $H_2$ and $4/7$ to $H_3$. If $H_3$ is rejected, then half of $\alpha_3$ goes to $H_1$ and half to $H_2$.
-
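As a hand check of the propagation rule (an editorial note, using the standard graphical update $w_j \leftarrow w_j + g_{ij} w_i$): if $H_1$ is rejected, the updated weights become $w_2 = 0.3 + \tfrac{3}{7} \times 0.3 \approx 0.429$ and $w_3 = 0.4 + \tfrac{4}{7} \times 0.3 \approx 0.571$, which together carry the full weight of 1.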
-```{r}
-# Transition matrix in Figure A1
-m <- matrix(c(
- 0, 3 / 7, 4 / 7,
- 3 / 7, 0, 4 / 7,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-# Initial weights
-w <- c(0.3, 0.3, 0.4)
-```
-
-```{r}
-name_hypotheses <- c("H1: Biomarker A positive", "H2: Biomarker B positive", "H3: Overall Population")
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot
-```
-
-The correlation matrix of the 6 test statistics (2 analyses $\times$ 3 hypotheses) is
-
-```{r}
-# Event count of intersection of paired hypotheses - Table 2
-# H1, H2: Hypotheses intersected.
-# (1, 1) represents counts for hypothesis 1
-# (1, 2) for counts for the intersection of hypotheses 1 and 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 1, event_tbl %>% filter(analysis == 1, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 1, event_tbl %>% filter(analysis == 1, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 1, 1, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 2, event_tbl %>% filter(analysis == 2, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 2, event_tbl %>% filter(analysis == 2, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric()
-)
-event
-
-# Generate correlation from events
-gs_corr <- wpgsd::generate_corr(event)
-gs_corr %>% round(2)
-```
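Two entries of `gs_corr` can be verified by hand (an editorial note, assuming the event-count correlation rule described in the `compute_correlations` prompt earlier in this diff): the correlation between the $H_1$ statistics at IA and FA is $100/\sqrt{100 \times 200} \approx 0.71$, and the correlation between the $H_1$ and $H_2$ statistics at IA is $80/\sqrt{100 \times 110} \approx 0.76$.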
-
-# Sequential p-value
-
-## IA
-
-```{r, wpgsdSeqIA, message=FALSE}
-seq_p_IA_H123 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H12 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H23 <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H1 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H2 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H3 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-```
-
-```{r, weightedBonferroniSeqIA, message=FALSE}
-seq_p_IA_H123_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H12_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H23_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H1_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H2_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H3_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-```
-
-## FA
-
-```{r, wpgsdSeqFA, message=FALSE}
-seq_p_FA_H123 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H12 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H13 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H23 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H1 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-```
-
-```{r, weightedBonferroniSeqFA, message=FALSE}
-seq_p_FA_H123_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H12_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H13_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H23_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H1_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-```
-
-# Adjusted-Sequential p-value
-
-Following the closed testing principle, the adjusted sequential p-value for an elementary hypothesis is the maximum of the sequential p-values over all intersection hypotheses that contain it.
-
-## IA
-
-```{r, wpgsdAdjSeqIA}
-adj_seq_IA_H1 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H13, seq_p_IA_H1)
-adj_seq_IA_H2 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H23, seq_p_IA_H2)
-adj_seq_IA_H3 <- max(seq_p_IA_H123, seq_p_IA_H13, seq_p_IA_H23, seq_p_IA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via WPGSD is", adj_seq_IA_H1, adj_seq_IA_H2, adj_seq_IA_H3, "\n")
-```
-
-```{r, weightedBonferroniAdjSeqIA}
-adj_seq_IA_H1_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H13_B, seq_p_IA_H1_B)
-adj_seq_IA_H2_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H23_B, seq_p_IA_H2_B)
-adj_seq_IA_H3_B <- max(seq_p_IA_H123_B, seq_p_IA_H13_B, seq_p_IA_H23_B, seq_p_IA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_IA_H1_B, adj_seq_IA_H2_B, adj_seq_IA_H3_B, "\n")
-```
-
-## FA
-
-### WPGSD
-```{r, wpgsdAdjSeqFA}
-adj_seq_FA_H1 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H13, seq_p_FA_H1)
-adj_seq_FA_H2 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H23, seq_p_FA_H2)
-adj_seq_FA_H3 <- max(seq_p_FA_H123, seq_p_FA_H13, seq_p_FA_H23, seq_p_FA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via WPGSD is", adj_seq_FA_H1, adj_seq_FA_H2, adj_seq_FA_H3, "\n")
-```
-
-```{r, weightedBonferroniAdjSeqFA}
-adj_seq_FA_H1_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H13_B, seq_p_FA_H1_B)
-adj_seq_FA_H2_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H23_B, seq_p_FA_H2_B)
-adj_seq_FA_H3_B <- max(seq_p_FA_H123_B, seq_p_FA_H13_B, seq_p_FA_H23_B, seq_p_FA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_FA_H1_B, adj_seq_FA_H2_B, adj_seq_FA_H3_B, "\n")
-```
-
-# Summary
-
-```{r}
-ans <- tribble(
- ~Analysis, ~Hypothesis, ~`Sequential p-values of WPGSD`, ~`Sequential p-values of Weighted Bonferroni`, ~`Adjusted-sequential p-values of WPGSD`, ~`Adjusted-sequential p-values of Weighted Bonferroni`,
- "IA", "H123", seq_p_IA_H123, seq_p_IA_H123_B, NA, NA,
- "IA", "H12", seq_p_IA_H12, seq_p_IA_H12_B, NA, NA,
- "IA", "H13", seq_p_IA_H13, seq_p_IA_H13_B, NA, NA,
- "IA", "H23", seq_p_IA_H23, seq_p_IA_H23_B, NA, NA,
- "IA", "H1", seq_p_IA_H1, seq_p_IA_H1_B, adj_seq_IA_H1, adj_seq_IA_H1_B,
- "IA", "H2", seq_p_IA_H2, seq_p_IA_H2_B, adj_seq_IA_H2, adj_seq_IA_H2_B,
- "IA", "H3", seq_p_IA_H3, seq_p_IA_H3_B, adj_seq_IA_H3, adj_seq_IA_H3_B,
- "FA", "H123", seq_p_FA_H123, seq_p_FA_H123_B, NA, NA,
- "FA", "H12", seq_p_FA_H12, seq_p_FA_H12_B, NA, NA,
- "FA", "H13", seq_p_FA_H13, seq_p_FA_H13_B, NA, NA,
- "FA", "H23", seq_p_FA_H23, seq_p_FA_H23_B, NA, NA,
- "FA", "H1", seq_p_FA_H1, seq_p_FA_H1_B, adj_seq_FA_H1, adj_seq_FA_H1_B,
- "FA", "H2", seq_p_FA_H2, seq_p_FA_H2_B, adj_seq_FA_H2, adj_seq_FA_H2_B,
- "FA", "H3", seq_p_FA_H3, seq_p_FA_H3_B, adj_seq_FA_H3, adj_seq_FA_H3_B
-)
-
-ans %>%
- select(
- Analysis, Hypothesis,
- `Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`,
- `Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`
- ) %>%
- gt() %>%
- tab_spanner(
- label = "Weighted Bonferroni",
- columns = c(`Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`)
- ) %>%
- tab_spanner(
- label = "WPGSD",
- columns = c(`Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`)
- ) %>%
- tab_style_body(
- columns = where(is.numeric),
- style = cell_fill(color = "pink"),
- fn = function(x) x <= 0.025
- ) %>%
- fmt_number(columns = 3:6, decimals = 4) %>%
- tab_header(
- title = "(Adjusted-) sequential p-values",
- subtitle = "Multiple populations"
- ) # %>% as_latex()
-```
diff --git a/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.html b/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.html
deleted file mode 100644
index 0236e45..0000000
--- a/wpgsd.Rcheck/vign_test/wpgsd/inst/doc/adj-seq-p.html
+++ /dev/null
@@ -1,3399 +0,0 @@
-library(wpgsd)
-library(gsDesign)
-library(tibble)
-library(gt)
-library(dplyr)
In a 2-arm controlled clinical trial example with one primary endpoint, there are 3 patient populations defined by the status of two biomarkers A and B:

- Population 1: biomarker A positive
- Population 2: biomarker B positive
- Overall population

The 3 primary elementary hypotheses are:

- H1: treatment effect in the biomarker A positive population
- H2: treatment effect in the biomarker B positive population
- H3: treatment effect in the overall population

Assume an interim analysis and a final analysis are planned for the study, and the numbers of events are listed as
-event_tbl <- tribble(
- ~population, ~analysis, ~event,
- "A positive", 1, 100,
- "B positive", 1, 110,
- "AB positive", 1, 80,
- "overall", 1, 225,
- "A positive", 2, 200,
- "B positive", 2, 220,
- "AB positive", 2, 160,
- "overall", 2, 450,
-)
-The observed p-values are
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.012,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.010
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")
**Nominal p-values**

| hypothesis | analysis | obs_p | obs_Z |
|---|---|---|---|
| H1 | 1 | 0.020 | 2.053749 |
| H2 | 1 | 0.010 | 2.326348 |
| H3 | 1 | 0.012 | 2.257129 |
| H1 | 2 | 0.015 | 2.170090 |
| H2 | 2 | 0.012 | 2.257129 |
| H3 | 2 | 0.010 | 2.326348 |
p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p
The information fraction of \(H_1, H_2, H_3\) at IA is
-IF_IA <- c(
- ((event_tbl %>% filter(analysis == 1, population == "A positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "A positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "B positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "B positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event)
-)
-
-IF_IA
-## [1] 0.5 0.5 0.5
We assign the initial weights of \(H_1, H_2, H_3\) as \(\left(w_1(I), w_2(I), w_3(I)\right) = (0.3, 0.3, 0.4)\), and the multiplicity strategy is visualized below. If \(H_1\) is rejected, then \(3/7\) of its local significance level \(\alpha_1\) will be propagated to \(H_2\), and \(4/7\) will go to \(H_3\). If \(H_3\) is rejected, then half of \(\alpha_3\) goes to \(H_1\), and half goes to \(H_2\).
-# Transition matrix in Figure A1
-m <- matrix(c(
- 0, 3 / 7, 4 / 7,
- 3 / 7, 0, 4 / 7,
- 0.5, 0.5, 0
-), nrow = 3, byrow = TRUE)
-# Initial weights
-w <- c(0.3, 0.3, 0.4)
-name_hypotheses <- c("H1: Biomarker A positive", "H2: Biomarker B positive", "H3: Overall Population")
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot
The correlations of the 6 statistics (2 analyses \(\times\) 3 hypotheses) are
-# Event count of intersection of paired hypotheses - Table 2
-# H1, H2: Hypotheses intersected.
-# (1, 1) represents counts for hypothesis 1
-# (1, 2) for counts for the intersection of hypotheses 1 and 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 1, event_tbl %>% filter(analysis == 1, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 1, event_tbl %>% filter(analysis == 1, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 1, 1, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 2, event_tbl %>% filter(analysis == 2, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 2, event_tbl %>% filter(analysis == 2, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric()
-)
-event
-## # A tibble: 12 × 4
-## H1 H2 Analysis Event
-## <dbl> <dbl> <dbl> <dbl>
-## 1 1 1 1 100
-## 2 2 2 1 110
-## 3 3 3 1 225
-## 4 1 2 1 80
-## 5 1 3 1 100
-## 6 2 3 1 110
-## 7 1 1 2 200
-## 8 2 2 2 220
-## 9 3 3 2 450
-## 10 1 2 2 160
-## 11 1 3 2 200
-## 12 2 3 2 220
-# Generate correlation from events
-gs_corr <- wpgsd::generate_corr(event)
-gs_corr %>% round(2)
-## H1_A1 H2_A1 H3_A1 H1_A2 H2_A2 H3_A2
-## [1,] 1.00 0.76 0.67 0.71 0.54 0.47
-## [2,] 0.76 1.00 0.70 0.54 0.71 0.49
-## [3,] 0.67 0.70 1.00 0.47 0.49 0.71
-## [4,] 0.71 0.54 0.47 1.00 0.76 0.67
-## [5,] 0.54 0.71 0.49 0.76 1.00 0.70
-## [6,] 0.47 0.49 0.71 0.67 0.70 1.00
-seq_p_IA_H123 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H12 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H23 <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H1 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H2 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H3 <- calc_seq_p(
- test_analysis = 1,
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ),
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
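Each `calc_seq_p()` call above searches the supplied `interval` for what is, by assumption here, the smallest overall significance level at which the group sequential bound of the tested intersection hypothesis is crossed at or before the analysis of interest. A minimal single-hypothesis sketch of that root-finding idea, using only gsDesign and `uniroot()` (an illustration of the concept, not the package internals):

```r
library(gsDesign)

# Observed IA p-value for H1 converted to a z-value
z_obs_ia <- -qnorm(0.02)

# For a given overall alpha, how far is the observed IA z-value above the
# one-sided HSD(-4) bound at information fraction 0.5?
excess_over_bound <- function(alpha) {
  bound <- gsDesign(
    k = 2, test.type = 1, alpha = alpha,
    timing = c(0.5, 1), sfu = sfHSD, sfupar = -4
  )$upper$bound
  z_obs_ia - bound[1]
}

# Smallest alpha at which the IA bound is crossed (the sequential p-value idea);
# roughly 0.168, consistent with seq_p_IA_H1 reported in the summary below
uniroot(excess_over_bound, interval = c(1e-4, 0.2))$root
```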
-seq_p_IA_H123_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H12_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_IA_H13_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H23_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H1_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H2_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-
-seq_p_IA_H3_B <- calc_seq_p(
- test_analysis = 1, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.3)
-)
-seq_p_FA_H123 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H12 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H13 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H23 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.15)
-)
-
-seq_p_FA_H1 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3 <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 2,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = gsDesign::sfHSD,
- spending_fun_par = -4,
- info_frac = c(min(IF_IA), 1),
- interval = c(1e-4, 0.2)
-)
-seq_p_FA_H123_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H12_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H13_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H23_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2, H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H1_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H1",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H2_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H2",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-
-seq_p_FA_H3_B <- calc_seq_p(
- test_analysis = 2, # stage of interest
- test_hypothesis = "H3",
- p_obs = tibble(
- analysis = 1:2,
- H1 = c(p_obs_IA[1], p_obs_FA[1]),
- H2 = c(p_obs_IA[2], p_obs_FA[2]),
- H3 = c(p_obs_IA[3], p_obs_FA[3])
- ), # observed p-value
- alpha_spending_type = 0,
- n_analysis = 2,
- initial_weight = w,
- transition_mat = m,
- z_corr = gs_corr,
- spending_fun = list(gsDesign::sfHSD, gsDesign::sfHSD, gsDesign::sfHSD),
- spending_fun_par = list(-4, -4, -4),
- info_frac = list(c(IF_IA[1], 1), c(IF_IA[2], 1), c(IF_IA[2], 1)),
- interval = c(1e-4, 0.2)
-)
-adj_seq_IA_H1 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H13, seq_p_IA_H1)
-adj_seq_IA_H2 <- max(seq_p_IA_H123, seq_p_IA_H12, seq_p_IA_H23, seq_p_IA_H2)
-adj_seq_IA_H3 <- max(seq_p_IA_H123, seq_p_IA_H13, seq_p_IA_H23, seq_p_IA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in IA via WPGSD is", adj_seq_IA_H1, adj_seq_IA_H2, adj_seq_IA_H3, "\n")
-## The adjusted-sequential p-value of H1, H2, H3 in IA via WPGSD is 0.1942751 0.1942751 0.1942751
-adj_seq_IA_H1_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H13_B, seq_p_IA_H1_B)
-adj_seq_IA_H2_B <- max(seq_p_IA_H123_B, seq_p_IA_H12_B, seq_p_IA_H23_B, seq_p_IA_H2_B)
-adj_seq_IA_H3_B <- max(seq_p_IA_H123_B, seq_p_IA_H13_B, seq_p_IA_H23_B, seq_p_IA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_IA_H1_B, adj_seq_IA_H2_B, adj_seq_IA_H3_B, "\n")
-## The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is 0.2516717 0.2516717 0.2516717
-adj_seq_FA_H1 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H13, seq_p_FA_H1)
-adj_seq_FA_H2 <- max(seq_p_FA_H123, seq_p_FA_H12, seq_p_FA_H23, seq_p_FA_H2)
-adj_seq_FA_H3 <- max(seq_p_FA_H123, seq_p_FA_H13, seq_p_FA_H23, seq_p_FA_H3)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via WPGSD is", adj_seq_FA_H1, adj_seq_FA_H2, adj_seq_FA_H3, "\n")
-## The adjusted-sequential p-value of H1, H2, H3 in FA via WPGSD is 0.02097944 0.02097944 0.02063497
-adj_seq_FA_H1_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H13_B, seq_p_FA_H1_B)
-adj_seq_FA_H2_B <- max(seq_p_FA_H123_B, seq_p_FA_H12_B, seq_p_FA_H23_B, seq_p_FA_H2_B)
-adj_seq_FA_H3_B <- max(seq_p_FA_H123_B, seq_p_FA_H13_B, seq_p_FA_H23_B, seq_p_FA_H3_B)
-
-cat("The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is", adj_seq_FA_H1_B, adj_seq_FA_H2_B, adj_seq_FA_H3_B, "\n")
-## The adjusted-sequential p-value of H1, H2, H3 in FA via weighted Bonferroni is 0.0265823 0.0265823 0.0265823
-ans <- tribble(
- ~Analysis, ~Hypothesis, ~`Sequential p-values of WPGSD`, ~`Sequential p-values of Weighted Bonferroni`, ~`Adjusted-sequential p-values of WPGSD`, ~`Adjusted-sequential p-values of Weighted Bonferroni`,
- "IA", "H123", seq_p_IA_H123, seq_p_IA_H123_B, NA, NA,
- "IA", "H12", seq_p_IA_H12, seq_p_IA_H12_B, NA, NA,
- "IA", "H13", seq_p_IA_H13, seq_p_IA_H13_B, NA, NA,
- "IA", "H23", seq_p_IA_H23, seq_p_IA_H23_B, NA, NA,
- "IA", "H1", seq_p_IA_H1, seq_p_IA_H1_B, adj_seq_IA_H1, adj_seq_IA_H1_B,
- "IA", "H2", seq_p_IA_H2, seq_p_IA_H2_B, adj_seq_IA_H2, adj_seq_IA_H2_B,
- "IA", "H3", seq_p_IA_H3, seq_p_IA_H3_B, adj_seq_IA_H3, adj_seq_IA_H3_B,
- "FA", "H123", seq_p_FA_H123, seq_p_FA_H123_B, NA, NA,
- "FA", "H12", seq_p_FA_H12, seq_p_FA_H12_B, NA, NA,
- "FA", "H13", seq_p_FA_H13, seq_p_FA_H13_B, NA, NA,
- "FA", "H23", seq_p_FA_H23, seq_p_FA_H23_B, NA, NA,
- "FA", "H1", seq_p_FA_H1, seq_p_FA_H1_B, adj_seq_FA_H1, adj_seq_FA_H1_B,
- "FA", "H2", seq_p_FA_H2, seq_p_FA_H2_B, adj_seq_FA_H2, adj_seq_FA_H2_B,
- "FA", "H3", seq_p_FA_H3, seq_p_FA_H3_B, adj_seq_FA_H3, adj_seq_FA_H3_B
-)
-
-ans %>%
- select(
- Analysis, Hypothesis,
- `Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`,
- `Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`
- ) %>%
- gt() %>%
- tab_spanner(
- label = "Weighted Bonferroni",
- columns = c(`Sequential p-values of Weighted Bonferroni`, `Adjusted-sequential p-values of Weighted Bonferroni`)
- ) %>%
- tab_spanner(
- label = "WPGSD",
- columns = c(`Sequential p-values of WPGSD`, `Adjusted-sequential p-values of WPGSD`)
- ) %>%
- tab_style_body(
- columns = where(is.numeric),
- style = cell_fill(color = "pink"),
- fn = function(x) x <= 0.025
- ) %>%
- fmt_number(columns = 3:6, decimals = 4) %>%
- tab_header(
- title = "(Adjusted-) sequential p-values",
- subtitle = "Multiple populations"
- ) # %>% as_latex()
**(Adjusted-) sequential p-values** (Multiple populations)

| Analysis | Hypothesis | Sequential p-values of Weighted Bonferroni | Adjusted-sequential p-values of Weighted Bonferroni | Sequential p-values of WPGSD | Adjusted-sequential p-values of WPGSD |
|---|---|---|---|---|---|
| IA | H123 | 0.2517 | NA | 0.1943 | NA |
| IA | H12 | 0.1678 | NA | 0.1400 | NA |
| IA | H13 | 0.1762 | NA | 0.1553 | NA |
| IA | H23 | 0.1762 | NA | 0.1529 | NA |
| IA | H1 | 0.1678 | 0.2517 | 0.1678 | 0.1943 |
| IA | H2 | 0.0839 | 0.2517 | 0.0839 | 0.1943 |
| IA | H3 | 0.1007 | 0.2517 | 0.1007 | 0.1943 |
| FA | H123 | 0.0266 | NA | 0.0206 | NA |
| FA | H12 | 0.0255 | NA | 0.0210 | NA |
| FA | H13 | 0.0186 | NA | 0.0165 | NA |
| FA | H23 | 0.0186 | NA | 0.0162 | NA |
| FA | H1 | 0.0159 | 0.0266 | 0.0159 | 0.0210 |
| FA | H2 | 0.0127 | 0.0266 | 0.0127 | 0.0210 |
| FA | H3 | 0.0106 | 0.0266 | 0.0106 | 0.0206 |
The weighted parametric group sequential design (WPGSD) (Anderson et al. (2022)) approach allows one to take advantage of the known correlation structure in constructing efficacy bounds to control family-wise error rate (FWER) for a group sequential design. Here correlation may be due to common observations in nested populations, due to common observations in overlapping populations, or due to common observations in the control arm.
Suppose that in a group sequential trial there are \(m\) elementary null hypotheses \(H_i\), \(i \in I = \{1, \ldots, m\}\), and there are \(K\) analyses. Let \(k\) be the index for the interim analyses and final analyses, \(k = 1, 2, \ldots, K\). For any nonempty set \(J \subseteq I\), we denote the intersection hypothesis \(H_J = \cap_{j \in J} H_j\). We note that \(H_I\) is the global null hypothesis.
We assume the plan is for all hypotheses to be tested at each of the \(K\) planned analyses if the trial continues to the end for all hypotheses. We further assume that the distribution of the \(m \times K\) tests of the \(m\) individual hypotheses at all \(K\) analyses is multivariate normal with a completely known correlation matrix.
Let \(Z_{ik}\) be the standardized normal test statistic for hypothesis \(i \in I\), analysis \(1 \le k \le K\). Let \(n_{ik}\) be the number of events collected cumulatively through stage \(k\) for hypothesis \(i\). Then \(n_{i \wedge i', k \wedge k'}\) is the number of events included in both \(Z_{ik}\) and \(Z_{i'k'}\), for \(i, i' \in I\) and \(1 \le k, k' \le K\). The key of the parametric tests is to utilize the correlation among the test statistics. The correlation between \(Z_{ik}\) and \(Z_{i'k'}\) is \[\text{Corr}(Z_{ik}, Z_{i'k'}) = \frac{n_{i \wedge i', k \wedge k'}}{\sqrt{n_{ik} \, n_{i'k'}}}.\]
We borrow an example from Anderson et al. (2022), demonstrated in their Section 2 (Motivating Examples); we use Example 1 as the basis here. The setting is as follows:
In a two-arm controlled clinical trial with one primary endpoint, there are three patient populations defined by the status of two biomarkers A and B:

- Population 1: biomarker A positive
- Population 2: biomarker B positive
- Overall population

The 3 primary elementary hypotheses are:

- H1: treatment effect in the biomarker A positive population
- H2: treatment effect in the biomarker B positive population
- H3: treatment effect in the overall population

Assume an interim analysis and a final analysis are planned for the study. The numbers of events are listed as
-library(dplyr)
-library(tibble)
-library(gt)
-event_tb <- tribble(
- ~Population, ~"Number of Event in IA", ~"Number of Event in FA",
- "Population 1", 100, 200,
- "Population 2", 110, 220,
- "Overlap of Population 1 and 2", 80, 160,
- "Overall Population", 225, 450
-)
-event_tb %>%
- gt() %>%
- tab_header(title = "Number of events at each population")
**Number of events at each population**

| Population | Number of Event in IA | Number of Event in FA |
|---|---|---|
| Population 1 | 100 | 200 |
| Population 2 | 110 | 220 |
| Overlap of Population 1 and 2 | 80 | 160 |
| Overall Population | 225 | 450 |
Let's consider a simple situation where we want to compare population 1 and population 2 at the interim analysis only. Then \(k = 1\), and to compare \(H_{1}\) and \(H_{2}\), the indices are \(i = 1\) and \(i' = 2\). The correlation is \[\text{Corr}(Z_{11}, Z_{21}) = \frac{n_{1 \wedge 2, 1 \wedge 1}}{\sqrt{n_{11} \, n_{21}}}.\] The numbers of events are listed as
-event_tbl <- tribble(
- ~Population, ~"Number of Event in IA",
- "Population 1", 100,
- "Population 2", 110,
- "Overlap in population 1 and 2", 80
-)
-event_tbl %>%
- gt() %>%
- tab_header(title = "Number of events at each population in example 1")
**Number of events at each population in example 1**

| Population | Number of Event in IA |
|---|---|
| Population 1 | 100 |
| Population 2 | 110 |
| Overlap in population 1 and 2 | 80 |
The correlation can then be simply calculated as \[\text{Corr}(Z_{11}, Z_{21}) = \frac{80}{\sqrt{100 \times 110}} = 0.76.\]
-Corr1 <- 80 / sqrt(100 * 110)
-round(Corr1, 2)
-## [1] 0.76
Let's consider another simple situation where we want to compare a single population, for example population 1, across different analyses, the interim and the final analysis. Then \(i = 1\), and to compare IA and FA, the indices are \(k = 1\) and \(k' = 2\). The correlation is \[\text{Corr}(Z_{11}, Z_{12}) = \frac{n_{1 \wedge 1, 1 \wedge 2}}{\sqrt{n_{11} \, n_{12}}}.\] The numbers of events are listed as
-event_tb2 <- tribble(
- ~Population, ~"Number of Event in IA", ~"Number of Event in FA",
- "Population 1", 100, 200
-)
-event_tb2 %>%
- gt() %>%
- tab_header(title = "Number of events at each analyses in example 2")
**Number of events at each analyses in example 2**

| Population | Number of Event in IA | Number of Event in FA |
|---|---|---|
| Population 1 | 100 | 200 |
The correlation can then be simply calculated as \[\text{Corr}(Z_{11}, Z_{12}) = \frac{100}{\sqrt{100 \times 200}} = 0.71.\] The 100 in the numerator is the number of events shared between the interim analysis and the final analysis in population 1, since all interim-analysis events are also counted at the final analysis.
-Corr1 <- 100 / sqrt(100 * 200)
-round(Corr1, 2)
-## [1] 0.71
Let's consider the situation where we want to compare population 1 at the interim analysis and population 2 at the final analysis. Then for the different populations, \(i = 1\) and \(i' = 2\), and to compare IA and FA, \(k = 1\) and \(k' = 2\). The correlation is \[\text{Corr}(Z_{11}, Z_{22}) = \frac{n_{1 \wedge 2, 1 \wedge 2}}{\sqrt{n_{11} \, n_{22}}}.\] The numbers of events are listed as
-event_tb3 <- tribble(
- ~Population, ~"Number of Event in IA", ~"Number of Event in FA",
- "Population 1", 100, 200,
- "Population 2", 110, 220,
- "Overlap in population 1 and 2", 80, 160
-)
-event_tb3 %>%
- gt() %>%
- tab_header(title = "Number of events at each population & analyses in example 3")
**Number of events at each population & analyses in example 3**

| Population | Number of Event in IA | Number of Event in FA |
|---|---|---|
| Population 1 | 100 | 200 |
| Population 2 | 110 | 220 |
| Overlap in population 1 and 2 | 80 | 160 |
The correlation can be simply calculated as \[\text{Corr}(Z_{11}, Z_{22}) = \frac{80}{\sqrt{100 \times 220}} = 0.54.\] The 80 in the numerator is the number of events shared between population 1 at the interim analysis and population 2 at the final analysis.
-Corr1 <- 80 / sqrt(100 * 220)
-round(Corr1, 2)
-## [1] 0.54
generate_corr()

Now we know how to calculate the correlation values under different situations, and the generate_corr() function was built based on this logic. We can directly calculate the results for each cross situation via the function.
First, we need an event table including the information of the study.
- H1 refers to one hypothesis, selected depending on the interest, while H2 refers to the other hypothesis; both are listed for multiplicity testing. For example, H1 means the experimental treatment is superior to the control in population 1/experimental arm 1, and H2 means the experimental treatment is superior to the control in population 2/experimental arm 2.
- Analysis means the analysis stage; for example, 1 means the interim analysis and 2 means the final analysis.
- Event is the number of events shared (overlapped) by H1 and H2.

For example, H1 = 1, H2 = 1, Analysis = 1, Event = 100 indicates that 100 events are observed in the first population (hypothesis 1) at the interim analysis. Another example: H1 = 1, H2 = 2, Analysis = 2, Event = 160 indicates that the number of events shared by populations 1 and 2 at the final analysis is 160.

Note that the column names in this function are fixed to H1, H2, Analysis, Event.
library(wpgsd)
-# The event table
-event <- tibble::tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, 100,
- 2, 2, 1, 110,
- 3, 3, 1, 225,
- 1, 2, 1, 80,
- 1, 3, 1, 100,
- 2, 3, 1, 110,
- 1, 1, 2, 200,
- 2, 2, 2, 220,
- 3, 3, 2, 450,
- 1, 2, 2, 160,
- 1, 3, 2, 200,
- 2, 3, 2, 220
-)
-
-event %>%
- gt() %>%
- tab_header(title = "Number of events at each population & analyses")
**Number of events at each population & analyses**

| H1 | H2 | Analysis | Event |
|---|---|---|---|
| 1 | 1 | 1 | 100 |
| 2 | 2 | 1 | 110 |
| 3 | 3 | 1 | 225 |
| 1 | 2 | 1 | 80 |
| 1 | 3 | 1 | 100 |
| 2 | 3 | 1 | 110 |
| 1 | 1 | 2 | 200 |
| 2 | 2 | 2 | 220 |
| 3 | 3 | 2 | 450 |
| 1 | 2 | 2 | 160 |
| 1 | 3 | 2 | 200 |
| 2 | 3 | 2 | 220 |
Then we input the above event table to the generate_corr() function and get the correlation matrix as follows.
generate_corr(event)
-## H1_A1 H2_A1 H3_A1 H1_A2 H2_A2 H3_A2
-## [1,] 1.0000000 0.7627701 0.6666667 0.7071068 0.5393599 0.4714045
-## [2,] 0.7627701 1.0000000 0.6992059 0.5393599 0.7071068 0.4944132
-## [3,] 0.6666667 0.6992059 1.0000000 0.4714045 0.4944132 0.7071068
-## [4,] 0.7071068 0.5393599 0.4714045 1.0000000 0.7627701 0.6666667
-## [5,] 0.5393599 0.7071068 0.4944132 0.7627701 1.0000000 0.6992059
-## [6,] 0.4714045 0.4944132 0.7071068 0.6666667 0.6992059 1.0000000
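As a spot check, the off-diagonal entries follow the overlap formula above; for example, the entry linking H1 and H2 at the interim analysis is the IA overlap of populations 1 and 2:

```r
# Overlap events divided by the geometric mean of the per-hypothesis event counts
80 / sqrt(100 * 110)
## [1] 0.7627701
```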
-library(tibble)
-library(gt)
-library(gsDesign)
-library(dplyr)
-library(wpgsd)
In a 2-arm controlled clinical trial example with one primary endpoint, there are 3 patient populations defined by the status of two biomarkers A and B:

- Population 1: biomarker A positive
- Population 2: biomarker B positive
- Overall population

The 3 primary elementary hypotheses are:

- H1: treatment effect in the biomarker A positive population
- H2: treatment effect in the biomarker B positive population
- H3: treatment effect in the overall population

Assume an interim analysis and a final analysis are planned for the study, and the numbers of events are listed as
-k <- 2 # Number of total analysis
-n_hypotheses <- 3 # Number of hypotheses
-obs_tbl <- tribble(
- ~hypothesis, ~analysis, ~obs_p,
- "H1", 1, 0.02,
- "H2", 1, 0.01,
- "H3", 1, 0.006,
- "H1", 2, 0.015,
- "H2", 2, 0.012,
- "H3", 2, 0.004
-) %>%
- mutate(obs_Z = -qnorm(obs_p))
-
-obs_tbl %>%
- gt() %>%
- tab_header(title = "Nominal p-values")
**Nominal p-values**

| hypothesis | analysis | obs_p | obs_Z |
|---|---|---|---|
| H1 | 1 | 0.020 | 2.053749 |
| H2 | 1 | 0.010 | 2.326348 |
| H3 | 1 | 0.006 | 2.512144 |
| H1 | 2 | 0.015 | 2.170090 |
| H2 | 2 | 0.012 | 2.257129 |
| H3 | 2 | 0.004 | 2.652070 |
p_obs_IA <- (obs_tbl %>% filter(analysis == 1))$obs_p
-p_obs_FA <- (obs_tbl %>% filter(analysis == 2))$obs_p
-alpha <- 0.025
-event_tbl <- tribble(
- ~population, ~analysis, ~event,
- "A positive", 1, 80,
- "B positive", 1, 88,
- "AB positive", 1, 64,
- "overall", 1, 180,
- "A positive", 2, 160,
- "B positive", 2, 176,
- "AB positive", 2, 128,
- "overall", 2, 360,
-)
The information fraction of \(H_1\), \(H_2\), \(H_3\) at IA is
-IF_IA <- c(
- ((event_tbl %>% filter(analysis == 1, population == "A positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "A positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "B positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "B positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event),
- ((event_tbl %>% filter(analysis == 1, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 1, population == "overall"))$event) /
- ((event_tbl %>% filter(analysis == 2, population == "AB positive"))$event + (event_tbl %>% filter(analysis == 2, population == "overall"))$event)
-)
-
-IF_IA
-## [1] 0.5 0.5 0.5
We assign the initial weights of \(H_1\), \(H_2\), \(H_3\) as \(\left(w_1(I), w_2(I), w_3(I)\right) = (0.3, 0.3, 0.4)\), and the multiplicity strategy is visualized below. If \(H_1\) is rejected, then \(3/7\) of its local significance level \(\alpha_1\) will be propagated to \(H_2\), and \(4/7\) will go to \(H_3\). If \(H_3\) is rejected, then half of \(\alpha_3\) goes to \(H_1\), and half goes to \(H_2\).
-m <- matrix(c( # Transition matrix
- 0, 3 / 7, 4 / 7,
- 3 / 7, 0, 4 / 7,
- 1 / 2, 1 / 2, 0
-), nrow = 3, byrow = TRUE)
-
-w <- c(0.3, 0.3, 0.4) # Initial weights
-name_hypotheses <- c(
- "H1: Biomarker A positive",
- "H2: Biomarker B positive",
- "H3: Overall Population"
-)
-
-hplot <- gMCPLite::hGraph(
- 3,
- alphaHypotheses = w, m = m,
- nameHypotheses = name_hypotheses, trhw = .2, trhh = .1,
- digits = 5, trdigits = 3, size = 5, halfWid = 1, halfHgt = 0.5,
- offset = 0.2, trprop = 0.4,
- fill = as.factor(c(2, 3, 1)),
- palette = c("#BDBDBD", "#E0E0E0", "#EEEEEE"),
- wchar = "w"
-)
-hplot
-# Get weights for all intersection hypotheses
-graph <- gMCPLite::matrix2graph(m)
-graph <- gMCPLite::setWeights(graph, w)
-# Set up hypothetical p-values (0 or 1) to obtain all combinations
-pvals <- NULL
-for (i in 1:n_hypotheses) {
- if (i == 1) {
- pvals <- data.frame(x = c(0, 1))
- names(pvals) <- paste("pval_H", i, sep = "")
- } else {
- tmp <- data.frame(x = c(0, 1))
- names(tmp) <- paste("pval_H", i, sep = "")
- pvals <- merge(pvals, tmp)
- }
-}
-# Get the weights for each intersection hypothesis
-inter_weight <- NULL # Create an empty table to store the weight of interaction hypotheses
-for (i in seq_len(nrow(pvals))) { # Each row in `pvals` is 1 possible interaction hypothesis
- pval_tmp <- as.numeric(pvals[i, ])
- graph_tmp <- gMCPLite::gMCP(graph = graph, pvalues = pval_tmp, alpha = alpha)
- weight_tmp <- gMCPLite::getWeights(graph_tmp)
- inter_weight <- dplyr::bind_rows(inter_weight, weight_tmp)
-}
-
-inter_weight <- replace(inter_weight, pvals == 0, NA) # Replace the empty hypothesis as NA
-inter_weight <- inter_weight[-1, ] # Delete the first row since it is empty set
-
-inter_weight %>%
- gt() %>%
- tab_header("Weight of all possible interaction hypothesis")
**Weight of all possible interaction hypothesis**

| H1 | H2 | H3 |
|---|---|---|
| 1.0000000 | NA | NA |
| NA | 1.0000000 | NA |
| 0.5000000 | 0.5000000 | NA |
| NA | NA | 1.0000000 |
| 0.4285714 | NA | 0.5714286 |
| NA | 0.4285714 | 0.5714286 |
| 0.3000000 | 0.3000000 | 0.4000000 |
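As a sanity check, the weights of the intersection \(H_2 \cap H_3\) in the table above can be reproduced by hand from the graph: dropping \(H_1\) sends \(3/7\) of \(w_1 = 0.3\) to \(H_2\) and \(4/7\) to \(H_3\). A small sketch using the transition matrix and initial weights defined above:

```r
# Propagate H1's weight along the first row of the transition matrix
w <- c(0.3, 0.3, 0.4)
m <- matrix(c(
  0, 3 / 7, 4 / 7,
  3 / 7, 0, 4 / 7,
  0.5, 0.5, 0
), nrow = 3, byrow = TRUE)
w[2:3] + w[1] * m[1, 2:3]
## [1] 0.4285714 0.5714286
```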
The correlations of the 6 statistics (2 analyses \(\times\) 3 hypotheses) are
-# Event count of intersection of paired hypotheses - Table 2
-# H1, H2: Hypotheses intersected.
-# (1, 1) represents counts for hypothesis 1
-# (1, 2) for counts for the intersection of hypotheses 1 and 2
-event <- tribble(
- ~H1, ~H2, ~Analysis, ~Event,
- 1, 1, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 1, event_tbl %>% filter(analysis == 1, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 1, event_tbl %>% filter(analysis == 1, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 1, event_tbl %>% filter(analysis == 1, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 1, event_tbl %>% filter(analysis == 1, population == "B positive") %>% select(event) %>% as.numeric(),
- 1, 1, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 2, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric(),
- 3, 3, 2, event_tbl %>% filter(analysis == 2, population == "overall") %>% select(event) %>% as.numeric(),
- 1, 2, 2, event_tbl %>% filter(analysis == 2, population == "AB positive") %>% select(event) %>% as.numeric(),
- 1, 3, 2, event_tbl %>% filter(analysis == 2, population == "A positive") %>% select(event) %>% as.numeric(),
- 2, 3, 2, event_tbl %>% filter(analysis == 2, population == "B positive") %>% select(event) %>% as.numeric()
-)
-event
-## # A tibble: 12 × 4
-## H1 H2 Analysis Event
-## <dbl> <dbl> <dbl> <dbl>
-## 1 1 1 1 80
-## 2 2 2 1 88
-## 3 3 3 1 180
-## 4 1 2 1 64
-## 5 1 3 1 80
-## 6 2 3 1 88
-## 7 1 1 2 160
-## 8 2 2 2 176
-## 9 3 3 2 360
-## 10 1 2 2 128
-## 11 1 3 2 160
-## 12 2 3 2 176
-# Generate correlation from events
-corr <- wpgsd::generate_corr(event)
-corr %>% round(2)
-## H1_A1 H2_A1 H3_A1 H1_A2 H2_A2 H3_A2
-## [1,] 1.00 0.76 0.67 0.71 0.54 0.47
-## [2,] 0.76 1.00 0.70 0.54 0.71 0.49
-## [3,] 0.67 0.70 1.00 0.47 0.49 0.71
-## [4,] 0.71 0.54 0.47 1.00 0.76 0.67
-## [5,] 0.54 0.71 0.49 0.76 1.00 0.70
-## [6,] 0.47 0.49 0.71 0.67 0.70 1.00
-For the elementary hypothesis \(H_1\), its weight is 1, namely,
-w_H1 <- 1
-
-# Index to select from the correlation matrix
-indx <- grep("H1", colnames(corr))
-corr_H1 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H1 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[1],
- n.I = corr_H1[, ncol(corr_H1)]^2,
- alpha = alpha * w_H1[1],
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H1",
- `H1 p-value boundary` = pval_H1,
- `H2 p-value boundary` = NA,
- `H3 p-value boundary` = NA
-)
-ans %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H1 | 0.002980073 | NA | NA |
| 2 | H1 | 0.023788266 | NA | NA |
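As a quick check, the IA boundary above equals the alpha spent at information fraction 0.5 under the Hwang-Shih-DeCani spending function with \(\gamma = -4\) (at the first analysis, the crossing probability is exactly the spent alpha):

```r
# HSD spending: f(t) = alpha * (1 - exp(-gamma * t)) / (1 - exp(-gamma)), gamma = -4
0.025 * (1 - exp(4 * 0.5)) / (1 - exp(4))
## [1] 0.002980073
```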
For the elementary hypothesis \(H_2\), its weight is 1, namely,
-w_H2 <- 1
-
-# Index to select from the correlation matrix
-indx <- grep("H2", colnames(corr))
-corr_H2 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H2 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[2],
- n.I = corr_H2[, ncol(corr_H2)]^2,
- alpha = alpha * w_H2[1],
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H2",
- `H1 p-value boundary` = NA,
- `H2 p-value boundary` = pval_H2,
- `H3 p-value boundary` = NA
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H2 | NA | 0.002980073 | NA |
| 2 | H2 | NA | 0.023788266 | NA |
ans <- rbind(ans, ans_new)
-For the elementary hypothesis \(H_3\), its weight is 1, namely,
-w_H3 <- 1
-
-# Index to select from the correlation matrix
-indx <- grep("H3", colnames(corr))
-corr_H3 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H3 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[3],
- n.I = corr_H3[, ncol(corr_H3)]^2,
- alpha = alpha * w_H3[1],
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H3",
- `H1 p-value boundary` = NA,
- `H2 p-value boundary` = NA,
- `H3 p-value boundary` = pval_H3
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H3 | NA | NA | 0.002980073 |
| 2 | H3 | NA | NA | 0.023788266 |
ans <- rbind(ans, ans_new)
For the intersection hypothesis \(H_1 \cap H_2\), its weights are
-w_H12 <- inter_weight %>% filter(!is.na(H1), !is.na(H2), is.na(H3))
-w_H12 <- w_H12[(!is.na(w_H12))] # Remove NA from weight
-w_H12
-## [1] 0.5 0.5
And the boundaries for \(H_1\) and \(H_2\) are
-# -------------#
-# H1 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H1", colnames(corr))
-corr_H1 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H1 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[1],
- n.I = corr_H1[, ncol(corr_H1)]^2,
- alpha = alpha * w_H12[1], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H2 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H2", colnames(corr))
-corr_H2 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H2 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[2],
- n.I = corr_H2[, ncol(corr_H2)]^2,
- alpha = alpha * w_H12[2], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H1, H2",
- `H1 p-value boundary` = pval_H1,
- `H2 p-value boundary` = pval_H2,
- `H3 p-value boundary` = NA
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H1, H2 | 0.001490037 | 0.001490037 | NA |
| 2 | H1, H2 | 0.011782800 | 0.011782800 | NA |
ans <- rbind(ans, ans_new)
For the intersection hypothesis \(H_1 \cap H_3\), its weights are
-w_H13 <- inter_weight %>% filter(!is.na(H1), is.na(H2), !is.na(H3))
-w_H13 <- w_H13[(!is.na(w_H13))] # Remove NA from weight
-w_H13
-## [1] 0.4285714 0.5714286
And the boundaries for \(H_1\) and \(H_3\) are
-# -------------#
-# H1 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H1", colnames(corr))
-corr_H1 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H1 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[1],
- n.I = corr_H1[, ncol(corr_H1)]^2,
- alpha = alpha * w_H13[1], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H3 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H3", colnames(corr))
-corr_H3 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H3 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[3],
- n.I = corr_H3[, ncol(corr_H3)]^2,
- alpha = alpha * w_H13[2], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H1, H3",
- `H1 p-value boundary` = pval_H1,
- `H2 p-value boundary` = NA,
- `H3 p-value boundary` = pval_H3
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H1, H3 | 0.001277174 | NA | 0.001702899 |
| 2 | H1, H3 | 0.010079863 | NA | 0.013489389 |
ans <- rbind(ans, ans_new)
For the intersection hypothesis \(H_2 \cap H_3\), its weights are
-w_H23 <- inter_weight %>% filter(is.na(H1), !is.na(H2), !is.na(H3))
-w_H23 <- w_H23[(!is.na(w_H23))] # Remove NA from weight
-w_H23
-## [1] 0.4285714 0.5714286
And the boundaries for \(H_2\) and \(H_3\) are
-# -------------#
-# H2 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H2", colnames(corr))
-corr_H2 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H2 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[2],
- n.I = corr_H2[, ncol(corr_H2)]^2,
- alpha = alpha * w_H23[1], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H3 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H3", colnames(corr))
-corr_H3 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H3 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[3],
- n.I = corr_H3[, ncol(corr_H3)]^2,
- alpha = alpha * w_H23[2], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H2, H3",
- `H1 p-value boundary` = NA,
- `H2 p-value boundary` = pval_H2,
- `H3 p-value boundary` = pval_H3
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H2, H3 | NA | 0.001277174 | 0.001702899 |
| 2 | H2, H3 | NA | 0.010079863 | 0.013489389 |
ans <- rbind(ans, ans_new)
For the intersection hypothesis \(H_1 \cap H_2 \cap H_3\), its weights are
-w_H123 <- inter_weight %>% filter(!is.na(H1), !is.na(H2), !is.na(H3))
-w_H123 <- w_H123[(!is.na(w_H123))] # Remove NA from weight
-w_H123
-## [1] 0.3 0.3 0.4
And the boundaries for \(H_1\), \(H_2\), and \(H_3\) are
-# -------------#
-# H1 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H1", colnames(corr))
-corr_H1 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H1 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[1],
- n.I = corr_H1[, ncol(corr_H1)]^2,
- alpha = alpha * w_H123[1], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H2 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H2", colnames(corr))
-corr_H2 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H2 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[2],
- n.I = corr_H2[, ncol(corr_H2)]^2,
- alpha = alpha * w_H123[2], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-# -------------#
-# H3 #
-# -------------#
-# Index to select from the correlation matrix
-indx <- grep("H3", colnames(corr))
-corr_H3 <- corr[indx, indx]
-
-# Boundary for a single hypothesis across k for the intersection hypothesis
-pval_H3 <- 1 - pnorm(gsDesign::gsDesign(
- k = k,
- test.type = 1,
- usTime = IF_IA[3],
- n.I = corr_H3[, ncol(corr_H3)]^2,
- alpha = alpha * w_H123[3], # alpha is different since the weight is updated
- sfu = sfHSD,
- sfupar = -4
-)$upper$bound)
-
-ans_new <- tibble(
- Analysis = 1:2,
- `Interaction/Elementary hypotheses` = "H1, H2, H3",
- `H1 p-value boundary` = pval_H1,
- `H2 p-value boundary` = pval_H2,
- `H3 p-value boundary` = pval_H3
-)
-ans_new %>% gt()
| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary |
|---|---|---|---|---|
| 1 | H1, H2, H3 | 0.0008940219 | 0.0008940219 | 0.001192029 |
| 2 | H1, H2, H3 | 0.0070254979 | 0.0070254979 | 0.009399818 |
ans <- rbind(ans, ans_new)
With the p-value boundaries, one can get the Z-statistics boundaries by qnorm().
ans %>%
- mutate(
- `H1 Z-statistics boundary` = -qnorm(`H1 p-value boundary`),
- `H2 Z-statistics boundary` = -qnorm(`H2 p-value boundary`),
- `H3 Z-statistics boundary` = -qnorm(`H3 p-value boundary`)
- ) %>%
- arrange(Analysis, `Interaction/Elementary hypotheses`) %>%
- gt() %>%
- tab_header("p-values/Z-statistics boundaries of weighted Bonferroni")
**p-values/Z-statistics boundaries of weighted Bonferroni**

| Analysis | Interaction/Elementary hypotheses | H1 p-value boundary | H2 p-value boundary | H3 p-value boundary | H1 Z-statistics boundary |
|---|---|---|---|---|---|
| 1 | H1 | 0.0029800731 | NA | NA | NA |
| 1 | H1, H2 | 0.0014900365 | 0.0014900365 | NA | NA |
| 1 | H1, H2, H3 | 0.0008940219 | 0.0008940219 | 0.001192029 | 3.037681 |
| 1 | H1, H3 | 0.0012771742 | NA | 0.001702899 | 2.928520 |
| 1 | H2 | NA | 0.0029800731 | NA | NA |
| 1 | H2, H3 | NA | 0.0012771742 | 0.001702899 | 2.928520 |
| 1 | H3 | NA | NA | 0.002980073 | 2.749966 |
| 2 | H1 | 0.0237882657 | NA | NA | NA |
| 2 | H1, H2 | 0.0117828003 | 0.0117828003 | NA | NA |
| 2 | H1, H2, H3 | 0.0070254979 | 0.0070254979 | 0.009399818 | 2.349480 |
| 2 | H1, H3 | 0.0100798631 | NA | 0.013489389 | 2.211825 |
| 2 | H2 | NA | 0.0237882657 | NA | NA |
| 2 | H2, H3 | NA | 0.0100798631 | 0.013489389 | 2.211825 |
| 2 | H3 | NA | NA | 0.023788266 | 1.981131 |
The above results can be computed in one function call in wpgsd by using the generate_bounds() function as
generate_bounds(
- type = 0,
- k = 2,
- w = w,
- m = m,
- corr = corr,
- alpha = 0.025,
- sf = list(sfHSD, sfHSD, sfHSD),
- sfparm = list(-4, -4, -4),
- t = list(c(0.5, 1), c(0.5, 1), c(0.5, 1))
-) %>% gt()
| Analysis | Hypotheses | H1 | H2 | H3 |
|---|---|---|---|---|
| 1 | H1 | 0.0029800731 | NA | NA |
| 1 | H1, H2 | 0.0014900365 | 0.0014900365 | NA |
| 1 | H1, H2, H3 | 0.0008940219 | 0.0008940219 | 0.001192029 |
| 1 | H1, H3 | 0.0012771742 | NA | 0.001702899 |
| 1 | H2 | NA | 0.0029800731 | NA |
| 1 | H2, H3 | NA | 0.0012771742 | 0.001702899 |
| 1 | H3 | NA | NA | 0.002980073 |
| 2 | H1 | 0.0237882657 | NA | NA |
| 2 | H1, H2 | 0.0117828003 | 0.0117828003 | NA |
| 2 | H1, H2, H3 | 0.0070254979 | 0.0070254979 | 0.009399818 |
| 2 | H1, H3 | 0.0100798631 | NA | 0.013489389 |
| 2 | H2 | NA | 0.0237882657 | NA |
| 2 | H2, H3 | NA | 0.0100798631 | 0.013489389 |
| 2 | H3 | NA | NA | 0.023788266 |
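With these bounds, testing follows the closed testing principle: an elementary hypothesis is rejected only if every intersection hypothesis containing it is rejected, and an intersection is rejected as soon as one of its component nominal p-values falls below that component's bound. A small sketch of the IA decision for H3, with values copied from the tables above (since the singleton H3 is itself one of those intersections, failing its own bound already settles the decision):

```r
# IA p-value bounds in the H3 column for every intersection containing H3
h3_bounds_ia <- c(
  `H1, H2, H3` = 0.001192029,
  `H1, H3`     = 0.001702899,
  `H2, H3`     = 0.001702899,
  `H3`         = 0.002980073
)
p3_ia <- 0.006 # observed nominal p-value for H3 at IA

# H3 clears none of these bounds, in particular the singleton {H3},
# so H3 cannot be rejected at the interim analysis by closed testing
p3_ia <= h3_bounds_ia
```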
library(wpgsd)
-library(gsDesign)
-library(gMCPLite)
-library(haven)
-library(dplyr)
-library(tidyr)
-library(reshape2)
-library(gt)
The weighted parametric group sequential design (WPGSD) (Anderson et al. (2022)) approach allows one to take advantage of the known correlation structure in constructing efficacy bounds to control family-wise error rate (FWER) for a group sequential design. Here correlation may be due to common observations in nested populations, due to common observations in overlapping populations, or due to common observations in the control arm. This document illustrates the use of the R package wpgsd to implement this approach.
We aim to control the familywise error rate (FWER) at level \(\alpha\). Let \(J \subseteq I\). The intersection hypothesis \(H_J\) assumes the null hypothesis for all individual hypotheses \(H_i\) with \(i \in J\). The closed testing principle is as follows: if for all sets \(J \subseteq I\) with \(j \in J\), \(H_J\) can be rejected at level \(\alpha\), then \(H_j\) can be rejected. Weighted parametric tests can be used for this: Bretz et al. (2011), Xi et al. (2017) for fixed designs, or Maurer and Bretz (2013) for group sequential designs.
A closed procedure is called consonant if the rejection of the complete intersection null hypothesis \(H_I\) further implies that at least one elementary hypothesis \(H_i\), \(i \in I\), is rejected. Consonance is a desirable property leading to short-cut procedures that give the same rejection decisions as the original closed procedure but with fewer operations. For WPGSD, consonance does not always hold and in general the closed-testing procedure is required.
The following illustrates the first example, which has overlapping populations (e.g., due to a biomarker) and is also Example 1 in Anderson et al. (2022).
Figure: Ex1 Populations.
The multiplicity strategy is defined as follows.

# Transition matrix
m <- matrix(c(
  0, 0, 1,
  0, 0, 1,
  0.5, 0.5, 0
), nrow = 3, byrow = TRUE)
# Initial weights
w <- c(0.3, 0.3, 0.4)

# Multiplicity graph
cbPalette <- c("#999999", "#E69F00", "#56B4E9")

nameHypotheses <- c(
  "H1: Population 1",
  "H2: Population 2",
  "H3: Overall Population"
)

hplot <- hGraph(3,
  alphaHypotheses = w,
  m = m,
  nameHypotheses = nameHypotheses,
  trhw = .2, trhh = .1,
  digits = 5, trdigits = 3, size = 5, halfWid = 1,
  halfHgt = 0.5, offset = 0.2, trprop = 0.4,
  fill = as.factor(c(2, 3, 1)),
  palette = cbPalette,
  wchar = "w"
)
hplot
The event count of each hypothesis at each analysis is shown below.

| Population | Number of Events at IA | Number of Events at FA |
|---|---|---|
| Population 1 | 100 | 200 |
| Population 2 | 110 | 220 |
| Population 1 \(\cap\) 2 | 80 | 160 |
| Overall Population | 225 | 450 |
The correlation matrix among test statistics is as follows.

| \(i,k\) | 1,1 | 2,1 | 3,1 | 1,2 | 2,2 | 3,2 |
|---|---|---|---|---|---|---|
| 1,1 | 1 | \(\frac{80}{\sqrt{100 \cdot 110}}\) | \(\frac{100}{\sqrt{100 \cdot 225}}\) | \(\frac{100}{\sqrt{100 \cdot 200}}\) | \(\frac{80}{\sqrt{100 \cdot 220}}\) | \(\frac{100}{\sqrt{100 \cdot 450}}\) |
| 2,1 | 0.76 | 1 | \(\frac{110}{\sqrt{110 \cdot 225}}\) | \(\frac{80}{\sqrt{110 \cdot 200}}\) | \(\frac{110}{\sqrt{110 \cdot 220}}\) | \(\frac{110}{\sqrt{110 \cdot 450}}\) |
| 3,1 | 0.67 | 0.70 | 1 | \(\frac{100}{\sqrt{225 \cdot 200}}\) | \(\frac{110}{\sqrt{225 \cdot 220}}\) | \(\frac{225}{\sqrt{225 \cdot 450}}\) |
| 1,2 | 0.71 | 0.54 | 0.47 | 1 | \(\frac{160}{\sqrt{200 \cdot 220}}\) | \(\frac{200}{\sqrt{200 \cdot 450}}\) |
| 2,2 | 0.54 | 0.71 | 0.49 | 0.76 | 1 | \(\frac{220}{\sqrt{220 \cdot 450}}\) |
| 3,2 | 0.47 | 0.49 | 0.71 | 0.67 | 0.70 | 1 |
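Each entry above the diagonal is the number of events common to the two analysis populations divided by the geometric mean of the two event counts; the numeric values below the diagonal are the same quantities evaluated. A minimal sketch in base R (not a wpgsd call) reproducing the entry for \((1,1)\) versus \((2,1)\):

# Correlation between Z_{1,1} and Z_{2,1}: events shared by Population 1 and
# Population 2 at IA, divided by the geometric mean of their event counts
n_shared <- 80 # events in Population 1 intersect Population 2 at IA
n1 <- 100      # events in Population 1 at IA
n2 <- 110      # events in Population 2 at IA
n_shared / sqrt(n1 * n2)
#> [1] 0.7627701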
The following illustrates the second example, in which correlation comes from a common control arm. This is also Example 2 in Anderson et al. (2022).

| Treatment Arm | Number of Events at IA | Number of Events at FA |
|---|---|---|
| Experimental 1 | 70 | 135 |
| Experimental 2 | 75 | 150 |
| Experimental 3 | 80 | 165 |
| Control | 85 | 170 |
| \(i,k\) | 1,1 | 2,1 | 3,1 | 1,2 | 2,2 | 3,2 |
|---|---|---|---|---|---|---|
| 1,1 | 1 | \(\frac{85}{\sqrt{155 \cdot 160}}\) | \(\frac{85}{\sqrt{155 \cdot 165}}\) | \(\frac{155}{\sqrt{155 \cdot 305}}\) | \(\frac{85}{\sqrt{155 \cdot 320}}\) | \(\frac{85}{\sqrt{155 \cdot 335}}\) |
| 2,1 | 0.54 | 1 | \(\frac{85}{\sqrt{160 \cdot 165}}\) | \(\frac{85}{\sqrt{160 \cdot 305}}\) | \(\frac{160}{\sqrt{160 \cdot 320}}\) | \(\frac{85}{\sqrt{160 \cdot 335}}\) |
| 3,1 | 0.53 | 0.52 | 1 | \(\frac{85}{\sqrt{165 \cdot 305}}\) | \(\frac{85}{\sqrt{165 \cdot 320}}\) | \(\frac{165}{\sqrt{165 \cdot 335}}\) |
| 1,2 | 0.71 | 0.38 | 0.38 | 1 | \(\frac{170}{\sqrt{305 \cdot 320}}\) | \(\frac{170}{\sqrt{305 \cdot 335}}\) |
| 2,2 | 0.38 | 0.71 | 0.37 | 0.54 | 1 | \(\frac{170}{\sqrt{320 \cdot 335}}\) |
| 3,2 | 0.37 | 0.37 | 0.70 | 0.53 | 0.52 | 1 |
These two examples each have 7 intersection hypotheses, and the corresponding weighting strategies are illustrated below (Example 1 first, then Example 2). A sketch of how these weights can be derived from the multiplicity graph follows the tables.
| \(H_J\) | \(w_1(J)\) | \(w_2(J)\) | \(w_3(J)\) |
|---|---|---|---|
| \(H_1 \cap H_2 \cap H_3\) | 0.3 | 0.3 | 0.4 |
| \(H_1 \cap H_2\) | 0.5 | 0.5 | - |
| \(H_1 \cap H_3\) | 0.3 | - | 0.7 |
| \(H_2 \cap H_3\) | - | 0.3 | 0.7 |
| \(H_1\) | 1 | - | - |
| \(H_2\) | - | 1 | - |
| \(H_3\) | - | - | 1 |

| \(H_J\) | \(w_1(J)\) | \(w_2(J)\) | \(w_3(J)\) |
|---|---|---|---|
| \(H_1 \cap H_2 \cap H_3\) | 1/3 | 1/3 | 1/3 |
| \(H_1 \cap H_2\) | 1/2 | 1/2 | - |
| \(H_1 \cap H_3\) | 1/2 | - | 1/2 |
| \(H_2 \cap H_3\) | - | 1/2 | 1/2 |
| \(H_1\) | 1 | - | - |
| \(H_2\) | - | 1 | - |
| \(H_3\) | - | - | 1 |
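A minimal sketch of the graphical weight-propagation rule of Bretz et al. (2011), which reproduces the Example 1 table from the initial weights w and transition matrix m defined above. graph_weights() is an illustrative helper written for this sketch; it is not a wpgsd or gMCPLite function.

# Illustrative helper: weights of H_J obtained by removing the hypotheses
# not in J from the graph, one at a time (Bretz et al. 2011 update rule)
graph_weights <- function(w, m, J) {
  drop <- setdiff(seq_along(w), J)
  for (j in drop) {
    w_new <- w + w[j] * m[j, ]
    m_new <- matrix(0, nrow(m), ncol(m))
    for (l in seq_along(w)) {
      for (k in seq_along(w)) {
        if (l != k && l != j && k != j && m[l, j] * m[j, l] < 1) {
          m_new[l, k] <- (m[l, k] + m[l, j] * m[j, k]) / (1 - m[l, j] * m[j, l])
        }
      }
    }
    w_new[j] <- 0
    w <- w_new
    m <- m_new
  }
  w[J]
}

# Example 1 graph: weights for H1 and H2 in the intersection H1 and H2
graph_weights(
  w = c(0.3, 0.3, 0.4),
  m = matrix(c(0, 0, 1, 0, 0, 1, 0.5, 0.5, 0), nrow = 3, byrow = TRUE),
  J = c(1, 2)
)
#> [1] 0.5 0.5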
The WPGSD approach uses all known correlations between tests in the study. This relaxes the bounds and allows increased power or a smaller sample size. Three spending approaches have been proposed:

Fixed spending (Fleming-Harrington-O'Brien (FHO) approach). Specify \(0 < \alpha_1(J) < \alpha_2(J) < \ldots < \alpha_K(J) = \alpha(J) \leq \alpha\) for all \(J \subseteq I\), where \(\alpha(J)\) is the total alpha for intersection hypothesis \(H_J\) according to the graphical approach.

\(\alpha\)-spending approach 1. We choose a spending function family \(f(t,\alpha)\) and set \(\alpha_k(J) = f(t_k(J), \alpha(J))\) for \(1 \le k \le K\) for all intersection hypotheses \(J \subseteq I\).

\(\alpha\)-spending approach 2. For each elementary hypothesis \(i\) (\(i = 1, 2, \ldots, m\)), specify the \(\alpha\)-spending function family \(f_i(t,\gamma)\), where \(\gamma\) is the \(\alpha\) level for the hypothesis and \(f_i(t_{ik},\gamma)\) determines how much \(\alpha\) to spend up to analysis \(k\) for hypothesis \(i\) when level \(\gamma\) is allocated to the hypothesis. Then \(\alpha_k(J) = \sum_{i \in J} f_i(t_{ik}, w_i(J)\alpha)\).
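As an illustration of \(\alpha\)-spending approach 1, a minimal sketch using the HSD(-4) spending function from gsDesign; the total level \(\alpha(J) = 0.025\) and the information fractions 0.5 and 1 are taken from Example 1 below, and this is a direct spending-function call rather than wpgsd code.

library(gsDesign)

# alpha-spending approach 1: cumulative spend alpha_k(J) = f(t_k(J), alpha(J)).
# For an intersection with total weight 1, alpha(J) = 0.025;
# information fractions are 0.5 and 1.
alpha_J <- 0.025
t_k <- c(0.5, 1)
sfHSD(alpha = alpha_J, t = t_k, param = -4)$spend
#> [1] 0.002980073 0.025000000

The interim value, about 0.00298, is the cumulative spend available at information fraction 0.5 for a hypothesis allocated the full 0.025 level, matching the single-hypothesis interim bounds in the Example 1 tables later in this document.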
Assume \((Z_1, Z_2)\) is bivariate normal with known correlation.

Find the \(\alpha\)-inflation factor \(c_J\) such that \[\alpha = P\left[\cup_{i \in J} \{p_i \leq c_J w_{J,i} \alpha\}\right] = P\left[\cup_{i \in J} \{Z_i \geq \Phi^{-1}(1 - c_J w_{J,i} \alpha)\}\right].\]

Basic algorithm code is given in Bretz et al. (2011).
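A minimal sketch of this search for a bivariate fixed design, using uniroot() and mvtnorm::pmvnorm(); the equal weights are illustrative, and the correlation 0.76 is the interim H1/H2 correlation from Example 1 below.

library(mvtnorm)

# Find c_J such that P[Z_1 >= z_1 or Z_2 >= z_2] = alpha,
# where z_i = Phi^{-1}(1 - c_J * w_{J,i} * alpha)
alpha <- 0.025
w_J <- c(0.5, 0.5)           # weights w_{J,i}
rho <- 0.76                  # known correlation of (Z_1, Z_2)
sigma <- matrix(c(1, rho, rho, 1), nrow = 2)

excess <- function(c_J) {
  z <- qnorm(1 - c_J * w_J * alpha)
  (1 - pmvnorm(upper = z, sigma = sigma)[1]) - alpha
}
c_J <- uniroot(excess, interval = c(1, 2))$root
c_J                          # alpha-inflation factor (> 1)
qnorm(1 - c_J * w_J * alpha) # corresponding Z boundaries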
Assume for \(j < k\) that bounds \(c_{ij}(J), i \in J, j < k\), have already been set and remain unchanged. At analysis \(k\), compute the correlation matrix of \(Z_{ij}\), \(i \in J\), \(j = 1, \ldots, k\). Then:

i. Initialize \(\alpha_{k}^{*}(J) = \alpha_{k}(J) - \alpha_{k-1}(J)\).

ii. Set \(b_{ik} = \Phi^{-1}(1 - w_{i}(J)\alpha_{k}^{*}(J))\), \(i \in J\).

iii. Compute the type I error rate up to analysis \(k\): \[1 - Pr(\cap_{i \in J} \{Z_{ik} < b_{ik}\} \cap_{i \in J, j < k} \{Z_{ij} < c_{ij}(J)\}).\]

iv. Update \(\alpha_{k}^{*}(J)\) using root finding, repeating steps ii and iii, until the type I error rate through analysis \(k\) is controlled at \(\alpha_{k}(J)\) for \(H_J\). That is, \[1 - Pr(\cap_{i \in J} \{Z_{ik} < b_{ik}\} \cap_{i \in J, j < k} \{Z_{ij} < c_{ij}(J)\}) = \alpha_{k}(J).\]

v. Set \(c_{ik}(J) = b_{ik}\) from the previous step. The corresponding nominal \(p\)-value boundary is \(p_{ik}(J) = 1 - \Phi(c_{ik}(J)) = w_i(J)\alpha_k^*(J)\).

Note: the interim bound does not depend on future analyses. The solution only requires root finding for a single \(\alpha_{k}^{*}(J)\) at a time, \(k = 1, \ldots, K\), and requires multivariate normal computation from the mvtnorm R package (Genz et al. 2020).
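A minimal sketch of steps ii through iv at analysis \(k = 2\) for the intersection \(J = \{1, 2\}\) of Example 1. The analysis-1 bounds are the rounded WPGSD bounds reported later in this document, the correlation matrix is the Example 1 correlation restricted to \(H_1\) and \(H_2\), and the cumulative target is \(\alpha_2(J) = 0.025\); this illustrates the algorithm and is not the wpgsd implementation.

library(mvtnorm)

# Steps ii-iv at analysis k = 2 for J = {1, 2}: find alpha_star such that the
# cumulative type I error through analysis 2 equals alpha_2(J) = 0.025,
# holding the analysis-1 bounds fixed.
w_J <- c(0.5, 0.5)                      # w_i(J) for J = {1, 2}
c_prev <- qnorm(1 - c(0.0017, 0.0017))  # analysis-1 bounds (rounded, from the tables below)
alpha_2J <- 0.025                       # cumulative spend at the final analysis
corr_Z <- matrix(c(                     # corr of (Z_11, Z_21, Z_12, Z_22), Example 1
  1.00, 0.76, 0.71, 0.54,
  0.76, 1.00, 0.54, 0.71,
  0.71, 0.54, 1.00, 0.76,
  0.54, 0.71, 0.76, 1.00
), nrow = 4, byrow = TRUE)

cum_error <- function(alpha_star) {
  b_k <- qnorm(1 - w_J * alpha_star)    # step ii: candidate analysis-2 bounds
  1 - pmvnorm(upper = c(c_prev, b_k), corr = corr_Z)[1]  # step iii
}
alpha_star <- uniroot(function(a) cum_error(a) - alpha_2J,
                      interval = c(1e-6, 0.1))$root      # step iv
w_J * alpha_star                        # nominal p-value bounds, roughly 0.014 each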
Assume for \(j < k\) that bounds \(c_{ij}(J), i \in J, j < k\), have already been set and remain unchanged. At analysis \(k\), compute the correlation matrix of \(Z_{ij}\), \(i \in J\), \(j = 1, \ldots, k\). Then:

i. Determine what the nominal \(p\)-value boundary would be for each elementary hypothesis in \(J\) for a weighted Bonferroni test in a group sequential design as described in Maurer and Bretz (2013). Let these nominal \(p\)-value boundaries be \(\alpha^\prime_{ik}(J)\).

ii. Choose an inflation factor \(\xi_{k}(J) > 1\) and set \[b_{ik} = \Phi^{-1}(1 - \xi_k(J) \alpha^\prime_{ik}(J)).\]

iii. Update \(\xi_k(J)\) until the type I error rate up to analysis \(k\) is controlled at \(\alpha_{k}(J)\) for \(H_J\). That is, \[1 - Pr(\cap_{i \in J} \{Z_{ik} < b_{ik}\} \cap_{i \in J, j < k} \{Z_{ij} < c_{ij}(J)\}) = \alpha_{k}(J).\]

iv. After the appropriate \(\xi_k(J)\) has been derived, the nominal \(p\)-value boundaries are \(p_{ik}(J) = \xi_k(J)\alpha^\prime_{ik}(J)\), \(b_{ik}\) is computed as in step ii, and we set \(c_{ik}(J) = b_{ik}\).

Note: the interim bound does not depend on future analyses. The solution only requires root finding for a single \(\xi_k(J)\) at a time, \(k = 1, \ldots, K\), and requires multivariate normal computation from the mvtnorm R package (Genz et al. 2020).
We first define the transition matrix and weights as shown above in Section 2.5. Next we set up the event count table as follows:
event <- tribble(
  ~H1, ~H2, ~Analysis, ~Event,
  1, 1, 1, 100,
  2, 2, 1, 110,
  3, 3, 1, 225,
  1, 2, 1, 80,
  1, 3, 1, 100,
  2, 3, 1, 110,
  1, 1, 2, 200,
  2, 2, 2, 220,
  3, 3, 2, 450,
  1, 2, 2, 160,
  1, 3, 2, 200,
  2, 3, 2, 220
)

event %>%
  gt() %>%
  tab_header(title = "Event Count")
| Event Count ||||
| H1 | H2 | Analysis | Event |
|---|---|---|---|
| 1 | 1 | 1 | 100 |
| 2 | 2 | 1 | 110 |
| 3 | 3 | 1 | 225 |
| 1 | 2 | 1 | 80 |
| 1 | 3 | 1 | 100 |
| 2 | 3 | 1 | 110 |
| 1 | 1 | 2 | 200 |
| 2 | 2 | 2 | 220 |
| 3 | 3 | 2 | 450 |
| 1 | 2 | 2 | 160 |
| 1 | 3 | 2 | 200 |
| 2 | 3 | 2 | 220 |
# Alternatively, one can manually enter paths for analysis datasets.
# The example below uses an example dataset assuming we are currently at IA1.
paths <- system.file("extdata/", package = "wpgsd")

### Generate event count table from ADSL and ADTTE datasets
# Selection criteria for each hypothesis
h_select <- tribble(
  ~Hypothesis, ~Crit,
  1, "PARAMCD=='OS' & TRT01P %in% c('Xanomeline High Dose', 'Placebo')",
  2, "PARAMCD=='OS' & TRT01P %in% c('Xanomeline Low Dose', 'Placebo')"
)

event2 <- generate_event_table(paths, h_select,
  adsl_name = "adsl", adtte_name = "adtte",
  key_var = "USUBJID", cnsr_var = "CNSR"
)$event

event2 %>%
  gt() %>%
  tab_header(title = "Event Count - Computed from SAS Datasets Example")
| Event Count - Computed from SAS Datasets Example ||||
| H1 | H2 | Analysis | Event |
|---|---|---|---|
| 1 | 1 | 1 | 66 |
| 2 | 2 | 1 | 59 |
| 1 | 2 | 1 | 45 |
Then we compute the correlation matrix from the event count table using generate_corr(). We see that the correlations not accounted for by the Bonferroni approach are substantial; thus, we might expect a non-trivial impact on the bounds for hypothesis tests.
## Generate correlation from events
corr <- generate_corr(event)

corr %>%
  as_tibble() %>%
  gt() %>%
  fmt_number(columns = everything(), decimals = 2) %>%
  tab_header(title = "Correlation Matrix")
| Correlation Matrix ||||||
| H1_A1 | H2_A1 | H3_A1 | H1_A2 | H2_A2 | H3_A2 |
|---|---|---|---|---|---|
| 1.00 | 0.76 | 0.67 | 0.71 | 0.54 | 0.47 |
| 0.76 | 1.00 | 0.70 | 0.54 | 0.71 | 0.49 |
| 0.67 | 0.70 | 1.00 | 0.47 | 0.49 | 0.71 |
| 0.71 | 0.54 | 0.47 | 1.00 | 0.76 | 0.67 |
| 0.54 | 0.71 | 0.49 | 0.76 | 1.00 | 0.70 |
| 0.47 | 0.49 | 0.71 | 0.67 | 0.70 | 1.00 |
Bonferroni and WPGSD bounds can then be computed via generate_bounds(). In this example, we use HSD(-4) as the \(\alpha\)-spending function for all hypotheses. Of note, the generate_bounds() input type specifies the boundary type.

Compute the Bonferroni bounds.
# Bonferroni bounds
bound_Bonf <- generate_bounds(
  type = 0, k = 2, w = w, m = m,
  corr = corr, alpha = 0.025,
  sf = list(sfHSD, sfHSD, sfHSD),
  sfparm = list(-4, -4, -4),
  t = list(c(0.5, 1), c(0.5, 1), c(0.5, 1))
)

bound_Bonf %>%
  gt() %>%
  fmt_number(columns = 3:5, decimals = 4) %>%
  tab_header(title = "Bonferroni bounds")
| Bonferroni bounds |||||
| Analysis | Hypotheses | H1 | H2 | H3 |
|---|---|---|---|---|
| 1 | H1 | 0.0030 | NA | NA |
| 1 | H1, H2 | 0.0015 | 0.0015 | NA |
| 1 | H1, H2, H3 | 0.0009 | 0.0009 | 0.0012 |
| 1 | H1, H3 | 0.0013 | NA | 0.0016 |
| 1 | H2 | NA | 0.0030 | NA |
| 1 | H2, H3 | NA | 0.0013 | 0.0016 |
| 1 | H3 | NA | NA | 0.0030 |
| 2 | H1 | 0.0238 | NA | NA |
| 2 | H1, H2 | 0.0118 | 0.0118 | NA |
| 2 | H1, H2, H3 | 0.0070 | 0.0070 | 0.0094 |
| 2 | H1, H3 | 0.0106 | NA | 0.0130 |
| 2 | H2 | NA | 0.0238 | NA |
| 2 | H2, H3 | NA | 0.0106 | 0.0130 |
| 2 | H3 | NA | NA | 0.0238 |
Compute the WPGSD bounds using \(\alpha\)-spending approach 1 with HSD(-4) spending. Here the spending time is defined as the minimum of the three observed information fractions.
set.seed(1234)
# WPGSD bounds, spending approach 1
bound_WPGSD <- generate_bounds(
  type = 2, k = 2, w = w, m = m,
  corr = corr, alpha = 0.025,
  sf = sfHSD,
  sfparm = -4,
  t = c(min(100 / 200, 110 / 220, 225 / 450), 1)
)

bound_WPGSD %>%
  gt() %>%
  fmt_number(columns = 3:5, decimals = 4) %>%
  tab_header(title = "WPGSD bounds")
| WPGSD bounds |||||
| Analysis | Hypotheses | H1 | H2 | H3 |
|---|---|---|---|---|
| 1 | H1 | 0.0030 | NA | NA |
| 1 | H1, H2 | 0.0017 | 0.0017 | NA |
| 1 | H1, H2, H3 | 0.0011 | 0.0011 | 0.0014 |
| 1 | H1, H3 | 0.0014 | NA | 0.0018 |
| 1 | H2 | NA | 0.0030 | NA |
| 1 | H2, H3 | NA | 0.0015 | 0.0018 |
| 1 | H3 | NA | NA | 0.0030 |
| 2 | H1 | 0.0238 | NA | NA |
| 2 | H1, H2 | 0.0144 | 0.0144 | NA |
| 2 | H1, H2, H3 | 0.0092 | 0.0092 | 0.0123 |
| 2 | H1, H3 | 0.0122 | NA | 0.0149 |
| 2 | H2 | NA | 0.0238 | NA |
| 2 | H2, H3 | NA | 0.0124 | 0.0152 |
| 2 | H3 | NA | NA | 0.0238 |
The comparison between the Bonferroni and WPGSD bounds is shown below. Nominal levels at the final analysis obtained with the WPGSD method increase by up to 1.3× over those obtained via the Bonferroni approach.
| Bonferroni and WPGSD Bounds |||||||||
| Analysis | Hypotheses | H1.B | H2.B | H3.B | H1.W | H2.W | H3.W | xi |
|---|---|---|---|---|---|---|---|---|
| 1 | H1, H2, H3 | 0.0009 | 0.0009 | 0.0012 | 0.0011 | 0.0011 | 0.0014 | 1.1770 |
| 1 | H1, H2 | 0.0015 | 0.0015 | NA | 0.0017 | 0.0017 | NA | 1.1363 |
| 1 | H1, H3 | 0.0013 | NA | 0.0016 | 0.0014 | NA | 0.0018 | 1.0810 |
| 1 | H2, H3 | NA | 0.0013 | 0.0016 | NA | 0.0015 | 0.0018 | 1.0962 |
| 1 | H1 | 0.0030 | NA | NA | 0.0030 | NA | NA | 1.0000 |
| 1 | H2 | NA | 0.0030 | NA | NA | 0.0030 | NA | 1.0000 |
| 1 | H3 | NA | NA | 0.0030 | NA | NA | 0.0030 | 1.0000 |
| 2 | H1, H2, H3 | 0.0070 | 0.0070 | 0.0094 | 0.0092 | 0.0092 | 0.0123 | 1.3092 |
| 2 | H1, H2 | 0.0118 | 0.0118 | NA | 0.0144 | 0.0144 | NA | 1.2250 |
| 2 | H1, H3 | 0.0106 | NA | 0.0130 | 0.0122 | NA | 0.0149 | 1.1532 |
| 2 | H2, H3 | NA | 0.0106 | 0.0130 | NA | 0.0124 | 0.0152 | 1.1735 |
| 2 | H1 | 0.0238 | NA | NA | 0.0238 | NA | NA | 1.0000 |
| 2 | H2 | NA | 0.0238 | NA | NA | 0.0238 | NA | 1.0000 |
| 2 | H3 | NA | NA | 0.0238 | NA | NA | 0.0238 | 1.0000 |
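A table like the one above can be assembled by joining the two bound tables produced earlier, in the same way as in the Example 2 code later in this document. The xi column equals the ratio of the WPGSD to the Bonferroni nominal bound; computing it explicitly with dplyr::coalesce(), as sketched here, is an illustrative choice rather than necessarily how the package builds the table.

library(dplyr)
library(gt)

# Join Bonferroni and WPGSD bounds and show the inflation of the nominal
# bounds; the ratio of matching columns reproduces the xi values above.
comparison <- left_join(bound_Bonf, bound_WPGSD,
  by = c("Hypotheses", "Analysis"),
  suffix = c(".B", ".W")
) %>%
  mutate(xi = coalesce(H1.W / H1.B, H2.W / H2.B, H3.W / H3.B))

comparison %>%
  gt() %>%
  fmt_number(columns = 3:9, decimals = 4) %>%
  tab_header(title = "Bonferroni and WPGSD Bounds")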
The closed testing procedure can then be performed using closed_test().
## Observed p-values.
## The tibble must contain columns Analysis, H1, H2, etc. for all hypotheses
p_obs <- tribble(
  ~Analysis, ~H1, ~H2, ~H3,
  1, 0.01, 0.0004, 0.03,
  2, 0.05, 0.002, 0.015
)

## Closed testing ##
test_result <- closed_test(bound_WPGSD, p_obs)

p_obs %>%
  gt() %>%
  fmt_number(columns = 2:4, decimals = 8, drop_trailing_zeros = TRUE) %>%
  tab_header("Observed Nominal p-Values")
| Observed Nominal p-Values ||||
| Analysis | H1 | H2 | H3 |
|---|---|---|---|
| 1 | 0.01 | 0.0004 | 0.03 |
| 2 | 0.05 | 0.002 | 0.015 |
test_result %>%
  gt() %>%
  tab_header(title = "Closed Testing Results")
| Closed Testing Results ||||
| H1 | H2 | H3 | Analysis |
|---|---|---|---|
| Fail | Success | Fail | Analysis 1 |
| Fail | Success | Fail | Analysis 2 |
Similarly, the code below reproduces the result of Example 2 of Anderson et al. (2022), which uses spending method 3c as specified in the paper.
set.seed(1234)

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ex2 BH ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Transition matrix in Figure A2
m <- matrix(c(
  0, 0.5, 0.5,
  0.5, 0, 0.5,
  0.5, 0.5, 0
), nrow = 3, byrow = TRUE)
# Initial weights
w <- c(1 / 3, 1 / 3, 1 / 3)

# Event count of intersection of paired hypotheses - Table 2
event <- tribble(
  ~H1, ~H2, ~Analysis, ~Event,
  1, 1, 1, 155,
  2, 2, 1, 160,
  3, 3, 1, 165,
  1, 2, 1, 85,
  1, 3, 1, 85,
  2, 3, 1, 85,
  1, 1, 2, 305,
  2, 2, 2, 320,
  3, 3, 2, 335,
  1, 2, 2, 170,
  1, 3, 2, 170,
  2, 3, 2, 170
)

event %>%
  gt() %>%
  tab_header(title = "Event Count")
| Event Count ||||
| H1 | H2 | Analysis | Event |
|---|---|---|---|
| 1 | 1 | 1 | 155 |
| 2 | 2 | 1 | 160 |
| 3 | 3 | 1 | 165 |
| 1 | 2 | 1 | 85 |
| 1 | 3 | 1 | 85 |
| 2 | 3 | 1 | 85 |
| 1 | 1 | 2 | 305 |
| 2 | 2 | 2 | 320 |
| 3 | 3 | 2 | 335 |
| 1 | 2 | 2 | 170 |
| 1 | 3 | 2 | 170 |
| 2 | 3 | 2 | 170 |
# Generate correlation from events
corr <- generate_corr(event)

# Correlation matrix in Table 4
corr %>%
  as_tibble() %>%
  gt() %>%
  fmt_number(columns = everything(), decimals = 2) %>%
  tab_header(title = "Correlation Matrix")
| Correlation Matrix ||||||
| H1_A1 | H2_A1 | H3_A1 | H1_A2 | H2_A2 | H3_A2 |
|---|---|---|---|---|---|
| 1.00 | 0.54 | 0.53 | 0.71 | 0.38 | 0.37 |
| 0.54 | 1.00 | 0.52 | 0.38 | 0.71 | 0.37 |
| 0.53 | 0.52 | 1.00 | 0.38 | 0.37 | 0.70 |
| 0.71 | 0.38 | 0.38 | 1.00 | 0.54 | 0.53 |
| 0.38 | 0.71 | 0.37 | 0.54 | 1.00 | 0.52 |
| 0.37 | 0.37 | 0.70 | 0.53 | 0.52 | 1.00 |
# WPGSD bounds, spending method 3c
bound_WPGSD <- generate_bounds(
  type = 3, k = 2, w = w, m = m, corr = corr, alpha = 0.025,
  sf = list(sfLDOF, sfLDOF, sfLDOF),
  sfparm = list(0, 0, 0),
  t = list(c(155 / 305, 1), c(160 / 320, 1), c(165 / 335, 1))
)

# Bonferroni bounds
bound_Bonf <- generate_bounds(
  type = 0, k = 2, w = w, m = m, corr = corr, alpha = 0.025,
  sf = list(sfLDOF, sfLDOF, sfLDOF),
  sfparm = list(0, 0, 0),
  t = list(c(155 / 305, 1), c(160 / 320, 1), c(165 / 335, 1))
)

bounds <- left_join(bound_Bonf, bound_WPGSD,
  by = c("Hypotheses", "Analysis"),
  suffix = c(".B", ".W")
)

# Reorder for output
bounds$order <- rep(c(5, 2, 1, 3, 6, 4, 7), 2)
bounds <- bounds %>%
  arrange(Analysis, order) %>%
  select(-order)

# Table A6
bounds %>%
  gt() %>%
  fmt_number(columns = 3:9, decimals = 4) %>%
  tab_header(title = "Bonferroni and WPGSD Bounds")
| Bonferroni and WPGSD Bounds |||||||||
| Analysis | Hypotheses | H1.B | H2.B | H3.B | H1.W | H2.W | H3.W | xi |
|---|---|---|---|---|---|---|---|---|
| 1 | H1, H2, H3 | 0.0002 | 0.0002 | 0.0002 | 0.0002 | 0.0002 | 0.0002 | 1.0421 |
| 1 | H1, H2 | 0.0005 | 0.0004 | NA | 0.0005 | 0.0004 | NA | 1.0266 |
| 1 | H1, H3 | 0.0005 | NA | 0.0004 | 0.0005 | NA | 0.0004 | 1.0247 |
| 1 | H2, H3 | NA | 0.0004 | 0.0004 | NA | 0.0004 | 0.0004 | 1.0230 |
| 1 | H1 | 0.0017 | NA | NA | 0.0017 | NA | NA | 1.0000 |
| 1 | H2 | NA | 0.0015 | NA | NA | 0.0015 | NA | 1.0000 |
| 1 | H3 | NA | NA | 0.0014 | NA | NA | 0.0014 | 1.0000 |
| 2 | H1, H2, H3 | 0.0083 | 0.0083 | 0.0083 | 0.0095 | 0.0095 | 0.0095 | 1.1493 |
| 2 | H1, H2 | 0.0123 | 0.0124 | NA | 0.0135 | 0.0135 | NA | 1.0942 |
| 2 | H1, H3 | 0.0123 | NA | 0.0124 | 0.0135 | NA | 0.0135 | 1.0898 |
| 2 | H2, H3 | NA | 0.0124 | 0.0124 | NA | 0.0134 | 0.0134 | 1.0855 |
| 2 | H1 | 0.0245 | NA | NA | 0.0245 | NA | NA | 1.0000 |
| 2 | H2 | NA | 0.0245 | NA | NA | 0.0245 | NA | 1.0000 |
| 2 | H3 | NA | NA | 0.0245 | NA | NA | 0.0245 | 1.0000 |
The above illustrates the use of the WPGSD approach to compute bounds at the analysis stage. At the design stage, one can take one of the following two options: 1) The trial can first be designed as if the testing would be done with a weighted Bonferroni procedure and a conservative sample size estimate; at the analysis stage, the correlation can then be taken into consideration with the WPGSD approach for the bound calculation. 2) To adjust the sample size downward using the WPGSD approach at the design stage, one can power the study by taking the minimum \(p\)-value bound for a given individual hypothesis from the WPGSD table (with an assumed correlation structure). For example, for \(H_2\) of Example 1, this is \(\min(0.0011, 0.0017, 0.0010, 0.0030) = 0.0010\) for \(k = 1\) and \(\min(0.0092, 0.0144, 0.0081, 0.0238) = 0.0081\) for \(k = 2\). Then the \(H_2\) bounds of 0.0010 (\(k = 1\)) and 0.0081 (\(k = 2\)) can be used to power \(H_2\). An R function for the second option is in development.
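A minimal dplyr sketch of how the per-hypothesis minimum could be extracted from a bound table such as the bound_WPGSD object computed above; the object and column names follow this document, and the resulting minima depend on the assumed correlation structure and spending configuration.

library(dplyr)

# Minimum nominal p-value bound for H2 across all intersections containing H2,
# by analysis; these minima can be used to power H2 at the design stage.
bound_WPGSD %>%
  filter(!is.na(H2)) %>%
  group_by(Analysis) %>%
  summarize(min_bound_H2 = min(H2))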
The WPGSD approach provides a unification of previous work on parametric testing in group sequential designs. It enables more complex scenarios and requires attention to consonance and intersection hypotheses. Although detailed closed testing is required, this should not be a deterrent. The approach accommodates various spending approaches and provides more relaxed bounds and improved power compared to the Bonferroni approach.