2 changes: 2 additions & 0 deletions NAMESPACE
@@ -187,6 +187,8 @@ export(finalize_model)
export(finalize_recipe)
export(finalize_workflow)
export(finalize_workflow_preprocessor)
export(first_eval_time)
export(first_metric)
export(fit_best)
export(fit_max_value)
export(fit_resamples)
51 changes: 51 additions & 0 deletions R/metric-selection.R
@@ -0,0 +1,51 @@
#' Tools for selecting metrics and evaluation times
#'
#' @param mtr_set A [yardstick::metric_set()].
#' @param metric A character value naming the metric to use.
#' @param eval_time An optional vector of times to compute dynamic and/or
#' integrated metrics.
#' @keywords internal
#' @export
first_metric <- function(mtr_set) {
  tibble::as_tibble(mtr_set)[1, ]
}

#' @rdname first_metric
#' @keywords internal
#' @export
first_eval_time <- function(mtr_set, metric = NULL, eval_time = NULL) {
  num_times <- length(eval_time)

  if (is.null(metric)) {
    mtr_info <- first_metric(mtr_set)
    metric <- mtr_info$metric
  } else {
    mtr_info <- tibble::as_tibble(mtr_set)
    mtr_info <- mtr_info[mtr_info$metric == metric, ]
  }

  # Not a survival metric
  if (!any(grepl("_survival_", mtr_info$class))) {
    return(NULL)
  }

  # Not a metric that requires an eval_time
  no_time_req <- c("static_survival_metric", "integrated_survival_metric")
  if (mtr_info$class %in% no_time_req) {
    if (num_times > 0) {
      cli::cli_warn("Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.")
    }
    return(NULL)
  }

  # Checks for dynamic metrics
  if (num_times == 0) {
    cli::cli_abort("A single evaluation time is required to use this metric.")
  } else if (num_times > 1) {
    eval_time <- eval_time[1]
    print_time <- format(eval_time, digits = 3)
    cli::cli_warn("{num_times} evaluation times were available; the first ({print_time}) will be used.")
  }

  eval_time
}
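
A minimal usage sketch for reviewers (not part of the diff), assuming yardstick's survival metrics where concordance_survival is static and brier_survival is dynamic; met_dyn is a hypothetical metric set:

# Reviewer sketch; assumes yardstick survival metrics and this branch of tune.
library(tune)
library(yardstick)

met_dyn <- metric_set(brier_survival, concordance_survival)

# first_metric() returns a one-row tibble (metric, class, direction) for the
# first metric in the set (here, brier_survival).
first_metric(met_dyn)

# brier_survival is dynamic, so an eval_time is required; with several
# candidate times, only the first is kept and a warning is issued.
first_eval_time(met_dyn, eval_time = c(1, 5, 10))
#> Warning: 3 evaluation times were available; the first (1) will be used.
#> [1] 1
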
9 changes: 8 additions & 1 deletion R/select_best.R
@@ -76,6 +76,8 @@ show_best.default <- function(x, ...) {
#' @export
#' @rdname show_best
show_best.tune_results <- function(x, metric = NULL, n = 5, eval_time = NULL, ...) {
  # TODO should return the as_tibble(metric_set) results to get the class etc.
  # TODO new function start
  metric <- choose_metric(metric, x)

  dots <- rlang::enquos(...)
@@ -92,8 +94,12 @@ show_best.tune_results <- function(x, metric = NULL, n = 5, eval_time = NULL, ..
    metric <- metrics
  }

  # TODO new function stop

  # get estimates/summarise
  summary_res <- summary_res %>% dplyr::filter(.metric == metric)

  # TODO split selecting the req time and seeing if it is in the data
  summary_res <- choose_eval_time(summary_res, x, eval_time)

  if (nrow(summary_res) == 0) {
@@ -349,7 +355,8 @@ middle_eval_time <- function(x) {
  eval_time
}


# NOTE this chooses the time and subsets the data; break it up to only select
# time
choose_eval_time <- function(x, object, eval_time) {
  mtrs <- .get_tune_metrics(object)
  mtrs <- tibble::as_tibble(mtrs)
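
As context for the TODOs above, a hedged sketch of the call pattern these changes support; surv_res is a placeholder for a tune_results object from a survival (censored regression) tuning run, not an object defined in this PR:

# Hypothetical call; eval_time is passed through to choose_eval_time().
show_best(surv_res, metric = "brier_survival", eval_time = 10, n = 3)
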
23 changes: 23 additions & 0 deletions man/first_metric.Rd


156 changes: 156 additions & 0 deletions tests/testthat/_snaps/eval-time-single-selection.md
@@ -0,0 +1,156 @@
# selecting single eval time - pure metric sets

Code
stc_one <- first_eval_time(met_stc, eval_time = times_1)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

---

Code
stc_multi <- first_eval_time(met_stc, eval_time = times_2)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

---

Code
first_eval_time(met_dyn, eval_time = NULL)
Condition
Error in `first_eval_time()`:
! A single evaluation time is required to use this metric.

---

Code
first_eval_time(met_dyn, "brier_survival", eval_time = NULL)
Condition
Error in `first_eval_time()`:
! A single evaluation time is required to use this metric.

---

Code
dyn_multi <- first_eval_time(met_dyn, eval_time = times_2)
Condition
Warning:
2 evaluation times were available; the first (0.714) will be used.

---

Code
int_1 <- first_eval_time(met_int, eval_time = times_1)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

---

Code
int_multi <- first_eval_time(met_int, eval_time = times_2)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

# selecting single eval time - mixed metric sets - static first

Code
stc_1 <- first_eval_time(met_mix_stc, eval_time = times_1)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

---

Code
stc_multi <- first_eval_time(met_mix_stc, eval_time = times_2)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

---

Code
stc_1 <- first_eval_time(met_mix_stc_all, eval_time = times_1)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

---

Code
stc_multi <- first_eval_time(met_mix_stc_all, eval_time = times_2)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

# selecting single eval time - mixed metric sets - dynamic first

Code
first_eval_time(met_mix_dyn, eval_time = NULL)
Condition
Error in `first_eval_time()`:
! A single evaluation time is required to use this metric.

---

Code
dyn_multi <- first_eval_time(met_mix_dyn, eval_time = times_2)
Condition
Warning:
2 evaluation times were available; the first (0.714) will be used.

---

Code
first_eval_time(met_mix_dyn_all, eval_time = NULL)
Condition
Error in `first_eval_time()`:
! A single evaluation time is required to use this metric.

---

Code
dyn_multi <- first_eval_time(met_mix_dyn_all, eval_time = times_2)
Condition
Warning:
2 evaluation times were available; the first (0.714) will be used.

# selecting single eval time - mixed metric sets - integrated first

Code
first_eval_time(met_mix_int, eval_time = times_1)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.
Output
NULL

---

Code
int_multi <- first_eval_time(met_mix_int, eval_time = times_2)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.

---

Code
first_eval_time(met_mix_int_all, eval_time = times_1)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.
Output
NULL

---

Code
int_multi <- first_eval_time(met_mix_int_all, eval_time = times_2)
Condition
Warning:
Evaluation times are only required when dynamic or integrated metrics are selected as the primary metric.
