diff --git a/R/boost_tree.R b/R/boost_tree.R
index 795c34210..034390d6b 100644
--- a/R/boost_tree.R
+++ b/R/boost_tree.R
@@ -114,12 +114,12 @@
 boost_tree <-
   function(mode = "unknown",
+           ...,
            mtry = NULL, trees = NULL, min_n = NULL, tree_depth = NULL,
            learn_rate = NULL, loss_reduction = NULL, sample_size = NULL,
-           others = list(),
-           ...) {
+           others = list()) {
     check_empty_ellipse(...)

     if (!(mode %in% boost_tree_modes))
diff --git a/R/linear_reg.R b/R/linear_reg.R
index 853a761f0..d2aed4342 100644
--- a/R/linear_reg.R
+++ b/R/linear_reg.R
@@ -105,10 +105,10 @@
 #' @importFrom purrr map_lgl
 linear_reg <-
   function(mode = "regression",
+           ...,
            penalty = NULL, mixture = NULL,
-           others = list(),
-           ...) {
+           others = list()) {
     check_empty_ellipse(...)
     if (!(mode %in% linear_reg_modes))
       stop(
diff --git a/R/logistic_reg.R b/R/logistic_reg.R
index 151d4e2a2..7051b46a6 100644
--- a/R/logistic_reg.R
+++ b/R/logistic_reg.R
@@ -103,10 +103,10 @@
 #' @importFrom purrr map_lgl
 logistic_reg <-
   function(mode = "classification",
+           ...,
            penalty = NULL, mixture = NULL,
-           others = list(),
-           ...) {
+           others = list()) {
     check_empty_ellipse(...)
     if (!(mode %in% logistic_reg_modes))
       stop(
diff --git a/R/mars.R b/R/mars.R
index 0268615bd..dbaa8e381 100644
--- a/R/mars.R
+++ b/R/mars.R
@@ -71,9 +71,9 @@
 mars <-
   function(mode = "unknown",
+           ...,
            num_terms = NULL, prod_degree = NULL, prune_method = NULL,
-           others = list(),
-           ...) {
+           others = list()) {
     check_empty_ellipse(...)

     if (!(mode %in% mars_modes))
diff --git a/R/mlp.R b/R/mlp.R
index 5f44fbfdf..e4c3df660 100644
--- a/R/mlp.R
+++ b/R/mlp.R
@@ -93,10 +93,10 @@
 mlp <-
   function(mode = "unknown",
+           ...,
            hidden_units = NULL, penalty = NULL, dropout = NULL,
            epochs = NULL, activation = NULL,
-           others = list(),
-           ...) {
+           others = list()) {
     check_empty_ellipse(...)

     act_funs <- c("linear", "softmax", "relu", "elu")
diff --git a/R/multinom_reg.R b/R/multinom_reg.R
index be0045363..6f079f167 100644
--- a/R/multinom_reg.R
+++ b/R/multinom_reg.R
@@ -85,10 +85,10 @@
 #' @importFrom purrr map_lgl
 multinom_reg <-
   function(mode = "classification",
+           ...,
            penalty = NULL, mixture = NULL,
-           others = list(),
-           ...) {
+           others = list()) {
     check_empty_ellipse(...)
     if (!(mode %in% multinom_reg_modes))
       stop(
diff --git a/R/nearest_neighbor.R b/R/nearest_neighbor.R
index 8bd53abca..499be4ea0 100644
--- a/R/nearest_neighbor.R
+++ b/R/nearest_neighbor.R
@@ -77,11 +77,11 @@
 #'
 #' @export
 nearest_neighbor <- function(mode = "unknown",
+                             ...,
                              neighbors = NULL,
                              weight_func = NULL,
                              dist_power = NULL,
-                             others = list(),
-                             ...) {
+                             others = list()) {
   check_empty_ellipse(...)
diff --git a/R/rand_forest.R b/R/rand_forest.R
index b6999b281..bfc7cc587 100644
--- a/R/rand_forest.R
+++ b/R/rand_forest.R
@@ -103,9 +103,9 @@
 rand_forest <-
   function(mode = "unknown",
+           ...,
            mtry = NULL, trees = NULL, min_n = NULL,
-           others = list(),
-           ...) {
+           others = list()) {
     check_empty_ellipse(...)

     ## TODO: make a utility function here
diff --git a/R/surv_reg.R b/R/surv_reg.R
index 0d72fb83b..16ad84b70 100644
--- a/R/surv_reg.R
+++ b/R/surv_reg.R
@@ -20,7 +20,7 @@
 #' Since survival models typically involve censoring (and require the use of
 #' [survival::Surv()] objects), the [fit()] function will require that the
 #' survival model be specified via the formula interface.
-#' 
+#'
 #' Also, for the `flexsurv::flexsurvfit` engine, the typical
 #' `strata` function cannot be used. To achieve the same effect,
 #' the extra parameter roles can be used (as described above).
@@ -51,9 +51,9 @@
 #' @export
 surv_reg <-
   function(mode = "regression",
+           ...,
            dist = NULL,
-           others = list(),
-           ...) {
+           others = list()) {
     check_empty_ellipse(...)

     if (!(mode %in% surv_reg_modes))
       stop(
diff --git a/docs/articles/articles/Classification.html b/docs/articles/articles/Classification.html
index f1f1b8a2f..9b3e6090c 100644
--- a/docs/articles/articles/Classification.html
+++ b/docs/articles/articles/Classification.html
@@ -162,17 +162,17 @@

 Classification Example

 #> # A tibble: 1 x 2
 #>   .metric .estimate
 #>   <chr>       <dbl>
-#> 1 roc_auc     0.822
+#> 1 roc_auc     0.823
 test_results %>% accuracy(truth = Status, estimate = `nnet class`)
 #> # A tibble: 1 x 2
 #>   .metric  .estimate
 #>   <chr>        <dbl>
-#> 1 accuracy     0.800
+#> 1 accuracy     0.801
 test_results %>% conf_mat(truth = Status, estimate = `nnet class`)
 #>           Truth
 #> Prediction bad good
-#>       bad  171  81
-#>       good 142 719
+#>       bad  174  82
+#>       good 139 718

-linear_reg(mode = "regression", penalty = NULL, mixture = NULL,
-  others = list(), ...)
+linear_reg(mode = "regression", ..., penalty = NULL, mixture = NULL,
+  others = list())

 # S3 method for linear_reg
 update(object, penalty = NULL, mixture = NULL,
@@ -131,6 +131,11 @@
 A single character string for the type of model. The only possible value for this model is "regression".
+...
+Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use others instead.
 penalty
@@ -151,11 +156,6 @@
 rstanarm::stan_glm, etc.). These are not evaluated until the model is fit and will be substituted into the model fit expression.
-...
-Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use others instead.
 object
diff --git a/docs/reference/logistic_reg.html b/docs/reference/logistic_reg.html
index c29d25122..c95b52713 100644
--- a/docs/reference/logistic_reg.html
+++ b/docs/reference/logistic_reg.html
@@ -117,8 +117,8 @@
 General Interface for Logistic Regression Models
-logistic_reg(mode = "classification", penalty = NULL, mixture = NULL,
-  others = list(), ...)
+logistic_reg(mode = "classification", ..., penalty = NULL,
+  mixture = NULL, others = list())

 # S3 method for logistic_reg
 update(object, penalty = NULL, mixture = NULL,
@@ -131,6 +131,11 @@
 A single character string for the type of model. The only possible value for this model is "classification".
+...
+Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use others instead.
 penalty
@@ -151,11 +156,6 @@
 rstanarm::stan_glm, etc.). These are not evaluated until the model is fit and will be substituted into the model fit expression.
-...
-Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use others instead.
 object
diff --git a/docs/reference/mars.html b/docs/reference/mars.html
index 4dab481f6..50857713b 100644
--- a/docs/reference/mars.html
+++ b/docs/reference/mars.html
@@ -120,8 +120,8 @@
 General Interface for MARS
-mars(mode = "unknown", num_terms = NULL, prod_degree = NULL,
-  prune_method = NULL, others = list(), ...)
+mars(mode = "unknown", ..., num_terms = NULL, prod_degree = NULL,
+  prune_method = NULL, others = list())

 # S3 method for mars
 update(object, num_terms = NULL, prod_degree = NULL,
@@ -135,6 +135,11 @@
 A single character string for the type of model. Possible values for this model are "unknown", "regression", or "classification".
+...
+Used for method consistency. Any arguments passed to
+the ellipses will result in an error. Use others instead.
 num_terms
@@ -156,11 +161,6 @@
 and mode = "classification", others can include the glm argument to earth::earth. If this argument is not passed, it will be added prior to the fitting occurs.
-...
-Used for method consistency. Any arguments passed to
-the ellipses will result in an error. Use others instead.
 object
diff --git a/docs/reference/mlp.html b/docs/reference/mlp.html
index 33dd68334..36106bbae 100644
--- a/docs/reference/mlp.html
+++ b/docs/reference/mlp.html
@@ -120,9 +120,9 @@
 General Interface for Single Layer Neural Network
-mlp(mode = "unknown", hidden_units = NULL, penalty = NULL,
+mlp(mode = "unknown", ..., hidden_units = NULL, penalty = NULL,
   dropout = NULL, epochs = NULL, activation = NULL,
-  others = list(), ...)
+  others = list())

 # S3 method for mlp
 update(object, hidden_units = NULL, penalty = NULL,
@@ -137,6 +137,11 @@
 A single character string for the type of model. Possible values for this model are "unknown", "regression", or "classification".
+...
+Used for method consistency. Any arguments passed to
+the ellipses will result in an error. Use others instead.
 hidden_units
@@ -169,11 +174,6 @@
 A named list of arguments to be used by the underlying models (e.g., nnet::nnet, keras::fit, keras::compile, etc.). .
-...
-Used for method consistency. Any arguments passed to
-the ellipses will result in an error. Use others instead.
 object
diff --git a/docs/reference/multinom_reg.html b/docs/reference/multinom_reg.html
index df9b907ba..fb468e37d 100644
--- a/docs/reference/multinom_reg.html
+++ b/docs/reference/multinom_reg.html
@@ -117,8 +117,8 @@
 General Interface for Multinomial Regression Models
-multinom_reg(mode = "classification", penalty = NULL, mixture = NULL,
-  others = list(), ...)
+multinom_reg(mode = "classification", ..., penalty = NULL,
+  mixture = NULL, others = list())

 # S3 method for multinom_reg
 update(object, penalty = NULL, mixture = NULL,
@@ -131,6 +131,11 @@
 A single character string for the type of model. The only possible value for this model is "classification".
+...
+Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use others instead.
 penalty
@@ -150,11 +155,6 @@
 underlying models (e.g., glmnet::glmnet etc.). These are not evaluated until the model is fit and will be substituted into the model fit expression.
-...
-Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use others instead.
 object
diff --git a/docs/reference/nearest_neighbor.html b/docs/reference/nearest_neighbor.html
index 2448260ec..af2f3cf0d 100644
--- a/docs/reference/nearest_neighbor.html
+++ b/docs/reference/nearest_neighbor.html
@@ -120,8 +120,8 @@
 General Interface for K-Nearest Neighbor Models
-nearest_neighbor(mode = "unknown", neighbors = NULL,
-  weight_func = NULL, dist_power = NULL, others = list(), ...)
+nearest_neighbor(mode = "unknown", ..., neighbors = NULL,
+  weight_func = NULL, dist_power = NULL, others = list())

 Arguments
@@ -131,6 +131,11 @@
 A single character string for the type of model. Possible values for this model are "unknown", "regression", or "classification".
+...
+Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use others instead.
 neighbors
@@ -155,11 +160,6 @@
 underlying models (e.g., kknn::train.kknn). These are not evaluated until the model is fit and will be substituted into the model fit expression.
-...
-Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use others instead.
diff --git a/docs/reference/rand_forest.html b/docs/reference/rand_forest.html
index 70c1465db..c6f5d68c0 100644
--- a/docs/reference/rand_forest.html
+++ b/docs/reference/rand_forest.html
@@ -118,8 +118,8 @@
 General Interface for Random Forest Models
-rand_forest(mode = "unknown", mtry = NULL, trees = NULL,
-  min_n = NULL, others = list(), ...)
+rand_forest(mode = "unknown", ..., mtry = NULL, trees = NULL,
+  min_n = NULL, others = list())

 # S3 method for rand_forest
 update(object, mtry = NULL, trees = NULL,
@@ -133,6 +133,11 @@
 A single character string for the type of model. Possible values for this model are "unknown", "regression", or "classification".
+...
+Used for method consistency. Any arguments passed to
+the ellipses will result in an error. Use others instead.
 mtry
@@ -154,11 +159,6 @@
 A named list of arguments to be used by the underlying models (e.g., ranger::ranger, randomForest::randomForest, etc.). .
-...
-Used for method consistency. Any arguments passed to
-the ellipses will result in an error. Use others instead.
 object
diff --git a/docs/reference/surv_reg.html b/docs/reference/surv_reg.html
index c375b04d9..2a3c77fef 100644
--- a/docs/reference/surv_reg.html
+++ b/docs/reference/surv_reg.html
@@ -115,7 +115,7 @@
 General Interface for Parametric Survival Models
-surv_reg(mode = "regression", dist = NULL, others = list(), ...)
+surv_reg(mode = "regression", ..., dist = NULL, others = list())

 # S3 method for surv_reg
 update(object, dist = NULL, others = list(),
@@ -128,6 +128,11 @@
 A single character string for the type of model. The only possible value for this model is "regression".
+...
+Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use others instead.
 dist
@@ -140,11 +145,6 @@
 underlying models (e.g., flexsurv::flexsurvreg). These are not evaluated until the model is fit and will be substituted into the model fit expression.
-...
-Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use others instead.
 object
diff --git a/man/boost_tree.Rd b/man/boost_tree.Rd
index 00a66e5ca..a7520d251 100644
--- a/man/boost_tree.Rd
+++ b/man/boost_tree.Rd
@@ -5,9 +5,9 @@
 \alias{update.boost_tree}
 \title{General Interface for Boosted Trees}
 \usage{
-boost_tree(mode = "unknown", mtry = NULL, trees = NULL,
+boost_tree(mode = "unknown", ..., mtry = NULL, trees = NULL,
   min_n = NULL, tree_depth = NULL, learn_rate = NULL,
-  loss_reduction = NULL, sample_size = NULL, others = list(), ...)
+  loss_reduction = NULL, sample_size = NULL, others = list())

 \method{update}{boost_tree}(object, mtry = NULL, trees = NULL,
   min_n = NULL, tree_depth = NULL, learn_rate = NULL,
@@ -19,6 +19,9 @@ boost_tree(mode = "unknown", mtry = NULL, trees = NULL,
 Possible values for this model are "unknown", "regression", or
 "classification".}

+\item{...}{Used for method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{mtry}{An number for the number (or proportion) of predictors that will
 be randomly sampled at each split when creating the tree models (\code{xgboost}
 only).}
@@ -45,9 +48,6 @@ each iteration while \code{C5.0} samples once during traning.}
 \item{others}{A named list of arguments to be used by
 the underlying models (e.g., \code{xgboost::xgb.train}, etc.). .}

-\item{...}{Used for method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
-
 \item{object}{A boosted tree model specification.}

 \item{fresh}{A logical for whether the arguments should be
diff --git a/man/linear_reg.Rd b/man/linear_reg.Rd
index 1b7c8bd4a..b108de728 100644
--- a/man/linear_reg.Rd
+++ b/man/linear_reg.Rd
@@ -5,8 +5,8 @@
 \alias{update.linear_reg}
 \title{General Interface for Linear Regression Models}
 \usage{
-linear_reg(mode = "regression", penalty = NULL, mixture = NULL,
-  others = list(), ...)
+linear_reg(mode = "regression", ..., penalty = NULL, mixture = NULL,
+  others = list())

 \method{update}{linear_reg}(object, penalty = NULL, mixture = NULL,
   others = list(), fresh = FALSE, ...)
@@ -15,6 +15,9 @@ linear_reg(mode = "regression", penalty = NULL, mixture = NULL,
 \item{mode}{A single character string for the type of model. The
 only possible value for this model is "regression".}

+\item{...}{Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{penalty}{An non-negative number representing the total
 amount of regularization (\code{glmnet} and \code{spark} only).}

@@ -29,9 +32,6 @@ underlying models (e.g., \code{stats::lm},
 until the model is fit and will be substituted into the model fit expression.}

-\item{...}{Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
-
 \item{object}{A linear regression model specification.}

 \item{fresh}{A logical for whether the arguments should be
diff --git a/man/logistic_reg.Rd b/man/logistic_reg.Rd
index 8694db90b..1d4fc0533 100644
--- a/man/logistic_reg.Rd
+++ b/man/logistic_reg.Rd
@@ -5,8 +5,8 @@
 \alias{update.logistic_reg}
 \title{General Interface for Logistic Regression Models}
 \usage{
-logistic_reg(mode = "classification", penalty = NULL, mixture = NULL,
-  others = list(), ...)
+logistic_reg(mode = "classification", ..., penalty = NULL,
+  mixture = NULL, others = list())

 \method{update}{logistic_reg}(object, penalty = NULL, mixture = NULL,
   others = list(), fresh = FALSE, ...)
@@ -15,6 +15,9 @@ logistic_reg(mode = "classification", penalty = NULL, mixture = NULL,
 \item{mode}{A single character string for the type of model. The
 only possible value for this model is "classification".}

+\item{...}{Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{penalty}{An non-negative number representing the total
 amount of regularization (\code{glmnet} and \code{spark} only).}

@@ -29,9 +32,6 @@ underlying models (e.g., \code{stats::glm},
 until the model is fit and will be substituted into the model fit expression.}

-\item{...}{Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
-
 \item{object}{A logistic regression model specification.}

 \item{fresh}{A logical for whether the arguments should be
diff --git a/man/mars.Rd b/man/mars.Rd
index 4d4bb280e..f19dbc139 100644
--- a/man/mars.Rd
+++ b/man/mars.Rd
@@ -5,8 +5,8 @@
 \alias{update.mars}
 \title{General Interface for MARS}
 \usage{
-mars(mode = "unknown", num_terms = NULL, prod_degree = NULL,
-  prune_method = NULL, others = list(), ...)
+mars(mode = "unknown", ..., num_terms = NULL, prod_degree = NULL,
+  prune_method = NULL, others = list())

 \method{update}{mars}(object, num_terms = NULL, prod_degree = NULL,
   prune_method = NULL, others = list(), fresh = FALSE, ...)
@@ -16,6 +16,9 @@ mars(mode = "unknown", num_terms = NULL, prod_degree = NULL,
 Possible values for this model are "unknown", "regression", or
 "classification".}

+\item{...}{Used for method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{num_terms}{The number of features that will be retained in the
 final model, including the intercept.}

@@ -29,9 +32,6 @@ and \code{mode = "classification"}, \code{others} can include the \code{glm} arg
 \code{earth::earth}. If this argument is not passed, it will be added prior to
 the fitting occurs.}

-\item{...}{Used for method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
-
 \item{object}{A MARS model specification.}

 \item{fresh}{A logical for whether the arguments should be
diff --git a/man/mlp.Rd b/man/mlp.Rd
index 887cbdef1..437e93f79 100644
--- a/man/mlp.Rd
+++ b/man/mlp.Rd
@@ -5,9 +5,9 @@
 \alias{update.mlp}
 \title{General Interface for Single Layer Neural Network}
 \usage{
-mlp(mode = "unknown", hidden_units = NULL, penalty = NULL,
+mlp(mode = "unknown", ..., hidden_units = NULL, penalty = NULL,
   dropout = NULL, epochs = NULL, activation = NULL,
-  others = list(), ...)
+  others = list())

 \method{update}{mlp}(object, hidden_units = NULL, penalty = NULL,
   dropout = NULL, epochs = NULL, activation = NULL,
@@ -18,6 +18,9 @@ mlp(mode = "unknown", hidden_units = NULL, penalty = NULL,
 Possible values for this model are "unknown", "regression", or
 "classification".}

+\item{...}{Used for method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{hidden_units}{An integer for the number of units in the hidden model.}

 \item{penalty}{A non-negative numeric value for the amount of weight
@@ -38,9 +41,6 @@ function between the hidden and output layers is automatically set to either
 underlying models (e.g., \code{nnet::nnet}, \code{keras::fit},
 \code{keras::compile}, etc.). .}

-\item{...}{Used for method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
-
 \item{object}{A random forest model specification.}

 \item{fresh}{A logical for whether the arguments should be
diff --git a/man/multinom_reg.Rd b/man/multinom_reg.Rd
index a0f71a868..91a650952 100644
--- a/man/multinom_reg.Rd
+++ b/man/multinom_reg.Rd
@@ -5,8 +5,8 @@
 \alias{update.multinom_reg}
 \title{General Interface for Multinomial Regression Models}
 \usage{
-multinom_reg(mode = "classification", penalty = NULL, mixture = NULL,
-  others = list(), ...)
+multinom_reg(mode = "classification", ..., penalty = NULL,
+  mixture = NULL, others = list())

 \method{update}{multinom_reg}(object, penalty = NULL, mixture = NULL,
   others = list(), fresh = FALSE, ...)
@@ -15,6 +15,9 @@ multinom_reg(mode = "classification", penalty = NULL, mixture = NULL,
 \item{mode}{A single character string for the type of model. The
 only possible value for this model is "classification".}

+\item{...}{Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{penalty}{An non-negative number representing the total
 amount of regularization.}

@@ -28,9 +31,6 @@ underlying models (e.g., \code{glmnet::glmnet} etc.). These are not evaluated
 until the model is fit and will be substituted into the model fit expression.}

-\item{...}{Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
-
 \item{object}{A multinomial regression model specification.}

 \item{fresh}{A logical for whether the arguments should be
diff --git a/man/nearest_neighbor.Rd b/man/nearest_neighbor.Rd
index 95ddc722b..33bf3d34c 100644
--- a/man/nearest_neighbor.Rd
+++ b/man/nearest_neighbor.Rd
@@ -4,14 +4,17 @@
 \alias{nearest_neighbor}
 \title{General Interface for K-Nearest Neighbor Models}
 \usage{
-nearest_neighbor(mode = "unknown", neighbors = NULL,
-  weight_func = NULL, dist_power = NULL, others = list(), ...)
+nearest_neighbor(mode = "unknown", ..., neighbors = NULL,
+  weight_func = NULL, dist_power = NULL, others = list())
 }
 \arguments{
 \item{mode}{A single character string for the type of model.
 Possible values for this model are \code{"unknown"}, \code{"regression"}, or
 \code{"classification"}.}

+\item{...}{Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{neighbors}{A single integer for the number of neighbors to
 consider (often called \code{k}).}

@@ -27,9 +30,6 @@ calculating Minkowski distance.}
 underlying models (e.g., \code{kknn::train.kknn}). These are not evaluated
 until the model is fit and will be substituted into the model fit
 expression.}
-
-\item{...}{Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
 }
 \description{
 \code{nearest_neighbor()} is a way to generate a \emph{specification} of a model
diff --git a/man/rand_forest.Rd b/man/rand_forest.Rd
index b65ad5762..a7f23e074 100644
--- a/man/rand_forest.Rd
+++ b/man/rand_forest.Rd
@@ -5,8 +5,8 @@
 \alias{update.rand_forest}
 \title{General Interface for Random Forest Models}
 \usage{
-rand_forest(mode = "unknown", mtry = NULL, trees = NULL,
-  min_n = NULL, others = list(), ...)
+rand_forest(mode = "unknown", ..., mtry = NULL, trees = NULL,
+  min_n = NULL, others = list())

 \method{update}{rand_forest}(object, mtry = NULL, trees = NULL,
   min_n = NULL, others = list(), fresh = FALSE, ...)
@@ -16,6 +16,9 @@ rand_forest(mode = "unknown", mtry = NULL, trees = NULL,
 Possible values for this model are "unknown", "regression", or
 "classification".}

+\item{...}{Used for method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{mtry}{An integer for the number of predictors that will
 be randomly sampled at each split when creating the tree models.}

@@ -29,9 +32,6 @@ in a node that are required for the node to be split further.}
 underlying models (e.g., \code{ranger::ranger},
 \code{randomForest::randomForest}, etc.). .}

-\item{...}{Used for method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
-
 \item{object}{A random forest model specification.}

 \item{fresh}{A logical for whether the arguments should be
diff --git a/man/surv_reg.Rd b/man/surv_reg.Rd
index 037f4504a..a9ee647a6 100644
--- a/man/surv_reg.Rd
+++ b/man/surv_reg.Rd
@@ -5,7 +5,7 @@
 \alias{update.surv_reg}
 \title{General Interface for Parametric Survival Models}
 \usage{
-surv_reg(mode = "regression", dist = NULL, others = list(), ...)
+surv_reg(mode = "regression", ..., dist = NULL, others = list())

 \method{update}{surv_reg}(object, dist = NULL, others = list(),
   fresh = FALSE, ...)
@@ -14,6 +14,9 @@ surv_reg(mode = "regression", dist = NULL, others = list(), ...)
 \item{mode}{A single character string for the type of model. The
 only possible value for this model is "regression".}

+\item{...}{Used for S3 method consistency. Any arguments passed to
+the ellipses will result in an error. Use \code{others} instead.}
+
 \item{dist}{A character string for the outcome distribution. "weibull" is
 the default.}

@@ -22,9 +25,6 @@ underlying models (e.g., \code{flexsurv::flexsurvreg}). These are not evaluated
 until the model is fit and will be substituted into the model fit
 expression.}

-\item{...}{Used for S3 method consistency. Any arguments passed to
-the ellipses will result in an error. Use \code{others} instead.}
-
 \item{object}{A survival regression model specification.}

 \item{fresh}{A logical for whether the arguments should be
diff --git a/vignettes/articles/Scratch.Rmd b/vignettes/articles/Scratch.Rmd
index 0f3df7728..e2920ef46 100644
--- a/vignettes/articles/Scratch.Rmd
+++ b/vignettes/articles/Scratch.Rmd
@@ -91,13 +91,13 @@ This is a fairly simple function that can follow a basic template. The main argu
  * The mode. If the model can do more than one mode, you might default this to "unknown". In our case, since it is only a classification model, it makes sense to default it to that mode.
  * The argument names (`subclasses` here). These should be defaulted to `NULL`.
  * An argument, `others`, that can be used to pass in other arguments to the underlying model fit functions.
- * `...`, although they are not currently used.
+ * `...`, although they are not currently used. We encourage developers to move the `...` after mode so that users are encouraged to use named arguments to the model specification.

 A basic version of the function is:

 ```{r model-fun}
 mixture_da <-
-  function(mode = "classification", subclasses = NULL, others = list(), ...) {
+  function(mode = "classification", ..., subclasses = NULL, others = list()) {
     # start with some basic error traps
     check_empty_ellipse(...)
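The user-facing effect of moving `...` directly after `mode` can be sketched with one of the patched specification functions. This is an illustrative aside rather than part of the patch, and it assumes a parsnip build that includes these changes:

library(parsnip)

# Named main arguments are matched exactly as before:
rand_forest(mode = "regression", mtry = 3, trees = 500)

# Unnamed arguments after `mode` are now captured by `...` and rejected by
# check_empty_ellipse(), so a call like the following signals an error:
# rand_forest("regression", 3, 500)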