From 0671fca905bd3a4e73f462422f132aab2ba8a16e Mon Sep 17 00:00:00 2001 From: Hannah Frick Date: Wed, 8 Feb 2023 16:24:12 +0000 Subject: [PATCH] update roxygen2 version --- DESCRIPTION | 2 +- man/details_C5_rules_C5.0.Rd | 9 ++++---- man/details_auto_ml_h2o.Rd | 10 ++++----- man/details_bag_mars_earth.Rd | 6 ++--- man/details_bag_mlp_nnet.Rd | 3 +-- man/details_bag_tree_C5.0.Rd | 3 +-- man/details_bag_tree_rpart.Rd | 3 +-- man/details_bart_dbarts.Rd | 11 +++++----- man/details_boost_tree_C5.0.Rd | 7 +++--- man/details_boost_tree_h2o.Rd | 9 ++++---- man/details_boost_tree_lightgbm.Rd | 11 +++++----- man/details_boost_tree_mboost.Rd | 3 +-- man/details_boost_tree_spark.Rd | 15 +++++-------- man/details_boost_tree_xgboost.Rd | 16 ++++++-------- man/details_cubist_rules_Cubist.Rd | 9 ++++---- man/details_decision_tree_C5.0.Rd | 3 +-- man/details_decision_tree_partykit.Rd | 3 +-- man/details_decision_tree_rpart.Rd | 3 +-- man/details_decision_tree_spark.Rd | 12 +++++----- man/details_discrim_flexible_earth.Rd | 7 +++--- man/details_discrim_linear_MASS.Rd | 3 +-- man/details_discrim_linear_sparsediscrim.Rd | 7 +++--- man/details_discrim_quad_MASS.Rd | 3 +-- man/details_discrim_quad_sparsediscrim.Rd | 3 +-- man/details_discrim_regularized_klaR.Rd | 11 +++++----- man/details_gen_additive_mod_mgcv.Rd | 7 +++--- man/details_linear_reg_brulee.Rd | 7 +++--- man/details_linear_reg_glm.Rd | 3 +-- man/details_linear_reg_glmer.Rd | 12 +++++----- man/details_linear_reg_glmnet.Rd | 10 ++++----- man/details_linear_reg_gls.Rd | 4 ++-- man/details_linear_reg_h2o.Rd | 3 +-- man/details_linear_reg_keras.Rd | 4 ++-- man/details_linear_reg_lm.Rd | 3 +-- man/details_linear_reg_lme.Rd | 12 +++++----- man/details_linear_reg_lmer.Rd | 12 +++++----- man/details_linear_reg_spark.Rd | 20 ++++++++--------- man/details_linear_reg_stan.Rd | 11 +++++----- man/details_linear_reg_stan_glmer.Rd | 9 ++++---- man/details_logistic_reg_LiblineaR.Rd | 10 ++++----- man/details_logistic_reg_brulee.Rd | 7 +++--- man/details_logistic_reg_glm.Rd | 3 +-- man/details_logistic_reg_glmer.Rd | 12 +++++----- man/details_logistic_reg_glmnet.Rd | 14 +++++------- man/details_logistic_reg_h2o.Rd | 3 +-- man/details_logistic_reg_keras.Rd | 4 ++-- man/details_logistic_reg_spark.Rd | 20 ++++++++--------- man/details_logistic_reg_stan.Rd | 9 ++++---- man/details_logistic_reg_stan_glmer.Rd | 9 ++++---- man/details_mars_earth.Rd | 6 ++--- man/details_mlp_brulee.Rd | 7 +++--- man/details_mlp_h2o.Rd | 23 ++++++++++---------- man/details_mlp_keras.Rd | 3 +-- man/details_mlp_nnet.Rd | 3 +-- man/details_multinom_reg_brulee.Rd | 7 +++--- man/details_multinom_reg_glmnet.Rd | 14 +++++------- man/details_multinom_reg_h2o.Rd | 3 +-- man/details_multinom_reg_keras.Rd | 4 ++-- man/details_multinom_reg_nnet.Rd | 7 +++--- man/details_multinom_reg_spark.Rd | 20 ++++++++--------- man/details_naive_Bayes_h2o.Rd | 8 +++---- man/details_naive_Bayes_klaR.Rd | 3 +-- man/details_naive_Bayes_naivebayes.Rd | 3 +-- man/details_nearest_neighbor_kknn.Rd | 7 +++--- man/details_pls_mixOmics.Rd | 7 +++--- man/details_poisson_reg_glmer.Rd | 12 +++++----- man/details_poisson_reg_glmnet.Rd | 7 +++--- man/details_poisson_reg_h2o.Rd | 3 +-- man/details_poisson_reg_stan.Rd | 8 +++---- man/details_poisson_reg_stan_glmer.Rd | 9 ++++---- man/details_proportional_hazards_glmnet.Rd | 11 +++++----- man/details_proportional_hazards_survival.Rd | 3 +-- man/details_rand_forest_aorsf.Rd | 4 ++-- man/details_rand_forest_partykit.Rd | 3 +-- man/details_rand_forest_randomForest.Rd | 3 +-- 
man/details_rand_forest_ranger.Rd | 3 +-- man/details_rand_forest_spark.Rd | 12 +++++----- man/details_rule_fit_h2o.Rd | 8 +++---- man/details_rule_fit_xrf.Rd | 13 +++++------ man/details_surv_reg_survival.Rd | 4 ++-- man/details_survival_reg_survival.Rd | 4 ++-- man/details_svm_linear_LiblineaR.Rd | 3 +-- man/details_svm_linear_kernlab.Rd | 6 ++--- man/details_svm_poly_kernlab.Rd | 6 ++--- man/details_svm_rbf_kernlab.Rd | 6 ++--- 85 files changed, 273 insertions(+), 360 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 7897b2213..8d5f7fe66 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -75,4 +75,4 @@ Config/testthat/edition: 3 Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) -RoxygenNote: 7.2.1.9000 +RoxygenNote: 7.2.3 diff --git a/man/details_C5_rules_C5.0.Rd b/man/details_C5_rules_C5.0.Rd index 965278933..9ca163215 100644 --- a/man/details_C5_rules_C5.0.Rd +++ b/man/details_C5_rules_C5.0.Rd @@ -84,11 +84,10 @@ functions from the \href{https://butcher.tidymodels.org}{butcher} package. \item Quinlan R (1992). “Learning with Continuous Classes.” Proceedings of the 5th Australian Joint Conference On Artificial Intelligence, pp. 343-348. -\item Quinlan R (1993).”Combining Instance-Based and Model-Based -Learning.” Proceedings of the Tenth International Conference on -Machine Learning, pp. 236-243. -\item Kuhn M and Johnson K (2013). \emph{Applied Predictive Modeling}. -Springer. +\item Quinlan R (1993).”Combining Instance-Based and Model-Based Learning.” +Proceedings of the Tenth International Conference on Machine Learning, +pp. 236-243. +\item Kuhn M and Johnson K (2013). \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_auto_ml_h2o.Rd b/man/details_auto_ml_h2o.Rd index 569cdc1e8..b3e541004 100644 --- a/man/details_auto_ml_h2o.Rd +++ b/man/details_auto_ml_h2o.Rd @@ -15,11 +15,11 @@ This model has no tuning parameters. Engine arguments of interest \itemize{ -\item \code{max_runtime_secs} and \code{max_models}: controls the maximum running -time and number of models to build in the automatic process. -\item \code{exclude_algos} and \code{include_algos}: a character vector indicating -the excluded or included algorithms during model building. To see a -full list of supported models, see the details section in +\item \code{max_runtime_secs} and \code{max_models}: controls the maximum running time +and number of models to build in the automatic process. +\item \code{exclude_algos} and \code{include_algos}: a character vector indicating the +excluded or included algorithms during model building. To see a full +list of supported models, see the details section in \code{\link[h2o:h2o.automl]{h2o::h2o.automl()}}. \item \code{validation}: An integer between 0 and 1 specifying the \emph{proportion} of training data reserved as validation set. 
This is used by h2o for diff --git a/man/details_bag_mars_earth.Rd b/man/details_bag_mars_earth.Rd index aea7cd508..0b6759aaf 100644 --- a/man/details_bag_mars_earth.Rd +++ b/man/details_bag_mars_earth.Rd @@ -14,8 +14,7 @@ For this engine, there are multiple modes: classification and regression This model has 3 tuning parameters: \itemize{ \item \code{prod_degree}: Degree of Interaction (type: integer, default: 1L) -\item \code{prune_method}: Pruning Method (type: character, default: -‘backward’) +\item \code{prune_method}: Pruning Method (type: character, default: ‘backward’) \item \code{num_terms}: # Model Terms (type: integer, default: see below) } @@ -108,8 +107,7 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called \item Friedman, J. 1991. “Multivariate Adaptive Regression Splines.” \emph{The Annals of Statistics}, vol. 19, no. 1, pp. 1-67. \item Milborrow, S. \href{http://www.milbo.org/doc/earth-notes.pdf}{“Notes on the earth package.”} -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_bag_mlp_nnet.Rd b/man/details_bag_mlp_nnet.Rd index 7d7af070f..0f46443a9 100644 --- a/man/details_bag_mlp_nnet.Rd +++ b/man/details_bag_mlp_nnet.Rd @@ -97,8 +97,7 @@ The underlying model implementation does not allow for case weights. \itemize{ \item Breiman L. 1996. “Bagging predictors”. Machine Learning. 24 (2): 123-140 -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_bag_tree_C5.0.Rd b/man/details_bag_tree_C5.0.Rd index c6370328b..53509ac36 100644 --- a/man/details_bag_tree_C5.0.Rd +++ b/man/details_bag_tree_C5.0.Rd @@ -65,8 +65,7 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called \itemize{ \item Breiman, L. 1996. “Bagging predictors”. Machine Learning. 24 (2): 123-140 -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_bag_tree_rpart.Rd b/man/details_bag_tree_rpart.Rd index bb1f3c672..e84e45cd9 100644 --- a/man/details_bag_tree_rpart.Rd +++ b/man/details_bag_tree_rpart.Rd @@ -143,8 +143,7 @@ time. 123-140 \item Hothorn T, Lausen B, Benner A, Radespiel-Troeger M. 2004. Bagging Survival Trees. \emph{Statistics in Medicine}, 23(1), 77–91. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_bart_dbarts.Rd b/man/details_bart_dbarts.Rd index 942c8971c..5e250becc 100644 --- a/man/details_bart_dbarts.Rd +++ b/man/details_bart_dbarts.Rd @@ -18,8 +18,8 @@ This model has 4 tuning parameters: double, default: 0.95) \item \code{prior_terminal_node_expo}: Terminal Node Prior Exponent (type: double, default: 2.00) -\item \code{prior_outcome_range}: Prior for Outcome Range (type: double, -default: 2.00) +\item \code{prior_outcome_range}: Prior for Outcome Range (type: double, default: +2.00) } } @@ -33,16 +33,15 @@ to the user. Useful for “thinning” samples. formulation. \item \code{ndpost}, \code{n.samples}: The number of posterior draws after burn in, \code{ndpost} / \code{keepevery} will actually be returned. 
-\item \code{nskip}, \code{n.burn}: Number of MCMC iterations to be treated as burn -in. +\item \code{nskip}, \code{n.burn}: Number of MCMC iterations to be treated as burn in. \item \code{nchain}, \code{n.chains}: Integer specifying how many independent tree sets and fits should be calculated. \item \code{nthread}, \code{n.threads}: Integer specifying how many threads to use. Depending on the CPU architecture, using more than the number of chains can degrade performance for small/medium data sets. As such some calculations may be executed single threaded regardless. -\item \code{combinechains}, \code{combineChains}: Logical; if \code{TRUE}, samples will -be returned in arrays of dimensions equal to \code{nchain} times \code{ndpost} +\item \code{combinechains}, \code{combineChains}: Logical; if \code{TRUE}, samples will be +returned in arrays of dimensions equal to \code{nchain} times \code{ndpost} times number of observations. } } diff --git a/man/details_boost_tree_C5.0.Rd b/man/details_boost_tree_C5.0.Rd index 9c53ca52e..c55c43425 100644 --- a/man/details_boost_tree_C5.0.Rd +++ b/man/details_boost_tree_C5.0.Rd @@ -16,8 +16,8 @@ This model has 3 tuning parameters: \itemize{ \item \code{trees}: # Trees (type: integer, default: 15L) \item \code{min_n}: Minimal Node Size (type: integer, default: 2L) -\item \code{sample_size}: Proportion Observations Sampled (type: double, -default: 1.0) +\item \code{sample_size}: Proportion Observations Sampled (type: double, default: +1.0) } The implementation of C5.0 limits the number of trees to be between 1 @@ -96,8 +96,7 @@ for \code{boost_tree()} with the \code{"C5.0"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_boost_tree_h2o.Rd b/man/details_boost_tree_h2o.Rd index 76f58fb74..81c5a1974 100644 --- a/man/details_boost_tree_h2o.Rd +++ b/man/details_boost_tree_h2o.Rd @@ -21,8 +21,7 @@ This model has 8 tuning parameters: \item \code{sample_size}: # Observations Sampled (type: integer, default: 1) \item \code{mtry}: # Randomly Selected Predictors (type: integer, default: 1) \item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0) -\item \code{stop_iter}: # Iterations Before Stopping (type: integer, default: -0) +\item \code{stop_iter}: # Iterations Before Stopping (type: integer, default: 0) } \code{min_n} represents the fewest allowed observations in a terminal node, @@ -136,8 +135,8 @@ their analogue to the \code{mtry} argument as the \emph{proportion} of predictors that will be randomly sampled at each split rather than the \emph{count}. In some settings, such as when tuning over preprocessors that influence the number of predictors, this parameterization is quite -helpful—interpreting \code{mtry} as a proportion means that [0,1] is always -a valid range for that parameter, regardless of input data. +helpful—interpreting \code{mtry} as a proportion means that [0, 1] is +always a valid range for that parameter, regardless of input data. parsnip and its extensions accommodate this parameterization using the \code{counts} argument: a logical indicating whether \code{mtry} should be @@ -154,7 +153,7 @@ to \code{TRUE}.
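A minimal parsnip sketch of the h2o boosted tree parameters discussed above (illustrative only; it assumes the agua extension package is installed so the "h2o" engine is registered, and that h2o::h2o.init() has already been run):

    library(parsnip)
    # mtry here is a count of predictors, the default interpretation;
    # stop_iter = 10 turns on early stopping per the parameter list above.
    boost_tree(trees = 100, min_n = 5, mtry = 3, stop_iter = 10) %>%
      set_engine("h2o") %>%
      set_mode("classification")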
For engines that support the proportion interpretation (currently \code{"xgboost"} and \code{"xrf"}, via the rules package, and \code{"lightgbm"} via the bonsai package) the user can pass the \code{counts = FALSE} argument to \code{set_engine()} to supply \code{mtry} values -within [0,1]. +within [0, 1]. } \subsection{Initializing h2o}{ diff --git a/man/details_boost_tree_lightgbm.Rd b/man/details_boost_tree_lightgbm.Rd index 98fea2e18..bf7296493 100644 --- a/man/details_boost_tree_lightgbm.Rd +++ b/man/details_boost_tree_lightgbm.Rd @@ -137,8 +137,8 @@ their analogue to the \code{mtry} argument as the \emph{proportion} of predictors that will be randomly sampled at each split rather than the \emph{count}. In some settings, such as when tuning over preprocessors that influence the number of predictors, this parameterization is quite -helpful—interpreting \code{mtry} as a proportion means that [0,1] is always -a valid range for that parameter, regardless of input data. +helpful—interpreting \code{mtry} as a proportion means that [0, 1] is +always a valid range for that parameter, regardless of input data. parsnip and its extensions accommodate this parameterization using the \code{counts} argument: a logical indicating whether \code{mtry} should be @@ -155,7 +155,7 @@ to \code{TRUE}. For engines that support the proportion interpretation (currently \code{"xgboost"} and \code{"xrf"}, via the rules package, and \code{"lightgbm"} via the bonsai package) the user can pass the \code{counts = FALSE} argument to \code{set_engine()} to supply \code{mtry} values -within [0,1]. +within [0, 1]. } \subsection{Saving fitted model objects}{ @@ -180,7 +180,7 @@ that the booster will perform bagging at every \code{k}th boosting iteration. Thus, by default, the \code{sample_size} argument would be ignored without setting this argument manually. Other boosting libraries, like xgboost, do not have an analogous argument to \code{bagging_freq} and use \code{k = 1} when -the analogue to \code{bagging_fraction} is in (0,1). \emph{bonsai will thus +the analogue to \code{bagging_fraction} is in (0, 1). \emph{bonsai will thus automatically set} \code{bagging_freq = 1} \emph{in} \code{set_engine("lightgbm", ...)} if \code{sample_size} (i.e. \code{bagging_fraction}) is not equal to 1 and no \code{bagging_freq} value is supplied. This default can be overridden by @@ -207,8 +207,7 @@ The “Introduction to bonsai” article contains \subsection{References}{ \itemize{ \item \href{https://papers.nips.cc/paper/2017/hash/6449f44a102fde848669bdd9eb6b76fa-Abstract.html}{LightGBM: A Highly Efficient Gradient Boosting Decision Tree} -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
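The lightgbm bagging behavior described above can be sketched as follows (illustrative; it assumes the bonsai package is installed, which registers the "lightgbm" engine):

    library(bonsai)   # also loads parsnip
    # sample_size < 1 maps to bagging_fraction; bonsai would set
    # bagging_freq = 1 automatically, and an explicit value overrides it.
    boost_tree(trees = 200, sample_size = 0.8) %>%
      set_engine("lightgbm", bagging_freq = 5) %>%
      set_mode("regression")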
} } } diff --git a/man/details_boost_tree_spark.Rd b/man/details_boost_tree_spark.Rd index 7d105c259..9f8e29482 100644 --- a/man/details_boost_tree_spark.Rd +++ b/man/details_boost_tree_spark.Rd @@ -21,8 +21,7 @@ This model has 7 tuning parameters: \item \code{mtry}: # Randomly Selected Predictors (type: integer, default: see below) \item \code{min_n}: Minimal Node Size (type: integer, default: 1L) -\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: -0.0) +\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0.0) \item \code{sample_size}: # Observations Sampled (type: integer, default: 1.0) } @@ -126,15 +125,14 @@ to consider. \itemize{ \item Only the formula interface to via \code{fit()} is available; using \code{fit_xy()} will generate an error. -\item The predictions will always be in a Spark table format. The names -will be the same as documented but without the dots. +\item The predictions will always be in a Spark table format. The names will +be the same as documented but without the dots. \item There is no equivalent to factor columns in Spark tables so class predictions are returned as character columns. \item To retain the model object for a new R session (via \code{save()}), the \code{model$fit} element of the parsnip object should be serialized via -\code{ml_save(object$fit)} and separately saved to disk. In a new -session, the object can be reloaded and reattached to the parsnip -object. +\code{ml_save(object$fit)} and separately saved to disk. In a new session, +the object can be reloaded and reattached to the parsnip object. } } @@ -142,8 +140,7 @@ object. \itemize{ \item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}. O’Reilly Media -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_boost_tree_xgboost.Rd b/man/details_boost_tree_xgboost.Rd index 7ae60409a..588c7fe8f 100644 --- a/man/details_boost_tree_xgboost.Rd +++ b/man/details_boost_tree_xgboost.Rd @@ -20,10 +20,9 @@ This model has 8 tuning parameters: \item \code{mtry}: # Randomly Selected Predictors (type: integer, default: see below) \item \code{min_n}: Minimal Node Size (type: integer, default: 1L) -\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: -0.0) -\item \code{sample_size}: Proportion Observations Sampled (type: double, -default: 1.0) +\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0.0) +\item \code{sample_size}: Proportion Observations Sampled (type: double, default: +1.0) \item \code{stop_iter}: # Iterations Before Stopping (type: integer, default: Inf) } @@ -191,8 +190,8 @@ their analogue to the \code{mtry} argument as the \emph{proportion} of predictors that will be randomly sampled at each split rather than the \emph{count}. In some settings, such as when tuning over preprocessors that influence the number of predictors, this parameterization is quite -helpful—interpreting \code{mtry} as a proportion means that [0,1] is always -a valid range for that parameter, regardless of input data. +helpful—interpreting \code{mtry} as a proportion means that [0, 1] is +always a valid range for that parameter, regardless of input data. parsnip and its extensions accommodate this parameterization using the \code{counts} argument: a logical indicating whether \code{mtry} should be @@ -209,7 +208,7 @@ to \code{TRUE}.
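As a sketch of the two mtry parameterizations for the "xgboost" engine (illustrative only; both calls use the documented parsnip interface):

    library(parsnip)
    # default: mtry is a count of predictors sampled at each split
    boost_tree(mtry = 3, trees = 500) %>%
      set_engine("xgboost") %>%
      set_mode("regression")

    # counts = FALSE: mtry is the proportion of predictors, in [0, 1]
    boost_tree(mtry = 0.5, trees = 500) %>%
      set_engine("xgboost", counts = FALSE) %>%
      set_mode("regression")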
For engines that support the proportion interpretation (currently \code{"xgboost"} and \code{"xrf"}, via the rules package, and \code{"lightgbm"} via the bonsai package) the user can pass the \code{counts = FALSE} argument to \code{set_engine()} to supply \code{mtry} values -within [0,1]. +within [0, 1]. } \subsection{Early stopping}{ @@ -264,8 +263,7 @@ for \code{boost_tree()} with the \code{"xgboost"} engine. \subsection{References}{ \itemize{ \item \href{https://arxiv.org/abs/1603.02754}{XGBoost: A Scalable Tree Boosting System} -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_cubist_rules_Cubist.Rd b/man/details_cubist_rules_Cubist.Rd index 8e6c6f39f..f7a73a7f1 100644 --- a/man/details_cubist_rules_Cubist.Rd +++ b/man/details_cubist_rules_Cubist.Rd @@ -64,11 +64,10 @@ are not required for this model. \item Quinlan R (1992). “Learning with Continuous Classes.” Proceedings of the 5th Australian Joint Conference On Artificial Intelligence, pp. 343-348. -\item Quinlan R (1993).”Combining Instance-Based and Model-Based -Learning.” Proceedings of the Tenth International Conference on -Machine Learning, pp. 236-243. -\item Kuhn M and Johnson K (2013). \emph{Applied Predictive Modeling}. -Springer. +\item Quinlan R (1993).”Combining Instance-Based and Model-Based Learning.” +Proceedings of the Tenth International Conference on Machine Learning, +pp. 236-243. +\item Kuhn M and Johnson K (2013). \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_decision_tree_C5.0.Rd b/man/details_decision_tree_C5.0.Rd index 900fc9079..b97b7c6fb 100644 --- a/man/details_decision_tree_C5.0.Rd +++ b/man/details_decision_tree_C5.0.Rd @@ -76,8 +76,7 @@ for \code{decision_tree()} with the \code{"C5.0"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_decision_tree_partykit.Rd b/man/details_decision_tree_partykit.Rd index a7746f10b..87afcbcaa 100644 --- a/man/details_decision_tree_partykit.Rd +++ b/man/details_decision_tree_partykit.Rd @@ -134,8 +134,7 @@ time. \subsection{References}{ \itemize{ \item \href{https://jmlr.org/papers/v16/hothorn15a.html}{partykit: A Modular Toolkit for Recursive Partytioning in R} -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_decision_tree_rpart.Rd b/man/details_decision_tree_rpart.Rd index abe3c3934..0d1064f1c 100644 --- a/man/details_decision_tree_rpart.Rd +++ b/man/details_decision_tree_rpart.Rd @@ -141,8 +141,7 @@ for \code{decision_tree()} with the \code{"rpart"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_decision_tree_spark.Rd b/man/details_decision_tree_spark.Rd index c1ab4a8ee..8e3b362a1 100644 --- a/man/details_decision_tree_spark.Rd +++ b/man/details_decision_tree_spark.Rd @@ -92,22 +92,20 @@ to consider. \itemize{ \item Only the formula interface to via \code{fit()} is available; using \code{fit_xy()} will generate an error.
-\item The predictions will always be in a Spark table format. The names -will be the same as documented but without the dots. +\item The predictions will always be in a Spark table format. The names will +be the same as documented but without the dots. \item There is no equivalent to factor columns in Spark tables so class predictions are returned as character columns. \item To retain the model object for a new R session (via \code{save()}), the \code{model$fit} element of the parsnip object should be serialized via -\code{ml_save(object$fit)} and separately saved to disk. In a new -session, the object can be reloaded and reattached to the parsnip -object. +\code{ml_save(object$fit)} and separately saved to disk. In a new session, +the object can be reloaded and reattached to the parsnip object. } } \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_discrim_flexible_earth.Rd b/man/details_discrim_flexible_earth.Rd index a2bfca061..465ff19ce 100644 --- a/man/details_discrim_flexible_earth.Rd +++ b/man/details_discrim_flexible_earth.Rd @@ -17,8 +17,7 @@ This model has 3 tuning parameter: \itemize{ \item \code{num_terms}: # Model Terms (type: integer, default: (see below)) \item \code{prod_degree}: Degree of Interaction (type: integer, default: 1L) -\item \code{prune_method}: Pruning Method (type: character, default: -‘backward’) +\item \code{prune_method}: Pruning Method (type: character, default: ‘backward’) } The default value of \code{num_terms} depends on the number of columns (\code{p}): @@ -79,8 +78,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called \item Hastie, Tibshirani & Buja (1994) Flexible Discriminant Analysis by Optimal Scoring, \emph{Journal of the American Statistical Association}, 89:428, 1255-1270 -\item Friedman (1991). Multivariate Adaptive Regression Splines. \emph{The -Annals of Statistics}, 19(1), 1-67. +\item Friedman (1991). Multivariate Adaptive Regression Splines. \emph{The Annals +of Statistics}, 19(1), 1-67. } } } diff --git a/man/details_discrim_linear_MASS.Rd b/man/details_discrim_linear_MASS.Rd index 704888d05..3f046607d 100644 --- a/man/details_discrim_linear_MASS.Rd +++ b/man/details_discrim_linear_MASS.Rd @@ -55,8 +55,7 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_discrim_linear_sparsediscrim.Rd b/man/details_discrim_linear_sparsediscrim.Rd index fb0e1cbee..3e1c2eed6 100644 --- a/man/details_discrim_linear_sparsediscrim.Rd +++ b/man/details_discrim_linear_sparsediscrim.Rd @@ -21,8 +21,7 @@ default: ‘diagonal’) The possible values of this parameter, and the functions that they execute, are: \itemize{ -\item \code{"diagonal"}: -\code{\link[sparsediscrim:lda_diag]{sparsediscrim::lda_diag()}} +\item \code{"diagonal"}: \code{\link[sparsediscrim:lda_diag]{sparsediscrim::lda_diag()}} \item \code{"min_distance"}: \code{\link[sparsediscrim:lda_emp_bayes_eigen]{sparsediscrim::lda_emp_bayes_eigen()}} \item \code{"shrink_mean"}: @@ -85,8 +84,8 @@ Volume 28, Issue 4, 15 February 2012, Pages 531-537. 
\item \code{lda_shrink_cov()}: Pang, Tong and Zhao (2009), Shrinkage-based Diagonal Discriminant Analysis and Its Applications in High-Dimensional Data. \emph{Biometrics}, 65, 1021-1029. -\item \code{lda_emp_bayes_eigen()}: Srivistava and Kubokawa (2007), Comparison -of Discrimination Methods for High Dimensional Data, \emph{Journal of the +\item \code{lda_emp_bayes_eigen()}: Srivistava and Kubokawa (2007), Comparison of +Discrimination Methods for High Dimensional Data, \emph{Journal of the Japan Statistical Society}, 37:1, 123-134. } } diff --git a/man/details_discrim_quad_MASS.Rd b/man/details_discrim_quad_MASS.Rd index 2b83b92c5..1deb9a025 100644 --- a/man/details_discrim_quad_MASS.Rd +++ b/man/details_discrim_quad_MASS.Rd @@ -56,8 +56,7 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_discrim_quad_sparsediscrim.Rd b/man/details_discrim_quad_sparsediscrim.Rd index 962ee6a8c..9d588c471 100644 --- a/man/details_discrim_quad_sparsediscrim.Rd +++ b/man/details_discrim_quad_sparsediscrim.Rd @@ -21,8 +21,7 @@ default: ‘diagonal’) The possible values of this parameter, and the functions that they execute, are: \itemize{ -\item \code{"diagonal"}: -\code{\link[sparsediscrim:qda_diag]{sparsediscrim::qda_diag()}} +\item \code{"diagonal"}: \code{\link[sparsediscrim:qda_diag]{sparsediscrim::qda_diag()}} \item \code{"shrink_mean"}: \code{\link[sparsediscrim:qda_shrink_mean]{sparsediscrim::qda_shrink_mean()}} \item \code{"shrink_cov"}: diff --git a/man/details_discrim_regularized_klaR.Rd b/man/details_discrim_regularized_klaR.Rd index fb946b0d1..a3c153d22 100644 --- a/man/details_discrim_regularized_klaR.Rd +++ b/man/details_discrim_regularized_klaR.Rd @@ -24,8 +24,8 @@ default: (see below)) Some special cases for the RDA model: \itemize{ -\item \code{frac_identity = 0} and \code{frac_common_cov = 1} is a linear -discriminant analysis (LDA) model. +\item \code{frac_identity = 0} and \code{frac_common_cov = 1} is a linear discriminant +analysis (LDA) model. \item \code{frac_identity = 0} and \code{frac_common_cov = 0} is a quadratic discriminant analysis (QDA) model. } @@ -76,10 +76,9 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Friedman, J (1989). Regularized Discriminant Analysis. \emph{Journal of -the American Statistical Association}, 84, 165-175. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Friedman, J (1989). Regularized Discriminant Analysis. \emph{Journal of the +American Statistical Association}, 84, 165-175. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_gen_additive_mod_mgcv.Rd b/man/details_gen_additive_mod_mgcv.Rd index c63b23913..1eb1b6bb9 100644 --- a/man/details_gen_additive_mod_mgcv.Rd +++ b/man/details_gen_additive_mod_mgcv.Rd @@ -14,8 +14,7 @@ For this engine, there are multiple modes: regression and classification This model has 2 tuning parameters: \itemize{ \item \code{select_features}: Select Features? 
(type: logical, default: FALSE) -\item \code{adjust_deg_free}: Smoothness Adjustment (type: double, default: -1.0) +\item \code{adjust_deg_free}: Smoothness Adjustment (type: double, default: 1.0) } } @@ -158,8 +157,8 @@ functions from the \href{https://butcher.tidymodels.org}{butcher} package. \subsection{References}{ \itemize{ \item Ross, W. 2021. \href{https://noamross.github.io/gams-in-r-course/}{\emph{Generalized Additive Models in R: A Free, Interactive Course using mgcv}} -\item Wood, S. 2017. \emph{Generalized Additive Models: An Introduction with -R}. Chapman and Hall/CRC. +\item Wood, S. 2017. \emph{Generalized Additive Models: An Introduction with R}. +Chapman and Hall/CRC. } } } diff --git a/man/details_linear_reg_brulee.Rd b/man/details_linear_reg_brulee.Rd index 82780893d..b97c95185 100644 --- a/man/details_linear_reg_brulee.Rd +++ b/man/details_linear_reg_brulee.Rd @@ -34,8 +34,8 @@ process. during optimization (\code{optimizer = "SGD"} only). \item \code{batch_size()}: An integer for the number of training set points in each batch. -\item \code{stop_iter()}: A non-negative integer for how many iterations with -no improvement before stopping. (default: 5L). +\item \code{stop_iter()}: A non-negative integer for how many iterations with no +improvement before stopping. (default: 5L). } } @@ -78,8 +78,7 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_linear_reg_glm.Rd b/man/details_linear_reg_glm.Rd index 1016481b0..ea90688d2 100644 --- a/man/details_linear_reg_glm.Rd +++ b/man/details_linear_reg_glm.Rd @@ -98,8 +98,7 @@ for \code{linear_reg()} with the \code{"glm"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_linear_reg_glmer.Rd b/man/details_linear_reg_glmer.Rd index eab0901ef..67d8745bb 100644 --- a/man/details_linear_reg_glmer.Rd +++ b/man/details_linear_reg_glmer.Rd @@ -52,7 +52,7 @@ linear predictor (\verb{\eta}) for a random intercept: \if{html}{\out{
}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\} }\if{html}{\out{
}} -where \emph{i} denotes the \code{i}th independent experimental unit +where \eqn{i} denotes the \code{i}th independent experimental unit (e.g. subject). When the model has seen subject \code{i}, it can use that subject’s data to adjust the \emph{population} intercept to be more specific to that subjects results. @@ -128,8 +128,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called \subsection{References}{ \itemize{ -\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and -S-PLUS}. Springer, New York, NY +\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}. +Springer, New York, NY \item West, K, Band Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A Practical Guide Using Statistical Software}. CRC Press. \item Thorson, J, Minto, C. 2015, Mixed effects: a unifying framework for statistical modelling in fisheries biology. \emph{ICES Journal of Marine Science}, Volume 72, Issue 5, Pages 1245–1256. \item Harrison, XA, Donaldson, L, Correa-Cano, ME, Evans, J, Fisher, DN, Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief introduction to mixed effects modelling and multi-model inference in ecology}. PeerJ 6:e4794. -\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through -Data Simulation. 2021. \emph{Advances in Methods and Practices in -Psychological Science}. +\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data +Simulation. 2021. \emph{Advances in Methods and Practices in Psychological +Science}. } } } diff --git a/man/details_linear_reg_glmnet.Rd b/man/details_linear_reg_glmnet.Rd index e9de07f00..3092e1d63 100644 --- a/man/details_linear_reg_glmnet.Rd +++ b/man/details_linear_reg_glmnet.Rd @@ -12,8 +12,7 @@ For this engine, there is a single mode: regression This model has 2 tuning parameters: \itemize{ -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0) } @@ -86,10 +85,9 @@ for \code{linear_reg()} with the \code{"glmnet"} engine. \subsection{References}{ \itemize{ -\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical -Learning with Sparsity}. CRC Press. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning +with Sparsity}. CRC Press. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_linear_reg_gls.Rd b/man/details_linear_reg_gls.Rd index 7609e9e15..1757de712 100644 --- a/man/details_linear_reg_gls.Rd +++ b/man/details_linear_reg_gls.Rd @@ -109,8 +109,8 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and -S-PLUS}. Springer, New York, NY +\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}.
+Springer, New York, NY } } } diff --git a/man/details_linear_reg_h2o.Rd b/man/details_linear_reg_h2o.Rd index 0c6f22d5f..4b37502da 100644 --- a/man/details_linear_reg_h2o.Rd +++ b/man/details_linear_reg_h2o.Rd @@ -14,8 +14,7 @@ This model has 2 tuning parameters: \itemize{ \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see below) -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) } By default, when not given a fixed \code{penalty}, diff --git a/man/details_linear_reg_keras.Rd b/man/details_linear_reg_keras.Rd index bbef52386..9142acf7c 100644 --- a/man/details_linear_reg_keras.Rd +++ b/man/details_linear_reg_keras.Rd @@ -69,8 +69,8 @@ for \code{linear_reg()} with the \code{"keras"} engine. \subsection{References}{ \itemize{ -\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased -Estimation for Nonorthogonal Problems}. Technometrics, 42(1), 80-86. +\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased Estimation +for Nonorthogonal Problems}. Technometrics, 42(1), 80-86. } } } diff --git a/man/details_linear_reg_lm.Rd b/man/details_linear_reg_lm.Rd index 4f08f9345..a046e4f67 100644 --- a/man/details_linear_reg_lm.Rd +++ b/man/details_linear_reg_lm.Rd @@ -81,8 +81,7 @@ for \code{linear_reg()} with the \code{"lm"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_linear_reg_lme.Rd b/man/details_linear_reg_lme.Rd index ffa2c41db..3c9c70096 100644 --- a/man/details_linear_reg_lme.Rd +++ b/man/details_linear_reg_lme.Rd @@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept: \if{html}{\out{
}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\} }\if{html}{\out{
}} -where \emph{i} denotes the \code{i}th independent experimental unit +where \eqn{i} denotes the \code{i}th independent experimental unit (e.g. subject). When the model has seen subject \code{i}, it can use that subject’s data to adjust the \emph{population} intercept to be more specific to that subjects results. @@ -117,8 +117,8 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and -S-PLUS}. Springer, New York, NY +\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}. +Springer, New York, NY \item West, K, Band Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A Practical Guide Using Statistical Software}. CRC Press. \item Thorson, J, Minto, C. 2015, Mixed effects: a unifying framework for statistical modelling in fisheries biology. \emph{ICES Journal of Marine Science}, Volume 72, Issue 5, Pages 1245–1256. \item Harrison, XA, Donaldson, L, Correa-Cano, ME, Evans, J, Fisher, DN, Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief introduction to mixed effects modelling and multi-model inference in ecology}. PeerJ 6:e4794. -\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through -Data Simulation. 2021. \emph{Advances in Methods and Practices in -Psychological Science}. +\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data +Simulation. 2021. \emph{Advances in Methods and Practices in Psychological +Science}. } } } diff --git a/man/details_linear_reg_lmer.Rd b/man/details_linear_reg_lmer.Rd index c204bfce3..0441f464a 100644 --- a/man/details_linear_reg_lmer.Rd +++ b/man/details_linear_reg_lmer.Rd @@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept: \if{html}{\out{
}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\} }\if{html}{\out{
}} -where \emph{i} denotes the \code{i}th independent experimental unit +where \eqn{i} denotes the \code{i}th independent experimental unit (e.g. subject). When the model has seen subject \code{i}, it can use that subject’s data to adjust the \emph{population} intercept to be more specific to that subjects results. @@ -120,8 +120,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called \subsection{References}{ \itemize{ -\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and -S-PLUS}. Springer, New York, NY +\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}. +Springer, New York, NY \item West, K, Band Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A Practical Guide Using Statistical Software}. CRC Press. \item Thorson, J, Minto, C. 2015, Mixed effects: a unifying framework for statistical modelling in fisheries biology. \emph{ICES Journal of Marine Science}, Volume 72, Issue 5, Pages 1245–1256. \item Harrison, XA, Donaldson, L, Correa-Cano, ME, Evans, J, Fisher, DN, Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief introduction to mixed effects modelling and multi-model inference in ecology}. PeerJ 6:e4794. -\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through -Data Simulation. 2021. \emph{Advances in Methods and Practices in -Psychological Science}. +\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data +Simulation. 2021. \emph{Advances in Methods and Practices in Psychological +Science}. } } } diff --git a/man/details_linear_reg_spark.Rd b/man/details_linear_reg_spark.Rd index 2133218c7..d19c16e7f 100644 --- a/man/details_linear_reg_spark.Rd +++ b/man/details_linear_reg_spark.Rd @@ -23,8 +23,8 @@ For \code{penalty}, the amount of regularization includes both the L1 penalty \itemize{ \item \code{mixture = 1} specifies a pure lasso model, \item \code{mixture = 0} specifies a ridge regression model, and -\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating -lasso and ridge. +\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso +and ridge. } } @@ -84,15 +84,14 @@ to consider. \itemize{ \item Only the formula interface to via \code{fit()} is available; using \code{fit_xy()} will generate an error. -\item The predictions will always be in a Spark table format. The names -will be the same as documented but without the dots. +\item The predictions will always be in a Spark table format. The names will +be the same as documented but without the dots. \item There is no equivalent to factor columns in Spark tables so class predictions are returned as character columns. \item To retain the model object for a new R session (via \code{save()}), the \code{model$fit} element of the parsnip object should be serialized via -\code{ml_save(object$fit)} and separately saved to disk. In a new -session, the object can be reloaded and reattached to the parsnip -object. +\code{ml_save(object$fit)} and separately saved to disk. In a new session, +the object can be reloaded and reattached to the parsnip object. } } @@ -100,10 +99,9 @@ object. \itemize{ \item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}. O’Reilly Media -\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical -Learning with Sparsity}. CRC Press. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning +with Sparsity}. CRC Press. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
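The save/reload caveat for Spark models can be sketched like so (illustrative; `fitted` stands in for a hypothetical parsnip model fit with the "spark" engine, and `sc` for an active Spark connection):

    library(sparklyr)
    # serialize the underlying Spark model (the model$fit element)
    ml_save(fitted$fit, "linear_reg_spark_model")
    # in a new R session, after reconnecting to Spark as `sc`,
    # reload and reattach the model to the parsnip object:
    fitted$fit <- ml_load(sc, "linear_reg_spark_model")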
} } } diff --git a/man/details_linear_reg_stan.Rd b/man/details_linear_reg_stan.Rd index 98652904f..fa4939331 100644 --- a/man/details_linear_reg_stan.Rd +++ b/man/details_linear_reg_stan.Rd @@ -22,12 +22,11 @@ The default is 4. \item \code{iter}: A positive integer specifying the number of iterations for each chain (including warmup). The default is 2000. \item \code{seed}: The seed for random number generation. -\item \code{cores}: Number of cores to use when executing the chains in -parallel. -\item \code{prior}: The prior distribution for the (non-hierarchical) -regression coefficients. The \code{"stan"} engine does not fit any -hierarchical terms. See the \code{"stan_glmer"} engine from the -multilevelmod package for that type of model. +\item \code{cores}: Number of cores to use when executing the chains in parallel. +\item \code{prior}: The prior distribution for the (non-hierarchical) regression +coefficients. The \code{"stan"} engine does not fit any hierarchical terms. +See the \code{"stan_glmer"} engine from the multilevelmod package for that +type of model. \item \code{prior_intercept}: The prior distribution for the intercept (after centering all predictors). } diff --git a/man/details_linear_reg_stan_glmer.Rd b/man/details_linear_reg_stan_glmer.Rd index ccdd56bb5..3bcb67ddf 100644 --- a/man/details_linear_reg_stan_glmer.Rd +++ b/man/details_linear_reg_stan_glmer.Rd @@ -23,10 +23,9 @@ The default is 4. \item \code{iter}: A positive integer specifying the number of iterations for each chain (including warmup). The default is 2000. \item \code{seed}: The seed for random number generation. -\item \code{cores}: Number of cores to use when executing the chains in -parallel. -\item \code{prior}: The prior distribution for the (non-hierarchical) -regression coefficients. +\item \code{cores}: Number of cores to use when executing the chains in parallel. +\item \code{prior}: The prior distribution for the (non-hierarchical) regression +coefficients. \item \code{prior_intercept}: The prior distribution for the intercept (after centering all predictors). } @@ -65,7 +64,7 @@ linear predictor (\verb{\eta}) for a random intercept: \if{html}{\out{
}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\} }\if{html}{\out{
}} -where \emph{i} denotes the \code{i}th independent experimental unit +where \eqn{i} denotes the \code{i}th independent experimental unit (e.g. subject). When the model has seen subject \code{i}, it can use that subject’s data to adjust the \emph{population} intercept to be more specific to that subjects results. diff --git a/man/details_logistic_reg_LiblineaR.Rd b/man/details_logistic_reg_LiblineaR.Rd index e6d10eef2..5aeaa83d7 100644 --- a/man/details_logistic_reg_LiblineaR.Rd +++ b/man/details_logistic_reg_LiblineaR.Rd @@ -14,8 +14,7 @@ For this engine, there is a single mode: classification This model has 2 tuning parameters: \itemize{ -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 0) } @@ -71,10 +70,9 @@ for \code{logistic_reg()} with the \code{"LiblineaR"} engine. \subsection{References}{ \itemize{ -\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical -Learning with Sparsity}. CRC Press. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning +with Sparsity}. CRC Press. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_logistic_reg_brulee.Rd b/man/details_logistic_reg_brulee.Rd index 0a688aa16..5a7ceba30 100644 --- a/man/details_logistic_reg_brulee.Rd +++ b/man/details_logistic_reg_brulee.Rd @@ -35,8 +35,8 @@ process. during optimization (\code{optimizer = "SGD"} only). \item \code{batch_size()}: An integer for the number of training set points in each batch. -\item \code{stop_iter()}: A non-negative integer for how many iterations with -no improvement before stopping. (default: 5L). +\item \code{stop_iter()}: A non-negative integer for how many iterations with no +improvement before stopping. (default: 5L). \item \code{class_weights()}: Numeric class weights. See \code{\link[brulee:brulee_logistic_reg]{brulee::brulee_logistic_reg()}}. } @@ -78,8 +78,7 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_logistic_reg_glm.Rd b/man/details_logistic_reg_glm.Rd index 34c9c7d45..d56803e06 100644 --- a/man/details_logistic_reg_glm.Rd +++ b/man/details_logistic_reg_glm.Rd @@ -98,8 +98,7 @@ for \code{logistic_reg()} with the \code{"glm"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_logistic_reg_glmer.Rd b/man/details_logistic_reg_glmer.Rd index fcdd796ed..b848df19c 100644 --- a/man/details_logistic_reg_glmer.Rd +++ b/man/details_logistic_reg_glmer.Rd @@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept: \if{html}{\out{
}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\} }\if{html}{\out{
}} -where \emph{i} denotes the \code{i}th independent experimental unit +where \eqn{i} denotes the \code{i}th independent experimental unit (e.g. subject). When the model has seen subject \code{i}, it can use that subject’s data to adjust the \emph{population} intercept to be more specific to that subjects results. @@ -120,8 +120,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called \subsection{References}{ \itemize{ -\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and -S-PLUS}. Springer, New York, NY +\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}. +Springer, New York, NY \item West, K, Band Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A Practical Guide Using Statistical Software}. CRC Press. \item Thorson, J, Minto, C. 2015, Mixed effects: a unifying framework for statistical modelling in fisheries biology. \emph{ICES Journal of Marine Science}, Volume 72, Issue 5, Pages 1245–1256. \item Harrison, XA, Donaldson, L, Correa-Cano, ME, Evans, J, Fisher, DN, Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief introduction to mixed effects modelling and multi-model inference in ecology}. PeerJ 6:e4794. -\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through -Data Simulation. 2021. \emph{Advances in Methods and Practices in -Psychological Science}. +\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data +Simulation. 2021. \emph{Advances in Methods and Practices in Psychological +Science}. } } } diff --git a/man/details_logistic_reg_glmnet.Rd b/man/details_logistic_reg_glmnet.Rd index 5044cdabb..2b13c3698 100644 --- a/man/details_logistic_reg_glmnet.Rd +++ b/man/details_logistic_reg_glmnet.Rd @@ -14,8 +14,7 @@ For this engine, there is a single mode: classification This model has 2 tuning parameters: \itemize{ -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0) } @@ -25,8 +24,8 @@ see \link{glmnet-details}. As for \code{mixture}: \itemize{ \item \code{mixture = 1} specifies a pure lasso model, \item \code{mixture = 0} specifies a ridge regression model, and -\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating -lasso and ridge. +\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso +and ridge. } } @@ -91,10 +90,9 @@ for \code{logistic_reg()} with the \code{"glmnet"} engine. \subsection{References}{ \itemize{ -\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical -Learning with Sparsity}. CRC Press. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning +with Sparsity}. CRC Press. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
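A minimal specification matching the mixture values described above (illustrative only; the penalty value is arbitrary):

    library(parsnip)
    # penalty must be a single fixed value for glmnet predictions;
    # mixture = 0.5 is an elastic net halfway between ridge and lasso.
    logistic_reg(penalty = 0.01, mixture = 0.5) %>%
      set_engine("glmnet")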
} } } diff --git a/man/details_logistic_reg_h2o.Rd b/man/details_logistic_reg_h2o.Rd index a1af564fe..6cde3f710 100644 --- a/man/details_logistic_reg_h2o.Rd +++ b/man/details_logistic_reg_h2o.Rd @@ -16,8 +16,7 @@ This model has 2 tuning parameters: \itemize{ \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see below) -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) } By default, when not given a fixed \code{penalty}, diff --git a/man/details_logistic_reg_keras.Rd b/man/details_logistic_reg_keras.Rd index fa5101bf4..cdf29b0d6 100644 --- a/man/details_logistic_reg_keras.Rd +++ b/man/details_logistic_reg_keras.Rd @@ -78,8 +78,8 @@ for \code{logistic_reg()} with the \code{"keras"} engine. \subsection{References}{ \itemize{ -\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased -Estimation for Nonorthogonal Problems}. Technometrics, 42(1), 80-86. +\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased Estimation +for Nonorthogonal Problems}. Technometrics, 42(1), 80-86. } } } diff --git a/man/details_logistic_reg_spark.Rd b/man/details_logistic_reg_spark.Rd index 275b5155d..bf8d9696e 100644 --- a/man/details_logistic_reg_spark.Rd +++ b/man/details_logistic_reg_spark.Rd @@ -24,8 +24,8 @@ For \code{penalty}, the amount of regularization includes both the L1 penalty \itemize{ \item \code{mixture = 1} specifies a pure lasso model, \item \code{mixture = 0} specifies a ridge regression model, and -\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating -lasso and ridge. +\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso +and ridge. } } @@ -86,15 +86,14 @@ to consider. \itemize{ \item Only the formula interface to via \code{fit()} is available; using \code{fit_xy()} will generate an error. -\item The predictions will always be in a Spark table format. The names -will be the same as documented but without the dots. +\item The predictions will always be in a Spark table format. The names will +be the same as documented but without the dots. \item There is no equivalent to factor columns in Spark tables so class predictions are returned as character columns. \item To retain the model object for a new R session (via \code{save()}), the \code{model$fit} element of the parsnip object should be serialized via -\code{ml_save(object$fit)} and separately saved to disk. In a new -session, the object can be reloaded and reattached to the parsnip -object. +\code{ml_save(object$fit)} and separately saved to disk. In a new session, +the object can be reloaded and reattached to the parsnip object. } } @@ -102,10 +101,9 @@ object. \itemize{ \item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}. O’Reilly Media -\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical -Learning with Sparsity}. CRC Press. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning +with Sparsity}. CRC Press. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_logistic_reg_stan.Rd b/man/details_logistic_reg_stan.Rd index a4268b0ef..d50263b14 100644 --- a/man/details_logistic_reg_stan.Rd +++ b/man/details_logistic_reg_stan.Rd @@ -24,11 +24,10 @@ The default is 4. 
\item \code{iter}: A positive integer specifying the number of iterations for each chain (including warmup). The default is 2000. \item \code{seed}: The seed for random number generation. -\item \code{cores}: Number of cores to use when executing the chains in -parallel. -\item \code{prior}: The prior distribution for the (non-hierarchical) -regression coefficients. This \code{"stan"} engine does not fit any -hierarchical terms. +\item \code{cores}: Number of cores to use when executing the chains in parallel. +\item \code{prior}: The prior distribution for the (non-hierarchical) regression +coefficients. This \code{"stan"} engine does not fit any hierarchical +terms. \item \code{prior_intercept}: The prior distribution for the intercept (after centering all predictors). } diff --git a/man/details_logistic_reg_stan_glmer.Rd b/man/details_logistic_reg_stan_glmer.Rd index 0dc528dc5..ce1281501 100644 --- a/man/details_logistic_reg_stan_glmer.Rd +++ b/man/details_logistic_reg_stan_glmer.Rd @@ -23,10 +23,9 @@ The default is 4. \item \code{iter}: A positive integer specifying the number of iterations for each chain (including warmup). The default is 2000. \item \code{seed}: The seed for random number generation. -\item \code{cores}: Number of cores to use when executing the chains in -parallel. -\item \code{prior}: The prior distribution for the (non-hierarchical) -regression coefficients. +\item \code{cores}: Number of cores to use when executing the chains in parallel. +\item \code{prior}: The prior distribution for the (non-hierarchical) regression +coefficients. \item \code{prior_intercept}: The prior distribution for the intercept (after centering all predictors). } @@ -64,7 +63,7 @@ linear predictor (\verb{\eta}) for a random intercept: \if{html}{\out{
}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\} }\if{html}{\out{
}} -where $i$ denotes the \code{i}th independent experimental unit (e.g. subject). When the model has seen subject \code{i}, it can use that subject’s data to adjust the \emph{population} intercept to be more specific to that subject’s results. diff --git a/man/details_mars_earth.Rd b/man/details_mars_earth.Rd index 92f2ae6a9..66b1638d9 100644 --- a/man/details_mars_earth.Rd +++ b/man/details_mars_earth.Rd @@ -16,8 +16,7 @@ This model has 3 tuning parameters: \itemize{ \item \code{num_terms}: # Model Terms (type: integer, default: see below) \item \code{prod_degree}: Degree of Interaction (type: integer, default: 1L) -\item \code{prune_method}: Pruning Method (type: character, default: -‘backward’) +\item \code{prune_method}: Pruning Method (type: character, default: ‘backward’) } The default value of \code{num_terms} depends on the number of predictor @@ -118,8 +117,7 @@ for \code{mars()} with the \code{"earth"} engine. \item Friedman, J. 1991. “Multivariate Adaptive Regression Splines.” \emph{The Annals of Statistics}, vol. 19, no. 1, pp. 1-67. \item Milborrow, S. \href{http://www.milbo.org/doc/earth-notes.pdf}{“Notes on the earth package.”} -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_mlp_brulee.Rd b/man/details_mlp_brulee.Rd index e8b8ebec9..775d831c2 100644 --- a/man/details_mlp_brulee.Rd +++ b/man/details_mlp_brulee.Rd @@ -36,8 +36,8 @@ during optimization. each batch. \item \code{class_weights()}: Numeric class weights. See \code{\link[brulee:brulee_mlp]{brulee::brulee_mlp()}}. -\item \code{stop_iter()}: A non-negative integer for how many iterations with -no improvement before stopping. (default: 5L). +\item \code{stop_iter()}: A non-negative integer for how many iterations with no +improvement before stopping (default: 5L). } } @@ -131,8 +131,7 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_mlp_h2o.Rd b/man/details_mlp_h2o.Rd index 0681982cc..cbf50f0b5 100644 --- a/man/details_mlp_h2o.Rd +++ b/man/details_mlp_h2o.Rd @@ -34,20 +34,19 @@ specifying the l1 penalty directly with the engine argument \code{l1}. Other engine arguments of interest: \itemize{ \item \code{stopping_rounds} controls early stopping rounds based on the -convergence of another engine parameter \code{stopping_metric}. By -default, \link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning} stops -training if simple moving average of length 5 of the stopping_metric -does not improve for 5 scoring events. This is mostly useful when -used alongside the engine parameter \code{validation}, which is the -\strong{proportion} of train-validation split, parsnip will split and -pass the two data frames to h2o. Then +convergence of another engine parameter \code{stopping_metric}. By default, +\link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning} stops training if +the simple moving average of length 5 of the stopping_metric does not +improve for 5 scoring events. This is mostly useful when used +alongside the engine parameter \code{validation}, which is the +\strong{proportion} of the train-validation split; parsnip will split and pass +the two data frames to h2o.
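To make the \code{validation} and \code{stopping_rounds} combination just described concrete (the bullet concludes immediately below), here is a minimal sketch in R. It assumes the agua package is installed to register parsnip's \code{"h2o"} engine, that an h2o cluster is running, and that \code{train_df} with a factor column \code{class} is a placeholder data set:

library(parsnip)
library(agua)  # assumed: registers the "h2o" engine for mlp()

h2o::h2o.init()

# validation = 0.1 asks parsnip to hold out 10% of the training data;
# h2o scores the model on that split and stops early when the moving
# average of the stopping metric fails to improve for `stopping_rounds`
# scoring events.
mlp_h2o_spec <-
  mlp(hidden_units = 100, epochs = 500) %>%
  set_engine("h2o",
             validation = 0.1,
             stopping_rounds = 5,
             stopping_metric = "logloss") %>%
  set_mode("classification")

mlp_h2o_fit <- fit(mlp_h2o_spec, class ~ ., data = train_df)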
Then \link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning} will evaluate the metric and early stopping criteria on the validation set. -\item h2o uses a 50\% dropout ratio controlled by \code{dropout} for hidden -layers by default. -\code{\link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning()}} provides an -engine argument \code{input_dropout_ratio} for dropout ratios in the -input layer, which defaults to 0. +\item h2o uses a 50\% dropout ratio controlled by \code{dropout} for hidden layers +by default. \code{\link[h2o:h2o.deeplearning]{h2o::h2o.deeplearning()}} +provides an engine argument \code{input_dropout_ratio} for dropout ratios +in the input layer, which defaults to 0. } } diff --git a/man/details_mlp_keras.Rd b/man/details_mlp_keras.Rd index 0aa3d6bc6..90b4d3fdc 100644 --- a/man/details_mlp_keras.Rd +++ b/man/details_mlp_keras.Rd @@ -118,8 +118,7 @@ for \code{mlp()} with the \code{"keras"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_mlp_nnet.Rd b/man/details_mlp_nnet.Rd index e1375f554..495b1d7b0 100644 --- a/man/details_mlp_nnet.Rd +++ b/man/details_mlp_nnet.Rd @@ -114,8 +114,7 @@ for \code{mlp()} with the \code{"nnet"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_multinom_reg_brulee.Rd b/man/details_multinom_reg_brulee.Rd index 5285ca592..8066cfffb 100644 --- a/man/details_multinom_reg_brulee.Rd +++ b/man/details_multinom_reg_brulee.Rd @@ -34,8 +34,8 @@ process. during optimization (\code{optimizer = "SGD"} only). \item \code{batch_size()}: An integer for the number of training set points in each batch. -\item \code{stop_iter()}: A non-negative integer for how many iterations with -no improvement before stopping. (default: 5L). +\item \code{stop_iter()}: A non-negative integer for how many iterations with no +improvement before stopping (default: 5L). \item \code{class_weights()}: Numeric class weights. See \code{\link[brulee:brulee_multinomial_reg]{brulee::brulee_multinomial_reg()}}. } @@ -77,8 +77,7 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_multinom_reg_glmnet.Rd b/man/details_multinom_reg_glmnet.Rd index efca15a82..ae6043c31 100644 --- a/man/details_multinom_reg_glmnet.Rd +++ b/man/details_multinom_reg_glmnet.Rd @@ -13,8 +13,7 @@ For this engine, there is a single mode: classification This model has 2 tuning parameters: \itemize{ -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0) } @@ -24,8 +23,8 @@ see \link{glmnet-details}. As for \code{mixture}: \itemize{ \item \code{mixture = 1} specifies a pure lasso model, \item \code{mixture = 0} specifies a ridge regression model, and -\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating -lasso and ridge.
+\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso +and ridge. } } @@ -90,10 +89,9 @@ functions from the \href{https://butcher.tidymodels.org}{butcher} package. \subsection{References}{ \itemize{ -\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical -Learning with Sparsity}. CRC Press. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning +with Sparsity}. CRC Press. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_multinom_reg_h2o.Rd b/man/details_multinom_reg_h2o.Rd index 265ef2896..ad1fd40c4 100644 --- a/man/details_multinom_reg_h2o.Rd +++ b/man/details_multinom_reg_h2o.Rd @@ -15,8 +15,7 @@ This model has 2 tuning parameters: \itemize{ \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see below) -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) } By default, when not given a fixed \code{penalty}, diff --git a/man/details_multinom_reg_keras.Rd b/man/details_multinom_reg_keras.Rd index 83bbe7e08..256a0461a 100644 --- a/man/details_multinom_reg_keras.Rd +++ b/man/details_multinom_reg_keras.Rd @@ -77,8 +77,8 @@ for \code{multinom_reg()} with the \code{"keras"} engine. \subsection{References}{ \itemize{ -\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased -Estimation for Nonorthogonal Problems}. Technometrics, 42(1), 80-86. +\item Hoerl, A., & Kennard, R. (2000). \emph{Ridge Regression: Biased Estimation +for Nonorthogonal Problems}. Technometrics, 42(1), 80-86. } } } diff --git a/man/details_multinom_reg_nnet.Rd b/man/details_multinom_reg_nnet.Rd index 1fdd0b3aa..1e4732d16 100644 --- a/man/details_multinom_reg_nnet.Rd +++ b/man/details_multinom_reg_nnet.Rd @@ -76,10 +76,9 @@ functions from the \href{https://butcher.tidymodels.org}{butcher} package. \itemize{ \item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}. O’Reilly Media -\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical -Learning with Sparsity}. CRC Press. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning +with Sparsity}. CRC Press. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_multinom_reg_spark.Rd b/man/details_multinom_reg_spark.Rd index ff7f9a1e7..5ab0475fa 100644 --- a/man/details_multinom_reg_spark.Rd +++ b/man/details_multinom_reg_spark.Rd @@ -23,8 +23,8 @@ For \code{penalty}, the amount of regularization includes both the L1 penalty \itemize{ \item \code{mixture = 1} specifies a pure lasso model, \item \code{mixture = 0} specifies a ridge regression model, and -\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating -lasso and ridge. +\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso +and ridge. } } @@ -85,15 +85,14 @@ to consider. \itemize{ \item Only the formula interface via \code{fit()} is available; using \code{fit_xy()} will generate an error. -\item The predictions will always be in a Spark table format.
The names will +be the same as documented but without the dots. \item There is no equivalent to factor columns in Spark tables so class predictions are returned as character columns. \item To retain the model object for a new R session (via \code{save()}), the \code{model$fit} element of the parsnip object should be serialized via -\code{ml_save(object$fit)} and separately saved to disk. In a new -session, the object can be reloaded and reattached to the parsnip -object. +\code{ml_save(object$fit)} and separately saved to disk. In a new session, +the object can be reloaded and reattached to the parsnip object. } } @@ -101,10 +100,9 @@ object. \itemize{ \item Luraschi, J, K Kuo, and E Ruiz. 2019. \emph{Mastering Spark with R}. O’Reilly Media -\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical -Learning with Sparsity}. CRC Press. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Hastie, T, R Tibshirani, and M Wainwright. 2015. \emph{Statistical Learning +with Sparsity}. CRC Press. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_naive_Bayes_h2o.Rd b/man/details_naive_Bayes_h2o.Rd index cbeb33980..c2588df4e 100644 --- a/man/details_naive_Bayes_h2o.Rd +++ b/man/details_naive_Bayes_h2o.Rd @@ -19,10 +19,10 @@ This model has 1 tuning parameter: \code{\link[h2o:h2o.naiveBayes]{h2o::h2o.naiveBayes()}} provides several engine arguments to deal with imbalances and rare classes: \itemize{ -\item \code{balance_classes} A logical value controlling over/under-sampling -(for imbalanced data). Defaults to \code{FALSE}. -\item \code{class_sampling_factors} The over/under-sampling ratios per class -(in lexicographic order). If not specified, sampling factors will be +\item \code{balance_classes} A logical value controlling over/under-sampling (for +imbalanced data). Defaults to \code{FALSE}. +\item \code{class_sampling_factors} The over/under-sampling ratios per class (in +lexicographic order). If not specified, sampling factors will be automatically computed to obtain class balance during training. Requires \code{balance_classes} to be \code{TRUE}. \item \code{min_sdev}: The minimum standard deviation to use for observations diff --git a/man/details_naive_Bayes_klaR.Rd b/man/details_naive_Bayes_klaR.Rd index 9fa60decc..c9e3dd4d1 100644 --- a/man/details_naive_Bayes_klaR.Rd +++ b/man/details_naive_Bayes_klaR.Rd @@ -65,8 +65,7 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_naive_Bayes_naivebayes.Rd b/man/details_naive_Bayes_naivebayes.Rd index ad99f199e..8df46db63 100644 --- a/man/details_naive_Bayes_naivebayes.Rd +++ b/man/details_naive_Bayes_naivebayes.Rd @@ -68,8 +68,7 @@ The underlying model implementation does not allow for case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer.
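As a hedged sketch of the class-imbalance engine arguments documented for \code{h2o::h2o.naiveBayes()} above: this assumes the discrim package supplies \code{naive_Bayes()}, that agua registers its \code{"h2o"} engine, that an h2o cluster is running, and that \code{outcome} and \code{train_df} are placeholder names:

library(parsnip)
library(discrim)  # assumed: provides naive_Bayes()
library(agua)     # assumed: registers the "h2o" engine

h2o::h2o.init()

# balance_classes = TRUE turns on over/under-sampling; the per-class
# ratios are computed automatically unless class_sampling_factors is set.
nb_spec <-
  naive_Bayes(Laplace = 1) %>%
  set_engine("h2o", balance_classes = TRUE) %>%
  set_mode("classification")

nb_fit <- fit(nb_spec, outcome ~ ., data = train_df)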
} } } diff --git a/man/details_nearest_neighbor_kknn.Rd b/man/details_nearest_neighbor_kknn.Rd index 4efd77d56..e931e78c3 100644 --- a/man/details_nearest_neighbor_kknn.Rd +++ b/man/details_nearest_neighbor_kknn.Rd @@ -14,8 +14,8 @@ For this engine, there are multiple modes: classification and regression This model has 3 tuning parameters: \itemize{ \item \code{neighbors}: # Nearest Neighbors (type: integer, default: 5L) -\item \code{weight_func}: Distance Weighting Function (type: character, -default: ‘optimal’) +\item \code{weight_func}: Distance Weighting Function (type: character, default: +‘optimal’) \item \code{dist_power}: Minkowski Distance Order (type: double, default: 2.0) } } @@ -113,8 +113,7 @@ functions from the \href{https://butcher.tidymodels.org}{butcher} package. \itemize{ \item Hechenbichler K. and Schliep K.P. (2004) \href{https://epub.ub.uni-muenchen.de/1769/}{Weighted k-Nearest-Neighbor Techniques and Ordinal Classification}, Discussion Paper 399, SFB 386, Ludwig-Maximilians University Munich -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_pls_mixOmics.Rd b/man/details_pls_mixOmics.Rd index 7b1a1bd1a..439d20695 100644 --- a/man/details_pls_mixOmics.Rd +++ b/man/details_pls_mixOmics.Rd @@ -12,8 +12,8 @@ For this engine, there are multiple modes: classification and regression This model has 2 tuning parameters: \itemize{ -\item \code{predictor_prop}: Proportion of Predictors (type: double, default: -see below) +\item \code{predictor_prop}: Proportion of Predictors (type: double, default: see +below) \item \code{num_comp}: # Components (type: integer, default: 2L) } } @@ -46,8 +46,7 @@ pls(num_comp = integer(1), predictor_prop = double(1)) \%>\% \code{\link[plsmod:pls_fit]{plsmod::pls_fit()}} is a function that: \itemize{ \item Determines the number of predictors in the data. -\item Adjusts \code{num_comp} if the value is larger than the number of -factors. +\item Adjusts \code{num_comp} if the value is larger than the number of factors. \item Determines whether sparsity is required based on the value of \code{predictor_prop}. \item Sets the \code{keepX} argument of \code{mixOmics::spls()} for sparse models. diff --git a/man/details_poisson_reg_glmer.Rd b/man/details_poisson_reg_glmer.Rd index 452148326..5a32c17bd 100644 --- a/man/details_poisson_reg_glmer.Rd +++ b/man/details_poisson_reg_glmer.Rd @@ -44,7 +44,7 @@ linear predictor (\verb{\eta}) for a random intercept: \if{html}{\out{
}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\} }\if{html}{\out{
}} -where $i$ denotes the \code{i}th independent experimental unit (e.g. subject). When the model has seen subject \code{i}, it can use that subject’s data to adjust the \emph{population} intercept to be more specific to that subject’s results. @@ -119,8 +119,8 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called \subsection{References}{ \itemize{ -\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and -S-PLUS}. Springer, New York, NY +\item J Pinheiro, and D Bates. 2000. \emph{Mixed-effects models in S and S-PLUS}. +Springer, New York, NY \item West, B, K Welch, and A Galecki. 2014. \emph{Linear Mixed Models: A Practical Guide Using Statistical Software}. CRC Press. \item Thorson, J, Minto, C. 2015, Mixed effects: a unifying framework for @@ -130,9 +130,9 @@ Science}, Volume 72, Issue 5, Pages 1245–1256. Goodwin, CED, Robinson, BS, Hodgson, DJ, Inger, R. 2018. \emph{A brief introduction to mixed effects modelling and multi-model inference in ecology}. PeerJ 6:e4794. -\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through -Data Simulation. 2021. \emph{Advances in Methods and Practices in -Psychological Science}. +\item DeBruine LM, Barr DJ. Understanding Mixed-Effects Models Through Data +Simulation. 2021. \emph{Advances in Methods and Practices in Psychological +Science}. } } } diff --git a/man/details_poisson_reg_glmnet.Rd b/man/details_poisson_reg_glmnet.Rd index e615a3371..e17beed29 100644 --- a/man/details_poisson_reg_glmnet.Rd +++ b/man/details_poisson_reg_glmnet.Rd @@ -13,8 +13,7 @@ For this engine, there is a single mode: regression This model has 2 tuning parameters: \itemize{ -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0) } @@ -24,8 +23,8 @@ see \link{glmnet-details}. As for \code{mixture}: \itemize{ \item \code{mixture = 1} specifies a pure lasso model, \item \code{mixture = 0} specifies a ridge regression model, and -\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating -lasso and ridge. +\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso +and ridge. } } diff --git a/man/details_poisson_reg_h2o.Rd b/man/details_poisson_reg_h2o.Rd index 37ce3fe01..bc8147560 100644 --- a/man/details_poisson_reg_h2o.Rd +++ b/man/details_poisson_reg_h2o.Rd @@ -15,8 +15,7 @@ This model has 2 tuning parameters: \itemize{ \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: see below) -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) } By default, when not given a fixed \code{penalty}, diff --git a/man/details_poisson_reg_stan.Rd b/man/details_poisson_reg_stan.Rd index 9c5084970..9147920d1 100644 --- a/man/details_poisson_reg_stan.Rd +++ b/man/details_poisson_reg_stan.Rd @@ -23,11 +23,9 @@ The default is 4. \item \code{iter}: A positive integer specifying the number of iterations for each chain (including warmup). The default is 2000. \item \code{seed}: The seed for random number generation. -\item \code{cores}: Number of cores to use when executing the chains in -parallel. -\item \code{prior}: The prior distribution for the (non-hierarchical) -regression coefficients.
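A short sketch of the \code{penalty}/\code{mixture} parameterization shown in the glmnet hunks above, assuming the poissonreg package (which registers the \code{"glmnet"} engine for \code{poisson_reg()}) is installed; \code{counts} and \code{train_df} are placeholder names:

library(parsnip)
library(poissonreg)  # assumed: registers the "glmnet" engine

# mixture = 1 is a pure lasso, mixture = 0 is ridge regression, and
# values in between interpolate the two penalties (an elastic net).
# glmnet fits a whole regularization path; `penalty` selects the value
# used for prediction.
pois_spec <-
  poisson_reg(penalty = 0.01, mixture = 0.5) %>%
  set_engine("glmnet")

pois_fit <- fit(pois_spec, counts ~ ., data = train_df)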
The \code{"stan"} engine does not fit any -hierarchical terms. +\item \code{cores}: Number of cores to use when executing the chains in parallel. +\item \code{prior}: The prior distribution for the (non-hierarchical) regression +coefficients. The \code{"stan"} engine does not fit any hierarchical terms. \item \code{prior_intercept}: The prior distribution for the intercept (after centering all predictors). } diff --git a/man/details_poisson_reg_stan_glmer.Rd b/man/details_poisson_reg_stan_glmer.Rd index 331251337..ef1065ada 100644 --- a/man/details_poisson_reg_stan_glmer.Rd +++ b/man/details_poisson_reg_stan_glmer.Rd @@ -23,10 +23,9 @@ The default is 4. \item \code{iter}: A positive integer specifying the number of iterations for each chain (including warmup). The default is 2000. \item \code{seed}: The seed for random number generation. -\item \code{cores}: Number of cores to use when executing the chains in -parallel. -\item \code{prior}: The prior distribution for the (non-hierarchical) -regression coefficients. +\item \code{cores}: Number of cores to use when executing the chains in parallel. +\item \code{prior}: The prior distribution for the (non-hierarchical) regression +coefficients. \item \code{prior_intercept}: The prior distribution for the intercept (after centering all predictors). } @@ -64,7 +63,7 @@ linear predictor (\verb{\eta}) for a random intercept: \if{html}{\out{
}}\preformatted{\eta_\{i\} = (\beta_0 + b_\{0i\}) + \beta_1x_\{i1\} }\if{html}{\out{
}} -where $i$ denotes the \code{i}th independent experimental unit (e.g. subject). When the model has seen subject \code{i}, it can use that subject’s data to adjust the \emph{population} intercept to be more specific to that subject’s results. diff --git a/man/details_proportional_hazards_glmnet.Rd b/man/details_proportional_hazards_glmnet.Rd index eb266d0ef..1e1f9e1a3 100644 --- a/man/details_proportional_hazards_glmnet.Rd +++ b/man/details_proportional_hazards_glmnet.Rd @@ -12,8 +12,7 @@ For this engine, there is a single mode: censored regression This model has 2 tuning parameters: \itemize{ -\item \code{penalty}: Amount of Regularization (type: double, default: see -below) +\item \code{penalty}: Amount of Regularization (type: double, default: see below) \item \code{mixture}: Proportion of Lasso Penalty (type: double, default: 1.0) } @@ -24,8 +23,8 @@ see \link{glmnet-details}. As for \itemize{ \item \code{mixture = 1} specifies a pure lasso model, \item \code{mixture = 0} specifies a ridge regression model, and -\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating -lasso and ridge. +\item \verb{0 < mixture < 1} specifies an elastic net model, interpolating lasso +and ridge. } } @@ -159,8 +158,8 @@ functions from the \href{https://butcher.tidymodels.org}{butcher} package. \item Simon N, Friedman J, Hastie T, Tibshirani R. 2011. “Regularization Paths for Cox’s Proportional Hazards Model via Coordinate Descent.” \emph{Journal of Statistical Software}, Articles 39 (5): 1–13. . -\item Hastie T, Tibshirani R, Wainwright M. 2015. \emph{Statistical Learning -with Sparsity}. CRC Press. +\item Hastie T, Tibshirani R, Wainwright M. 2015. \emph{Statistical Learning with +Sparsity}. CRC Press. \item Kuhn M, Johnson K. 2013. \emph{Applied Predictive Modeling}. Springer. } } diff --git a/man/details_proportional_hazards_survival.Rd b/man/details_proportional_hazards_survival.Rd index 4a5f4c251..1e6cb151f 100644 --- a/man/details_proportional_hazards_survival.Rd +++ b/man/details_proportional_hazards_survival.Rd @@ -129,8 +129,7 @@ The \code{fit()} and \code{fit_xy()} arguments have arguments called \subsection{References}{ \itemize{ \item Andersen P, Gill R. 1982. Cox’s regression model for counting -processes, a large sample study. \emph{Annals of Statistics} 10, -1100-1120. +processes, a large sample study. \emph{Annals of Statistics} 10, 1100-1120. } } } diff --git a/man/details_rand_forest_aorsf.Rd b/man/details_rand_forest_aorsf.Rd index e98f0c6ab..166334d9b 100644 --- a/man/details_rand_forest_aorsf.Rd +++ b/man/details_rand_forest_aorsf.Rd @@ -77,8 +77,8 @@ maximum observed time in the training data. LA, Howard G, Simon N. Oblique random survival forests. Annals of applied statistics 2019 Sep; 13(3):1847-83. DOI: 10.1214/19-AOAS1261 \item Jaeger BC, Welden S, Lenoir K, Pajewski NM. aorsf: An R package for -supervised learning using the oblique random survival forest. -Journal of Open Source Software 2022, 7(77), 1 4705. . +supervised learning using the oblique random survival forest. Journal +of Open Source Software 2022, 7(77), 1 4705. . \item Jaeger BC, Welden S, Lenoir K, Speiser JL, Segar MW, Pandey A, Pajewski NM. Accelerated and interpretable oblique random survival forests. arXiv e-prints 2022 Aug; arXiv-2208.
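To ground the random-intercept linear predictor \eta_i = (\beta_0 + b_{0i}) + \beta_1 x_{i1} shown in the glmer/stan_glmer hunks above, a minimal sketch. It assumes the multilevelmod package registers the mixed-effects engines for \code{poisson_reg()}; \code{y}, \code{x}, \code{subject}, and \code{train_df} are placeholder names:

library(parsnip)
library(multilevelmod)  # assumed: registers the "stan_glmer" engine

# The lme4-style term (1 | subject) adds a per-subject intercept b_0i
# on top of the population intercept beta_0, matching the formula above.
pois_mixed_spec <-
  poisson_reg() %>%
  set_engine("stan_glmer", chains = 4, iter = 2000, seed = 1234)

pois_mixed_fit <- fit(pois_mixed_spec, y ~ x + (1 | subject), data = train_df)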
URL: diff --git a/man/details_rand_forest_partykit.Rd b/man/details_rand_forest_partykit.Rd index 9e00500b1..25184df2e 100644 --- a/man/details_rand_forest_partykit.Rd +++ b/man/details_rand_forest_partykit.Rd @@ -109,8 +109,7 @@ time. \subsection{References}{ \itemize{ \item \href{https://jmlr.org/papers/v16/hothorn15a.html}{partykit: A Modular Toolkit for Recursive Partytioning in R} -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_rand_forest_randomForest.Rd b/man/details_rand_forest_randomForest.Rd index 8db1df3b4..a35b9fa3a 100644 --- a/man/details_rand_forest_randomForest.Rd +++ b/man/details_rand_forest_randomForest.Rd @@ -113,8 +113,7 @@ for \code{rand_forest()} with the \code{"randomForest"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_rand_forest_ranger.Rd b/man/details_rand_forest_ranger.Rd index effbfa4ee..b7bb1f813 100644 --- a/man/details_rand_forest_ranger.Rd +++ b/man/details_rand_forest_ranger.Rd @@ -140,8 +140,7 @@ for \code{rand_forest()} with the \code{"ranger"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_rand_forest_spark.Rd b/man/details_rand_forest_spark.Rd index 4a791bb6b..8fdc04559 100644 --- a/man/details_rand_forest_spark.Rd +++ b/man/details_rand_forest_spark.Rd @@ -102,15 +102,14 @@ to consider. \itemize{ \item Only the formula interface via \code{fit()} is available; using \code{fit_xy()} will generate an error. -\item The predictions will always be in a Spark table format. The names -will be the same as documented but without the dots. +\item The predictions will always be in a Spark table format. The names will +be the same as documented but without the dots. \item There is no equivalent to factor columns in Spark tables so class predictions are returned as character columns. \item To retain the model object for a new R session (via \code{save()}), the \code{model$fit} element of the parsnip object should be serialized via -\code{ml_save(object$fit)} and separately saved to disk. In a new -session, the object can be reloaded and reattached to the parsnip -object. +\code{ml_save(object$fit)} and separately saved to disk. In a new session, +the object can be reloaded and reattached to the parsnip object. } } @@ -129,8 +128,7 @@ a character string to specify the column with the numeric case weights. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_rule_fit_h2o.Rd b/man/details_rule_fit_h2o.Rd index 59f16f0a0..b4fb9486d 100644 --- a/man/details_rule_fit_h2o.Rd +++ b/man/details_rule_fit_h2o.Rd @@ -23,15 +23,15 @@ the L1 penalty (LASSO). Other engine arguments of interest: \itemize{ -\item \code{algorithm}: The algorithm to use to generate rules. should be one -of “AUTO”, “DRF”, “GBM”, defaults to “AUTO”. +\item \code{algorithm}: The algorithm to use to generate rules. Should be one of +“AUTO”, “DRF”, “GBM”; defaults to “AUTO”.
\item \code{min_rule_length}: Minimum length of tree depth, opposite of \code{tree_depth}, defaults to 3. \item \code{max_num_rules}: The maximum number of rules to return. The default value of -1 means the number of rules is selected by diminishing returns in model deviance. -\item \code{model_type}: The type of base learners in the ensemble, should be -one of: “rules_and_linear”, “rules”, “linear”, defaults to +\item \code{model_type}: The type of base learners in the ensemble, should be one +of: “rules_and_linear”, “rules”, “linear”, defaults to “rules_and_linear”. } } diff --git a/man/details_rule_fit_xrf.Rd b/man/details_rule_fit_xrf.Rd index c4429550e..ee12446f8 100644 --- a/man/details_rule_fit_xrf.Rd +++ b/man/details_rule_fit_xrf.Rd @@ -20,10 +20,9 @@ default: see below) \item \code{min_n}: Minimal Node Size (type: integer, default: 1L) \item \code{tree_depth}: Tree Depth (type: integer, default: 6L) \item \code{learn_rate}: Learning Rate (type: double, default: 0.3) -\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: -0.0) -\item \code{sample_size}: Proportion Observations Sampled (type: double, -default: 1.0) +\item \code{loss_reduction}: Minimum Loss Reduction (type: double, default: 0.0) +\item \code{sample_size}: Proportion Observations Sampled (type: double, default: +1.0) \item \code{penalty}: Amount of Regularization (type: double, default: 0.1) } } @@ -156,8 +155,8 @@ their analogue to the \code{mtry} argument as the \emph{proportion} of predictor that will be randomly sampled at each split rather than the \emph{count}. In some settings, such as when tuning over preprocessors that influence the number of predictors, this parameterization is quite -helpful—interpreting \code{mtry} as a proportion means that [0,1] is always -a valid range for that parameter, regardless of input data. +helpful—interpreting \code{mtry} as a proportion means that $\link{0, 1}$ is +always a valid range for that parameter, regardless of input data. parsnip and its extensions accommodate this parameterization using the \code{counts} argument: a logical indicating whether \code{mtry} should be @@ -174,7 +173,7 @@ to \code{TRUE}. For engines that support the proportion interpretation (currently \code{"xgboost"} and \code{"xrf"}, via the rules package, and \code{"lightgbm"} via the bonsai package) the user can pass the \code{counts = FALSE} argument to \code{set_engine()} to supply \code{mtry} values -within [0,1]. +within $\link{0, 1}$. } \subsection{Early stopping}{ diff --git a/man/details_surv_reg_survival.Rd b/man/details_surv_reg_survival.Rd index b57f851d8..91e12434d 100644 --- a/man/details_surv_reg_survival.Rd +++ b/man/details_surv_reg_survival.Rd @@ -81,8 +81,8 @@ surv_reg() \%>\% \subsection{References}{ \itemize{ -\item Kalbfleisch, J. D. and Prentice, R. L. 2002 \emph{The statistical -analysis of failure time data}, Wiley. +\item Kalbfleisch, J. D. and Prentice, R. L. 2002 \emph{The statistical analysis +of failure time data}, Wiley. } } } diff --git a/man/details_survival_reg_survival.Rd b/man/details_survival_reg_survival.Rd index 695e9f16c..2b68ebf62 100644 --- a/man/details_survival_reg_survival.Rd +++ b/man/details_survival_reg_survival.Rd @@ -106,8 +106,8 @@ functions from the \href{https://butcher.tidymodels.org}{butcher} package. \subsection{References}{ \itemize{ -\item Kalbfleisch, J. D. and Prentice, R. L. 2002 \emph{The statistical -analysis of failure time data}, Wiley. +\item Kalbfleisch, J. D. and Prentice, R. L. 2002 \emph{The statistical analysis +of failure time data}, Wiley.
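Looping back to the \code{mtry}-as-proportion discussion in the rule_fit xrf hunk above, a hedged sketch of the \code{counts} argument (assuming the rules package registers the \code{"xrf"} engine; \code{class} and \code{train_df} are placeholder names):

library(parsnip)
library(rules)  # assumed: registers the "xrf" engine for rule_fit()

# counts = FALSE reads mtry as the *proportion* of predictors sampled
# at each split, so any value in [0, 1] is valid regardless of how many
# predictors the preprocessor ends up producing.
rule_spec <-
  rule_fit(mtry = 0.75, trees = 50) %>%
  set_engine("xrf", counts = FALSE) %>%
  set_mode("classification")

rule_fit_obj <- fit(rule_spec, class ~ ., data = train_df)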
} } } diff --git a/man/details_svm_linear_LiblineaR.Rd b/man/details_svm_linear_LiblineaR.Rd index 0d7270ff5..ac1f786c1 100644 --- a/man/details_svm_linear_LiblineaR.Rd +++ b/man/details_svm_linear_LiblineaR.Rd @@ -106,8 +106,7 @@ for \code{svm_linear()} with the \code{"LiblineaR"} engine. \subsection{References}{ \itemize{ -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_svm_linear_kernlab.Rd b/man/details_svm_linear_kernlab.Rd index c2604a3c6..aa60e8fee 100644 --- a/man/details_svm_linear_kernlab.Rd +++ b/man/details_svm_linear_kernlab.Rd @@ -112,11 +112,9 @@ for \code{svm_linear()} with the \code{"kernlab"} engine. \subsection{References}{ \itemize{ \item Lin, HT, and R Weng. \href{https://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf}{“A Note on Platt’s Probabilistic Outputs for Support Vector Machines”} -\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. -\href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of +\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. \href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of Statistical Software}. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_svm_poly_kernlab.Rd b/man/details_svm_poly_kernlab.Rd index af9fe891b..30f6c00fc 100644 --- a/man/details_svm_poly_kernlab.Rd +++ b/man/details_svm_poly_kernlab.Rd @@ -124,11 +124,9 @@ functions from the \href{https://butcher.tidymodels.org}{butcher} package. \subsection{References}{ \itemize{ \item Lin, HT, and R Weng. \href{https://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf}{“A Note on Platt’s Probabilistic Outputs for Support Vector Machines”} -\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. -\href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of +\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. \href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of Statistical Software}. -\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } } diff --git a/man/details_svm_rbf_kernlab.Rd b/man/details_svm_rbf_kernlab.Rd index 033230039..7e4f8f6bc 100644 --- a/man/details_svm_rbf_kernlab.Rd +++ b/man/details_svm_rbf_kernlab.Rd @@ -124,11 +124,9 @@ for \code{svm_rbf()} with the \code{"kernlab"} engine. \subsection{References}{ \itemize{ \item Lin, HT, and R Weng. \href{https://www.csie.ntu.edu.tw/~cjlin/papers/plattprob.pdf}{“A Note on Platt’s Probabilistic Outputs for Support Vector Machines”} -\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. -\href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of +\item Karatzoglou, A, Smola, A, Hornik, K, and A Zeileis. 2004. \href{https://www.jstatsoft.org/article/view/v011i09}{“kernlab - An S4 Package for Kernel Methods in R.”}, \emph{Journal of Statistical Software}.
-\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. -Springer. +\item Kuhn, M, and K Johnson. 2013. \emph{Applied Predictive Modeling}. Springer. } } }
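Finally, a small sketch for the kernlab engines documented in these last files; \code{svm_rbf()} is shown, and \code{train_df}/\code{test_df} with a factor column \code{class} are placeholder names:

library(parsnip)

# cost and rbf_sigma map to kernlab::ksvm()'s C and sigma arguments;
# for classification, parsnip enables kernlab's probability model, which
# uses the Platt-scaling approach described in the Lin & Weng reference.
svm_spec <-
  svm_rbf(cost = 1, rbf_sigma = 0.01) %>%
  set_engine("kernlab") %>%
  set_mode("classification")

svm_fit <- fit(svm_spec, class ~ ., data = train_df)
predict(svm_fit, new_data = test_df, type = "prob")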