From 9ed60b0902caa391d5b4592402e156d96ec754ee Mon Sep 17 00:00:00 2001
From: topepo
# Or the development version from GitHub:
# install.packages("devtools")
-devtools::install_github("tidymodels/parsnip")
+devtools::install_github("tidymodels/parsnip")
@@ -199,7 +200,7 @@
fit(mpg ~ ., data = mtcars)
#> parsnip model object
#>
-#> Fit time: 75ms
+#> Fit time: 69ms
#> Ranger result
#>
#> Call:
@@ -213,8 +214,8 @@
#> Target node size: 5
#> Variable importance mode: impurity
#> Splitrule: variance
-#> OOB prediction error (MSE): 5.779248
-#> R squared (OOB): 0.8408977
A list of all parsnip models across different CRAN packages can be found at tidymodels.org.

Data sets previously found in parsnip are now found in the modeldata package.
Other Changes
- tidyr >= 1.0.0 is now required.
- SVM models produced by kernlab now use the formula method. This change was due to how ksvm() made indicator variables for factor predictors (with one-hot encodings). Since the ordinary formula method did not do this, the data are passed as-is to ksvm() so that the results are closer to what one would get if ksvm() were called directly.
- MARS models produced by earth now use the formula method.
- Under-the-hood changes were made so that non-standard data arguments in the modeling packages can be accommodated. (#315)
New Features
- A new main argument was added to boost_tree() called stop_iter for early stopping. The xgb_train() function gained arguments for early stopping and a percentage of data to leave out for a validation set. (See the sketch after this list.)
- If fit() is used and the underlying model uses a formula, the actual formula is passed to the model (instead of a placeholder). This makes the model call better.
- A function named repair_call() was added. This can help change the underlying model's call object to better reflect what it would have been if the model function had been used directly (instead of via parsnip). This is only useful when the user chooses a formula interface and the model uses a formula interface. It will also be of limited use when a recipe is used to construct the feature set in workflows or tune.
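A minimal sketch of the new early-stopping argument (the argument values are arbitrary and not taken from the release notes; xgboost must be installed):

# stop_iter: stop boosting early when recent iterations do not improve
boost_tree(trees = 500, stop_iter = 10) %>%
  set_engine("xgboost") %>%
  set_mode("regression") %>%
  translate()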
boost_tree() %>%
  set_engine("xgboost") %>%
  set_mode("regression") %>%
  translate()

## Boosted Tree Model Specification (regression)
##
## Computational engine: xgboost
##
## Model fit template:
## parsnip::xgb_train(x = missing_arg(), y = missing_arg(), nthread = 1,
##     verbose = 0)

boost_tree() %>%
  set_engine("xgboost") %>%
  set_mode("classification") %>%
  translate()

## Boosted Tree Model Specification (classification)
##
## Computational engine: xgboost
##
## Model fit template:
## parsnip::xgb_train(x = missing_arg(), y = missing_arg(), nthread = 1,
##     verbose = 0)
boost_tree() %>%
  set_engine("C5.0") %>%
  set_mode("classification") %>%
  translate()

## Boosted Tree Model Specification (classification)
##
## Computational engine: C5.0
##
## Model fit template:
## parsnip::C5.0_train(x = missing_arg(), y = missing_arg(), weights = missing_arg())
Note that C50::C5.0() does not require factor
predictors to be converted to indicator variables.
boost_tree() %>%
  set_engine("spark") %>%
  set_mode("regression") %>%
  translate()

## Boosted Tree Model Specification (regression)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(),
##     type = "regression", seed = sample.int(10^5, 1))

boost_tree() %>%
  set_engine("spark") %>%
  set_mode("classification") %>%
  translate()

## Boosted Tree Model Specification (classification)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_gradient_boosted_trees(x = missing_arg(), formula = missing_arg(),
##     type = "classification", seed = sample.int(10^5, 1))
decision_tree() %>%
  set_engine("rpart") %>%
  set_mode("regression") %>%
  translate()

## Decision Tree Model Specification (regression)
##
## Computational engine: rpart
##
## Model fit template:
## rpart::rpart(formula = missing_arg(), data = missing_arg(), weights = missing_arg())

decision_tree() %>%
  set_engine("rpart") %>%
  set_mode("classification") %>%
  translate()

## Decision Tree Model Specification (classification)
##
## Computational engine: rpart
##
## Model fit template:
## rpart::rpart(formula = missing_arg(), data = missing_arg(), weights = missing_arg())
Note that rpart::rpart() does not require factor
predictors to be converted to indicator variables.
decision_tree() %>%
  set_engine("C5.0") %>%
  set_mode("classification") %>%
  translate()

## Decision Tree Model Specification (classification)
##
## Computational engine: C5.0
##
## Model fit template:
## parsnip::C5.0_train(x = missing_arg(), y = missing_arg(), weights = missing_arg(),
##     trials = 1)
Note that C50::C5.0() does not require factor
predictors to be converted to indicator variables.
decision_tree() %>%
  set_engine("spark") %>%
  set_mode("regression") %>%
  translate()

## Decision Tree Model Specification (regression)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_decision_tree_classifier(x = missing_arg(), formula = missing_arg(),
##     seed = sample.int(10^5, 1))

decision_tree() %>%
  set_engine("spark") %>%
  set_mode("classification") %>%
  translate()

## Decision Tree Model Specification (classification)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_decision_tree_classifier(x = missing_arg(), formula = missing_arg(),
##     seed = sample.int(10^5, 1))
If the formula Species ~ . were used:
@@ -206,9 +206,9 @@

.obs()   = 150
.lvls()  = c(setosa = 50, versicolor = 50, virginica = 50)
.facts() = 0
.y()     = <vector> (Species as a vector)
.x()     = <data.frame> (The other 4 columns as a data frame)
.dat()   = <data.frame> (The full data set)

To use these in a model fit, pass them to a model specification.
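For instance, a minimal sketch (the kknn engine, the classification mode, and the square-root rule are illustrative assumptions, not taken from this page):

# .obs(), the number of training set rows, is evaluated when fit() is called:
knn_spec <-
  nearest_neighbor(neighbors = floor(sqrt(.obs()))) %>%
  set_engine("kknn") %>%
  set_mode("classification")

# fit(knn_spec, Species ~ ., data = iris)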
diff --git a/docs/dev/reference/fit.html b/docs/dev/reference/fit.html
index 37b87cc8f..52522745e 100644
--- a/docs/dev/reference/fit.html
+++ b/docs/dev/reference/fit.html
@@ -263,7 +263,8 @@
linear_reg() %>%
  set_engine("lm") %>%
  set_mode("regression") %>%
  translate()

## Linear Regression Model Specification (regression)
##
## Computational engine: lm
##
## Model fit template:
## stats::lm(formula = missing_arg(), data = missing_arg(), weights = missing_arg())
linear_reg() %>%
  set_engine("glmnet") %>%
  set_mode("regression") %>%
  translate()

## Linear Regression Model Specification (regression)
##
## Computational engine: glmnet
##
## Model fit template:
## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(),
##     family = "gaussian")
For glmnet models, the full regularization path is always fit
regardless of the value given to penalty. Also, there is the option to
make predictions at several penalty values from the same fit using multi_predict().
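A minimal sketch of this behavior (the data and penalty values are arbitrary; glmnet must be installed):

lasso_fit <-
  linear_reg(penalty = 0.01, mixture = 1) %>%
  set_engine("glmnet") %>%
  fit(mpg ~ ., data = mtcars)

# predict() uses the penalty value stored in the specification:
# predict(lasso_fit, new_data = mtcars)

# multi_predict() returns predictions for several penalty values from the same fit:
# multi_predict(lasso_fit, new_data = mtcars, penalty = c(0.001, 0.01, 0.1))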
@@ -293,17 +291,16 @@
linear_reg() %>%
  set_engine("stan") %>%
  set_mode("regression") %>%
  translate()

## Linear Regression Model Specification (regression)
##
## Computational engine: stan
##
## Model fit template:
## rstanarm::stan_glm(formula = missing_arg(), data = missing_arg(),
##     weights = missing_arg(), family = stats::gaussian, refresh = 0)
Note that the refresh default prevents logging of the estimation
process. Changing this value in set_engine() will show the logs.
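For example (a minimal sketch; the refresh value is arbitrary):

# Report sampler progress every 100 iterations instead of suppressing it:
linear_reg() %>%
  set_engine("stan", refresh = 100) %>%
  set_mode("regression") %>%
  translate()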
linear_reg() %>%
  set_engine("spark") %>%
  set_mode("regression") %>%
  translate()

## Linear Regression Model Specification (regression)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_linear_regression(x = missing_arg(), formula = missing_arg(),
##     weight_col = missing_arg())
linear_reg() %>%
  set_engine("keras") %>%
  set_mode("regression") %>%
  translate()

## Linear Regression Model Specification (regression)
##
## Computational engine: keras
##
## Model fit template:
## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1,
##     act = "linear")
logistic_reg() %>%
  set_engine("glm") %>%
  set_mode("classification") %>%
  translate()

## Logistic Regression Model Specification (classification)
##
## Computational engine: glm
##
## Model fit template:
## stats::glm(formula = missing_arg(), data = missing_arg(), weights = missing_arg(),
##     family = stats::binomial)
logistic_reg() %>%
  set_engine("glmnet") %>%
  set_mode("classification") %>%
  translate()

## Logistic Regression Model Specification (classification)
##
## Computational engine: glmnet
##
## Model fit template:
## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(),
##     family = "binomial")
For glmnet models, the full regularization path is always fit
regardless of the value given to penalty. Also, there is the option to
make predictions at several penalty values from the same fit using multi_predict().
@@ -292,17 +290,16 @@
logistic_reg() %>%
  set_engine("stan") %>%
  set_mode("classification") %>%
  translate()

## Logistic Regression Model Specification (classification)
##
## Computational engine: stan
##
## Model fit template:
## rstanarm::stan_glm(formula = missing_arg(), data = missing_arg(),
##     weights = missing_arg(), family = stats::binomial, refresh = 0)
Note that the refresh default prevents logging of the estimation
process. Changing this value in set_engine() will show the logs.
logistic_reg() %>%
  set_engine("spark") %>%
  set_mode("classification") %>%
  translate()

## Logistic Regression Model Specification (classification)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_logistic_regression(x = missing_arg(), formula = missing_arg(),
##     weight_col = missing_arg(), family = "binomial")
logistic_reg() %>%
  set_engine("keras") %>%
  set_mode("classification") %>%
  translate()

## Logistic Regression Model Specification (classification)
##
## Computational engine: keras
##
## Model fit template:
## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1,
##     act = "linear")
mars() %>%
  set_engine("earth") %>%
  set_mode("regression") %>%
  translate()

## MARS Model Specification (regression)
##
## Computational engine: earth
##
## Model fit template:
## earth::earth(formula = missing_arg(), data = missing_arg(), weights = missing_arg(),
##     keepxy = TRUE)

mars() %>%
  set_engine("earth") %>%
  set_mode("classification") %>%
  translate()

## MARS Model Specification (classification)
##
## Engine-Specific Arguments:
##   glm = list(family = stats::binomial)
##
## Computational engine: earth
##
## Model fit template:
## earth::earth(formula = missing_arg(), data = missing_arg(), weights = missing_arg(),
##     glm = list(family = stats::binomial), keepxy = TRUE)
Note that, when the model is fit, the earth package only has its
namespace loaded. However, if multi_predict is used, the package is
diff --git a/docs/dev/reference/mlp.html b/docs/dev/reference/mlp.html
index 8d4bdcee1..a84fd2ae5 100644
--- a/docs/dev/reference/mlp.html
+++ b/docs/dev/reference/mlp.html
@@ -276,57 +276,53 @@
mlp() %>%
  set_engine("keras") %>%
  set_mode("regression") %>%
  translate()

## Single Layer Neural Network Specification (regression)
##
## Computational engine: keras
##
## Model fit template:
## parsnip::keras_mlp(x = missing_arg(), y = missing_arg())

mlp() %>%
  set_engine("keras") %>%
  set_mode("classification") %>%
  translate()

## Single Layer Neural Network Specification (classification)
##
## Computational engine: keras
##
## Model fit template:
## parsnip::keras_mlp(x = missing_arg(), y = missing_arg())
An error is thrown if both penalty and dropout are specified for
keras models.
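A hedged sketch of this constraint (the argument values are arbitrary):

# Fine: only one of the two regularization arguments is given
mlp(hidden_units = 10, dropout = 0.2) %>%
  set_engine("keras") %>%
  set_mode("classification")

# Not allowed: penalty and dropout cannot be combined for the keras engine
# mlp(hidden_units = 10, penalty = 0.01, dropout = 0.2) %>%
#   set_engine("keras") %>%
#   set_mode("classification") %>%
#   fit(Species ~ ., data = iris)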
mlp() %>%
  set_engine("nnet") %>%
  set_mode("regression") %>%
  translate()

## Single Layer Neural Network Specification (regression)
##
## Main Arguments:
##   hidden_units = 5
##
## Computational engine: nnet
##
## Model fit template:
## nnet::nnet(formula = missing_arg(), data = missing_arg(), weights = missing_arg(),
##     size = 5, trace = FALSE, linout = TRUE)

mlp() %>%
  set_engine("nnet") %>%
  set_mode("classification") %>%
  translate()

## Single Layer Neural Network Specification (classification)
##
## Main Arguments:
##   hidden_units = 5
##
## Computational engine: nnet
##
## Model fit template:
## nnet::nnet(formula = missing_arg(), data = missing_arg(), weights = missing_arg(),
##     size = 5, trace = FALSE, linout = FALSE)
multinom_reg() %>%
  set_engine("glmnet") %>%
  set_mode("classification") %>%
  translate()

## Multinomial Regression Model Specification (classification)
##
## Computational engine: glmnet
##
## Model fit template:
## glmnet::glmnet(x = missing_arg(), y = missing_arg(), weights = missing_arg(),
##     family = "multinomial")
For glmnet models, the full regularization path is always fit
regardless of the value given to penalty. Also, there is the option to
make predictions at several penalty values from the same fit using multi_predict().
@@ -277,45 +276,42 @@
multinom_reg() %>%
  set_engine("nnet") %>%
  set_mode("classification") %>%
  translate()

## Multinomial Regression Model Specification (classification)
##
## Computational engine: nnet
##
## Model fit template:
## nnet::multinom(formula = missing_arg(), data = missing_arg(),
##     weights = missing_arg(), trace = FALSE)
multinom_reg() %>%
  set_engine("spark") %>%
  set_mode("classification") %>%
  translate()

## Multinomial Regression Model Specification (classification)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_logistic_regression(x = missing_arg(), formula = missing_arg(),
##     weight_col = missing_arg(), family = "multinomial")
multinom_reg() %>%
  set_engine("keras") %>%
  set_mode("classification") %>%
  translate()

## Multinomial Regression Model Specification (classification)
##
## Computational engine: keras
##
## Model fit template:
## parsnip::keras_mlp(x = missing_arg(), y = missing_arg(), hidden_units = 1,
##     act = "linear")
nearest_neighbor() %>%
  set_engine("kknn") %>%
  set_mode("regression") %>%
  translate()

## K-Nearest Neighbor Model Specification (regression)
##
## Computational engine: kknn
##
## Model fit template:
## kknn::train.kknn(formula = missing_arg(), data = missing_arg(),
##     ks = 5)

nearest_neighbor() %>%
  set_engine("kknn") %>%
  set_mode("classification") %>%
  translate()

## K-Nearest Neighbor Model Specification (classification)
##
## Computational engine: kknn
##
## Model fit template:
## kknn::train.kknn(formula = missing_arg(), data = missing_arg(),
##     ks = 5)
For kknn, the underlying modeling function used is a restricted
version of train.kknn() and not kknn(). It is set up in this way so
diff --git a/docs/dev/reference/null_model.html b/docs/dev/reference/null_model.html
index c299fd299..f5890685b 100644
--- a/docs/dev/reference/null_model.html
+++ b/docs/dev/reference/null_model.html
@@ -171,25 +171,23 @@
null_model() %>%
  set_engine("parsnip") %>%
  set_mode("regression") %>%
  translate()

## Model Specification (regression)
##
## Computational engine: parsnip
##
## Model fit template:
## nullmodel(x = missing_arg(), y = missing_arg())

null_model() %>%
  set_engine("parsnip") %>%
  set_mode("classification") %>%
  translate()

## Model Specification (classification)
##
## Computational engine: parsnip
##
## Model fit template:
## nullmodel(x = missing_arg(), y = missing_arg())
rand_forest() %>%
  set_engine("ranger") %>%
  set_mode("regression") %>%
  translate()

## Random Forest Model Specification (regression)
##
## Computational engine: ranger
##
## Model fit template:
## ranger::ranger(formula = missing_arg(), data = missing_arg(),
##     case.weights = missing_arg(), num.threads = 1, verbose = FALSE,
##     seed = sample.int(10^5, 1))

rand_forest() %>%
  set_engine("ranger") %>%
  set_mode("classification") %>%
  translate()

## Random Forest Model Specification (classification)
##
## Computational engine: ranger
##
## Model fit template:
## ranger::ranger(formula = missing_arg(), data = missing_arg(),
##     case.weights = missing_arg(), num.threads = 1, verbose = FALSE,
##     seed = sample.int(10^5, 1), probability = TRUE)
Note that ranger::ranger() does not require
factor predictors to be converted to indicator variables.
rand_forest() %>%
  set_engine("randomForest") %>%
  set_mode("regression") %>%
  translate()

## Random Forest Model Specification (regression)
##
## Computational engine: randomForest
##
## Model fit template:
## randomForest::randomForest(x = missing_arg(), y = missing_arg())

rand_forest() %>%
  set_engine("randomForest") %>%
  set_mode("classification") %>%
  translate()

## Random Forest Model Specification (classification)
##
## Computational engine: randomForest
##
## Model fit template:
## randomForest::randomForest(x = missing_arg(), y = missing_arg())
Note that
randomForest::randomForest() does
not require factor predictors to be converted to indicator variables.
rand_forest() %>%
  set_engine("spark") %>%
  set_mode("regression") %>%
  translate()

## Random Forest Model Specification (regression)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_random_forest(x = missing_arg(), formula = missing_arg(),
##     type = "regression", seed = sample.int(10^5, 1))

rand_forest() %>%
  set_engine("spark") %>%
  set_mode("classification") %>%
  translate()

## Random Forest Model Specification (classification)
##
## Computational engine: spark
##
## Model fit template:
## sparklyr::ml_random_forest(x = missing_arg(), formula = missing_arg(),
##     type = "classification", seed = sample.int(10^5, 1))
These objects are imported from other packages. Follow the links below to see their documentation.
%>%
When the user passes a formula to fit() and the underlying model function
uses a formula, the call object produced by fit() may not be usable by
other functions. For example, some arguments may still be quosures and the
data portion of the call will not correspond to the original data.
repair_call(x, data)

x      A fitted parsnip model object.

data   A data object that is relevant to the call. In most cases, this is
       the data frame that was given to parsnip for the model fit (i.e.,
       the training set data).
A modified parsnip fitted model.
repair_call() can adjust the model object's call to be usable by other
functions and methods.
fitted_model <-
  linear_reg() %>%
  set_engine("lm", model = TRUE) %>%
  fit(mpg ~ ., data = mtcars)

# In this call, note that `data` is not `mtcars` and the `model = ~TRUE`
# indicates that the `model` argument is an `rlang` quosure.
fitted_model$fit$call
#> stats::lm(formula = mpg ~ ., data = data, model = ~TRUE)

# All better:
repair_call(fitted_model, mtcars)$fit$call
#> stats::lm(formula = mpg ~ ., data = mtcars, model = TRUE)
surv_reg() %>%
  set_engine("flexsurv") %>%
  set_mode("regression") %>%
  translate()

## Parametric Survival Regression Model Specification (regression)
##
## Computational engine: flexsurv
##
## Model fit template:
## flexsurv::flexsurvreg(formula = missing_arg(), data = missing_arg(),
##     weights = missing_arg())
surv_reg() %>%
  set_engine("survival") %>%
  set_mode("regression") %>%
  translate()

## Parametric Survival Regression Model Specification (regression)
##
## Computational engine: survival
##
## Model fit template:
## survival::survreg(formula = missing_arg(), data = missing_arg(),
##     weights = missing_arg(), model = TRUE)
Note that model = TRUE is needed to produce quantile predictions when
there is a stratification variable and can be overridden in other cases.
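A hedged sketch of overriding that default through set_engine() (illustrative only; keep model = TRUE when stratified quantile predictions are needed):

# Drop the stored model frame when it is not required:
surv_reg() %>%
  set_engine("survival", model = FALSE) %>%
  set_mode("regression") %>%
  translate()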
svm_poly() %>%
  set_engine("kernlab") %>%
  set_mode("regression") %>%
  translate()

## Polynomial Support Vector Machine Specification (regression)
##
## Computational engine: kernlab
##
## Model fit template:
## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "polydot")

svm_poly() %>%
  set_engine("kernlab") %>%
  set_mode("classification") %>%
  translate()

## Polynomial Support Vector Machine Specification (classification)
##
## Computational engine: kernlab
##
## Model fit template:
## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "polydot",
##     prob.model = TRUE)
svm_rbf() %>%
  set_engine("kernlab") %>%
  set_mode("regression") %>%
  translate()

## Radial Basis Function Support Vector Machine Specification (regression)
##
## Computational engine: kernlab
##
## Model fit template:
## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "rbfdot")

svm_rbf() %>%
  set_engine("kernlab") %>%
  set_mode("classification") %>%
  translate()

## Radial Basis Function Support Vector Machine Specification (classification)
##
## Computational engine: kernlab
##
## Model fit template:
## kernlab::ksvm(x = missing_arg(), data = missing_arg(), kernel = "rbfdot",
##     prob.model = TRUE)
svm_rbf() %>%
  set_engine("liquidSVM") %>%
  set_mode("regression") %>%
  translate()

## Radial Basis Function Support Vector Machine Specification (regression)
##
## Computational engine: liquidSVM
##
## Model fit template:
## liquidSVM::svm(x = missing_arg(), y = missing_arg(), folds = 1,
##     threads = 0)

svm_rbf() %>%
  set_engine("liquidSVM") %>%
  set_mode("classification") %>%
  translate()

## Radial Basis Function Support Vector Machine Specification (classification)
##
## Computational engine: liquidSVM
##
## Model fit template:
## liquidSVM::svm(x = missing_arg(), y = missing_arg(), folds = 1,
##     threads = 0)
Note that models created using the liquidSVM engine cannot be saved
like conventional R objects. The fit slot of the model_fit object
diff --git a/docs/dev/reference/tidy.nullmodel.html b/docs/dev/reference/tidy.nullmodel.html
index ddc40e72a..80b406d17 100644
--- a/docs/dev/reference/tidy.nullmodel.html
+++ b/docs/dev/reference/tidy.nullmodel.html
@@ -164,14 +164,14 @@
A tibble with column value.
#> # A tibble: 1 x 1
#>   value
#>   <dbl>
#> 1  20.1

#> # A tibble: 1 x 1
#>   value
#>   <chr>
#> 1 setosa