From afc49c9ad952edd5881a3e2f14a3503981d213c7 Mon Sep 17 00:00:00 2001 From: Nan Xiao Date: Mon, 2 Jan 2017 11:40:31 -0500 Subject: [PATCH] fix fused lasso issues --- NAMESPACE | 2 +- NEWS.md | 11 +- R/01-hdnom-models.R | 250 ++++++++++++++++------- R/02-hdnom-nomogram.R | 129 ++++-------- R/03-hdnom-validate.R | 42 ++-- R/04-hdnom-calibrate.R | 87 ++++---- R/05-hdnom-external-validate.R | 6 +- R/06-hdnom-external-calibrate.R | 6 +- R/07-hdnom-kmplot.R | 12 +- R/08-hdnom-logrank.R | 18 +- R/09-hdnom-compare-validate.R | 7 +- R/10-hdnom-compare-calibrate.R | 3 +- R/hdnom-package.R | 6 +- man/glmnet.survcurve.Rd | 2 +- man/hdcox.aenet.Rd | 11 +- man/hdcox.alasso.Rd | 9 +- man/hdcox.enet.Rd | 11 +- man/hdcox.flasso.Rd | 47 +++-- man/hdcox.lasso.Rd | 9 +- man/hdcox.mcp.Rd | 8 +- man/hdcox.mnet.Rd | 12 +- man/hdcox.scad.Rd | 8 +- man/hdcox.snet.Rd | 12 +- man/hdnom-package.Rd | 6 +- man/hdnom.calibrate.Rd | 23 ++- man/hdnom.external.calibrate.Rd | 6 +- man/hdnom.external.validate.Rd | 6 +- man/hdnom.kmplot.Rd | 12 +- man/hdnom.logrank.Rd | 12 +- man/hdnom.nomogram.Rd | 29 +-- man/hdnom.validate.Rd | 16 +- man/hdnom.varinfo.Rd | 6 +- man/ncvreg.survcurve.Rd | 2 +- man/penalized.calibrate.internal.pred.Rd | 2 +- man/penalized.tune.lambda.Rd | 16 ++ man/penalized.validate.internal.Rd | 2 +- man/predict.hdcox.model.Rd | 8 +- man/print.hdcox.model.Rd | 4 +- vignettes/aenetfit.rds | Bin 1151 -> 0 bytes vignettes/fit.rds | Bin 0 -> 1153 bytes vignettes/hdnom-intro.Rmd | 30 +-- 41 files changed, 503 insertions(+), 385 deletions(-) create mode 100644 man/penalized.tune.lambda.Rd delete mode 100644 vignettes/aenetfit.rds create mode 100644 vignettes/fit.rds diff --git a/NAMESPACE b/NAMESPACE index 6760400..5f8046c 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -76,7 +76,7 @@ importFrom(gridExtra,arrangeGrob) importFrom(gridExtra,grid.arrange) importFrom(ncvreg,cv.ncvsurv) importFrom(ncvreg,ncvsurv) -importFrom(penalized,optL1) +importFrom(penalized,cvl) importFrom(penalized,penalized) 
importFrom(penalized,survival) importFrom(rms,nomogram) diff --git a/NEWS.md b/NEWS.md index f72bac1..1acf5b0 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,9 +1,16 @@ -# hdnom 4.6 (2016-12-29) +# hdnom 4.6 (2017-01-02) ## Bug Fixes - Fixed issues in parameter tuning and cross-validation procedures for -fused lasso models. +fused lasso models ([commit link](link)). The user visible change is that +two parameters `lambda1` and `lambda2` instead of a single "lambda" are now +required for fitting, validating, and calibrating fused lasso models. + +## Improvements + +- The argument `lambda` in `hdnom.nomogram` is no longer needed and has +been deprecated. # hdnom 4.5 (2016-12-24) diff --git a/R/01-hdnom-models.R b/R/01-hdnom-models.R index 764e14f..fe17fdb 100644 --- a/R/01-hdnom-models.R +++ b/R/01-hdnom-models.R @@ -77,9 +77,9 @@ glmnet.tune.alpha = function(..., alphas, seed, parallel) { #' # registerDoParallel(detectCores()) #' # then set hdcox.aenet(..., parallel = TRUE). #' -#' # Fit Cox model by adaptive elastic-net penalization -#' aenetfit = hdcox.aenet(x, y, nfolds = 3, alphas = c(0.3, 0.7), -#' rule = "lambda.1se", seed = c(5, 7)) +#' # Fit Cox model with adaptive elastic-net penalty +#' fit = hdcox.aenet(x, y, nfolds = 3, alphas = c(0.3, 0.7), +#' rule = "lambda.1se", seed = c(5, 7)) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -87,9 +87,8 @@ glmnet.tune.alpha = function(..., alphas, seed, parallel) { #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(aenetfit$aenet_model, model.type = "aenet", -#' x, time, event, x.df, -#' lambda = aenetfit$aenet_best_lambda, pred.at = 365 * 2, +#' nom = hdnom.nomogram(fit$aenet_model, model.type = "aenet", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' plot(nom) @@ -196,8 +195,8 @@ hdcox.aenet = function(x, y, nfolds = 5L, alphas = seq(0.05, 0.95, 0.05), #' event = smart$EVENT #' y = 
Surv(time, event) #' -#' # Fit Cox model by adaptive lasso penalization -#' alassofit = hdcox.alasso(x, y, nfolds = 3, rule = "lambda.1se", seed = c(7, 11)) +#' # Fit Cox model with adaptive lasso penalty +#' fit = hdcox.alasso(x, y, nfolds = 3, rule = "lambda.1se", seed = c(7, 11)) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -205,9 +204,8 @@ hdcox.aenet = function(x, y, nfolds = 5L, alphas = seq(0.05, 0.95, 0.05), #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(alassofit$alasso_model, model.type = "alasso", -#' x, time, event, x.df, -#' lambda = alassofit$alasso_best_lambda, pred.at = 365 * 2, +#' nom = hdnom.nomogram(fit$alasso_model, model.type = "alasso", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' plot(nom) @@ -306,9 +304,9 @@ hdcox.alasso = function(x, y, nfolds = 5L, #' # registerDoParallel(detectCores()) #' # then set hdcox.enet(..., parallel = TRUE). 
#' -#' # Fit Cox model by elastic-net penalization -#' enetfit = hdcox.enet(x, y, nfolds = 3, alphas = c(0.3, 0.7), -#' rule = "lambda.1se", seed = 11) +#' # Fit Cox model with elastic-net penalty +#' fit = hdcox.enet(x, y, nfolds = 3, alphas = c(0.3, 0.7), +#' rule = "lambda.1se", seed = 11) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -316,9 +314,8 @@ hdcox.alasso = function(x, y, nfolds = 5L, #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(enetfit$enet_model, model.type = "enet", -#' x, time, event, x.df, -#' lambda = enetfit$enet_best_lambda, pred.at = 365 * 2, +#' nom = hdnom.nomogram(fit$enet_model, model.type = "enet", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' plot(nom) @@ -384,8 +381,8 @@ hdcox.enet = function(x, y, nfolds = 5L, alphas = seq(0.05, 0.95, 0.05), #' event = smart$EVENT #' y = Surv(time, event) #' -#' # Fit Cox model by lasso penalization -#' lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +#' # Fit Cox model with lasso penalty +#' fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -393,9 +390,8 @@ hdcox.enet = function(x, y, nfolds = 5L, alphas = seq(0.05, 0.95, 0.05), #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(lassofit$lasso_model, model.type = "lasso", -#' x, time, event, x.df, -#' lambda = lassofit$lasso_best_lambda, pred.at = 365 * 2, +#' nom = hdnom.nomogram(fit$lasso_model, model.type = "lasso", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' plot(nom) @@ -430,26 +426,108 @@ hdcox.lasso = function(x, y, nfolds = 5L, } +#' Automatic lambda tuning function for fused lasso by k-fold cross-validation +#' +#' @return best model object, best lambda1, and best 
lambda2 +#' +#' @importFrom penalized cvl +#' @importFrom foreach %dopar% +#' @importFrom foreach foreach +#' +#' @keywords internal +penalized.tune.lambda = function(..., lambda1, lambda2, seed, trace, parallel) { + + nlambda1 = length(lambda1) + nlambda2 = length(lambda2) + + if (!parallel) { + + model_list = vector('list', nlambda1) + for (k in 1L:nlambda1) model_list[[k]] = vector('list', nlambda2) + + for (i in 1L:nlambda1) { + for (j in 1L:nlambda2) { + set.seed(seed) + if (trace) cat('Starting: lambda1 =', lambda1[i], 'lambda2 =', lambda2[j], '\n') + model_list[[i]][[j]] = + penalized::cvl(..., lambda1 = lambda1[i], lambda2 = lambda2[j]) + } + } + + # store lambda combinations + for (i in 1L:nlambda1) { + for (j in 1L:nlambda2) { + model_list[[i]][[j]][['lambda']] = + c('lambda1' = lambda1[i], 'lambda2' = lambda2[j]) + } + } + + simple_model_list = unlist(model_list, recursive = FALSE) + + } else { + + model_list <- foreach(lambda1 = lambda1) %:% + foreach(lambda2 = lambda2) %dopar% { + set.seed(seed) + penalized::cvl(..., lambda1 = lambda1, lambda2 = lambda2) + } + + # store lambda combinations + for (i in 1L:nlambda1) { + for (j in 1L:nlambda2) { + model_list[[i]][[j]][['lambda']] = + c('lambda1' = lambda1[i], 'lambda2' = lambda2[j]) + } + } + + simple_model_list = unlist(model_list, recursive = FALSE) + + } + + # choose model for best lambda combination + # criterion: cross-validated likelihood + max_cvl = which.max(unlist(sapply(simple_model_list, '[', 'cvl'))) + + return(list('best.model' = simple_model_list[[max_cvl]], + 'best.lambda1' = simple_model_list[[max_cvl]][['lambda']]['lambda1'], + 'best.lambda2' = simple_model_list[[max_cvl]][['lambda']]['lambda2'])) + +} + #' Fused Lasso Model Selection for High-Dimensional Cox Models #' #' Automatic fused lasso model selection for high-dimensional -#' Cox models, evaluated by penalized partial-likelihood. +#' Cox models, evaluated by cross-validated likelihood. #' #' @param x Data matrix. 
#' @param y Response matrix made by \code{\link[survival]{Surv}}. #' @param nfolds Fold numbers of cross-validation. +#' @param lambda1 Vector of lambda1 candidates. +#' Default is \code{0.001, 0.05, 0.5, 1, 5}. +#' @param lambda2 Vector of lambda2 candidates. +#' Default is \code{0.001, 0.01, 0.5}. +#' @param maxiter The maximum number of iterations allowed. +#' Default is \code{25}. +#' @param epsilon The convergence criterion. +#' Default is \code{1e-3}. #' @param seed A random seed for cross-validation fold division. #' @param trace Output the cross-validation parameter tuning #' progress or not. Default is \code{FALSE}. +#' @param parallel Logical. Enable parallel parameter tuning or not, +#' default is {FALSE}. To enable parallel tuning, load the +#' \code{doParallel} package and run \code{registerDoParallel()} +#' with the number of CPU cores before calling this function. +#' @param ... other parameters to \code{\link[penalized]{cvl}} +#' and \code{\link[penalized]{penalized}}. #' #' @note The cross-validation procedure used in this function is the -#' so-called approximated cross-validation provided by the \code{penalized} +#' \emph{approximated cross-validation} provided by the \code{penalized} #' package. Be careful dealing with the results since they might be more -#' optimistic than fully fitting the model. This cross-validation method -#' is more suitable for datasets with larger number of observations, +#' optimistic than a traditional CV procedure. This cross-validation +#' method is more suitable for datasets with larger number of observations, #' and a higher number of cross-validation folds. 
#' -#' @importFrom penalized optL1 +#' @importFrom penalized penalized #' #' @export hdcox.flasso #' @@ -459,13 +537,13 @@ hdcox.lasso = function(x, y, nfolds = 5L, #' #' # Load imputed SMART data; only use the first 120 samples #' data("smart") -#' x = as.matrix(smart[, -c(1, 2)])[1:140, ] -#' time = smart$TEVENT[1:140] -#' event = smart$EVENT[1:140] +#' x = as.matrix(smart[, -c(1, 2)])[1:120, ] +#' time = smart$TEVENT[1:120] +#' event = smart$EVENT[1:120] #' y = Surv(time, event) #' -#' # Fit Cox model by fused lasso penalization -#' flassofit = hdcox.flasso(x, y, nfolds = 3, seed = 11) +#' # Fit Cox model with fused lasso penalty +#' fit = hdcox.flasso(x, y, nfolds = 3, seed = 11) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -473,27 +551,46 @@ hdcox.lasso = function(x, y, nfolds = 5L, #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(flassofit$flasso_model, model.type = "flasso", -#' x, time, event, x.df, -#' lambda = flassofit$flasso_best_lambda, pred.at = 365 * 2, +#' nom = hdnom.nomogram(fit$flasso_model, model.type = "flasso", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' plot(nom) hdcox.flasso = function(x, y, nfolds = 5L, - seed = 1001, trace = FALSE) { + lambda1 = c(0.001, 0.05, 0.5, 1, 5), + lambda2 = c(0.001, 0.01, 0.5), + maxiter = 25, epsilon = 1e-3, + seed = 1001, trace = FALSE, parallel = FALSE, ...) { + + if (trace) cat('Starting cross-validation...\n') + flasso_cv = penalized.tune.lambda( + response = y, penalized = x, fold = nfolds, + lambda1 = lambda1, lambda2 = lambda2, + maxiter = maxiter, epsilon = epsilon, + seed = seed, trace = trace, parallel = parallel, + fusedl = TRUE, standardize = TRUE, model = 'cox', ...) 
+ + flasso_best_lambda1 = flasso_cv$'best.lambda1' + flasso_best_lambda2 = flasso_cv$'best.lambda2' - set.seed(seed) - flasso_full = optL1(response = y, penalized = x, fusedl = TRUE, - standardize = TRUE, model = 'cox', fold = nfolds, - trace = trace) - - if (all(abs(flasso_full$fullfit@penalized) < .Machine$double.eps)) + # fit the model on all the data use the parameters got by CV + if (trace) cat('Fitting fused lasso model with full data...\n') + flasso_full = penalized( + response = y, penalized = x, + lambda1 = flasso_best_lambda1, + lambda2 = flasso_best_lambda2, + maxiter = maxiter, epsilon = epsilon, + trace = trace, + fusedl = TRUE, standardize = FALSE, model = 'cox', ...) + + if (all(abs(flasso_full@penalized) < .Machine$double.eps)) stop('Null model produced by the full fit (all coefficients are zero). - Please try to tune seed, nfolds, or increase sample size.') + Please try changing the seed, nfolds, or increase sample size.') coxflasso_model = list('seed' = seed, - 'flasso_best_lambda' = flasso_full$lambda, - 'flasso_model' = flasso_full$fullfit) + 'flasso_best_lambda1' = flasso_best_lambda1, + 'flasso_best_lambda2' = flasso_best_lambda2, + 'flasso_model' = flasso_full) class(coxflasso_model) = c('hdcox.model', 'hdcox.model.flasso') @@ -585,8 +682,8 @@ ncvreg.tune.gamma = function(..., gammas, seed, parallel) { #' event = smart$EVENT[1:150] #' y = Surv(time, event) #' -#' # Fit Cox model by MCP penalization -#' mcpfit = hdcox.mcp(x, y, nfolds = 3, gammas = c(2.1, 3), seed = 1001) +#' # Fit Cox model with MCP penalty +#' fit = hdcox.mcp(x, y, nfolds = 3, gammas = c(2.1, 3), seed = 1001) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -594,8 +691,8 @@ ncvreg.tune.gamma = function(..., gammas, seed, parallel) { #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(mcpfit$mcp_model, model.type = "mcp", x, time, event, x.df, -#' lambda = mcpfit$mcp_best_lambda, pred.at = 365 * 
2, +#' nom = hdnom.nomogram(fit$mcp_model, model.type = "mcp", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' plot(nom) hdcox.mcp = function(x, y, nfolds = 5L, gammas = c(1.01, 1.7, 3, 100), @@ -716,9 +813,9 @@ ncvreg.tune.gamma.alpha = function(..., gammas, alphas, seed, parallel) { #' event = smart$EVENT[1:120] #' y = Surv(time, event) #' -#' # Fit Cox model by Mnet penalization -#' mnetfit = hdcox.mnet(x, y, nfolds = 3, gammas = 3, -#' alphas = c(0.3, 0.8), seed = 1010) +#' # Fit Cox model with Mnet penalty +#' fit = hdcox.mnet(x, y, nfolds = 3, gammas = 3, +#' alphas = c(0.3, 0.8), seed = 1010) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -726,10 +823,8 @@ ncvreg.tune.gamma.alpha = function(..., gammas, alphas, seed, parallel) { #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(mnetfit$mnet_model, model.type = "mnet", -#' x, time, event, x.df, -#' lambda = mnetfit$mnet_best_lambda, -#' pred.at = 365 * 2, +#' nom = hdnom.nomogram(fit$mnet_model, model.type = "mnet", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' plot(nom) @@ -804,8 +899,8 @@ hdcox.mnet = function(x, y, nfolds = 5L, gammas = c(1.01, 1.7, 3, 100), #' event = smart$EVENT[1:120] #' y = Surv(time, event) #' -#' # Fit Cox model by SCAD penalization -#' scadfit = hdcox.scad(x, y, nfolds = 3, gammas = c(3.7, 5), seed = 1010) +#' # Fit Cox model with SCAD penalty +#' fit = hdcox.scad(x, y, nfolds = 3, gammas = c(3.7, 5), seed = 1010) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -813,8 +908,8 @@ hdcox.mnet = function(x, y, nfolds = 5L, gammas = c(1.01, 1.7, 3, 100), #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(scadfit$scad_model, model.type = "scad", x, time, event, x.df, -#' lambda = scadfit$scad_best_lambda, pred.at = 
365 * 2, +#' nom = hdnom.nomogram(fit$scad_model, model.type = "scad", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' plot(nom) @@ -883,9 +978,9 @@ hdcox.scad = function(x, y, nfolds = 5L, gammas = c(2.01, 2.3, 3.7, 200), #' event = smart$EVENT[1:120] #' y = Surv(time, event) #' -#' # Fit Cox model by Snet penalization -#' snetfit = hdcox.snet(x, y, nfolds = 3, gammas = 3.7, -#' alphas = c(0.3, 0.8), seed = 1010) +#' # Fit Cox model with Snet penalty +#' fit = hdcox.snet(x, y, nfolds = 3, gammas = 3.7, +#' alphas = c(0.3, 0.8), seed = 1010) #' #' # Prepare data for hdnom.nomogram #' x.df = as.data.frame(x) @@ -893,10 +988,8 @@ hdcox.scad = function(x, y, nfolds = 5L, gammas = c(2.01, 2.3, 3.7, 200), #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(snetfit$snet_model, model.type = "snet", -#' x, time, event, x.df, -#' lambda = snetfit$snet_best_lambda, -#' pred.at = 365 * 2, +#' nom = hdnom.nomogram(fit$snet_model, model.type = "snet", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' plot(nom) @@ -954,8 +1047,8 @@ hdcox.snet = function(x, y, nfolds = 5L, gammas = c(2.01, 2.3, 3.7, 200), #' @param pred.at Time point at which prediction should take place. #' @param ... Other parameters (not used). #' -#' @return A \code{nrow(newx) x length(pred.at)} matrix containing overall -#' survival probablity. +#' @return A \code{nrow(newx) x length(pred.at)} matrix containing +#' overall survival probablity. 
#' #' @method predict hdcox.model #' @@ -974,8 +1067,8 @@ hdcox.snet = function(x, y, nfolds = 5L, gammas = c(2.01, 2.3, 3.7, 200), #' event = smart$EVENT #' y = Surv(time, event) #' -#' lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) -#' predict(lassofit, x, y, newx = x[101:105, ], pred.at = 1:10 * 365) +#' fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +#' predict(fit, x, y, newx = x[101:105, ], pred.at = 1:10 * 365) predict.hdcox.model = function(object, x, y, newx, pred.at, ...) { if (!('hdcox.model' %in% class(object))) @@ -985,7 +1078,7 @@ predict.hdcox.model = function(object, x, y, newx, pred.at, ...) { if (!('matrix' %in% class(newx))) stop('newx must be a matrix') - time = y[, 1L] + time = y[, 1L] event = y[, 2L] obj.type = switch(model.type, @@ -1050,9 +1143,9 @@ predict.hdcox.model = function(object, x, y, newx, pred.at, ...) { #' event = smart$EVENT #' y = Surv(time, event) #' -#' # Fit Cox model by lasso penalization -#' lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) -#' hdnom.varinfo(lassofit, x) +#' # Fit Cox model with lasso penalty +#' fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +#' hdnom.varinfo(fit, x) hdnom.varinfo = function(object, x) { if (!('hdcox.model' %in% class(object))) @@ -1134,8 +1227,8 @@ hdnom.varinfo = function(object, x) { #' event = smart$EVENT #' y = Surv(time, event) #' -#' lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) -#' print(lassofit) +#' fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +#' print(fit) print.hdcox.model = function(x, ...) { if (!('hdcox.model' %in% class(x))) @@ -1208,7 +1301,8 @@ print.hdcox.model = function(x, ...) 
{ cat('High-Dimensional Cox Model Object\n') cat('Random seed:', x$'seed', '\n') cat('Model type: fused lasso\n') - cat('Best lambda:', x$'flasso_best_lambda', '\n') + cat('Best lambda1:', x$'flasso_best_lambda1', '\n') + cat('Best lambda2:', x$'flasso_best_lambda2', '\n') } ) diff --git a/R/02-hdnom-nomogram.R b/R/02-hdnom-nomogram.R index 8feded3..8d536b3 100644 --- a/R/02-hdnom-nomogram.R +++ b/R/02-hdnom-nomogram.R @@ -12,17 +12,21 @@ #' @param event Status indicator, normally 0 = alive, 1 = dead. #' Must be of the same length with the number of rows as \code{x}. #' @param ddist Data frame version of x, made by \code{\link[rms]{datadist}}. -#' @param lambda Value of the penalty parameter lambda in -#' \code{\link[glmnet]{glmnet}} or \code{\link[ncvreg]{ncvsurv}}. -#' Required except when \code{model.type == "flasso"}. -#' We will use the selected variables at the provided \code{lambda} to -#' build the nomogram, and make predictions. -#' See the example for choosing a proper lambda value extracted -#' from cross-validation results. #' @param pred.at Time point at which to plot nomogram prediction axis. #' @param fun.at Function values to label on axis. #' @param funlabel Label for \code{fun} axis. #' +#' @note We will try to use the value of the selected penalty +#' parameter (e.g. lambda, alpha) in the model object fitted by +#' \code{\link[glmnet]{glmnet}}, \code{\link[ncvreg]{ncvsurv}}, or +#' \code{\link[penalized]{penalized}}. The selected variables under +#' the penalty parameter will be used to build the nomogram +#' and make predictions. This means, if you use routines other +#' than \code{hdcox.*} functions to fit the model, please +#' make sure that there is only one set of necessary parameters +#' (e.g. only a single lambda should be in glmnet objects intead +#' of a vector of multiple lambdas) in the fitted model object. 
+#' #' @export hdnom.nomogram #' #' @importFrom rms ols nomogram @@ -39,7 +43,7 @@ #' y = Surv(time, event) #' #' # Fit penalized Cox model with lasso penalty -#' lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +#' fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) #' #' # Prepare data needed for plotting nomogram #' x.df = as.data.frame(x) @@ -47,79 +51,19 @@ #' options(datadist = "dd") #' #' # Generate hdnom.nomogram objects and plot nomogram -#' nom = hdnom.nomogram(lassofit$lasso_model, model.type = "lasso", -#' x, time, event, x.df, -#' lambda = lassofit$lasso_best_lambda, pred.at = 365 * 2, +#' nom = hdnom.nomogram(fit$lasso_model, model.type = "lasso", +#' x, time, event, x.df, pred.at = 365 * 2, #' funlabel = "2-Year Overall Survival Probability") #' #' print(nom) #' plot(nom) -# ### Testing fused lasso models ### -# library("penalized") -# library("survival") -# library("rms") -# -# # Load imputed SMART data -# data(smart) -# x = as.matrix(smart[, -c(1, 2)]) -# x = x[1:500, ] -# time = smart$TEVENT[1:500] -# event = smart$EVENT[1:500] -# x.df = as.data.frame(x) -# dd = datadist(x.df) -# options(datadist = "dd") -# -# # Fit penalized Cox model (fused lasso penalty) with penalized package -# set.seed(1010) -# cvfit = optL1(response = Surv(time, event), penalized = x, fusedl = TRUE, -# standardize = TRUE, model = 'cox', fold = 3, trace = TRUE) -# -# # Generate hdnom.nomogram objects and plot nomogram -# nom = hdnom.nomogram(cvfit$fullfit, model.type = "flasso", -# x, time, event, x.df, pred.at = 365 * 2, -# funlabel = "2-Year Overall Survival Probability") -# -# print(nom) -# plot(nom) -# -# ### Testing SCAD models ### -# library("ncvreg") -# -# cvfit = cv.ncvsurv(x, Surv(time, event), -# model = 'cox', penalty = 'SCAD', -# alpha = 1, nfolds = 5, -# seed = 1010, trace = TRUE) -# fit = ncvsurv(x, Surv(time, event), model = 'cox', penalty = 'SCAD', alpha = 1) -# -# nom = hdnom.nomogram(fit, model.type = "scad", x, time, 
event, x.df, -# lambda = cvfit$lambda.min, pred.at = 365 * 2, -# funlabel = "2-Year Overall Survival Probability") -# -# print(nom) -# plot(nom) -# -# ### Testing Mnet models ### -# cvfit = cv.ncvsurv(x, Surv(time, event), -# model = 'cox', penalty = 'MCP', -# alpha = 0.5, nfolds = 5, -# seed = 1010, trace = TRUE) -# fit = ncvsurv(x, Surv(time, event), model = 'cox', penalty = 'MCP', alpha = 0.5) -# -# # Generate hdnom.nomogram objects and plot nomogram -# nom = hdnom.nomogram(fit, model.type = "mnet", x, time, event, x.df, -# lambda = cvfit$lambda.min, pred.at = 365 * 2, -# funlabel = "2-Year Overall Survival Probability") -# -# print(nom) -# plot(nom) hdnom.nomogram = function(object, model.type = c('lasso', 'alasso', 'flasso', 'enet', 'aenet', 'mcp', 'mnet', 'scad', 'snet'), x, time, event, ddist, - lambda = NULL, pred.at = NULL, - fun.at = NULL, funlabel = NULL) { + pred.at = NULL, fun.at = NULL, funlabel = NULL) { model.type = match.arg(model.type) @@ -133,13 +77,16 @@ hdnom.nomogram = function(object, if (!all(c('coxnet', 'glmnet') %in% class(object))) stop('object class must be "glmnet" and "coxnet"') - if (is.null(lambda)) stop('Missing argument lambda') + if (length(object$'lambda') != 1L) + stop('There should be one and only one lambda in the model object') - glmnet_pred_lp = as.vector(predict(object, newx = x, s = lambda, type = 'link')) + glmnet_pred_lp = as.vector( + predict(object, newx = x, s = object$'lambda', type = 'link')) - all_vars = rownames(object$beta) + all_vars = rownames(object$'beta') selected_vars = - all_vars[which(as.vector(abs(coef(object, s = lambda)) > .Machine$double.eps))] + all_vars[which(as.vector( + abs(coef(object, s = object$'lambda')) > .Machine$double.eps))] ols_formula = paste('glmnet_pred_lp ~', paste(paste('`', selected_vars, '`', sep = ''), collapse = ' + ')) @@ -154,7 +101,7 @@ hdnom.nomogram = function(object, survtime_at_idx = names(survtime_at) survcurve = glmnet.survcurve(object = object, time = time, event = 
event, - x = x, survtime = survtime_ones, lambda = lambda) + x = x, survtime = survtime_ones) } @@ -163,11 +110,9 @@ hdnom.nomogram = function(object, if (!all(c('ncvsurv', 'ncvreg') %in% class(object))) stop('object class must be "ncvreg" and "ncvsurv"') - if (is.null(lambda)) stop('Missing argument lambda') - ncvreg_pred_lp = predict(object, X = x, type = 'link') - all_vars = rownames(object$beta) + all_vars = rownames(object$'beta') selected_vars = all_vars[which(as.vector(abs(coef(object)) > .Machine$double.eps))] ols_formula = paste('ncvreg_pred_lp ~', @@ -184,7 +129,7 @@ hdnom.nomogram = function(object, survtime_at_idx = names(survtime_at) survcurve = ncvreg.survcurve(object = object, time = time, event = event, - x = x, survtime = survtime_ones, lambda = lambda) + x = x, survtime = survtime_ones) } @@ -193,7 +138,6 @@ hdnom.nomogram = function(object, if (!('penfit' %in% class(object))) stop('object class must be "penfit"') - lambda = object@lambda1 penalized_pred_lp = as.numeric(object@lin.pred) all_vars = colnames(x) @@ -228,9 +172,12 @@ hdnom.nomogram = function(object, if (is.null(funlabel)) funlabel = paste('Overall Survival Probability at Time', pred.at) - nom = list('ols_fit' = ols_fit, 'lambda' = lambda, 'survcurve' = survcurve, - 'bhfun' = bhfun, 'pred.at' = pred.at, - 'fun.at' = fun.at, 'funlabel' = funlabel) + nom = list('ols_fit' = ols_fit, + 'survcurve' = survcurve, + 'bhfun' = bhfun, + 'pred.at' = pred.at, + 'fun.at' = fun.at, + 'funlabel' = funlabel) class(nom) = 'hdnom.nomogram' @@ -246,10 +193,10 @@ hdnom.nomogram = function(object, #' linear predictors for all samples #' #' @keywords internal -glmnet.survcurve = function(object, time, event, x, survtime, lambda) { +glmnet.survcurve = function(object, time, event, x, survtime) { lp = as.numeric(predict(object, newx = data.matrix(x), - s = lambda, type = 'link')) + s = object$'lambda', type = 'link')) basesurv = glmnet.basesurv(time, event, lp, sort(survtime)) p = exp(exp(lp) %*% 
(-t(basesurv$cumulative_base_hazard))) colnames(p) = names(sort(survtime)) @@ -302,7 +249,7 @@ glmnet.basesurv = function(time, event, lp, #' linear predictors for all samples #' #' @keywords internal -ncvreg.survcurve = function(object, time, event, x, survtime, lambda) { +ncvreg.survcurve = function(object, time, event, x, survtime) { lp = as.numeric(predict(object, X = data.matrix(x), type = 'link')) basesurv = ncvreg.basesurv(time, event, lp, sort(survtime)) @@ -422,8 +369,8 @@ print.hdnom.nomogram = function(x, ...) { if (class(x) != 'hdnom.nomogram') stop('object class must be "hdnom.nomogram"') - nom = nomogram(fit = x$ols_fit, fun = x$bhfun, - fun.at = x$fun.at, funlabel = x$funlabel, + nom = nomogram(fit = x$'ols_fit', fun = x$'bhfun', + fun.at = x$'fun.at', funlabel = x$'funlabel', lp = TRUE, vnames = 'labels', ...) print(nom) @@ -450,8 +397,8 @@ plot.hdnom.nomogram = function(x, ...) { if (class(x) != 'hdnom.nomogram') stop('object class must be "hdnom.nomogram"') - nom = nomogram(fit = x$ols_fit, fun = x$bhfun, - fun.at = x$fun.at, funlabel = x$funlabel, + nom = nomogram(fit = x$'ols_fit', fun = x$'bhfun', + fun.at = x$'fun.at', funlabel = x$'funlabel', lp = TRUE, vnames = 'labels', ...) plot(nom) diff --git a/R/03-hdnom-validate.R b/R/03-hdnom-validate.R index 3c53f59..860b522 100644 --- a/R/03-hdnom-validate.R +++ b/R/03-hdnom-validate.R @@ -24,6 +24,8 @@ #' From the built \emph{adaptive lasso} or \emph{adaptive elastic-net} model. #' @param gamma Value of the model parameter gamma for #' MCP/SCAD/Mnet/Snet models. +#' @param lambda1 Value of the penalty parameter lambda1 for fused lasso model. +#' @param lambda2 Value of the penalty parameter lambda2 for fused lasso model. #' @param method Validation method. #' Could be \code{"bootstrap"}, \code{"cv"}, or \code{"repeated.cv"}. #' @param boot.times Number of repetitions for bootstrap. 
@@ -69,27 +71,27 @@ #' y = Surv(time, event) #' #' # Fit penalized Cox model with lasso penalty -#' lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +#' fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) #' #' # Model validation by bootstrap with time-dependent AUC #' # Normally boot.times should be set to 200 or more, #' # we set it to 3 here only to save example running time. #' val.boot = hdnom.validate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$lasso_best_lambda, +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "bootstrap", boot.times = 3, #' tauc.type = "UNO", tauc.time = seq(0.25, 2, 0.25) * 365, #' seed = 1010) #' #' # Model validation by 5-fold cross-validation with time-dependent AUC #' val.cv = hdnom.validate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$lasso_best_lambda, +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "cv", nfolds = 5, #' tauc.type = "UNO", tauc.time = seq(0.25, 2, 0.25) * 365, #' seed = 1010) #' #' # Model validation by repeated cross-validation with time-dependent AUC #' val.repcv = hdnom.validate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$lasso_best_lambda, +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "repeated.cv", nfolds = 5, rep.times = 3, #' tauc.type = "UNO", tauc.time = seq(0.25, 2, 0.25) * 365, #' seed = 1010) @@ -122,7 +124,7 @@ # # set.seed(1010) # val.boot = hdnom.validate(x, time, event, model.type = "flasso", -# lambda = 60, +# lambda1 = 5, lambda2 = 2, # method = "bootstrap", boot.times = 10, # tauc.type = "UNO", tauc.time = seq(0.25, 2, 0.25) * 365, # seed = 1010) @@ -156,6 +158,7 @@ hdnom.validate = function(x, time, event, 'mcp', 'mnet', 'scad', 'snet'), alpha, lambda, pen.factor = NULL, gamma, + lambda1, lambda2, method = c('bootstrap', 'cv', 'repeated.cv'), boot.times = NULL, nfolds = NULL, rep.times = NULL, tauc.type = c("CD", "SZ", "UNO"), tauc.time, @@ -215,7 
+218,7 @@ hdnom.validate = function(x, time, event, tauc[[i]] = penalized.validate.internal( x_tr = x_tr, x_te = x_te, y_tr = y_tr, y_te = y_te, - lambda = lambda, + lambda1 = lambda1, lambda2 = lambda2, tauc.type = tauc.type, tauc.time = tauc.time ) } @@ -271,7 +274,7 @@ hdnom.validate = function(x, time, event, tauc[[i]] = penalized.validate.internal( x_tr = x_tr, x_te = x_te, y_tr = y_tr, y_te = y_te, - lambda = lambda, + lambda1 = lambda1, lambda2 = lambda2, tauc.type = tauc.type, tauc.time = tauc.time ) } @@ -332,7 +335,7 @@ hdnom.validate = function(x, time, event, tauc[[j]][[i]] = penalized.validate.internal( x_tr = x_tr, x_te = x_te, y_tr = y_tr, y_te = y_te, - lambda = lambda, + lambda1 = lambda1, lambda2 = lambda2, tauc.type = tauc.type, tauc.time = tauc.time ) } @@ -377,7 +380,8 @@ hdnom.validate = function(x, time, event, class(tauc) = c('hdnom.validate', 'penalized.validate.bootstrap') attr(tauc, 'model.type') = model.type - attr(tauc, 'lambda') = lambda + attr(tauc, 'lambda1') = lambda1 + attr(tauc, 'lambda2') = lambda2 attr(tauc, 'boot.times') = boot.times attr(tauc, 'tauc.type') = tauc.type attr(tauc, 'tauc.time') = tauc.time @@ -418,7 +422,8 @@ hdnom.validate = function(x, time, event, class(tauc) = c('hdnom.validate', 'penalized.validate.cv') attr(tauc, 'model.type') = model.type - attr(tauc, 'lambda') = lambda + attr(tauc, 'lambda1') = lambda1 + attr(tauc, 'lambda2') = lambda2 attr(tauc, 'nfolds') = nfolds attr(tauc, 'tauc.type') = tauc.type attr(tauc, 'tauc.time') = tauc.time @@ -461,7 +466,8 @@ hdnom.validate = function(x, time, event, class(tauc) = c('hdnom.validate', 'penalized.validate.repeated.cv') attr(tauc, 'model.type') = model.type - attr(tauc, 'lambda') = lambda + attr(tauc, 'lambda1') = lambda1 + attr(tauc, 'lambda2') = lambda2 attr(tauc, 'nfolds') = nfolds attr(tauc, 'rep.times') = rep.times attr(tauc, 'tauc.type') = tauc.type @@ -596,11 +602,12 @@ ncvreg.validate.internal = function(x_tr, x_te, y_tr, y_te, model.type, #' #' 
@keywords internal penalized.validate.internal = function(x_tr, x_te, y_tr, y_te, - lambda, + lambda1, lambda2, tauc.type, tauc.time) { samp_fit = penalized(response = y_tr, penalized = x_tr, - lambda1 = lambda, lambda2 = 0, + lambda1 = lambda1, lambda2 = lambda2, + maxiter = 25, epsilon = 1e-3, # for faster convergence, consistent with `hdcox.flasso()` fusedl = TRUE, standardize = TRUE, model = 'cox') lp_tr = as.vector(samp_fit@lin.pred) @@ -748,7 +755,8 @@ print.hdnom.validate = function(x, ...) { cat('Validation method: bootstrap\n') cat('Bootstrap samples:', attr(x, 'boot.times'), '\n') cat('Model type:', attr(x, 'model.type'), '\n') - cat('Fused lasso model lambda:', attr(x, 'lambda'), '\n') + cat('Fused lasso model lambda1:', attr(x, 'lambda1'), '\n') + cat('Fused lasso model lambda2:', attr(x, 'lambda2'), '\n') cat('Time-dependent AUC type:', attr(x, 'tauc.type'), '\n') cat('Evaluation time points for tAUC:', attr(x, 'tauc.time')) }, @@ -759,7 +767,8 @@ print.hdnom.validate = function(x, ...) { cat('Validation method: k-fold cross-validation\n') cat('Cross-validation folds:', attr(x, 'nfolds'), '\n') cat('Model type:', attr(x, 'model.type'), '\n') - cat('Fused lasso model lambda:', attr(x, 'lambda'), '\n') + cat('Fused lasso model lambda1:', attr(x, 'lambda1'), '\n') + cat('Fused lasso model lambda2:', attr(x, 'lambda2'), '\n') cat('Time-dependent AUC type:', attr(x, 'tauc.type'), '\n') cat('Evaluation time points for tAUC:', attr(x, 'tauc.time')) }, @@ -771,7 +780,8 @@ print.hdnom.validate = function(x, ...) 
{ cat('Cross-validation folds:', attr(x, 'nfolds'), '\n') cat('Cross-validation repeated times:', attr(x, 'rep.times'), '\n') cat('Model type:', attr(x, 'model.type'), '\n') - cat('Fused lasso model lambda:', attr(x, 'lambda'), '\n') + cat('Fused lasso model lambda1:', attr(x, 'lambda1'), '\n') + cat('Fused lasso model lambda2:', attr(x, 'lambda2'), '\n') cat('Time-dependent AUC type:', attr(x, 'tauc.type'), '\n') cat('Evaluation time points for tAUC:', attr(x, 'tauc.time')) } diff --git a/R/04-hdnom-calibrate.R b/R/04-hdnom-calibrate.R index d1273c3..8785491 100644 --- a/R/04-hdnom-calibrate.R +++ b/R/04-hdnom-calibrate.R @@ -22,7 +22,10 @@ #' model fits on the resampled data. From the Cox model you have built. #' @param pen.factor Penalty factors to apply to each coefficient. #' From the built \emph{adaptive lasso} or \emph{adaptive elastic-net} model. -#' @param gamma Value of the model parameter gamma for MCP/SCAD/Mnet/Snet models. +#' @param gamma Value of the model parameter gamma for +#' MCP/SCAD/Mnet/Snet models. +#' @param lambda1 Value of the penalty parameter lambda1 for fused lasso model. +#' @param lambda2 Value of the penalty parameter lambda2 for fused lasso model. #' @param method Calibration method. #' Options including \code{"fitting"}, \code{"bootstrap"}, \code{"cv"}, #' and \code{"repeated.cv"}. 
@@ -53,11 +56,11 @@ #' y = Surv(time, event) #' #' # Fit Cox model with lasso penalty -#' lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +#' fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) #' #' # Model calibration by fitting the original data directly #' cal.fitting = hdnom.calibrate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$lasso_best_lambda, +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "fitting", #' pred.at = 365 * 9, ngroup = 5, #' seed = 1010) @@ -66,21 +69,21 @@ #' # Normally boot.times should be set to 200 or more, #' # we set it to 3 here only to save example running time. #' cal.boot = hdnom.calibrate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$lasso_best_lambda, +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "bootstrap", boot.times = 3, #' pred.at = 365 * 9, ngroup = 5, #' seed = 1010) #' #' # Model calibration by 5-fold cross-validation #' cal.cv = hdnom.calibrate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$lasso_best_lambda, +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "cv", nfolds = 5, #' pred.at = 365 * 9, ngroup = 5, #' seed = 1010) #' #' # Model calibration by repeated cross-validation #' cal.repcv = hdnom.calibrate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$lasso_best_lambda, +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "repeated.cv", nfolds = 3, rep.times = 3, #' pred.at = 365 * 9, ngroup = 5, #' seed = 1010) @@ -113,7 +116,7 @@ # # set.seed(1010) # cal.fitting = hdnom.calibrate(x, time, event, model.type = "flasso", -# lambda = 60, +# lambda1 = 5, lambda2 = 2, # method = "fitting", # pred.at = 365 * 9, ngroup = 5, # seed = 1010) @@ -131,7 +134,7 @@ # seed = 1010) # # cal.repcv = hdnom.calibrate(x, time, event, model.type = "flasso", -# lambda = 60, +# lambda1 = 5, lambda2 = 2, # method = "repeated.cv", nfolds = 5, rep.times = 3, # pred.at 
= 365 * 9, ngroup = 5, # seed = 1010) @@ -157,6 +160,7 @@ hdnom.calibrate = function(x, time, event, 'mcp', 'mnet', 'scad', 'snet'), alpha, lambda, pen.factor = NULL, gamma, + lambda1, lambda2, method = c('fitting', 'bootstrap', 'cv', 'repeated.cv'), boot.times = NULL, nfolds = NULL, rep.times = NULL, pred.at, ngroup = 5, @@ -195,7 +199,7 @@ hdnom.calibrate = function(x, time, event, if (model.type %in% c('flasso')) { pred_list = penalized.calibrate.internal.pred( x_tr = x, x_te = x, y_tr = Surv(time, event), - lambda = lambda, + lambda1 = lambda1, lambda2 = lambda2, pred.at = pred.at ) } @@ -258,7 +262,7 @@ hdnom.calibrate = function(x, time, event, if (model.type %in% c('flasso')) { pred_list_list[[i]] = penalized.calibrate.internal.pred( x_tr = x_tr, x_te = x_te, y_tr = y_tr, - lambda = lambda, + lambda1 = lambda1, lambda2 = lambda2, pred.at = pred.at ) } @@ -331,7 +335,7 @@ hdnom.calibrate = function(x, time, event, if (model.type %in% c('flasso')) { pred_list_list[[i]] = penalized.calibrate.internal.pred( x_tr = x_tr, x_te = x_te, y_tr = y_tr, - lambda = lambda, + lambda1 = lambda1, lambda2 = lambda2, pred.at = pred.at ) } @@ -412,7 +416,7 @@ hdnom.calibrate = function(x, time, event, if (model.type %in% c('flasso')) { pred_list_list[[j]][[i]] = penalized.calibrate.internal.pred( x_tr = x_tr, x_te = x_te, y_tr = y_tr, - lambda = lambda, + lambda1 = lambda1, lambda2 = lambda2, pred.at = pred.at ) } @@ -466,23 +470,24 @@ hdnom.calibrate = function(x, time, event, if (model.type %in% c('lasso', 'alasso', 'enet', 'aenet')) { class(prob) = c('hdnom.calibrate', 'glmnet.calibrate.fitting') - attr(prob, 'alpha') = alpha - attr(prob, 'lambda') = lambda + attr(prob, 'alpha') = alpha + attr(prob, 'lambda') = lambda attr(prob, 'pen.factor') = pen.factor } if (model.type %in% c('mcp', 'mnet', 'scad', 'snet')) { class(prob) = c('hdnom.calibrate', 'ncvreg.calibrate.fitting') - attr(prob, 'alpha') = alpha + attr(prob, 'alpha') = alpha attr(prob, 'lambda') = lambda - 
attr(prob, 'gamma') = gamma + attr(prob, 'gamma') = gamma } if (model.type %in% c('flasso')) { class(prob) = c('hdnom.calibrate', 'penalized.calibrate.fitting') - attr(prob, 'lambda') = lambda + attr(prob, 'lambda1') = lambda1 + attr(prob, 'lambda2') = lambda2 } }, @@ -510,7 +515,8 @@ hdnom.calibrate = function(x, time, event, if (model.type %in% c('flasso')) { class(prob) = c('hdnom.calibrate', 'penalized.calibrate.bootstrap') - attr(prob, 'lambda') = lambda + attr(prob, 'lambda1') = lambda1 + attr(prob, 'lambda2') = lambda2 attr(prob, 'boot.times') = boot.times } @@ -539,7 +545,8 @@ hdnom.calibrate = function(x, time, event, if (model.type %in% c('flasso')) { class(prob) = c('hdnom.calibrate', 'penalized.calibrate.cv') - attr(prob, 'lambda') = lambda + attr(prob, 'lambda1') = lambda1 + attr(prob, 'lambda2') = lambda2 attr(prob, 'nfolds') = nfolds } @@ -570,7 +577,8 @@ hdnom.calibrate = function(x, time, event, if (model.type %in% c('flasso')) { class(prob) = c('hdnom.calibrate', 'penalized.calibrate.repeated.cv') - attr(prob, 'lambda') = lambda + attr(prob, 'lambda1') = lambda1 + attr(prob, 'lambda2') = lambda2 attr(prob, 'nfolds') = nfolds attr(prob, 'rep.times') = rep.times } @@ -705,11 +713,12 @@ ncvreg.calibrate.internal.pred = function(x_tr, x_te, y_tr, #' #' @keywords internal penalized.calibrate.internal.pred = function(x_tr, x_te, y_tr, - lambda, + lambda1, lambda2, pred.at) { object = penalized(response = y_tr, penalized = x_tr, - lambda1 = lambda, lambda2 = 0, + lambda1 = lambda1, lambda2 = lambda2, + maxiter = 25, epsilon = 1e-3, # for faster convergence, consistent with `hdcox.flasso()` fusedl = TRUE, standardize = TRUE, model = 'cox') lp = as.vector(data.matrix(x_tr) %*% as.matrix(object@penalized)) @@ -913,7 +922,8 @@ print.hdnom.calibrate = function(x, ...) 
{ cat('Random seed:', attr(x, 'seed'), '\n') cat('Calibration method: fitting\n') cat('Model type:', attr(x, 'model.type'), '\n') - cat('Fused lasso model lambda:', attr(x, 'lambda'), '\n') + cat('Fused lasso model lambda1:', attr(x, 'lambda1'), '\n') + cat('Fused lasso model lambda2:', attr(x, 'lambda2'), '\n') cat('Calibration time point:', attr(x, 'pred.at'), '\n') cat('Number of groups formed for calibration:', attr(x, 'ngroup'), '\n') }, @@ -924,7 +934,8 @@ print.hdnom.calibrate = function(x, ...) { cat('Calibration method: bootstrap\n') cat('Bootstrap samples:', attr(x, 'boot.times'), '\n') cat('Model type:', attr(x, 'model.type'), '\n') - cat('Fused lasso model lambda:', attr(x, 'lambda'), '\n') + cat('Fused lasso model lambda1:', attr(x, 'lambda1'), '\n') + cat('Fused lasso model lambda2:', attr(x, 'lambda2'), '\n') cat('Calibration time point:', attr(x, 'pred.at'), '\n') cat('Number of groups formed for calibration:', attr(x, 'ngroup'), '\n') }, @@ -935,7 +946,8 @@ print.hdnom.calibrate = function(x, ...) { cat('Calibration method: k-fold cross-validation\n') cat('Cross-validation folds:', attr(x, 'nfolds'), '\n') cat('Model type:', attr(x, 'model.type'), '\n') - cat('Fused lasso model lambda:', attr(x, 'lambda'), '\n') + cat('Fused lasso model lambda1:', attr(x, 'lambda1'), '\n') + cat('Fused lasso model lambda2:', attr(x, 'lambda2'), '\n') cat('Calibration time point:', attr(x, 'pred.at'), '\n') cat('Number of groups formed for calibration:', attr(x, 'ngroup'), '\n') }, @@ -947,7 +959,8 @@ print.hdnom.calibrate = function(x, ...) 
{ cat('Cross-validation folds:', attr(x, 'nfolds'), '\n') cat('Cross-validation repeated times:', attr(x, 'rep.times'), '\n') cat('Model type:', attr(x, 'model.type'), '\n') - cat('Fused lasso model lambda:', attr(x, 'lambda'), '\n') + cat('Fused lasso model lambda1:', attr(x, 'lambda1'), '\n') + cat('Fused lasso model lambda2:', attr(x, 'lambda2'), '\n') cat('Calibration time point:', attr(x, 'pred.at'), '\n') cat('Number of groups formed for calibration:', attr(x, 'ngroup'), '\n') } @@ -1036,23 +1049,27 @@ summary.hdnom.calibrate = function(object, ...) { }, penalized.calibrate.fitting = { - attr(object, 'lambda') = NULL + attr(object, 'lambda1') = NULL + attr(object, 'lambda2') = NULL }, penalized.calibrate.bootstrap = { - attr(object, 'lambda') = NULL - attr(object, 'boot.times') = NULL + attr(object, 'lambda1') = NULL + attr(object, 'lambda2') = NULL + attr(object, 'boot.times') = NULL }, - penalized.calibrate.cv = { - attr(object, 'lambda') = NULL - attr(object, 'nfolds') = NULL + penalized.calibrate.cv = { + attr(object, 'lambda1') = NULL + attr(object, 'lambda2') = NULL + attr(object, 'nfolds') = NULL }, penalized.calibrate.repeated.cv = { - attr(object, 'lambda') = NULL - attr(object, 'nfolds') = NULL - attr(object, 'rep.times') = NULL + attr(object, 'lambda1') = NULL + attr(object, 'lambda2') = NULL + attr(object, 'nfolds') = NULL + attr(object, 'rep.times') = NULL } ) diff --git a/R/05-hdnom-external-validate.R b/R/05-hdnom-external-validate.R index 9fdf5f0..f1523d9 100644 --- a/R/05-hdnom-external-validate.R +++ b/R/05-hdnom-external-validate.R @@ -57,12 +57,12 @@ #' time_new = smart$TEVENT[1001:2000] #' event_new = smart$EVENT[1001:2000] #' -#' # Fit Cox model by lasso penalization -#' lassofit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) +#' # Fit Cox model with lasso penalty +#' fit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) #' #' # External validation with time-dependent AUC #' 
val.ext = -#' hdnom.external.validate(lassofit, x, time, event, +#' hdnom.external.validate(fit, x, time, event, #' x_new, time_new, event_new, #' tauc.type = "UNO", #' tauc.time = seq(0.25, 2, 0.25) * 365) diff --git a/R/06-hdnom-external-calibrate.R b/R/06-hdnom-external-calibrate.R index a27d0db..22acd93 100644 --- a/R/06-hdnom-external-calibrate.R +++ b/R/06-hdnom-external-calibrate.R @@ -41,12 +41,12 @@ #' time_new = smart$TEVENT[1001:2000] #' event_new = smart$EVENT[1001:2000] #' -#' # Fit Cox model by lasso penalization -#' lassofit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) +#' # Fit Cox model with lasso penalty +#' fit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) #' #' # External calibration #' cal.ext = -#' hdnom.external.calibrate(lassofit, x, time, event, +#' hdnom.external.calibrate(fit, x, time, event, #' x_new, time_new, event_new, #' pred.at = 365 * 5, ngroup = 5) #' diff --git a/R/07-hdnom-kmplot.R b/R/07-hdnom-kmplot.R index 0293bc9..507c32a 100644 --- a/R/07-hdnom-kmplot.R +++ b/R/07-hdnom-kmplot.R @@ -37,21 +37,21 @@ #' time_new = smart$TEVENT[1001:2000] #' event_new = smart$EVENT[1001:2000] #' -#' # Fit Cox model by lasso penalization -#' lassofit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) +#' # Fit Cox model with lasso penalty +#' fit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) #' -#' ### Internal calibration +#' # Internal calibration #' cal.int = hdnom.calibrate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$'lasso_best_lambda', +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "cv", nfolds = 5, #' pred.at = 365 * 9, ngroup = 3) #' #' hdnom.kmplot(cal.int, group.name = c('High risk', 'Medium risk', 'Low risk'), #' time.at = 1:6 * 365) #' -#' ### External calibration +#' # External calibration #' cal.ext = -#' hdnom.external.calibrate(lassofit, x, time, event, +#' 
hdnom.external.calibrate(fit, x, time, event, #' x_new, time_new, event_new, #' pred.at = 365 * 5, ngroup = 3) #' diff --git a/R/08-hdnom-logrank.R b/R/08-hdnom-logrank.R index b0f06cc..b64afbf 100644 --- a/R/08-hdnom-logrank.R +++ b/R/08-hdnom-logrank.R @@ -29,20 +29,20 @@ #' time_new = smart$TEVENT[1001:2000] #' event_new = smart$EVENT[1001:2000] #' -#' # Fit Cox model by lasso penalization -#' lassofit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) +#' # Fit Cox model with lasso penalty +#' fit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) #' -#' ### Internal calibration +#' # Internal calibration #' cal.int = hdnom.calibrate(x, time, event, model.type = "lasso", -#' alpha = 1, lambda = lassofit$'lasso_best_lambda', +#' alpha = 1, lambda = fit$lasso_best_lambda, #' method = "cv", nfolds = 5, #' pred.at = 365 * 9, ngroup = 3) #' #' hdnom.logrank(cal.int) #' -#' ### External calibration +#' # External calibration #' cal.ext = -#' hdnom.external.calibrate(lassofit, x, time, event, +#' hdnom.external.calibrate(fit, x, time, event, #' x_new, time_new, event_new, #' pred.at = 365 * 5, ngroup = 3) #' @@ -52,12 +52,12 @@ hdnom.logrank = function(object) { if (!(any(c('hdnom.calibrate', 'hdnom.external.calibrate') %in% class(object)))) stop('object class must be "hdnom.calibrate" or "hdnom.external.calibrate"') - time = attr(object, 'surv.time') + time = attr(object, 'surv.time') event = attr(object, 'surv.event') - grp = attr(object, 'risk.group') + grp = attr(object, 'risk.group') sdiff = survdiff(formula('Surv(time, event) ~ grp')) - pval = pchisq(sdiff$'chisq', length(sdiff$'n') - 1L, lower.tail = FALSE) + pval = pchisq(sdiff$'chisq', length(sdiff$'n') - 1L, lower.tail = FALSE) sdiff$'pval' = pval return(sdiff) diff --git a/R/09-hdnom-compare-validate.R b/R/09-hdnom-compare-validate.R index fa9eebc..f2e3921 100644 --- a/R/09-hdnom-compare-validate.R +++ b/R/09-hdnom-compare-validate.R @@ -151,7 +151,8 
@@ hdnom.compare.validate = tauclist[[i]] = hdnom.validate( x, time, event, model.type = 'flasso', - alpha = 1, lambda = cvfit$'flasso_best_lambda', + lambda1 = cvfit$'flasso_best_lambda1', + lambda2 = cvfit$'flasso_best_lambda2', method = method, boot.times = boot.times, nfolds = nfolds, rep.times = rep.times, tauc.type = tauc.type, tauc.time = tauc.time, seed = seed, trace = trace) @@ -382,7 +383,7 @@ plot.hdnom.compare.validate = geom_point(data = df, aes_string(x = 'Time', y = 'Median', colour = 'Model', fill = 'Model')) + geom_line(data = df, aes_string(x = 'Time', y = 'Median', - colour = 'Model', fill = 'Model'), + colour = 'Model'), linetype = 'dashed') + scale_x_continuous(breaks = df$'Time') + scale_colour_manual(values = col_pal) + @@ -399,7 +400,7 @@ plot.hdnom.compare.validate = geom_point(data = df, aes_string(x = 'Time', y = 'Median', colour = 'Model', fill = 'Model')) + geom_line(data = df, aes_string(x = 'Time', y = 'Median', - colour = 'Model', fill = 'Model'), + colour = 'Model'), linetype = 'dashed') + geom_ribbon(data = df, aes_string(ymin = 'Qt25', ymax = 'Qt75', colour = 'Model', fill = 'Model'), diff --git a/R/10-hdnom-compare-calibrate.R b/R/10-hdnom-compare-calibrate.R index fdc128a..0380e33 100644 --- a/R/10-hdnom-compare-calibrate.R +++ b/R/10-hdnom-compare-calibrate.R @@ -133,7 +133,8 @@ hdnom.compare.calibrate = problist[[i]] = hdnom.calibrate( x, time, event, model.type = 'flasso', - alpha = 1, lambda = cvfit$'flasso_best_lambda', + lambda1 = cvfit$'flasso_best_lambda1', + lambda2 = cvfit$'flasso_best_lambda2', method = method, boot.times = boot.times, nfolds = nfolds, rep.times = rep.times, pred.at = pred.at, ngroup = ngroup, seed = seed, trace = trace) diff --git a/R/hdnom-package.R b/R/hdnom-package.R index 1bb056b..801d291 100644 --- a/R/hdnom-package.R +++ b/R/hdnom-package.R @@ -1,8 +1,8 @@ -#' Nomograms for High-Dimensional Data with Penalized Cox Models +#' Benchmarking and Visualization Toolkit for Penalized Cox Models #' -#' 
Nomograms for High-Dimensional Data with Penalized Cox Models +#' Benchmarking and Visualization Toolkit for Penalized Cox Models #' -#' The vignette can be opened with \code{vignette('hdnom')}. +#' Browse vignettes with \code{browseVignettes(package = "hdnom")}. #' #' \tabular{ll}{ Package: \tab hdnom\cr #' Type: \tab Package\cr diff --git a/man/glmnet.survcurve.Rd b/man/glmnet.survcurve.Rd index 00eea95..ad18890 100644 --- a/man/glmnet.survcurve.Rd +++ b/man/glmnet.survcurve.Rd @@ -4,7 +4,7 @@ \alias{glmnet.survcurve} \title{Survival Curve Prediction for glmnet Objects} \usage{ -glmnet.survcurve(object, time, event, x, survtime, lambda) +glmnet.survcurve(object, time, event, x, survtime) } \value{ list containing predicted survival probabilities and diff --git a/man/hdcox.aenet.Rd b/man/hdcox.aenet.Rd index 47c5af3..bd29818 100644 --- a/man/hdcox.aenet.Rd +++ b/man/hdcox.aenet.Rd @@ -49,9 +49,9 @@ y = Surv(time, event) # registerDoParallel(detectCores()) # then set hdcox.aenet(..., parallel = TRUE). 
-# Fit Cox model by adaptive elastic-net penalization -aenetfit = hdcox.aenet(x, y, nfolds = 3, alphas = c(0.3, 0.7), - rule = "lambda.1se", seed = c(5, 7)) +# Fit Cox model with adaptive elastic-net penalty +fit = hdcox.aenet(x, y, nfolds = 3, alphas = c(0.3, 0.7), + rule = "lambda.1se", seed = c(5, 7)) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -59,9 +59,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(aenetfit$aenet_model, model.type = "aenet", - x, time, event, x.df, - lambda = aenetfit$aenet_best_lambda, pred.at = 365 * 2, +nom = hdnom.nomogram(fit$aenet_model, model.type = "aenet", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) diff --git a/man/hdcox.alasso.Rd b/man/hdcox.alasso.Rd index 8bd3aa1..c7250d4 100644 --- a/man/hdcox.alasso.Rd +++ b/man/hdcox.alasso.Rd @@ -36,8 +36,8 @@ time = smart$TEVENT event = smart$EVENT y = Surv(time, event) -# Fit Cox model by adaptive lasso penalization -alassofit = hdcox.alasso(x, y, nfolds = 3, rule = "lambda.1se", seed = c(7, 11)) +# Fit Cox model with adaptive lasso penalty +fit = hdcox.alasso(x, y, nfolds = 3, rule = "lambda.1se", seed = c(7, 11)) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -45,9 +45,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(alassofit$alasso_model, model.type = "alasso", - x, time, event, x.df, - lambda = alassofit$alasso_best_lambda, pred.at = 365 * 2, +nom = hdnom.nomogram(fit$alasso_model, model.type = "alasso", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) diff --git a/man/hdcox.enet.Rd b/man/hdcox.enet.Rd index b4408de..125ad8b 100644 --- a/man/hdcox.enet.Rd +++ b/man/hdcox.enet.Rd @@ -47,9 +47,9 @@ y = Surv(time, event) # registerDoParallel(detectCores()) # then set hdcox.enet(..., parallel = 
TRUE). -# Fit Cox model by elastic-net penalization -enetfit = hdcox.enet(x, y, nfolds = 3, alphas = c(0.3, 0.7), - rule = "lambda.1se", seed = 11) +# Fit Cox model with elastic-net penalty +fit = hdcox.enet(x, y, nfolds = 3, alphas = c(0.3, 0.7), + rule = "lambda.1se", seed = 11) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -57,9 +57,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(enetfit$enet_model, model.type = "enet", - x, time, event, x.df, - lambda = enetfit$enet_best_lambda, pred.at = 365 * 2, +nom = hdnom.nomogram(fit$enet_model, model.type = "enet", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) diff --git a/man/hdcox.flasso.Rd b/man/hdcox.flasso.Rd index ef865b4..88d2115 100644 --- a/man/hdcox.flasso.Rd +++ b/man/hdcox.flasso.Rd @@ -4,7 +4,9 @@ \alias{hdcox.flasso} \title{Fused Lasso Model Selection for High-Dimensional Cox Models} \usage{ -hdcox.flasso(x, y, nfolds = 5L, seed = 1001, trace = FALSE) +hdcox.flasso(x, y, nfolds = 5L, lambda1 = c(0.001, 0.05, 0.5, 1, 5), + lambda2 = c(0.001, 0.01, 0.5), maxiter = 25, epsilon = 0.001, + seed = 1001, trace = FALSE, parallel = FALSE, ...) } \arguments{ \item{x}{Data matrix.} @@ -13,21 +15,41 @@ hdcox.flasso(x, y, nfolds = 5L, seed = 1001, trace = FALSE) \item{nfolds}{Fold numbers of cross-validation.} +\item{lambda1}{Vector of lambda1 candidates. +Default is \code{0.001, 0.05, 0.5, 1, 5}.} + +\item{lambda2}{Vector of lambda2 candidates. +Default is \code{0.001, 0.01, 0.5}.} + +\item{maxiter}{The maximum number of iterations allowed. +Default is \code{25}.} + +\item{epsilon}{The convergence criterion. +Default is \code{1e-3}.} + \item{seed}{A random seed for cross-validation fold division.} \item{trace}{Output the cross-validation parameter tuning progress or not. Default is \code{FALSE}.} + +\item{parallel}{Logical. 
Enable parallel parameter tuning or not, +default is \code{FALSE}. To enable parallel tuning, load the +\code{doParallel} package and run \code{registerDoParallel()} +with the number of CPU cores before calling this function.} + +\item{...}{other parameters to \code{\link[penalized]{cvl}} +and \code{\link[penalized]{penalized}}.} } \description{ Automatic fused lasso model selection for high-dimensional -Cox models, evaluated by penalized partial-likelihood. +Cox models, evaluated by cross-validated likelihood. } \note{ The cross-validation procedure used in this function is the -so-called approximated cross-validation provided by the \code{penalized} +\emph{approximated cross-validation} provided by the \code{penalized} package. Be careful dealing with the results since they might be more -optimistic than fully fitting the model. This cross-validation method -is more suitable for datasets with larger number of observations, +optimistic than a traditional CV procedure. This cross-validation +method is more suitable for datasets with a larger number of observations, and a higher number of cross-validation folds. 
} \examples{ @@ -36,13 +58,13 @@ library("rms") # Load imputed SMART data; only use the first 120 samples data("smart") -x = as.matrix(smart[, -c(1, 2)])[1:140, ] -time = smart$TEVENT[1:140] -event = smart$EVENT[1:140] +x = as.matrix(smart[, -c(1, 2)])[1:120, ] +time = smart$TEVENT[1:120] +event = smart$EVENT[1:120] y = Surv(time, event) -# Fit Cox model by fused lasso penalization -flassofit = hdcox.flasso(x, y, nfolds = 3, seed = 11) +# Fit Cox model with fused lasso penalty +fit = hdcox.flasso(x, y, nfolds = 3, seed = 11) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -50,9 +72,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(flassofit$flasso_model, model.type = "flasso", - x, time, event, x.df, - lambda = flassofit$flasso_best_lambda, pred.at = 365 * 2, +nom = hdnom.nomogram(fit$flasso_model, model.type = "flasso", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) diff --git a/man/hdcox.lasso.Rd b/man/hdcox.lasso.Rd index 010b09d..16d2d8a 100644 --- a/man/hdcox.lasso.Rd +++ b/man/hdcox.lasso.Rd @@ -35,8 +35,8 @@ time = smart$TEVENT event = smart$EVENT y = Surv(time, event) -# Fit Cox model by lasso penalization -lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +# Fit Cox model with lasso penalty +fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -44,9 +44,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(lassofit$lasso_model, model.type = "lasso", - x, time, event, x.df, - lambda = lassofit$lasso_best_lambda, pred.at = 365 * 2, +nom = hdnom.nomogram(fit$lasso_model, model.type = "lasso", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) diff --git a/man/hdcox.mcp.Rd b/man/hdcox.mcp.Rd index 
2f03889..047e9d3 100644 --- a/man/hdcox.mcp.Rd +++ b/man/hdcox.mcp.Rd @@ -41,8 +41,8 @@ time = smart$TEVENT[1:150] event = smart$EVENT[1:150] y = Surv(time, event) -# Fit Cox model by MCP penalization -mcpfit = hdcox.mcp(x, y, nfolds = 3, gammas = c(2.1, 3), seed = 1001) +# Fit Cox model with MCP penalty +fit = hdcox.mcp(x, y, nfolds = 3, gammas = c(2.1, 3), seed = 1001) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -50,8 +50,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(mcpfit$mcp_model, model.type = "mcp", x, time, event, x.df, - lambda = mcpfit$mcp_best_lambda, pred.at = 365 * 2, +nom = hdnom.nomogram(fit$mcp_model, model.type = "mcp", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) } diff --git a/man/hdcox.mnet.Rd b/man/hdcox.mnet.Rd index 00a15a5..a71d8ce 100644 --- a/man/hdcox.mnet.Rd +++ b/man/hdcox.mnet.Rd @@ -44,9 +44,9 @@ time = smart$TEVENT[1:120] event = smart$EVENT[1:120] y = Surv(time, event) -# Fit Cox model by Mnet penalization -mnetfit = hdcox.mnet(x, y, nfolds = 3, gammas = 3, - alphas = c(0.3, 0.8), seed = 1010) +# Fit Cox model with Mnet penalty +fit = hdcox.mnet(x, y, nfolds = 3, gammas = 3, + alphas = c(0.3, 0.8), seed = 1010) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -54,10 +54,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(mnetfit$mnet_model, model.type = "mnet", - x, time, event, x.df, - lambda = mnetfit$mnet_best_lambda, - pred.at = 365 * 2, +nom = hdnom.nomogram(fit$mnet_model, model.type = "mnet", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) diff --git a/man/hdcox.scad.Rd b/man/hdcox.scad.Rd index 87dd3a2..b3a87b6 100644 --- a/man/hdcox.scad.Rd +++ b/man/hdcox.scad.Rd @@ -41,8 +41,8 @@ time = smart$TEVENT[1:120] event = smart$EVENT[1:120] y 
= Surv(time, event) -# Fit Cox model by SCAD penalization -scadfit = hdcox.scad(x, y, nfolds = 3, gammas = c(3.7, 5), seed = 1010) +# Fit Cox model with SCAD penalty +fit = hdcox.scad(x, y, nfolds = 3, gammas = c(3.7, 5), seed = 1010) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -50,8 +50,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(scadfit$scad_model, model.type = "scad", x, time, event, x.df, - lambda = scadfit$scad_best_lambda, pred.at = 365 * 2, +nom = hdnom.nomogram(fit$scad_model, model.type = "scad", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) diff --git a/man/hdcox.snet.Rd b/man/hdcox.snet.Rd index c0cd6c9..44d588f 100644 --- a/man/hdcox.snet.Rd +++ b/man/hdcox.snet.Rd @@ -44,9 +44,9 @@ time = smart$TEVENT[1:120] event = smart$EVENT[1:120] y = Surv(time, event) -# Fit Cox model by Snet penalization -snetfit = hdcox.snet(x, y, nfolds = 3, gammas = 3.7, - alphas = c(0.3, 0.8), seed = 1010) +# Fit Cox model with Snet penalty +fit = hdcox.snet(x, y, nfolds = 3, gammas = 3.7, + alphas = c(0.3, 0.8), seed = 1010) # Prepare data for hdnom.nomogram x.df = as.data.frame(x) @@ -54,10 +54,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(snetfit$snet_model, model.type = "snet", - x, time, event, x.df, - lambda = snetfit$snet_best_lambda, - pred.at = 365 * 2, +nom = hdnom.nomogram(fit$snet_model, model.type = "snet", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") plot(nom) diff --git a/man/hdnom-package.Rd b/man/hdnom-package.Rd index 63236ca..ab652d4 100644 --- a/man/hdnom-package.Rd +++ b/man/hdnom-package.Rd @@ -3,12 +3,12 @@ \docType{package} \name{hdnom-package} \alias{hdnom-package} -\title{Nomograms for High-Dimensional Data with Penalized Cox Models} +\title{Benchmarking and Visualization 
Toolkit for Penalized Cox Models} \description{ -Nomograms for High-Dimensional Data with Penalized Cox Models +Benchmarking and Visualization Toolkit for Penalized Cox Models } \details{ -The vignette can be opened with \code{vignette('hdnom')}. +Browse vignettes with \code{browseVignettes(package = "hdnom")}. \tabular{ll}{ Package: \tab hdnom\cr Type: \tab Package\cr diff --git a/man/hdnom.calibrate.Rd b/man/hdnom.calibrate.Rd index c7ac99c..d45766c 100644 --- a/man/hdnom.calibrate.Rd +++ b/man/hdnom.calibrate.Rd @@ -6,9 +6,9 @@ \usage{ hdnom.calibrate(x, time, event, model.type = c("lasso", "alasso", "flasso", "enet", "aenet", "mcp", "mnet", "scad", "snet"), alpha, lambda, - pen.factor = NULL, gamma, method = c("fitting", "bootstrap", "cv", - "repeated.cv"), boot.times = NULL, nfolds = NULL, rep.times = NULL, - pred.at, ngroup = 5, seed = 1001, trace = TRUE) + pen.factor = NULL, gamma, lambda1, lambda2, method = c("fitting", + "bootstrap", "cv", "repeated.cv"), boot.times = NULL, nfolds = NULL, + rep.times = NULL, pred.at, ngroup = 5, seed = 1001, trace = TRUE) } \arguments{ \item{x}{Matrix of training data used for fitting the model; @@ -38,7 +38,12 @@ model fits on the resampled data. From the Cox model you have built.} \item{pen.factor}{Penalty factors to apply to each coefficient. From the built \emph{adaptive lasso} or \emph{adaptive elastic-net} model.} -\item{gamma}{Value of the model parameter gamma for MCP/SCAD/Mnet/Snet models.} +\item{gamma}{Value of the model parameter gamma for +MCP/SCAD/Mnet/Snet models.} + +\item{lambda1}{Value of the penalty parameter lambda1 for fused lasso model.} + +\item{lambda2}{Value of the penalty parameter lambda2 for fused lasso model.} \item{method}{Calibration method. 
Options including \code{"fitting"}, \code{"bootstrap"}, \code{"cv"}, @@ -74,11 +79,11 @@ event = smart$EVENT y = Surv(time, event) # Fit Cox model with lasso penalty -lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) # Model calibration by fitting the original data directly cal.fitting = hdnom.calibrate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$lasso_best_lambda, + alpha = 1, lambda = fit$lasso_best_lambda, method = "fitting", pred.at = 365 * 9, ngroup = 5, seed = 1010) @@ -87,21 +92,21 @@ cal.fitting = hdnom.calibrate(x, time, event, model.type = "lasso", # Normally boot.times should be set to 200 or more, # we set it to 3 here only to save example running time. cal.boot = hdnom.calibrate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$lasso_best_lambda, + alpha = 1, lambda = fit$lasso_best_lambda, method = "bootstrap", boot.times = 3, pred.at = 365 * 9, ngroup = 5, seed = 1010) # Model calibration by 5-fold cross-validation cal.cv = hdnom.calibrate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$lasso_best_lambda, + alpha = 1, lambda = fit$lasso_best_lambda, method = "cv", nfolds = 5, pred.at = 365 * 9, ngroup = 5, seed = 1010) # Model calibration by repeated cross-validation cal.repcv = hdnom.calibrate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$lasso_best_lambda, + alpha = 1, lambda = fit$lasso_best_lambda, method = "repeated.cv", nfolds = 3, rep.times = 3, pred.at = 365 * 9, ngroup = 5, seed = 1010) diff --git a/man/hdnom.external.calibrate.Rd b/man/hdnom.external.calibrate.Rd index 68d2daa..5d6d709 100644 --- a/man/hdnom.external.calibrate.Rd +++ b/man/hdnom.external.calibrate.Rd @@ -52,12 +52,12 @@ x_new = as.matrix(smart[, -c(1, 2)])[1001:2000, ] time_new = smart$TEVENT[1001:2000] event_new = smart$EVENT[1001:2000] -# Fit Cox model by lasso penalization -lassofit = 
hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) +# Fit Cox model with lasso penalty +fit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) # External calibration cal.ext = - hdnom.external.calibrate(lassofit, x, time, event, + hdnom.external.calibrate(fit, x, time, event, x_new, time_new, event_new, pred.at = 365 * 5, ngroup = 5) diff --git a/man/hdnom.external.validate.Rd b/man/hdnom.external.validate.Rd index 12c4565..e0245bb 100644 --- a/man/hdnom.external.validate.Rd +++ b/man/hdnom.external.validate.Rd @@ -56,12 +56,12 @@ x_new = as.matrix(smart[, -c(1, 2)])[1001:2000, ] time_new = smart$TEVENT[1001:2000] event_new = smart$EVENT[1001:2000] -# Fit Cox model by lasso penalization -lassofit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) +# Fit Cox model with lasso penalty +fit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) # External validation with time-dependent AUC val.ext = - hdnom.external.validate(lassofit, x, time, event, + hdnom.external.validate(fit, x, time, event, x_new, time_new, event_new, tauc.type = "UNO", tauc.time = seq(0.25, 2, 0.25) * 365) diff --git a/man/hdnom.kmplot.Rd b/man/hdnom.kmplot.Rd index c98b3c0..77f04a3 100644 --- a/man/hdnom.kmplot.Rd +++ b/man/hdnom.kmplot.Rd @@ -43,21 +43,21 @@ x_new = as.matrix(smart[, -c(1, 2)])[1001:2000, ] time_new = smart$TEVENT[1001:2000] event_new = smart$EVENT[1001:2000] -# Fit Cox model by lasso penalization -lassofit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) +# Fit Cox model with lasso penalty +fit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) -### Internal calibration +# Internal calibration cal.int = hdnom.calibrate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$'lasso_best_lambda', + alpha = 1, lambda = fit$lasso_best_lambda, method = "cv", nfolds = 5, pred.at = 365 * 9, ngroup = 3) 
hdnom.kmplot(cal.int, group.name = c('High risk', 'Medium risk', 'Low risk'), time.at = 1:6 * 365) -### External calibration +# External calibration cal.ext = - hdnom.external.calibrate(lassofit, x, time, event, + hdnom.external.calibrate(fit, x, time, event, x_new, time_new, event_new, pred.at = 365 * 5, ngroup = 3) diff --git a/man/hdnom.logrank.Rd b/man/hdnom.logrank.Rd index 8907fe5..9f0572d 100644 --- a/man/hdnom.logrank.Rd +++ b/man/hdnom.logrank.Rd @@ -31,20 +31,20 @@ x_new = as.matrix(smart[, -c(1, 2)])[1001:2000, ] time_new = smart$TEVENT[1001:2000] event_new = smart$EVENT[1001:2000] -# Fit Cox model by lasso penalization -lassofit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) +# Fit Cox model with lasso penalty +fit = hdcox.lasso(x, Surv(time, event), nfolds = 5, rule = "lambda.1se", seed = 11) -### Internal calibration +# Internal calibration cal.int = hdnom.calibrate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$'lasso_best_lambda', + alpha = 1, lambda = fit$lasso_best_lambda, method = "cv", nfolds = 5, pred.at = 365 * 9, ngroup = 3) hdnom.logrank(cal.int) -### External calibration +# External calibration cal.ext = - hdnom.external.calibrate(lassofit, x, time, event, + hdnom.external.calibrate(fit, x, time, event, x_new, time_new, event_new, pred.at = 365 * 5, ngroup = 3) diff --git a/man/hdnom.nomogram.Rd b/man/hdnom.nomogram.Rd index 9ff897a..b06923a 100644 --- a/man/hdnom.nomogram.Rd +++ b/man/hdnom.nomogram.Rd @@ -6,7 +6,7 @@ \usage{ hdnom.nomogram(object, model.type = c("lasso", "alasso", "flasso", "enet", "aenet", "mcp", "mnet", "scad", "snet"), x, time, event, ddist, - lambda = NULL, pred.at = NULL, fun.at = NULL, funlabel = NULL) + pred.at = NULL, fun.at = NULL, funlabel = NULL) } \arguments{ \item{object}{Fitted model object.} @@ -25,14 +25,6 @@ Must be of the same length with the number of rows as \code{x}.} \item{ddist}{Data frame version of x, made by \code{\link[rms]{datadist}}.} 
-\item{lambda}{Value of the penalty parameter lambda in -\code{\link[glmnet]{glmnet}} or \code{\link[ncvreg]{ncvsurv}}. -Required except when \code{model.type == "flasso"}. -We will use the selected variables at the provided \code{lambda} to -build the nomogram, and make predictions. -See the example for choosing a proper lambda value extracted -from cross-validation results.} - \item{pred.at}{Time point at which to plot nomogram prediction axis.} \item{fun.at}{Function values to label on axis.} @@ -42,6 +34,18 @@ from cross-validation results.} \description{ Nomograms for High-Dimensional Cox Models } +\note{ +We will try to use the value of the selected penalty +parameter (e.g. lambda, alpha) in the model object fitted by +\code{\link[glmnet]{glmnet}}, \code{\link[ncvreg]{ncvsurv}}, or +\code{\link[penalized]{penalized}}. The selected variables under +the penalty parameter will be used to build the nomogram +and make predictions. This means, if you use routines other +than \code{hdcox.*} functions to fit the model, please +make sure that there is only one set of necessary parameters +(e.g. only a single lambda should be in glmnet objects instead
+} \examples{ library("rms") @@ -53,7 +57,7 @@ event = smart$EVENT y = Surv(time, event) # Fit penalized Cox model with lasso penalty -lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) # Prepare data needed for plotting nomogram x.df = as.data.frame(x) @@ -61,9 +65,8 @@ dd = datadist(x.df) options(datadist = "dd") # Generate hdnom.nomogram objects and plot nomogram -nom = hdnom.nomogram(lassofit$lasso_model, model.type = "lasso", - x, time, event, x.df, - lambda = lassofit$lasso_best_lambda, pred.at = 365 * 2, +nom = hdnom.nomogram(fit$lasso_model, model.type = "lasso", + x, time, event, x.df, pred.at = 365 * 2, funlabel = "2-Year Overall Survival Probability") print(nom) diff --git a/man/hdnom.validate.Rd b/man/hdnom.validate.Rd index 1cf4fdd..20d2fa1 100644 --- a/man/hdnom.validate.Rd +++ b/man/hdnom.validate.Rd @@ -6,8 +6,8 @@ \usage{ hdnom.validate(x, time, event, model.type = c("lasso", "alasso", "flasso", "enet", "aenet", "mcp", "mnet", "scad", "snet"), alpha, lambda, - pen.factor = NULL, gamma, method = c("bootstrap", "cv", "repeated.cv"), - boot.times = NULL, nfolds = NULL, rep.times = NULL, + pen.factor = NULL, gamma, lambda1, lambda2, method = c("bootstrap", "cv", + "repeated.cv"), boot.times = NULL, nfolds = NULL, rep.times = NULL, tauc.type = c("CD", "SZ", "UNO"), tauc.time, seed = 1001, trace = TRUE) } \arguments{ @@ -41,6 +41,10 @@ From the built \emph{adaptive lasso} or \emph{adaptive elastic-net} model.} \item{gamma}{Value of the model parameter gamma for MCP/SCAD/Mnet/Snet models.} +\item{lambda1}{Value of the penalty parameter lambda1 for fused lasso model.} + +\item{lambda2}{Value of the penalty parameter lambda2 for fused lasso model.} + \item{method}{Validation method. 
Could be \code{"bootstrap"}, \code{"cv"}, or \code{"repeated.cv"}.} @@ -78,27 +82,27 @@ event = smart$EVENT[1:500] y = Surv(time, event) # Fit penalized Cox model with lasso penalty -lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) # Model validation by bootstrap with time-dependent AUC # Normally boot.times should be set to 200 or more, # we set it to 3 here only to save example running time. val.boot = hdnom.validate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$lasso_best_lambda, + alpha = 1, lambda = fit$lasso_best_lambda, method = "bootstrap", boot.times = 3, tauc.type = "UNO", tauc.time = seq(0.25, 2, 0.25) * 365, seed = 1010) # Model validation by 5-fold cross-validation with time-dependent AUC val.cv = hdnom.validate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$lasso_best_lambda, + alpha = 1, lambda = fit$lasso_best_lambda, method = "cv", nfolds = 5, tauc.type = "UNO", tauc.time = seq(0.25, 2, 0.25) * 365, seed = 1010) # Model validation by repeated cross-validation with time-dependent AUC val.repcv = hdnom.validate(x, time, event, model.type = "lasso", - alpha = 1, lambda = lassofit$lasso_best_lambda, + alpha = 1, lambda = fit$lasso_best_lambda, method = "repeated.cv", nfolds = 5, rep.times = 3, tauc.type = "UNO", tauc.time = seq(0.25, 2, 0.25) * 365, seed = 1010) diff --git a/man/hdnom.varinfo.Rd b/man/hdnom.varinfo.Rd index 82bdb50..c6490ab 100644 --- a/man/hdnom.varinfo.Rd +++ b/man/hdnom.varinfo.Rd @@ -29,8 +29,8 @@ time = smart$TEVENT event = smart$EVENT y = Surv(time, event) -# Fit Cox model by lasso penalization -lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) -hdnom.varinfo(lassofit, x) +# Fit Cox model with lasso penalty +fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +hdnom.varinfo(fit, x) } diff --git a/man/ncvreg.survcurve.Rd b/man/ncvreg.survcurve.Rd index 
5d5879a..dc98b13 100644 --- a/man/ncvreg.survcurve.Rd +++ b/man/ncvreg.survcurve.Rd @@ -4,7 +4,7 @@ \alias{ncvreg.survcurve} \title{Survival Curve Prediction for ncvreg Objects} \usage{ -ncvreg.survcurve(object, time, event, x, survtime, lambda) +ncvreg.survcurve(object, time, event, x, survtime) } \value{ list containing predicted survival probabilities and diff --git a/man/penalized.calibrate.internal.pred.Rd b/man/penalized.calibrate.internal.pred.Rd index 26b43e8..7d936ac 100644 --- a/man/penalized.calibrate.internal.pred.Rd +++ b/man/penalized.calibrate.internal.pred.Rd @@ -4,7 +4,7 @@ \alias{penalized.calibrate.internal.pred} \title{Compute "penalized" Predicted Survival Probabilities for Calibration} \usage{ -penalized.calibrate.internal.pred(x_tr, x_te, y_tr, lambda, pred.at) +penalized.calibrate.internal.pred(x_tr, x_te, y_tr, lambda1, lambda2, pred.at) } \value{ list containing predicted survival probability diff --git a/man/penalized.tune.lambda.Rd b/man/penalized.tune.lambda.Rd new file mode 100644 index 0000000..a15a49c --- /dev/null +++ b/man/penalized.tune.lambda.Rd @@ -0,0 +1,16 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/01-hdnom-models.R +\name{penalized.tune.lambda} +\alias{penalized.tune.lambda} +\title{Automatic lambda tuning function for fused lasso by k-fold cross-validation} +\usage{ +penalized.tune.lambda(..., lambda1, lambda2, seed, trace, parallel) +} +\value{ +best model object, best lambda1, and best lambda2 +} +\description{ +Automatic lambda tuning function for fused lasso by k-fold cross-validation +} +\keyword{internal} + diff --git a/man/penalized.validate.internal.Rd b/man/penalized.validate.internal.Rd index a46763b..ad911cc 100644 --- a/man/penalized.validate.internal.Rd +++ b/man/penalized.validate.internal.Rd @@ -4,7 +4,7 @@ \alias{penalized.validate.internal} \title{Compute Validation Measures for "penalized" Model Objects} \usage{ -penalized.validate.internal(x_tr, x_te, y_tr, y_te, 
lambda, tauc.type, +penalized.validate.internal(x_tr, x_te, y_tr, y_te, lambda1, lambda2, tauc.type, tauc.time) } \value{ diff --git a/man/predict.hdcox.model.Rd b/man/predict.hdcox.model.Rd index 60b21f9..a3fad92 100644 --- a/man/predict.hdcox.model.Rd +++ b/man/predict.hdcox.model.Rd @@ -21,8 +21,8 @@ at which predictions are to be made.} \item{...}{Other parameters (not used).} } \value{ -A \code{nrow(newx) x length(pred.at)} matrix containing overall -survival probablity. +A \code{nrow(newx) x length(pred.at)} matrix containing +overall survival probablity. } \description{ Predict overall survival probability at certain time points @@ -38,7 +38,7 @@ time = smart$TEVENT event = smart$EVENT y = Surv(time, event) -lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) -predict(lassofit, x, y, newx = x[101:105, ], pred.at = 1:10 * 365) +fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +predict(fit, x, y, newx = x[101:105, ], pred.at = 1:10 * 365) } diff --git a/man/print.hdcox.model.Rd b/man/print.hdcox.model.Rd index 3c63337..0fedffd 100644 --- a/man/print.hdcox.model.Rd +++ b/man/print.hdcox.model.Rd @@ -24,7 +24,7 @@ time = smart$TEVENT event = smart$EVENT y = Surv(time, event) -lassofit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) -print(lassofit) +fit = hdcox.lasso(x, y, nfolds = 5, rule = "lambda.1se", seed = 11) +print(fit) } diff --git a/vignettes/aenetfit.rds b/vignettes/aenetfit.rds deleted file mode 100644 index 79d5fbfefb36ff0a283ed3608d9088defd4e7645..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1151 zcmV-_1c3V=iwFP!000001MOB@h#OTHK0C=wE_-Q~-L=|SM6gN~!dkZ#Y|G@bV=`=R zO;UI3gW)8ZTy%0VN!^WzH{2rh0zPc16f7!KtAef8Zd;JTiiprs3T0coP((r52hj(8 zP~3PXbNk}W=oNQeBB^WXpTpZ|Qxj2$7wAQNLk~QvU%$tEkU(2SR2>(IYdmDc=S18YEcL>CJRNKiBrwfVPYjFC0vwhWIaOd94%WKA}-9p(&HC2FVW)_ z5<(n!pAZwgF;9^47M~|ZOnjQB$3vG~REUYu0QkidLO2o^Ar2Sc6Hj=f5QT)$v8h*s 
zetGl4b2paW;V*ss;nBFbk$1+}1Lji~4T&sSoUWIvFbT+@X45jR5hbH9*OtloLbzTn z%o&4KaQZ2nlMLJAz?j?!l?LhX+q(zVc$+AH^z8>8ww+l7-hKYe=}P03tGZaX>()qj z!IX#7SCf;AC8@5K&Nfx*Y<`1y8--jJ7YlO3ur*VY?x2y1R`T;Vvtc4Zuv{ahT$iRL3CAWTEm5fr-uv;B=*DTvc2z zl;KoMdu5pjHMT-Vg5;yopOmUq2vp8yYouJNNn=@tb*Y@HsbyQ5v{DAVNDoVzmv%g@ zE%$_4@5{&5M@Du%+Z$^8nzU6M>O@#2Cq-4ojsZIWQcjhd^l}|~nw#hrTB83FS}GT} zH4z;D>Mute4_+GNf3%*T{`H$L_$z<>_W0$m-@Wkprc>8GdVY%kK6icJwp}0a-)*bE zHzpob{W&;M)Sk?n0-d#q{wcC?-9uy?UOL=w?A!8_O$ej4k!=IIDYDs`-B_l;7eH9Q zvFbKx4K$Gvc2u0gZJ-*`+YIZW3HLxVi!_&(_huy23A*fTzL3t-e&`j<&Ye^W5_2k# z>r^(R+bL>yxg+CHMJi#r-f*eEWcs8yLv*Mocsigd|0}#?l!iHd<7&6uu_w;{b$0tK z@WyX9H}``3{&L&?p6yqbt*1ZfvU~H}yXGer9c#tuvJc!>n%U1@UsGRr?2$(gZhqZ8 z-u=}1%0L(n000ulJ0}1D diff --git a/vignettes/fit.rds b/vignettes/fit.rds new file mode 100644 index 0000000000000000000000000000000000000000..d4f50f0c5d53ca0b4200dbb97257e371f4dcce10 GIT binary patch literal 1153 zcmV-{1b+J;iwFP!000001MOC8h#OTDzB|cG9{XsP-L=|SM6gN~!dkZ#Y|G@aV=`=B zO;UI355r9|dFbR}lDZoaU${l+1N^b2Qn08{tqQhUyKO-VD02La5q1D(82ByhEQD@>*^K_V4iAf0;r5fQ!sGXx_OGCtk8DKqrLGuzl zULhgGLG}qT!5i}gsci9iV#LI!d3rpQ<)T7Nj0V6jo)E&3xCn8$_?~#e8-*w&gid6Y zMeyWWYnl`k!v3~2Er|X=0+M?}B4y3KKmhVqOiN)K-zKBwe+HW`yrZ#RoiFI`-8{=nC_x=O`ES$_K3p_9p> z!~F8*h39T8y~AJn_`{=daU<`Hu?NhjE*cV9vN&BYS78#6VVO9Vm2ES4*ItW&H@Nws>irAQGVrBxs-~80Y0^p=@FG1dX?OK7QF z+}1>J{HwnlaXffwkpIzoe)`vMzTmI?@!R8pf%7$M%Ynt3b%o3NN+Q&hbG(u%`DPfTHc$HP$%fJv-v_gPy3-?V0P}LTF{tN zd0eNuA>B?Jbm%N={->|bZM z&w^}xyScd+-1nE;_V;YRvTQy5NtfN5-`+JpvFKPUPM3Y)zS7Ko_WGLZg~uLw^x)># z-Sdrk&x=RCa$7dsd;G23pYGuuE5|38yk@ub>EX)LzpsL9IJsN+`|siJs-gE*$XS!5 z41DJ(aFA?+2*65P-EnX1Eq$?^kznW@xOaqMyhezA+!e!4ZhR7upy&1J&u7R5a_OCh Tbt)HzFRs4