(source cleanup:) remove trailing whitespace at the end of a line
yrosseel committed Jun 25, 2018
1 parent 6b1978b commit 1c03b46
Showing 122 changed files with 2,612 additions and 2,581 deletions.
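
A housekeeping commit like this is normally generated mechanically rather than edited by hand. As a rough illustration (not the author's actual tooling), trailing whitespace can be stripped from every R source file with a few lines of base R:

# strip trailing spaces and tabs from every .R file under R/
files <- list.files("R", pattern = "\\.[Rr]$", full.names = TRUE)
for (f in files) {
    old <- readLines(f)
    new <- sub("[ \t]+$", "", old)                 # drop trailing whitespace
    if (!identical(old, new)) writeLines(new, f)   # rewrite only changed files
}

In the hunks below, a leading - marks a removed line and + its replacement; most pairs differ only in invisible trailing whitespace.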
2 changes: 1 addition & 1 deletion DESCRIPTION
@@ -1,6 +1,6 @@
Package: lavaan
Title: Latent Variable Analysis
-Version: 0.6-2.1265
+Version: 0.6-2.1268
Authors@R: c(person(given = "Yves", family = "Rosseel",
role = c("aut", "cre"),
email = "Yves.Rosseel@UGent.be"),
10 changes: 5 additions & 5 deletions R/00class.R
@@ -1,5 +1,5 @@
# class definitions
-#
+#
# initial version: YR 25/03/2009
# added ModelSyntax: YR 02/08/2010
# deleted ModelSyntax: YR 01/11/2010 (using flattened model syntax now)
@@ -50,7 +50,7 @@ setClass("lavSampleStats", # sample moments
res.var="list", # residual variances
res.th="list", # residual thresholds
res.th.nox="list", # residual thresholds ignoring x
-res.slopes="list", # slopes exo (if conditional.x)
+res.slopes="list", # slopes exo (if conditional.x)
res.int="list", # intercepts (if conditional.x)

mean.x="list", # mean exo
@@ -191,7 +191,7 @@ setClass("lavaan",
SampleStats = "lavSampleStats", # sample statistics
Model = "lavModel", # internal matrix representation
Cache = "list", # housekeeping stuff
-Fit = "Fit", # fitted results
+Fit = "Fit", # fitted results
boot = "list", # bootstrap results
optim = "list", # optimizer results
loglik = "list", # loglik values and info
@@ -201,7 +201,7 @@ setClass("lavaan",
h1 = "list", # unrestricted model results
baseline = "list", # baseline model results
external = "list" # optional slot, for add-on packages
-)
+)
)

setClass("lavaanList",
@@ -234,4 +234,4 @@ setClass("lavaanList",



-
+
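
The slots being cleaned up above belong to lavaan's S4 class definitions. For readers less familiar with S4, a self-contained toy class in the same style (illustrative only, not lavaan code) shows how list-valued slots are declared and accessed:

# a minimal S4 class with list-valued slots, in the style of 00class.R
setClass("toyFit",
         slots = c(SampleStats = "list",    # sample statistics
                   optim       = "list"))   # optimizer results
fit <- new("toyFit",
           SampleStats = list(nobs = 100L),
           optim       = list(converged = TRUE))
fit@optim$converged   # TRUE; slots are read with @, as in fit@SampleStats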
6 changes: 3 additions & 3 deletions R/00generic.R
@@ -1,9 +1,9 @@
# for blavaan
-setGeneric("fitMeasures",
-function(object, fit.measures = "all", baseline.model = NULL)
+setGeneric("fitMeasures",
+function(object, fit.measures = "all", baseline.model = NULL)
standardGeneric("fitMeasures"))
setGeneric("fitmeasures",
-function(object, fit.measures = "all", baseline.model = NULL)
+function(object, fit.measures = "all", baseline.model = NULL)
standardGeneric("fitmeasures"))


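The fitMeasures/fitmeasures generics are declared here ("# for blavaan") so that add-on packages can register methods for their own fitted-object classes. A hedged sketch of such a registration, using a made-up class name:

# hypothetical add-on package code; "myFit" stands in for the package's own class
library(lavaan)                                  # provides the fitMeasures generic
setClass("myFit", slots = c(results = "list"))
setMethod("fitMeasures", signature(object = "myFit"),
          function(object, fit.measures = "all", baseline.model = NULL) {
              object@results[["fit"]]            # placeholder: return stored indices
          })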
2 changes: 1 addition & 1 deletion R/01RefClass_00lavRefModel.R
@@ -1,7 +1,7 @@
# generic statistical model -- YR 10 july 2012


-# super class -- virtual statistical model
+# super class -- virtual statistical model
lavRefModel <- setRefClass("lavRefModel",

# fields
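lavRefModel is built on Reference Classes (setRefClass), R's mutable object system, rather than plain S4. A toy example of the mechanism (not lavaan code):

# Reference Classes couple mutable fields with methods
Counter <- setRefClass("Counter",
    fields  = list(n = "numeric"),
    methods = list(
        bump = function() {
            n <<- n + 1    # <<- writes the field in place
        }))
x <- Counter$new(n = 0)
x$bump()
x$n   # 1; the object itself was modified, as with the .self$ assignments below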
22 changes: 11 additions & 11 deletions R/01RefClass_01lavOptim.R
@@ -32,15 +32,15 @@ minGradient = function(x) {
minHessian = function(x) {
cat("this is dummy a function [minHessian]\n")
return(matrix(as.numeric(NA), npar, npar))
-},
+},

optimize = function(method = "nlminb", control = list(), verbose = FALSE,
start.values = NULL) {
method <- tolower(method)
hessian <- FALSE
if( method == "none" ) {
.self$optim.method <- "none"
-} else if( method %in% c("nlminb", "quasi-newton", "quasi.newton",
+} else if( method %in% c("nlminb", "quasi-newton", "quasi.newton",
"nlminb.hessian") ) {
.self$optim.method <- "nlminb"
if(verbose)
@@ -59,7 +59,7 @@ optimize = function(method = "nlminb", control = list(), verbose = FALSE,
.self$optim.control <- control.nlminb[c("eval.max", "iter.max", "trace",
"abs.tol", "rel.tol", "x.tol",
"step.min")]
-
+
} else if( method %in% c("newton", "newton-raphson", "newton.raphson") ) {
.self$optim.method <- "newton"
if(verbose)
@@ -76,7 +76,7 @@ optimize = function(method = "nlminb", control = list(), verbose = FALSE,
}

# user provided starting values?
-if(!is.null(start.values)) {
+if(!is.null(start.values)) {
stopifnot(length(start.values) == npar)
.self$theta.start <- start.values
}
@@ -94,14 +94,14 @@ optimize = function(method = "nlminb", control = list(), verbose = FALSE,
gradient = .self$minGradient, control = optim.control)
} else {
out <- nlminb(start = theta, objective = .self$minObjective,
-gradient = .self$minGradient,
+gradient = .self$minGradient,
hessian = .self$minHessian,
control = optim.control)
}
# FIXME: use generic fields
.self$optim.out <- out
}
-# just in case, a last call to objective()
+# just in case, a last call to objective()
tmp <- minObjective()

}
@@ -114,10 +114,10 @@ optimize = function(method = "nlminb", control = list(), verbose = FALSE,
# - it assumes that the hessian is always positive definite (no check!)
# - it may do some backstepping, but there is no guarantee that it will
# converge
-# this function is NOT for general-purpose optimization, but should only be
-# used or simple (convex!) problems (eg. estimating polychoric/polyserial
+# this function is NOT for general-purpose optimization, but should only be
+# used or simple (convex!) problems (eg. estimating polychoric/polyserial
# correlations, probit regressions, ...)
-#
+#
lavOptimNewtonRaphson <- function(object,
control = list(iter.max = 100L,
grad.tol = 1e-6,
@@ -140,8 +140,8 @@ lavOptimNewtonRaphson <- function(object,
for(i in seq_len(control$iter.max)) {

if(control$verbose) {
-cat("NR step ", sprintf("%2d", (i-1L)), ": max.grad = ",
-sprintf("%12.9f", max.grad), " norm.grad = ",
+cat("NR step ", sprintf("%2d", (i-1L)), ": max.grad = ",
+sprintf("%12.9f", max.grad), " norm.grad = ",
sprintf("%12.9f", norm.grad), "\n", sep="")
}

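The comment block above describes a deliberately bare-bones Newton-Raphson routine for simple convex problems. Stripped of the RefClass plumbing, the core iteration amounts to something like the following sketch (illustrative only, without the backstepping the real routine may do):

# minimal Newton-Raphson: step along -solve(H, g) until the gradient is small
newton_raphson <- function(theta, gradient, hessian,
                           iter.max = 100L, grad.tol = 1e-6) {
    for (i in seq_len(iter.max)) {
        g <- gradient(theta)
        if (max(abs(g)) < grad.tol) break           # converged
        theta <- theta - solve(hessian(theta), g)   # assumes H is positive definite
    }
    theta
}
# toy convex problem: minimize (x - 2)^2
newton_raphson(0, gradient = function(x) 2 * (x - 2),
                  hessian  = function(x) matrix(2))  # returns 2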
2 changes: 1 addition & 1 deletion R/01RefClass_02lavML.R
@@ -33,7 +33,7 @@ scores = function(x) {
if(!missing(x)) .self$theta <- x
cat("this is dummy function\n")
return(matrix(as.numeric(NA), nobs, npar))
-},
+},

gradient = function(x) {
SCORES <- scores(x)
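The gradient method above starts from the case-wise score matrix; summing the score contributions over rows (observations) then gives the total gradient, one entry per parameter (up to the sign and scaling conventions the class uses). In sketch form:

# toy illustration: gradient as the column sums of an n-by-npar score matrix
SC <- matrix(rnorm(10 * 3), nrow = 10, ncol = 3)   # 10 cases, 3 parameters
grad <- colSums(SC)                                # length-3 gradient vector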
22 changes: 11 additions & 11 deletions R/ctr_estfun.R
@@ -1,15 +1,15 @@
# contributed by Ed Merkle (17 Jan 2013)


-# YR 12 Feb 2013: small changes to match the results of lav_model_gradient
+# YR 12 Feb 2013: small changes to match the results of lav_model_gradient
# in the multiple group case
# YR 30 May 2014: handle 1-variable case (fixing apply in lines 56, 62, 108)
-# YR 05 Nov 2015: add remove.duplicated = TRUE, to cope with strucchange in
+# YR 05 Nov 2015: add remove.duplicated = TRUE, to cope with strucchange in
# case of simple equality constraints
# YR 19 Nov 2015: if constraints have been used, compute case-wise Lagrange
# multipliers, and define the scores as: SC + (t(R) lambda)
# YR 05 Feb 2016: catch conditional.x = TRUE: no support (for now), until
-# we can use the generic 0.6 infrastructure for scores,
+# we can use the generic 0.6 infrastructure for scores,
# including the missing-values case
# YR 16 Feb 2016: adapt to changed @Mp slot elements; add remove.empty.cases=
# argument
@@ -21,7 +21,7 @@ estfun.lavaan <- lavScores <- function(object, scaling = FALSE,

stopifnot(inherits(object, "lavaan"))

-# what if estimator != ML?
+# what if estimator != ML?
# avoid hard error (using stop); throw a warning, and return an empty matrix
if(object@Options$estimator != "ML") {
warning("lavaan WARNING: scores only availalbe if estimator is ML")
@@ -53,7 +53,7 @@ estfun.lavaan <- lavScores <- function(object, scaling = FALSE,
# npar <- NCOL(object@Model@eq.constraints.K)
#}
Score.mat <- matrix(NA, ntot, npar)
-
+
for(g in 1:lavsamplestats@ngroups) {
if (lavsamplestats@ngroups > 1){
moments <- fitted(object)[[g]]
@@ -67,7 +67,7 @@ estfun.lavaan <- lavScores <- function(object, scaling = FALSE,
} else {
N1 <- 1
}
-
+
if(!lavsamplestats@missing.flag) { # complete data
#if(lavmodel@meanstructure) { # mean structure
nvar <- ncol(lavsamplestats@cov[[g]])
@@ -115,7 +115,7 @@ estfun.lavaan <- lavScores <- function(object, scaling = FALSE,
nvar <- ncol(lavsamplestats@cov[[g]])
score.sigma <- matrix(0, nsub, nvar*(nvar+1)/2)
score.mu <- matrix(0, nsub, nvar)
-
+
for(p in 1:length(M)) {
## Data
#X <- M[[p]][["X"]]
@@ -127,7 +127,7 @@ estfun.lavaan <- lavScores <- function(object, scaling = FALSE,
## (Used to keep track of scores in score.sigma)
var.idx.mat <- tcrossprod(var.idx)
Sigma.idx <- which(var.idx.mat[lower.tri(var.idx.mat, diag=T)]==1)
-
+
J <- matrix(1, 1L, nobs) #[var.idx]
J2 <- matrix(1, nvar, nvar)[var.idx, var.idx, drop = FALSE]
diag(J2) <- 0.5
@@ -148,7 +148,7 @@ estfun.lavaan <- lavScores <- function(object, scaling = FALSE,
scores.H1 <- group.w[g] * scores.H1
}
} # missing
-
+
Delta <- computeDelta(lavmodel = lavmodel)[[g]]
#if(lavmodel@eq.constraints) {
# Delta <- Delta %*% lavmodel@eq.constraints.K # + lavmodel@eq.constraints.k0
@@ -160,12 +160,12 @@ estfun.lavaan <- lavScores <- function(object, scaling = FALSE,
if(scaling){
Score.mat[wi,] <- (-1/ntot) * Score.mat[wi,]
}
-
+
} # g

# handle empty rows
if(remove.empty.cases) {
-#empty.idx <- which( apply(Score.mat, 1L,
+#empty.idx <- which( apply(Score.mat, 1L,
# function(x) sum(is.na(x))) == ncol(Score.mat) )
empty.idx <- unlist(lapply(lavdata@Mp, "[[", "empty.idx"))
if(length(empty.idx) > 0L) {
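estfun.lavaan(), aliased as lavScores(), returns the case-wise score contributions that strucchange-style methods consume (see the header comments above). A short usage sketch with lavaan's built-in example data:

# scores for a one-factor model; HolzingerSwineford1939 ships with lavaan
library(lavaan)
model <- ' visual =~ x1 + x2 + x3 '
fit <- cfa(model, data = HolzingerSwineford1939)
SC <- lavScores(fit)      # one row per case, one column per free parameter
dim(SC)                   # 301 rows, one column per free parameter
round(colSums(SC), 6)     # approximately zero at the ML solution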
