
mixed mirt can now accommodate item designs

philchalmers committed Jan 2, 2013
1 parent b5b8242 commit eea3b250ba9e297d5a3db07d58f931fb90282dfd
Showing with 189 additions and 17 deletions.
  1. +9 −9 DESCRIPTION
  2. +3 −0 NEWS
  3. +2 −2 R/MHRM.mixed.R
  4. +14 −0 R/mirt-package.R
  5. +69 −4 R/mixedmirt.R
  6. +3 −1 R/utils.R
  7. +24 −1 inst/tests/test-07-mixedmirt.R
  8. +65 −0 man/mixedmirt.Rd
DESCRIPTION
@@ -11,15 +11,15 @@ Description: Analysis of dichotomous and polytomous response data using latent
and multivariate one-, two-, three-, and four-parameter logistic models,
graded response models, rating scale graded response models, (generalized)
partial credit models, rating scale models, nominal models, multiple choice
- models, and multivariate partially-compensatory models. Many of these models
- can be used in an exploratory or confirmatory manner with optional user defined
- constraints. Exploratory models can be estimated via quadrature or
- stochastic methods, a generalized confirmatory bi-factor analysis is
- included, and confirmatory models can be fit with a Metropolis-Hastings
- Robbins-Monro algorithm which can include polynomial or product constructed
- latent traits. Additionally, multiple group analysis may be performed for
- unidimensional or multidimensional item response models for detecting
- differential item functioning.
+ models, and multivariate partially-compensatory models. Many of these
+ models can be used in an exploratory or confirmatory manner with optional
+ user defined constraints. Exploratory models can be estimated via
+ quadrature or stochastic methods, a generalized confirmatory bi-factor
+ analysis is included, and confirmatory models can be fit with a
+ Metropolis-Hastings Robbins-Monro algorithm which can include polynomial or
+ product constructed latent traits. Additionally, multiple group analysis
+ may be performed for unidimensional or multidimensional item response
+ models for detecting differential item functioning.
Depends:
R (>= 2.14),
stats4,
NEWS
@@ -1,3 +1,6 @@
+* mixedmirt() can now model item-level design matrices (e.g., LLTM) that the user must manually
+  customize. Examples of how to do this are given in the documentation; see ?mixedmirt
+
* new fitIndices() function added to compute additional model fit statistics such as M2
* lower bound parameters under more stringent control during estimation and are bounded to never
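
For quick reference, the item-level design workflow added here boils down to the pattern below. This is only a condensed sketch of the SAT12 LLTM example added to ?mixedmirt further down in this commit (the confmirt.model() call is interactive in this version of the package, so the model syntax 'Theta = 1-32' is entered at the prompt):

library(mirt)
data(SAT12)
data <- key2binary(SAT12,
  key = c(1,4,5,2,3,1,2,1,3,1,2,4,2,1,5,3,4,4,1,4,3,3,4,1,3,5,1,3,1,5,4,5))
model <- confmirt.model()           # enter 'Theta = 1-32' at the prompt
covdata <- data.frame(itemorder = rep(1, nrow(data)))   # one all-ones item predictor

# obtain starting values; fix the item intercepts to 0 and constrain the item design effects
sv <- mixedmirt(data, covdata, model, fixed = ~ itemorder, itemtype = 'Rasch', pars = 'values')
sv$value[sv$name == 'd'] <- 0
sv$est[sv$name == 'd'] <- FALSE
constrain <- list(sv$parnum[sv$name == 'itemorder'][1:16],
                  sv$parnum[sv$name == 'itemorder'][-(1:16)])
LLTM <- mixedmirt(data, covdata, model, fixed = ~ itemorder, itemtype = 'Rasch',
                  pars = sv, constrain = constrain, fixed.constrain = FALSE)
coef(LLTM)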
R/MHRM.mixed.R
@@ -86,7 +86,7 @@ MHRM.mixed <- function(pars, constrain, PrepList, list, mixedlist, debug)
stop('Constraint applied to fixed parameter(s) ',
paste(redindex[diag(L)[!estpars] > 0]), ' but should only be applied to
estimated parameters. Please fix!')
- }
+ }
####Big MHRM loop
for(cycles in 1:(NCYCLES + BURNIN + SEMCYCLES))
{
@@ -175,7 +175,7 @@ MHRM.mixed <- function(pars, constrain, PrepList, list, mixedlist, debug)
g[ind1:ind2] <- pars[[group]][[i]]@gradient <- deriv$grad
h[ind1:ind2, ind1:ind2] <- pars[[group]][[i]]@hessian <- deriv$hess
ind1 <- ind2 + 1
- }
+ }
grad <- g %*% L
ave.h <- (-1)*L %*% h %*% L
grad <- grad[1, estpars & !redun_constr]
R/mirt-package.R
@@ -97,3 +97,17 @@ NULL
#'
#' @keywords data
NULL
+
+#' Description of deAyala data
+#'
+#' Mathematics data from de Ayala (2009; p. 14); a 5-item dataset in table format.
+#'
+#'
+#' @name deAyala
+#' @docType data
+#' @author Phil Chalmers \email{rphilip.chalmers@@gmail.com}
+#' @references
+#' de Ayala, R. J. (2009). \emph{The theory and practice of item response theory}. Guilford Press.
+#'
+#' @keywords data
+NULL
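
Once this version is installed, the new dataset can be inspected in the usual way (nothing mirt-specific here, and the exact column layout is not shown in this commit):

library(mirt)
data(deAyala)
head(deAyala)   # first rows of the 5-item mathematics data
str(deAyala)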
R/mixedmirt.R
@@ -80,27 +80,92 @@
#' summary(mod2)
#' anova(mod1b, mod2)
#' anova(mod2, mod3)
+#'
+#' #################################
+#' ###### Advanced use of mixedmirt: LLTM, and 2PL version of LLTM
+#' #################################
+#'
+#' # flexible LLTM model by customizing the structure
+#' data(SAT12)
+#' data <- key2binary(SAT12,
+#' key = c(1,4,5,2,3,1,2,1,3,1,2,4,2,1,5,3,4,4,1,4,3,3,4,1,3,5,1,3,1,5,4,5))
+#' model <- confmirt.model()
+#' Theta = 1-32
+#'
+#'
#' # suppose that the first 16 items were suspected to be easier than the last 16 items, and we
#' # wish to test this item-level hypothesis. First, create a predictor matrix entirely of 1's
#' # for each item design effect (only one here)
+#' covdata <- data.frame(itemorder = matrix(1, nrow(data), 1))
#'
+#' # obtain starting values and identify parameters that are to be equal (first 16, last 16), and
+#' # fix item intercepts to 0
+#' sv <- mixedmirt(data, covdata, model, fixed = ~ itemorder, itemtype = 'Rasch', pars = 'values')
+#' constrain <- list()
+#' constrain[[1]] <- sv$parnum[sv$name == 'itemorder'][1:16]
+#' constrain[[2]] <- sv$parnum[sv$name == 'itemorder'][-(1:16)]
+#' sv$value[sv$name == 'd'] <- 0
+#' sv$est[sv$name == 'd'] <- FALSE
+#'
+#' #estimate LLTM using new starting values (with adjusted fixed parameters) and constraints
+#' LLTM <- mixedmirt(data, covdata, model, fixed = ~ itemorder, itemtype = 'Rasch',
+#' pars = sv, constrain = constrain, fixed.constrain = FALSE)
+#' coef(LLTM)
#' # how does this compare to a model with freely estimated item slopes (2PL)?
+#' sv$est[sv$name == 'a1'] <- TRUE
+#' twoPL <- mixedmirt(data, covdata, model, fixed = ~ itemorder,
+#' pars = sv, constrain = constrain, fixed.constrain = FALSE)
+#' coef(twoPL)
+#' anova(twoPL, LLTM)
#' # the 2PL model fits better than the LLTM, so don't draw the (possibly spurious) conclusion
#' # that the first half of the test is any easier or harder than the last half
+#'
+#' ### Similar example, but with simulated data
+#'
+#' set.seed(1234)
+#' N <- 750
+#' a <- matrix(rep(1,10))
+#' d <- matrix(c(rep(-1,5), rep(1,5)))
+#' Theta <- matrix(rnorm(N))
+#' data <- simdata(a, d, N, itemtype = rep('dich',10), Theta=Theta, D=1)
+#' covdata <- data.frame(itempred=rep(1, nrow(data)))
+#' model <- confmirt.model('confmods/mixedmirt1', quiet = TRUE)
+#' sv <- mixedmirt(data, covdata, model, fixed= ~ itempred, pars = 'values',
+#' itemtype = 'Rasch')
+#' sv$value[sv$name == 'd'] <- 0
+#' sv$est[sv$name == 'd'] <- FALSE
+#'
+#' #make design such that the first 5 items are systematically more difficult than the last 5
+#' constrain <- list()
+#' constrain[[1]] <- sv$parnum[sv$name == 'itempred'][1:5]
+#' constrain[[2]] <- sv$parnum[sv$name == 'itempred'][-c(1:5)]
+#' mod <- mixedmirt(data, covdata, model, fixed= ~ itempred, pars = sv,
+#' itemtype = 'Rasch', constrain = constrain, fixed.constrain = FALSE,
+#' verbose = TRUE)
+#' coef(mod)
+#' rasch <- mirt(data, 1, itemtype = 'Rasch', D=1)
#' anova(mod, rasch) # n.s.; the more parsimonious LLTM is a much better choice than the Rasch model
+#'
#' }
mixedmirt <- function(data, covdata, model, fixed = ~ 1, random = NULL, itemtype = NULL,
fixed.constrain = TRUE, ...)
{
Call <- match.call()
for(i in 1:ncol(covdata))
- if(is(covdata[,i], 'numeric') || is(covdata[,i], 'integer'))
+ if(is(covdata[,i], 'numeric') || is(covdata[,i], 'integer'))
covdata[,i] <- matrix(scale(covdata[,i], scale = FALSE))
### TEMPORARY
if(!is.null(random))
stop('random effect covariates not yet supported')
random <- ~ 1
###
if(fixed == ~ 1 && random == ~ 1)
- stop('No fixed or random effects have been specified.')
+ stop('No fixed or random effects have been specified.')
Theta <- matrix(0, nrow(data), nrow(model$x))
- colnames(Theta) <- model$x[,1]
+ colnames(Theta) <- model$x[,1]
fixed.design <- designMats(covdata, fixed, Theta)
mixedlist <- list(fixed=fixed, random=random, covdata=covdata, factorNames=model$x[,1],
- FD=fixed.design, fixed.constrain=fixed.constrain)
+ FD=fixed.design, fixed.constrain=fixed.constrain)
mod <- ESTIMATION(data=data, model=model, group=rep('all', nrow(data)), itemtype=itemtype,
D=1, mixedlist=mixedlist, method='MIXED', ...)
if(is(mod, 'MixedClass'))
R/utils.R
@@ -519,6 +519,8 @@ designMats <- function(covdata, fixed, Thetas, random = NULL){
drop <- rep(FALSE, length(CN))
for(i in 1:ncol(Thetas))
drop <- drop | CN == cn[i]
- fixed.design <- fixed.design[ , !drop, drop = FALSE]
+ fixed.design <- fixed.design[ , !drop, drop = FALSE]
+ zerocols <- colSums(abs(fixed.design))
+ fixed.design[ ,zerocols == 0] <- 1
return(fixed.design)
}
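
A note on the R/utils.R change above: after the latent-trait columns are dropped from the fixed-effects design matrix, any column that is left entirely zero is now filled with 1's. This appears intended to restore constant item-level predictors (such as the all-ones itemorder/itempred columns in the new examples), which become all-zero after the mean-centering step in mixedmirt(). A minimal standalone illustration of just that zero-column check, using a hypothetical 3 x 2 matrix in place of the designMats() output:

fixed.design <- cbind(c(0, 0, 0), c(1, 2, 3))   # toy matrix; first column is all zeros
zerocols <- colSums(abs(fixed.design))          # equals 0 only for all-zero columns
fixed.design[, zerocols == 0] <- 1              # replace all-zero columns with constant 1's
fixed.design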
inst/tests/test-07-mixedmirt.R
@@ -39,4 +39,27 @@ test_that('mixed poly', {
data <- simdata(a,d,N, itemtype = rep('dich',10), Theta=Theta)
covdata <- data.frame(group, pseudoIQ)
-})
+})
+
+test_that('item covs', {
+ set.seed(1234)
+ N <- 750
+ a <- matrix(rep(1,10))
+ d <- matrix(c(rep(-1,5), rep(1,5)))
+ Theta <- matrix(rnorm(N))
+ data <- simdata(a, d, N, itemtype = rep('dich',10), Theta=Theta, D=1)
+ covdata <- data.frame(itempred=rep(1, nrow(data)))
+ # d > 0
+ model <- confmirt.model('confmods/mixedmirt1', quiet = TRUE)
+ sv <- mixedmirt(data, covdata, model, fixed= ~ itempred, pars = 'values',
+ itemtype = 'Rasch')
+ sv$value[sv$name == 'd'] <- 0
+ sv$est[sv$name == 'd'] <- FALSE
+ constrain <- list()
+ constrain[[1]] <- sv$parnum[sv$name == 'itempred'][1:5]
+ constrain[[2]] <- sv$parnum[sv$name == 'itempred'][-c(1:5)]
+ mod <- mixedmirt(data, covdata, model, fixed= ~ itempred, pars = sv,
+ itemtype = 'Rasch', constrain = constrain, fixed.constrain = FALSE,
+ verbose = FALSE)
+ expect_is(mod, 'MixedClass')
+})
man/mixedmirt.Rd
@@ -106,6 +106,71 @@ summary(mod2)
anova(mod1b, mod2)
anova(mod2, mod3)
+#################################
+###### Advanced use of mixedmirt: LLTM, and 2PL version of LLTM
+#################################
+
+# flexible LLTM model by customizing the structure
+data(SAT12)
+data <- key2binary(SAT12,
+ key = c(1,4,5,2,3,1,2,1,3,1,2,4,2,1,5,3,4,4,1,4,3,3,4,1,3,5,1,3,1,5,4,5))
+model <- confmirt.model()
+ Theta = 1-32
+
+
+# suppose that the first 16 items were suspected to be easier than the last 16 items, and we
+# wish to test this item-level hypothesis. First, create a predictor matrix entirely of 1's
+# for each item design effect (only one here)
+covdata <- data.frame(itemorder = matrix(1, nrow(data), 1))
+
+# obtain starting values and identify parameters that are to be equal (first 16, last 16), and
+# fix item intercepts to 0
+sv <- mixedmirt(data, covdata, model, fixed = ~ itemorder, itemtype = 'Rasch', pars = 'values')
+constrain <- list()
+constrain[[1]] <- sv$parnum[sv$name == 'itemorder'][1:16]
+constrain[[2]] <- sv$parnum[sv$name == 'itemorder'][-(1:16)]
+sv$value[sv$name == 'd'] <- 0
+sv$est[sv$name == 'd'] <- FALSE
+
+#estimate LLTM using new starting values (with adjusted fixed parameters) and constraints
+LLTM <- mixedmirt(data, covdata, model, fixed = ~ itemorder, itemtype = 'Rasch',
+ pars = sv, constrain = constrain, fixed.constrain = FALSE)
+coef(LLTM)
+# how does this compare to a model with freely estimated item slopes (2PL)?
+sv$est[sv$name == 'a1'] <- TRUE
+twoPL <- mixedmirt(data, covdata, model, fixed = ~ itemorder,
+ pars = sv, constrain = constrain, fixed.constrain = FALSE)
+coef(twoPL)
+anova(twoPL, LLTM)
+# the 2PL model fits better than the LLTM, so don't draw the (possibly spurious) conclusion
+# that the first half of the test is any easier or harder than the last half
+
+### Similar example, but with simulated data
+
+set.seed(1234)
+N <- 750
+a <- matrix(rep(1,10))
+d <- matrix(c(rep(-1,5), rep(1,5)))
+Theta <- matrix(rnorm(N))
+data <- simdata(a, d, N, itemtype = rep('dich',10), Theta=Theta, D=1)
+covdata <- data.frame(itempred=rep(1, nrow(data)))
+model <- confmirt.model('confmods/mixedmirt1', quiet = TRUE)
+sv <- mixedmirt(data, covdata, model, fixed= ~ itempred, pars = 'values',
+ itemtype = 'Rasch')
+sv$value[sv$name == 'd'] <- 0
+sv$est[sv$name == 'd'] <- FALSE
+
+#make design such that the first 5 items are systematically more difficult than the last 5
+constrain <- list()
+constrain[[1]] <- sv$parnum[sv$name == 'itempred'][1:5]
+constrain[[2]] <- sv$parnum[sv$name == 'itempred'][-c(1:5)]
+mod <- mixedmirt(data, covdata, model, fixed= ~ itempred, pars = sv,
+ itemtype = 'Rasch', constrain = constrain, fixed.constrain = FALSE,
+ verbose = TRUE)
+coef(mod)
+rasch <- mirt(data, 1, itemtype = 'Rasch', D=1)
+anova(mod, rasch) # n.s.; the more parsimonious LLTM is a much better choice than the Rasch model
+
}
}
\author{
