From f61f437f842180add45bbc59825559e91571f4e2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C3=98ystein=20Olav=20Skaar?=
Date: Sat, 3 Nov 2018 18:26:53 +0100
Subject: [PATCH] 0.2.0.9005
---
.gitignore | 2 +-
.travis.yml | 11 +-
DESCRIPTION | 8 +-
NAMESPACE | 5 +-
NEWS.md | 25 ++
R/basic_functions.R | 80 +++++-
R/mcmc.R | 250 ++++++++++--------
R/mcmc_misc.R | 32 ++-
R/mcmc_summary.R | 2 +-
R/plot_circlize.R | 19 +-
R/plot_mean.R | 20 +-
R/plot_param.R | 203 ++++++++++++++
R/plot_parse.R | 4 +-
R/settings.R | 125 +++------
R/stats_bernoulli.R | 8 +-
R/stats_covariate.R | 45 ++--
R/stats_fit.R | 36 +--
R/stats_kappa.R | 15 +-
R/stats_mean.R | 12 +-
R/stats_metric.R | 18 +-
R/stats_nominal.R | 12 +-
R/stats_regression.R | 42 +--
R/stats_softmax.R | 16 +-
README.md | 32 ++-
TODO.md | 13 +-
inst/doc/{plot_data.md => plot_mean.md} | 24 +-
inst/extdata/models/stats_fit_cfa.txt | 10 +
inst/extdata/models/stats_regression.txt | 8 +-
.../models/stats_regression_robust.txt | 8 +-
inst/extdata/templates/apa.pptx | Bin 0 -> 32693 bytes
man/MatrixCombn.Rd | 11 +-
man/MultiGrep.Rd | 20 ++
man/ParsePlot.Rd | 2 +-
man/PlotCirclize.Rd | 10 +-
man/PlotMean.Rd | 4 +-
man/PlotParam.Rd | 52 ++++
man/RemoveGarbage.Rd | 14 +
man/RunMCMC.Rd | 25 +-
man/StatsBernoulli.Rd | 3 +-
man/StatsCovariate.Rd | 5 +-
man/StatsFit.Rd | 10 +-
man/StatsKappa.Rd | 3 +-
man/StatsMean.Rd | 3 +-
man/StatsMetric.Rd | 5 +-
man/StatsNominal.Rd | 5 +-
man/StatsRegression.Rd | 5 +-
man/StatsSoftmax.Rd | 5 +-
man/SumMCMC.Rd | 2 +-
man/TidyCode.Rd | 2 +-
man/TrimSplit.Rd | 2 +-
man/bfw.Rd | 95 +------
51 files changed, 865 insertions(+), 503 deletions(-)
create mode 100644 R/plot_param.R
rename inst/doc/{plot_data.md => plot_mean.md} (84%)
create mode 100644 inst/extdata/templates/apa.pptx
create mode 100644 man/MultiGrep.Rd
create mode 100644 man/PlotParam.Rd
create mode 100644 man/RemoveGarbage.Rd
diff --git a/.gitignore b/.gitignore
index 5ed9cc2..32d6254 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
Meta
.Rproj.user
-.Rhistory
+*.Rhistory
.RData
.Ruserdata
*.Rmd
diff --git a/.travis.yml b/.travis.yml
index 3040555..75b6af7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -34,12 +34,13 @@ apt_packages:
- libv8-dev
r_packages:
- - coda
- - MASS
- - runjags
+ - covr
+ - circlize
+ - dplyr
- ggplot2
- knitr
- lavaan
+ - magrittr
- officer
- plyr
- png
@@ -47,7 +48,8 @@ r_packages:
- rmarkdown
- rvg
- scales
-
+ - testthat
+
notifications:
email:
on_success: change
@@ -55,6 +57,7 @@ notifications:
r_github_packages:
- r-lib/covr
+ - r-lib/devtools
after_success:
- Rscript -e 'covr::coveralls()'
diff --git a/DESCRIPTION b/DESCRIPTION
index 5297cf5..dde7937 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: bfw
-Version: 0.2.0.9004
-Date: 2018-09-18
+Version: 0.2.0.9005
+Date: 2018-11-03
Title: Bayesian Framework for Computational Modeling
Authors@R: person( "Øystein Olav","Skaar", email="bayesianfw@gmail.com", role=c("aut","cre"))
Maintainer: Øystein Olav Skaar
@@ -19,13 +19,15 @@ SystemRequirements: JAGS >=4.3.0 ,
Depends: R (>= 3.5.0),
Imports: coda (>= 0.19-1),
MASS (>= 7.3-47),
- runjags (>= 2.0.4-2)
+ runjags (>= 2.0.4-2)
Suggests:
covr (>= 3.1.0),
circlize (>= 0.4.4),
+ dplyr (>= 0.7.7),
ggplot2 (>= 2.2.1),
knitr (>= 1.20),
lavaan (>= 0.6-1),
+ magrittr (>= 1.5),
officer (>= 0.3.1),
plyr (>= 1.8.4),
png (>= 0.1-7),
diff --git a/NAMESPACE b/NAMESPACE
index b3e1214..d256728 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -15,16 +15,18 @@ export(Interleave)
export(Layout)
export(MatrixCombn)
export(MergeMCMC)
+export(MultiGrep)
export(Normalize)
export(PadVector)
export(ParseNumber)
export(ParsePlot)
export(PlotCirclize)
-export(PlotData)
export(PlotMean)
export(PlotNominal)
+export(PlotParam)
export(ReadFile)
export(RemoveEmpty)
+export(RemoveGarbage)
export(RemoveSpaces)
export(RunContrasts)
export(RunMCMC)
@@ -79,6 +81,7 @@ importFrom(runjags,run.jags)
importFrom(runjags,runjags.options)
importFrom(stats,acf)
importFrom(stats,aggregate)
+importFrom(stats,approx)
importFrom(stats,approxfun)
importFrom(stats,complete.cases)
importFrom(stats,cor)
diff --git a/NEWS.md b/NEWS.md
index 335a230..0766a18 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1,3 +1,28 @@
+# bfw 0.2.0.9005
+
+### Feature
+
+* Updated `CFA` function to include correlation matrix
+* Added an option to run `PPP` for every kth length of MCMC chains (Default is every 10th)
+
+#### Moderate
+
+* Optimized `RunContrasts` to allow larger MCMC simulations (2nd review)
+
+#### Minor
+
+* Fixed `plot_data` vignette
+* Updated `README`
+* Fixed title bug in `circlize` plots
+* Added `RemoveGarbage` function to clear up working memory
+* Added `MultiGrep` function to use multiple patterns to select an element from a vector
+* Fixed bug in `kappa` function
+* Fixed bug in `covariate` function
+* Fixed inline comment bug in `TidyCode` function
+* Added option to define which parameters to use for diagnostics
+* Removed (some of the...) unnecessary arguments in `bfw` function
+* Added an `apa` PowerPoint template
+
# bfw 0.2.0.9004
### Feature
diff --git a/R/basic_functions.R b/R/basic_functions.R
index 18f5b35..39a93b1 100644
--- a/R/basic_functions.R
+++ b/R/basic_functions.R
@@ -283,6 +283,7 @@ Layout <- function(x = "a4", layout.inverse = FALSE) {
x <- switch (x,
"pt" = c(10,7.5),
"pw" = c(13.33,7.5),
+ "apa" = c(5.1338582677, 7.2515748),
"4a0" = c(66.2,93.6),
"2a0" = c(46.8,66.2),
"a0" = c(33.1,46.8),
@@ -459,7 +460,7 @@ Trim <- function(s, multi = TRUE) {
#' @param sep symbol to separate data (e.g., comma-delimited), Default: ','
#' @param fixed logical, if TRUE match split exactly, otherwise use regular expressions. Has priority over perl, Default: FALSE
#' @param perl logical, indicating whether or not to use Perl-compatible regexps, Default: FALSE
-#' @param useBytes logical. If TRUE the matching is done byte-by-byte rather than character-by-character, Default: FALSE
+#' @param useBytes logical, if TRUE the matching is done byte-by-byte rather than character-by-character, Default: FALSE
#' @param rm.empty logical. indicating whether or not to remove empty elements, Default: TRUE
#' @details \link[base]{strsplit}
#' @examples
@@ -515,7 +516,7 @@ VectorSub <- function ( pattern , replacement , string ) {
#' @title Tidy Code
#' @description Small function that clears up messy code
#' @param tidy.code Messy code that needs cleaning
-#' @param jags logical. If TRUE run code as JAGS model, Default: TRUE
+#' @param jags logical, if TRUE run code as JAGS model, Default: TRUE
#' @return (Somewhat) tidy code
#' @examples
#' messy <- "code <- function( x ) {
@@ -540,35 +541,47 @@ TidyCode <- function(tidy.code,
tidy.code <- gsub("model[[:space:]]+\\{", "if (TidyJagsModel) {" , tidy.code)
tidy.code <- gsub("model\\{", "if (TidyJagsModel) {" , tidy.code)
}
-
- # Extract blocks from cod
+
+ # Extract blocks from code
tidy.code <- TrimSplit(tidy.code,"\\\n")
-
+
# Wrap comments prior to parsing
- invisible(lapply(grep("\\#",tidy.code), function (i){
- tidy.code[i] <<- sprintf("invisible(\"StartPreParse%sEndPreParse\")" , tidy.code[i])
+ invisible(lapply(grep("\\#",tidy.code), function (i) {
+ if (substring(tidy.code[[i]], 1, 1) == "#") {
+ tidy.code[i] <<- sprintf("invisible(\"StartPreParse%sEndPreParse\")" , tidy.code[i])
+ } else {
+ tidy.code[i] <<- sprintf("%s\ninvisible(\"StartInlinePreParse%sEndPreParse\")" ,
+ gsub('\\#.*', '', tidy.code[[i]]),
+ gsub('.*\\#', '#', tidy.code[[i]]) )
+ }
}))
-
+
# Parse code
tidy.code <- base::parse(text = tidy.code, keep.source = FALSE)
-
+
# Collapse parsed function into a vector
tidy.code <- sapply(tidy.code, function(e) {
paste(base::deparse(e, getOption("width")), collapse = "\n")
})
-
+
# remove spaces between commas
tidy.code <- gsub("\\s*\\,\\s*", "," , tidy.code)
-
+
# Revert comments (remove invisibility)
tidy.code <- gsub("invisible\\(\\\"StartPreParse" , "" , tidy.code)
tidy.code <- gsub("EndPreParse\\\")" , "" , tidy.code)
-
+ # Revert inline comments (remove invisibility)
+ tidy.code <- gsub("\n[[:space:]]+invisible\\(\\\"StartInlinePreParse" , " " , tidy.code)
+
+
# If jags replace placeholder
if (jags) {
tidy.code <- gsub("if \\(TidyJagsData\\)", "data" , tidy.code)
tidy.code <- gsub("if \\(TidyJagsModel\\)", "model" , tidy.code)
}
+
+ # Collapse to string
+ tidy.code <- paste0(tidy.code, collapse="\n")
return (tidy.code)
}
@@ -594,4 +607,47 @@ ETA <- function (start.time, i , total) {
cat("\r" , eta.message , sep="")
utils::flush.console()
if (i == total) cat("\n")
+}
+
+#' @title Remove Garbage
+#' @description Remove variable(s) and remove garbage from memory
+#' @param v variables to remove
+#' @rdname RemoveGarbage
+#' @export
+
+RemoveGarbage <- function (v) {
+ v <- TrimSplit(v)
+ rm( list = v, envir=sys.frame(-1) )
+ # Garbage Collection
+ invisible(base::gc(verbose = FALSE, full = TRUE))
+}
+
+#' @title Multi Grep
+#' @description Use multiple patterns from vector to find element in another vector, with option to remove certain patterns
+#' @param find vector to find
+#' @param from vector to find from
+#' @param remove variables to remove, Default: NULL
+#' @param value logical, if TRUE returns value, Default: TRUE
+#' @rdname MultiGrep
+#' @export
+
+MultiGrep <- function (find, from , remove = NULL , value = TRUE) {
+
+ find <- TrimSplit(find)
+ remove <- TrimSplit(remove)
+
+ found <- grep(paste(sprintf("(?=.*%s)",find), collapse=""),
+ from, perl = TRUE , value=value)
+
+ if (length(remove)) {
+ remove.find <- if (value) found else from[found]
+ remove <- unique(unlist(lapply(remove, function (x) {
+ grep(paste(sprintf("(?=.*\\b%s\\b)",x), collapse=""),
+ remove.find, perl = TRUE)
+ })))
+ found <- found[-remove]
+ }
+
+ return (found)
+
}
\ No newline at end of file
diff --git a/R/mcmc.R b/R/mcmc.R
index e4b7f2b..7eebb1a 100644
--- a/R/mcmc.R
+++ b/R/mcmc.R
@@ -6,15 +6,17 @@
#' @param data.list list of data
#' @param initial.list initial values for analysis, Default: list()
#' @param run.contrasts logical, indicating whether or not to run contrasts, Default: FALSE
-#' @param use.contrast choose from "between", "within" and "mixed". Between compare groups at different conditions. Within compare a group at different conditions. Mixed compute all comparisons.
+#' @param use.contrast choose from "between", "within" and "mixed". Between compare groups at different conditions. Within compare a group at different conditions. Mixed compute all comparisons, Default: "between"
#' @param contrasts define contrasts to use for analysis (defaults to all) , Default: NULL
#' @param run.ppp logical, indicating whether or not to conduct ppp analysis, Default: FALSE
+#' @param k.ppp run ppp for every kth length of MCMC chains, Default: 10
#' @param n.data sample size for each parameter
#' @param credible.region summarize uncertainty by defining a region of most credible values (e.g., 95 percent of the distribution), Default: 0.95
#' @param save.data logical, indicating whether or not to save data, Default: FALSE
#' @param ROPE define range for region of practical equivalence (e.g., c(-0.05 , 0.05), Default: NULL
#' @param merge.MCMC logical, indicating whether or not to merge MCMC chains, Default: FALSE
#' @param run.diag logical, indicating whether or not to run diagnostics, Default: FALSE
+#' @param param.diag define parameters to use for diagnostics, default equals all parameters, Default: NULL
#' @param sep symbol to separate data (e.g., comma-delimited), Default: ','
#' @param monochrome logical, indicating whether or not to use monochrome colors, else use \link[bfw]{DistinctColors}, Default: TRUE
#' @param plot.colors range of color to use, Default: c("#495054", "#e3e8ea")
@@ -49,21 +51,23 @@
#' @importFrom MASS mvrnorm
#' @importFrom utils write.table
RunMCMC <- function(jags.model,
- params,
+ params = NULL,
name.list,
data.list,
- initial.list,
- run.contrasts,
- use.contrast,
- contrasts,
- run.ppp,
+ initial.list = list(),
+ run.contrasts = FALSE,
+ use.contrast = "between",
+ contrasts = NULL,
+ run.ppp = FALSE,
+ k.ppp = 10,
n.data,
- credible.region,
+ credible.region = 0.95,
save.data = FALSE,
- ROPE,
- merge.MCMC,
- run.diag,
- sep,
+ ROPE = NULL,
+ merge.MCMC = FALSE,
+ run.diag = FALSE,
+ param.diag = NULL,
+ sep = ",",
monochrome = TRUE,
plot.colors = c("#495054", "#e3e8ea"),
graphic.type = "pdf",
@@ -100,24 +104,7 @@ RunMCMC <- function(jags.model,
# Add jags seed to initial list
initial.list <- c(initial.list, .RNG.seed = jags.seed)
-
- # Number of adapting steps
- if (!length(adapt.steps)) adapt.steps <- max ( (saved.steps * 5) / 100 , 2000 )
- # Number of burn-in steps
- if (!length(burnin.steps)) burnin.steps <- max ( (saved.steps * 7.5) / 100 , 3000 )
-
- # Runjags options
- # Disable redundant warnings
- try( runjags::runjags.options( inits.warning=FALSE , rng.warning=FALSE ) )
- # Set runjags method and number of chains
- if (!length(jags.method)) {
- detect.cores <- parallel::detectCores()
- jags.method <- if ( !is.finite(detect.cores || detect.cores < 4) ) "simple" else "parallel"
- }
- if (!length(jags.chains)) {
- jags.chains <- if ( detect.cores >= 4 ) 4 else detect.cores
- }
-
+
# Number of samples
n.samples <- ceiling(saved.steps / jags.chains)
@@ -153,8 +140,13 @@ RunMCMC <- function(jags.model,
data.MCMC <- merge.MCMC
}
+ # Clean up working memory
+ RemoveGarbage("jags.data")
+
# Treat results as matrix for further examination
matrix.MCMC <- as.matrix(data.MCMC)
+ # Sort matrix by column names
+ if (ncol(matrix.MCMC)>1) matrix.MCMC <- matrix.MCMC[, order(colnames(matrix.MCMC))]
# Append bracket (ie., [1] ) for naming purposes
colnames(matrix.MCMC) <- unlist(lapply(colnames(matrix.MCMC), function (x) if(regexpr('\\[', x)[1]<0) paste0(x,"[1]") else x ))
@@ -163,12 +155,19 @@ RunMCMC <- function(jags.model,
# Diagnostics
if (run.diag) {
cat("\nConducting diagnostics. Please wait.\n")
+
diag.start.time <- Sys.time()
- diag.length <- length(coda::varnames(data.MCMC))
+ if (length(param.diag)) {
+ param.diag <- grep(paste(TrimSplit(param.diag),collapse="|"),
+ coda::varnames(data.MCMC), value = TRUE)
+ } else {
+ param.diag <- coda::varnames(data.MCMC)
+ }
+ diag.length <- length(param.diag)
diag.plots <- lapply(1:diag.length, function(i) {
- x <- coda::varnames(data.MCMC)[i]
-
+ x <- param.diag[i]
+
if (stats::sd(matrix.MCMC[,x]) > 0) {
diag <- DiagMCMC(data.MCMC = data.MCMC,
par.name = x,
@@ -183,20 +182,18 @@ RunMCMC <- function(jags.model,
ETA(diag.start.time , i , diag.length)
return (diag)
-
+
} )
-
+
# Adding names to diagnostics
- names(diag.plots) <- coda::varnames(data.MCMC)
+ names(diag.plots) <- param.diag
if (save.data) {
cat("\nAdjusting and saving diagnostic plots. Please wait.\n")
- diag.plots <- diag.plots[lapply(diag.plots,length)>1]
-
# Save plots as PowerPoint, Default is raster graphics.
## Change vector.graphic to TRUE if needed (not recommended)
- ParsePlot(diag.plots,
+ ParsePlot(diag.plots[lapply(diag.plots,length)>1],
project.dir = paste0(project.dir,"Diagnostics/"),
project.name = project.name,
graphic.type = graphic.type,
@@ -218,43 +215,57 @@ RunMCMC <- function(jags.model,
cat("\nComputing contrasts. Please wait.\n")
- # Createsum to zero contrasts for mettric and nominal models
+ # Create sum-to-zero contrasts for metric and nominal models
if (model.type == "Metric" | model.type == "Nominal") {
sum.zero <- SumToZero(q.levels, matrix.MCMC, contrasts)
- matrix.MCMC <- cbind(matrix.MCMC, sum.zero)
}
# Add odds (and odds-ratios/cohen's d) for nominal models
if (model.type == "Nominal") {
-
+
+ # Create expected and observed nominal and proportions data
+ expected.observed <- MatrixCombn(matrix.MCMC,
+ "o,o,e,e",
+ "NULL,p,NULL,p",
+ q.levels,
+ rm.last = FALSE,
+ row.means=FALSE)
+
+ # Remove expected and observed columns from MCMC matrix
+ matrix.MCMC <- matrix.MCMC[ , !colnames(matrix.MCMC) %in% colnames(expected.observed)]
+
contrast.type <- c("b","o")
- # Create matrix combiantions for models with multiple factors
- if (length(q.levels)>1) {
- odds <- MatrixCombn(matrix.MCMC, "o", lvl = q.levels, row.means=FALSE)
- expected.observed <- MatrixCombn(matrix.MCMC, "e,o", "p", q.levels, row.means=FALSE)
- matrix.MCMC <- cbind(matrix.MCMC, cbind(odds, expected.observed))
- }
+ contrast.data <- list(sum.zero, expected.observed)
+
}
+
# Add effect size cohen's d for metric model
if (model.type == "Metric") {
+
+ # Create mean difference data
+ mean.diff <- MatrixCombn(matrix.MCMC, "m,s", q.levels = q.levels, rm.last = FALSE)
+
+ # Remove mean difference columns from MCMC matrix
+ matrix.MCMC <- matrix.MCMC[ , !colnames(matrix.MCMC) %in% colnames(mean.diff)]
+
contrast.type <- c("b","m")
- # Create matrix combiantions for models with multiple factors
- if (length(q.levels)>1) {
- mean.diff <- MatrixCombn(matrix.MCMC, "m,s", lvl = q.levels)
- matrix.MCMC <- cbind(matrix.MCMC, mean.diff)
- }
+ contrast.data <- list(sum.zero, mean.diff)
}
-
- # Run contrasts and add contrasts to data matrix
- done.contrasts <- do.call(cbind, lapply(contrast.type, function (x) {
- RunContrasts(x , q.levels , use.contrast , contrasts , matrix.MCMC , job.names)
+
+ # Run contrasts
+ contrasts <- do.call(cbind, lapply(1:length(contrast.type), function (i) {
+ RunContrasts(contrast.type[[i]],
+ q.levels,
+ use.contrast,
+ contrasts,
+ contrast.data[[i]],
+ job.names)
}))
- matrix.MCMC <- cbind(matrix.MCMC, done.contrasts)
}
# Add R^2 if regression
if (model.type == "Regression") {
-
+
x <- data.list$x
n.x <- data.list$n.x
y <- data.list$y
@@ -276,19 +287,15 @@ RunMCMC <- function(jags.model,
adjusted.rob.r <- as.matrix(1 - (1-rob.r.squared) * ( (n-1) / (n -(q+1)) ))
# Add names
- colnames(r.squared) <- sprintf("R^2 (block: %s)",i)
- colnames(rob.r.squared) <- sprintf("Robust R^2 (block: %s)",i)
- colnames(adjusted.r) <- sprintf("Adjusted R^2 (block: %s)",i)
- colnames(adjusted.rob.r) <- sprintf("Adjusted Robust R^2 (block: %s)",i)
-
+ colnames(r.squared) <- if (q>1) sprintf("R^2 (block: %s)",i) else "R^2"
+ colnames(rob.r.squared) <- if (q>1) sprintf("Robust R^2 (block: %s)",i) else "Robust R^2"
+ colnames(adjusted.r) <- if (q>1) sprintf("Adjusted R^2 (block: %s)",i) else "Adjusted R^2"
+ colnames(adjusted.rob.r) <- if (q>1) sprintf("Adjusted Robust R^2 (block: %s)",i) else "Adjusted Robust R^2"
+
# Return as matrix
cbind(r.squared , rob.r.squared , adjusted.r , adjusted.rob.r)
}))
-
- # Add R^2 to MCMC matrix
- matrix.MCMC <- cbind(matrix.MCMC, r.squared)
-
}
# Add PPP if SEM/CFA
@@ -299,12 +306,14 @@ RunMCMC <- function(jags.model,
n <- data.list$n
ppp.start.time <- Sys.time()
- cov.mat <- stats::cov(y)
pppv <- 0
+ jags.ppp <- sample(1:nrow(matrix.MCMC), nrow(matrix.MCMC)/k.ppp)
+
+ cov.mat <- stats::cov(y)
cat("\nComputing PPP-value, please wait (it may take some time).\n")
- PPP <- lapply(1:nrow(matrix.MCMC), function (i) {
+ PPP <- lapply(1:length(jags.ppp), function (i) {
- x <- matrix.MCMC[i,]
+ x <- matrix.MCMC[jags.ppp[i],]
# Epsilon/Error variance matrix
eps.pos <- grep("error", names(x))
eps.length <- length(eps.pos)
@@ -318,12 +327,12 @@ RunMCMC <- function(jags.model,
lam.pos <- grep("lam", names(x))
lam.length <- length(lam.pos)
- lambda <- do.call( rbind, lapply(1:lam.length, function (i) c(factor.seq[i], x[lam.pos][i]) ) )
- lam.matrix <- matrix( unlist ( lapply(1:lat, function (i) {
- if (i < max(lat)) {
- c ( lambda[ lambda[,1] == i , 2], rep(0,lam.length) )
+ lambda <- do.call( rbind, lapply(1:lam.length, function (j) c(factor.seq[j], x[lam.pos][j]) ) )
+ lam.matrix <- matrix( unlist ( lapply(1:lat, function (j) {
+ if (j < max(lat)) {
+ c ( lambda[ lambda[,1] == j , 2], rep(0,lam.length) )
} else {
- lambda[ lambda[,1] == i , 2]
+ lambda[ lambda[,1] == j , 2]
}
} ) ), lam.length, lat)
@@ -340,11 +349,11 @@ RunMCMC <- function(jags.model,
sim.fit <- (n - 1) * (log(det(pred.sigma)) + sum(diag(solve(pred.sigma) %*% sim.sigma)) - log(det(sim.sigma)) - eps.length)
# Compute PPP-value
- if (pred.fit <= sim.fit) pppv <<- pppv + 1
+ pppv <<- if (pred.fit < sim.fit) pppv + 1 else if (pred.fit == sim.fit) pppv + 0.5 else pppv
PPP <- as.numeric(pppv / i)
- ETA(ppp.start.time , i , nrow(matrix.MCMC))
-
+ ETA(ppp.start.time , i , length(jags.ppp))
+
# Create matrix with chi-square, discrepancy between predicted and simulated data and PPP
PPP <- as.data.frame(t(
c("Fit (Predicted)" = pred.fit,
@@ -358,44 +367,63 @@ RunMCMC <- function(jags.model,
} )
if (requireNamespace("plyr", quietly = TRUE)) {
- matrix.MCMC <- cbind(matrix.MCMC, plyr::rbind.fill(PPP))
+ PPP <- plyr::rbind.fill(PPP)
} else {
- matrix.MCMC <- cbind(matrix.MCMC, do.call(rbind,PPP))
+ PPP <- do.call(rbind,PPP)
}
}
- # Find params from MCMC list
- params <- colnames(matrix.MCMC)
+ # create list of matrices to summarize
+ list.MCMC <- list(
+ matrix = matrix.MCMC,
+ PPP = if (exists("PPP")) PPP,
+ r.squared = if (exists("r.squared")) r.squared,
+ sum.zero = if (exists("sum.zero")) sum.zero,
+ count.data = if (exists("expected.observed")) expected.observed,
+ mean.difference = if (exists("mean.diff")) mean.diff,
+ contrasts = if (exists("contrasts")) contrasts
+ )
+
+ # Remove empty elements from the list of matrices
+ list.MCMC <- Filter(length, list.MCMC)
- # Create final posterior parameter indicies
- summary.start.time <- Sys.time()
- cat("\nSummarizing data of each parameter. Please wait.\n")
- summary.MCMC <- do.call(rbind,lapply(1:length(params), function(i) {
+ summary.MCMC <- do.call(rbind,lapply(1:length(list.MCMC), function (k) {
- summary <- SumMCMC( par = matrix.MCMC[, params[i]] ,
- par.names = params[i],
- job.names = job.names,
- job.group = job.group,
- n.data = n.data,
- credible.region = credible.region,
- ROPE = ROPE
- )
+ # Find params from MCMC list
+ params <- colnames(list.MCMC[[k]])
- ETA(summary.start.time , i , length(params))
-
- return (summary)
+ # Create final posterior parameter indicies
+ summary.start.time <- Sys.time()
+ summary.cat <- sprintf("\nSummarizing data for each parameter in %s. Please wait.\n" ,
+ gsub("\\."," ",names(list.MCMC)[[k]]) )
+ cat(summary.cat)
+ do.call(rbind,lapply(1:length(params), function(i) {
- } ) )
-
+ summary <- SumMCMC( par = list.MCMC[[k]][, params[i]] ,
+ par.names = params[i],
+ job.names = job.names,
+ job.group = job.group,
+ n.data = n.data,
+ credible.region = credible.region,
+ ROPE = ROPE
+ )
+
+ ETA(summary.start.time , i , length(params))
+
+ return (summary)
+
+ }))
+ }))
+
# Display completion and running time
stop.time <- Sys.time()
total.time <- capture.output(difftime(stop.time, start.time))
cat(format(stop.time,"\nCompleted at %d.%m.%Y - %H:%M:%S\n"),
gsub("Time difference of","Running time:",total.time),"\n", sep="")
-
+
# Create MCMC and summary list (without chain information)
final.MCMC <- list( raw.MCMC = data.MCMC,
- matrix.MCMC = matrix.MCMC,
+ matrix.MCMC = do.call(cbind,list.MCMC),
summary.MCMC = summary.MCMC,
name.list = name.list,
initial.list = initial.list,
@@ -418,11 +446,27 @@ RunMCMC <- function(jags.model,
MCMC.file.name <- paste0(project.dir,"MCMC/",project.name,".rds")
saveRDS( final.MCMC , file = MCMC.file.name, compress="bzip2")
+ # Create meta data
+ meta.final.MCMC <- list(summary.MCMC = summary.MCMC,
+ name.list = name.list,
+ initial.list = initial.list,
+ data.list = data.list,
+ jags.model = jags.model,
+ run.time = c(start.time,stop.time)
+ )
+
+ # Save meta data
+ MCMC.file.name <- sub('-', '-META-', MCMC.file.name)
+ saveRDS( meta.final.MCMC , file = MCMC.file.name, compress="bzip2")
+
# Append to final MCMC list
final.MCMC <- c(final.MCMC,
data.file.name = data.file.name,
MCMC.file.name = MCMC.file.name)
}
-
+
+ # Clear up working memory
+ RemoveGarbage("data.MCMC,list.MCMC")
+
return (final.MCMC)
}
diff --git a/R/mcmc_misc.R b/R/mcmc_misc.R
index f65bf2a..f9bcaa6 100644
--- a/R/mcmc_misc.R
+++ b/R/mcmc_misc.R
@@ -124,39 +124,45 @@ ContrastNames <- function(items , job.names , col.names) {
#' @title Matrix Combinations
#' @description Create matrices from combinations of columns
-#' @param m matrix to combine
-#' @param s stem first name of columns to use (e.g., "m" for mean)
-#' @param p stem last name of columns to use (e.g., "p" for proportions)
-#' @param lvl number of levels per column
+#' @param matrix matrix to combine
+#' @param first.stem first name of columns to use (e.g., "m" for mean)
+#' @param last.stem optional last name of columns to use (e.g., "p" for proportions), Default: NULL
+#' @param q.levels number of levels per column
#' @param rm.last logical, indicating whether or not to remove last combination (i.e., m1m2m3m4) , Default: TRUE
#' @param row.means logical, indicating whether or not to compute row means from combined columns, else use row sums, Default: TRUE
#' @rdname MatrixCombn
#' @export
-MatrixCombn <- function(m , s, p = NULL, lvl, rm.last=TRUE, row.means=TRUE) {
- s <- TrimSplit(s)
- grid <- expand.grid(lapply(lvl, function (x) seq(x)))
+MatrixCombn <- function(matrix , first.stem, last.stem = NULL, q.levels, rm.last=TRUE, row.means=TRUE) {
+ first.stem <- TrimSplit(first.stem)
+ last.stem <- TrimSplit(last.stem)
+ grid <- expand.grid(lapply(q.levels, function (x) seq(x)))
q <- ncol(grid)
matrix.list <- lapply(seq(q-as.numeric(rm.last)), function (i) {
q.combn <- t(combn(as.numeric(paste0(seq(q))),i))
q.combn <- split(q.combn, 1:nrow(q.combn))
lapply(q.combn, function (x) {
- cols <- expand.grid(lapply(x, function (j) seq(lvl[[j]] ) ) )
+ cols <- expand.grid(lapply(x, function (j) seq(q.levels[[j]] ) ) )
colnames(cols) <- c(x)
lapply(1:nrow(cols), function (k) {
col <- paste(sprintf("grid[,%s]==%s",colnames(cols),cols[k,]),collapse="&")
- lapply(s, function (y) {
+ lapply(1:length(first.stem), function (l) {
- s <- paste0(paste0(y,seq(q),collapse=""),p)
- s.names <- colnames(m[, grep(paste0("\\b",s,"\\b"),colnames(m))])
+ if (length(last.stem) >= l) {
+ last.stem <- if (tolower(last.stem[[l]]) != "null") last.stem[[l]]
+ } else {
+ last.stem <- NULL
+ }
+ new.stem <- paste0( paste0(first.stem[[l]], seq(q), collapse=""), last.stem )
+ s.names <- colnames(matrix[, grep(paste0("\\b", new.stem, "\\b"), colnames(matrix))])
- new.col <- as.matrix(m[,s.names[eval(parse(text=paste0(col)))]])
+ new.col <- as.matrix(matrix[,s.names[eval(parse(text=paste0(col)))]])
if (ncol(new.col)>1) {
new.col <- if (row.means) rowMeans(new.col) else rowSums(new.col)
}
new.col <- as.matrix(new.col)
- new.colname <- paste0(paste0(y,colnames(cols),collapse=""),p)
+ new.colname <- paste0(paste0(first.stem[[l]],colnames(cols),collapse=""),last.stem)
new.colname <- sprintf("%s[%s]",new.colname,paste(cols[k,],collapse=","))
colnames(new.col) <- new.colname
diff --git a/R/mcmc_summary.R b/R/mcmc_summary.R
index 551916f..9f89153 100644
--- a/R/mcmc_summary.R
+++ b/R/mcmc_summary.R
@@ -19,7 +19,7 @@ SumMCMC <- function(par,
job.names = NULL,
job.group = NULL,
credible.region = 0.95,
- ROPE,
+ ROPE = NULL,
n.data,
...
) {
diff --git a/R/plot_circlize.R b/R/plot_circlize.R
index 04cea27..d6250de 100644
--- a/R/plot_circlize.R
+++ b/R/plot_circlize.R
@@ -1,7 +1,6 @@
#' @title Circlize Plot
#' @description Create a circlize plot
-#' @param category.items named items for circlize plot
-#' @param category.selects selected data for ciclize plot
+#' @param data data for circlize plot
#' @param category.spacing spacing between category items , Default: 1.25
#' @param category.inset inset of category items form plot , Default: c(-0.5, 0)
#' @param monochrome logical, indicating whether or not to use monochrome colors, else use \link[bfw]{DistinctColors}, Default: TRUE
@@ -20,14 +19,14 @@
#' @importFrom grDevices dev.new recordPlot dev.off
#' @importFrom graphics legend
-PlotCirclize <- function (category.items,
- category.selects,
+PlotCirclize <- function (data,
category.spacing = 1.2,
category.inset = c(-0.4, 0),
monochrome = TRUE,
plot.colors = c("#CCCCCC", "#DEDEDE"),
font.type = "serif") {
-
+
+
# Check if circlize is installed
if (!requireNamespace("circlize", quietly = TRUE)) {
@@ -35,6 +34,14 @@ PlotCirclize <- function (category.items,
call. = FALSE)
}
+ # Clear circlize
+ circlize::circos.clear()
+
+ # Fetch category, items and selects
+ category <- data$category
+ category.items <- data$category.items
+ category.selects <- data$category.selects
+
# Set 0 as missing
category.selects[category.selects == 0] <- NA
# Remove missing
@@ -111,7 +118,7 @@ PlotCirclize <- function (category.items,
# Legend title
legend.title <- list(
bquote(
- bold(.(sprintf("%s (%s)", category.items, nrow(category.selects))))
+ bold(.(sprintf("%s (%s)", category, nrow(category.selects))))
)
)
diff --git a/R/plot_mean.R b/R/plot_mean.R
index 64f5344..1d1e68d 100644
--- a/R/plot_mean.R
+++ b/R/plot_mean.R
@@ -10,6 +10,7 @@
#' @param ribbon.plot logical, indicating whether or not to use ribbon plot for HDI, Default: TRUE
#' @param y.text label on y axis, Default: 'Score'
#' @param x.text label on x axis, Default: NULL
+#' @param remove.x logical, indicating whether or not to show x.axis information, Default: FALSE
#' @seealso
#' \code{\link[ggplot2]{ggproto}},
#' \code{\link[ggplot2]{ggplot2-ggproto}},
@@ -49,7 +50,8 @@ PlotMean <- function (data,
y.split = FALSE ,
ribbon.plot = TRUE,
y.text = "Score",
- x.text = NULL) {
+ x.text = NULL,
+ remove.x = FALSE) {
# Check if ggplots is installed
if (!requireNamespace("ggplot2", quietly = TRUE) |
@@ -200,7 +202,7 @@ PlotMean <- function (data,
# Lists of variables to plot
plot.variables <- lapply(1:max(plot.sequence), function (i) matrix(which(plot.sequence %in% i) ) )
# Create data frame with group indices and Bayesian statistics
- plot.data <- plyr::rbind.fill(lapply(1:q, function (i) {
+ plot.data <- do.call(rbind,lapply(1:q, function (i) {
x.names <- if (!i %in% y.position) x.names[x.groups[i]] else NA
sub.names <- if (!i %in% y.position) sub.names[(repeated.position-1)[i]] else NA
@@ -272,19 +274,19 @@ PlotMean <- function (data,
if ( (run.split & !y.split & !run.repeated) | !run.repeated ) job.names <- y.names
if (run.repeated & run.split) {
- plotTitle <- paste(job.title, "by", x.names)
+ plot.title <- paste(job.title, "by", x.names)
label.groups <- y.names
} else if (all(is.na(sub.names))) {
- plotTitle <- job.title
+ plot.title <- job.title
label.groups <- unique(y.names)
} else if (all(sub.names[1] == sub.names) & run.repeated) {
- plotTitle <- sprintf("%s by %s [%s]",job.title,x.names,sub.names)
+ plot.title <- sprintf("%s by %s [%s]",job.title,x.names,sub.names)
label.groups <- unique(y.names)
} else if (all(sub.names[1] == sub.names)) {
- plotTitle <- sprintf("%s [%s]", x.names,sub.names)
+ plot.title <- sprintf("%s [%s]", x.names,sub.names)
label.groups <- unique(y.names)
} else {
- plotTitle <- paste(job.names, "by", x.names)
+ plot.title <- paste(job.names, "by", x.names)
label.groups <- unique(sub.names)
}
@@ -411,7 +413,7 @@ PlotMean <- function (data,
{if (run.split) geom_errorbar(aes(x=x.pos2, ymin = sd.lower.mode2, ymax = sd.lower.mode2), lwd=0.05, width = 0.1)}+
{if (!run.split | run.repeated & run.split) scale_x_discrete(labels=x.ticks)}+
scale_fill_manual(labels = label.groups , values = plot.colors)+
- ggplot2::labs(title=plotTitle ,x=x.text, y = y.text)+
+ ggplot2::labs(title = plot.title , x = x.text, y = y.text)+
theme(legend.position="top",
legend.key.size = grid::unit(0.75, 'lines'),
legend.key = element_rect(size = 1),
@@ -424,7 +426,7 @@ PlotMean <- function (data,
panel.grid.major.y = element_line( size=.01, color="lightgrey"),
axis.title.x=element_blank())+
{if (!run.split) theme(legend.position = "none")}+
- {if (run.split & !run.repeated) theme(axis.text.x=element_blank(),
+ {if (run.split & !run.repeated | remove.x) theme(axis.text.x=element_blank(),
axis.ticks.x=element_blank())}
)
diff --git a/R/plot_param.R b/R/plot_param.R
new file mode 100644
index 0000000..24d0349
--- /dev/null
+++ b/R/plot_param.R
@@ -0,0 +1,203 @@
+#' @title Plot Param
+#' @description Create a density plot with parameter values
+#' @param data MCMC data to plot
+#' @param param parameter of interest
+#' @param ROPE plot ROPE values, Default: FALSE
+#' @param monochrome logical, indicating whether or not to use monochrome colors, else use \link[bfw]{DistinctColors}, Default: TRUE
+#' @param plot.colors range of color to use, Default: c("#495054", "#e3e8ea")
+#' @param font.type font type used for visualizations, Default: 'serif'
+#' @param font.size font size, Default: 4.5
+#' @param rope.line size of ROPE line, Default: -0.2
+#' @param rope.tick distance to ROPE tick, Default: -0.1
+#' @param rope.label distance to ROPE label, Default: -0.35
+#' @param line.size overall line size, Default: 0.5
+#' @param dens.zero.col colour of line indicating zero, Default: 'black'
+#' @param dens.mean.col colour of line indicating mean value, Default: 'white'
+#' @param dens.median.col colour of line indicating median value, Default: 'white'
+#' @param dens.mode.col colour of line indicating mode value, Default: 'black'
+#' @param dens.rope.col colour of line indicating ROPE value, Default: 'black'
+#' @return Density plot of parameter values
+#' @seealso
+#' \code{\link[magrittr]{`%>%`}}
+#' \code{\link[dplyr]{mutate}},\code{\link[dplyr]{group_by}},\code{\link[dplyr]{join}},\code{\link[dplyr]{select}},\code{\link[dplyr]{slice}},\code{\link[dplyr]{filter}}
+#' \code{\link[stats]{approxfun}}
+#' \code{\link[ggplot2]{aes}},\code{\link[ggplot2]{margin}},\code{\link[ggplot2]{geom_density}},\code{\link[ggplot2]{geom_polygon}},\code{\link[ggplot2]{geom_segment}},\code{\link[ggplot2]{geom_label}},\code{\link[ggplot2]{ggplot}},\code{\link[ggplot2]{ggplot_build}},\code{\link[ggplot2]{scale_continuous}},\code{\link[ggplot2]{theme}},\code{\link[ggplot2]{labs}}
+#' @rdname PlotParam
+#' @export
+#' @importFrom stats approx
+
+PlotParam <- function (data,
+ param,
+ ROPE = FALSE,
+ monochrome = TRUE,
+ plot.colors = c("#495054", "#e3e8ea"),
+ font.type = "serif",
+ font.size = 4.5,
+ rope.line = -0.2,
+ rope.tick = -0.1,
+ rope.label = -0.35,
+ line.size = 0.5,
+ dens.zero.col = "black",
+ dens.mean.col = "white",
+ dens.median.col = "white",
+ dens.mode.col = "black",
+ dens.rope.col = "black") {
+
+ # Check if ggplots is installed
+ if (!requireNamespace("ggplot2", quietly = TRUE) |
+ !requireNamespace("magrittr", quietly = TRUE) |
+ !requireNamespace("dplyr", quietly = TRUE)) {
+ stop("Packages \"ggplot2\", \"magrittr\", and \"dplyr\" are needed for this function to work. Please install them.",
+ call. = FALSE)
+ }
+
+ # Define import elements
+ `%>%` <- magrittr::`%>%`
+ mutate <- dplyr::mutate
+ group_by <- dplyr::group_by
+ left_join <- dplyr::left_join
+ select <- dplyr::select
+ slice <- dplyr::slice
+ approx <- stats::approx
+ filter <- dplyr::filter
+ aes <- ggplot2::aes
+ element_blank <- ggplot2::element_blank
+ element_line <- ggplot2::element_line
+ element_text <- ggplot2::element_text
+ geom_density <- ggplot2::geom_density
+ geom_polygon <- ggplot2::geom_polygon
+ geom_segment <- ggplot2::geom_segment
+ geom_text <- ggplot2::geom_text
+ ggplot <- ggplot2::ggplot
+ ggplot_build <- ggplot2::ggplot_build
+ scale_x_continuous <- ggplot2::scale_x_continuous
+ theme <- ggplot2::theme
+ HDIhi <- NULL
+ HDIlo <- NULL
+ Mean <- NULL
+ Median <- NULL
+ Mode <- NULL
+ ROPEhi <- NULL
+ ROPEin <- NULL
+ ROPElo <- NULL
+ ROPEmax <- NULL
+ ROPEmin <- NULL
+ dens.mean <- NULL
+ dens.median <- NULL
+ dens.mode <- NULL
+ dens.rope.max <- NULL
+ dens.rope.min <- NULL
+ dens.zero <- NULL
+ var <- NULL
+ x <- NULL
+ y <- NULL
+
+ data.matrix <- data$matrix.MCMC
+ param.col <- MultiGrep(param, rownames(data$summary.MCMC), value = FALSE)
+ param <- TrimSplit(param)[[2]]
+
+ raw.data <- data.frame(data.matrix[, param.col])
+ colnames(raw.data) <- "var"
+ raw.data$param <- param
+
+ summary <- as.data.frame(t(data$summary.MCMC[ param.col , ]))
+ use.cols <- c("Mean", "Median", "Mode", "HDIlo", "HDIhi", "ROPEmin", "ROPEmax", "ROPElo", "ROPEhi", "ROPEin")
+ summary <- summary[colnames(summary) %in% use.cols]
+ summary$Min <- min(raw.data$var)
+ summary$Max <- max(raw.data$var)
+ summary$param <- param
+
+ dens.data <- suppressMessages(ggplot_build(ggplot(raw.data, aes(x=var, colour=param)) +
+ geom_density())$data[[1]] %>%
+ mutate(param = summary$param) %>%
+ left_join(summary) %>%
+ select(y, x, Mean, Median, Mode, HDIlo, HDIhi, ROPEmin, ROPEmax, ROPElo, ROPEhi, ROPEin, Max, Min, param) %>%
+ mutate(dens.zero = approx(x, y, xout = 0)[[2]],
+ dens.mean = approx(x, y, xout = Mean)[[2]],
+ dens.median = approx(x, y, xout = Median)[[2]],
+ dens.mode = approx(x, y, xout = Mode)[[2]],
+ dens.rope.min = approx(x, y, xout = ROPEmin)[[2]],
+ dens.rope.max = approx(x, y, xout = ROPEmax)[[2]]) %>%
+ select(-y, -x) %>%
+ slice(1))
+
+
+ dens.data[is.na(dens.data)] <- dens.data$dens.mode
+
+ # Create area for hdi in density plot
+ hdi.ribbon <- suppressMessages(ggplot_build(ggplot(raw.data, aes(x=var, colour=param)) +
+ geom_density())$data[[1]] %>%
+ mutate(param = summary$param) %>%
+ left_join(dens.data) %>%
+ group_by(param) %>%
+ filter(x >= HDIlo & x <= HDIhi) %>%
+ select(param, x, y))
+
+ # Add zero distribution to ribbon
+ hdi.ribbon <- rbind(data.frame(param = summary$param, x = summary$HDIlo, y = 0),
+ as.data.frame(hdi.ribbon),
+ data.frame(param = summary$param, x = summary$HDIhi, y = 0))
+
+ if (ROPE) {
+ Min <- dens.data$ROPEmin
+ Max <- dens.data$ROPEmax
+ } else {
+ Min <- 0
+ Max <- 0
+ }
+
+ dens.data$Min <- min(dens.data$Min,Min)
+ dens.data$Max <- max(dens.data$Max,Max)
+ font.size.pts <- font.size/0.352777778
+
+ plot <- ggplot() +
+ ggplot2::labs(title = summary$param , x = "Parameter value", y = "Density")+
+ scale_x_continuous(breaks = round(seq(dens.data$Min, dens.data$Max, by = 0.05),1)) +
+ theme(plot.title = element_text(hjust = 0.5),
+ panel.background = element_blank(),
+ panel.grid.major.y = element_line(size=.1, color="grey"),
+ text=element_text(family=font.type, size = font.size.pts),
+ legend.position="none") +
+ geom_density(data = raw.data, aes(x = var), fill = "grey" , alpha = .7) +
+ geom_polygon(data = hdi.ribbon, aes(x = x, y = y), fill = "black", alpha = .4) +
+ geom_segment(data = dens.data, aes(x = 0, xend = 0, y = 0, yend = dens.zero),
+ color = dens.zero.col , linetype = "dotted", size = line.size) +
+ geom_segment(data = dens.data, aes(x = Mean, xend = Mean, y = 0, yend = dens.mean),
+ color =dens.mean.col , linetype = "solid", size = line.size) +
+ geom_segment(data = dens.data, aes(x = Median, xend = Median, y = 0, yend = dens.median),
+ color = dens.median.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = Mode, xend = Mode, y = 0, yend = dens.mode),
+ color = dens.mode.col , linetype = "solid", size = line.size)
+
+
+ if (ROPE) {
+
+ plot <- plot + geom_segment(data = dens.data, aes(x = ROPEmin, xend = ROPEmin, y = 0, yend = dens.rope.min),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = ROPEmax, xend = ROPEmax, y = 0, yend = dens.rope.max),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = Min, xend = ROPEmin, y = rope.line , yend = rope.line),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = ROPEmin, xend = ROPEmax, y = rope.line , yend = rope.line),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = ROPEmax, xend = Max, y = rope.line , yend = rope.line),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = ROPEmin, xend = ROPEmin, y = rope.line, yend = rope.tick),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = ROPEmax, xend = ROPEmax, y = rope.line, yend = rope.tick),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = Min, xend = Min, y = rope.line, yend = rope.tick),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_segment(data = dens.data, aes(x = Max, xend = Max, y = rope.line, yend = rope.tick),
+ colour = dens.rope.col , linetype = "dashed", size = line.size) +
+ geom_text(data = dens.data, aes(x = mean(c(Min, ROPEmin)),
+ y = rope.label, label = sprintf("%0.2f%%", ROPElo)), family = font.type , size = font.size) +
+ geom_text(data = dens.data, aes(x = mean(c(ROPEmin, ROPEmax)),
+ y = rope.label, label = sprintf("%0.2f%%", ROPEin)), family = font.type , size = font.size) +
+ geom_text(data = dens.data, aes(x = mean(c(ROPEmax, Max)),
+ y = rope.label, label = sprintf("%0.2f%%", ROPEhi)), family = font.type , size = font.size)
+ }
+
+ return (plot)
+
+}
\ No newline at end of file
diff --git a/R/plot_parse.R b/R/plot_parse.R
index af236e2..ff1a746 100644
--- a/R/plot_parse.R
+++ b/R/plot_parse.R
@@ -2,7 +2,7 @@
#' @description Display and/or save plots
#' @param plot.data a list of plots
#' @param project.dir define where to save data, Default: 'Results/'
-#' @param project.name define name of project, Default: 'FileName(name="Plot")'
+#' @param project.name define name of project, Default: 'FileName(name="Print")'
#' @param save.data logical, indicating whether or not to save data, Default: FALSE
#' @param graphic.type type of graphics to use (e.g., pdf, png, ps), Default: 'pdf'
#' @param plot.size size of plot, Default: '15,10'
@@ -363,7 +363,7 @@ ParsePlot <- function (plot.data,
if (font.type == "serif") font.type <- "Times New Roman"
# Select template
- template <- if (layout == "pt") "legacy" else "widescreen"
+ template <- if (layout == "pt") "legacy" else if (layout == "apa") "apa" else "widescreen"
template.file <- paste0(system.file(package = 'bfw'),"/extdata/templates/",template,".pptx")
# Create PowerPoint document
diff --git a/R/settings.R b/R/settings.R
index a0b5576..9069193 100644
--- a/R/settings.R
+++ b/R/settings.R
@@ -1,17 +1,5 @@
#' @title Settings
#' @description main settings for bfw
-#' @param y criterion variable(s), Default: NULL
-#' @param y.names optional names for criterion variable(s), Default: NULL
-#' @param x predictor variable(s), Default: NULL
-#' @param x.names optional names for predictor variable(s), Default: NULL
-#' @param latent latent variables, Default: NULL
-#' @param latent.names optional names for for latent variables, Default: NULL
-#' @param observed observed variable(s), Default: NULL
-#' @param observed.names optional names for for observed variable(s), Default: NULL
-#' @param additional supplemental parameters for fitted data (e.g., indirect pathways and total effect), Default: NULL
-#' @param additional.names optional names for supplemental parameters, Default: NULL
-#' @param x.steps define number of steps in hierarchical regression , Default: NULL
-#' @param x.blocks define which predictors are included in each step (e.g., for three steps "1,2,3") , Default: NULL
#' @param job.title title of analysis, Default: NULL
#' @param job.group for some hierarchical models with several layers of parameter names (e.g., latent and observed parameters), Default: NULL
#' @param jags.model specify which module to use
@@ -25,12 +13,6 @@
#' @param thinned.steps save every kth step of the original saved.steps, Default: 1
#' @param adapt.steps the number of adaptive iterations to use at the start of each simulation, Default: NULL
#' @param burnin.steps the number of burnin iterations, NOT including the adaptive iterations to use for the simulation, Default: NULL
-#' @param credible.region summarize uncertainty by defining a region of most credible values (e.g., 95 percent of the distribution), Default: 0.95
-#' @param ROPE define range for region of practical equivalence (e.g., c(-0.05 , 0.05), Default: NULL
-#' @param run.contrasts logical, indicating whether or not to run contrasts, Default: FALSE
-#' @param use.contrast choose from "between", "within" and "mixed". Between compare groups at different conditions. Within compare a group at different conditions. Mixed compute all comparisons
-#' @param contrasts define contrasts to use for analysis (defaults to all) , Default: NULL
-#' @param run.ppp logical, indicating whether or not to conduct ppp analysis, Default: FALSE
#' @param initial.list initial values for analysis, Default: list()
#' @param project.name name of project, Default: 'Project'
#' @param project.dir define where to save data, Default: 'Results/'
@@ -44,21 +26,7 @@
#' @param merge.MCMC logical, indicating whether or not to merge MCMC chains, Default: FALSE
#' @param run.diag logical, indicating whether or not to run diagnostics, Default: FALSE
#' @param sep symbol to separate data (e.g., comma-delimited), Default: ','
-#' @param monochrome logical, indicating whether or not to use monochrome colors, else use \link[bfw]{DistinctColors}, Default: TRUE
-#' @param plot.colors range of color to use, Default: c("#495054", "#e3e8ea")
-#' @param graphic.type type of graphics to use (e.g., pdf, png, ps), Default: 'pdf'
-#' @param plot.size size of plot, Default: '15,10'
-#' @param scaling scale size of plot, Default: 100
-#' @param plot.aspect aspect of plot, Default: NULL
-#' @param vector.graphic logical, indicating whether or not visualizations should be vector or raster graphics, Default: FALSE
-#' @param point.size point size used for visualizations, Default: 12
-#' @param font.type font type used for visualizations, Default: 'serif'
-#' @param one.file logical, indicating whether or not visualizations should be placed in one or several files, Default: TRUE
-#' @param ppi define pixel per inch used for visualizations, Default: 300
-#' @param units define unit of length used for visualizations, Default: 'in'
-#' @param layout define a layout size for visualizations, Default: 'a4'
-#' @param layout.inverse logical, indicating whether or not to inverse layout (e.g., landscape) , Default: FALSE
-#' @param silent logical, indicating whether or not analysis should be run silent, Default: FALSE
+#' @param silent logical, indicating whether or not to run analysis without output, Default: FALSE
#' @param ... further arguments passed to or from other methods
#' @return data from MCMC \link[bfw]{RunMCMC}
#' @details Settings act like the main framework for bfw, connecting function, model and JAGS.
@@ -67,19 +35,7 @@
#' @rdname bfw
#' @export
#' @importFrom utils tail modifyList capture.output
-bfw <- function(y = NULL,
- y.names = NULL,
- x = NULL,
- x.names = NULL,
- latent = NULL,
- latent.names = NULL,
- observed = NULL,
- observed.names = NULL,
- additional = NULL,
- additional.names = NULL,
- x.steps = NULL,
- x.blocks = NULL,
- job.title = NULL,
+bfw <- function(job.title = NULL,
job.group = NULL,
jags.model,
jags.seed = NULL,
@@ -92,12 +48,6 @@ bfw <- function(y = NULL,
thinned.steps = 1,
adapt.steps = NULL,
burnin.steps = NULL,
- credible.region = 0.95,
- ROPE = NULL,
- run.contrasts = FALSE,
- use.contrast = "between",
- contrasts = NULL,
- run.ppp = FALSE,
initial.list = list(),
project.name = "Project",
project.dir = "Results/",
@@ -111,20 +61,6 @@ bfw <- function(y = NULL,
merge.MCMC = FALSE,
run.diag = FALSE,
sep = ",",
- monochrome = TRUE,
- plot.colors = c("#495054", "#e3e8ea"),
- graphic.type = "pdf",
- plot.size = "15,10",
- scaling = 100,
- plot.aspect = NULL,
- vector.graphic = FALSE,
- point.size = 12,
- font.type = "serif",
- one.file = TRUE,
- ppi = 300,
- units = "in",
- layout = "a4",
- layout.inverse = FALSE,
silent = FALSE,
...
) {
@@ -195,6 +131,7 @@ bfw <- function(y = NULL,
# Select model function
stats.model <- eval(parse(text=paste0("Stats",model.type)))
}
+
# If custom jags model
if (length(custom.model)) {
model.name <- paste0("Custom JAGS model")
@@ -220,12 +157,14 @@ bfw <- function(y = NULL,
# Get arguments from model function
model.arguments <- TrimSplit(names(formals(stats.model)))
- # Create argument list
- model.arguments <- paste0(paste(model.arguments,
- model.arguments,sep="="),
- collapse=",")
+
+ # Create argument list not defined by user
+ model.arguments <- paste0(unlist(lapply(model.arguments, function (arg) {
+ if (exists(arg) & arg != "...") sprintf("%s = %s", arg , arg)
+ })), collapse = ",")
+
# Create data list from model from specified argument list
- stats.model <- eval(parse(text=sprintf("stats.model(%s)" , model.arguments)))
+ stats.model <- eval(parse(text=sprintf("stats.model(%s , ...)" , model.arguments)))
# assign attributes from stats.model
for (i in 1:length(stats.model)){
@@ -236,10 +175,33 @@ bfw <- function(y = NULL,
if (is.null(jags.seed)) jags.seed <- sample(1:10^6,1)
# Create save name
+ if (run.robust) {
+ job.title <- if (is.null(job.title)) "Robust" else paste0(job.title,"-Robust")
+ }
project.name <- FileName( project.name , data.set , model.type , job.title , time.stamp)
# Tidy up JAGS model
jags.model <- TidyCode(jags.model)
+
+ # Number of adapting steps
+ if (!length(adapt.steps)) adapt.steps <- max ( (saved.steps * 5) / 100 , 2000 )
+
+ # Number of burn-in steps
+ if (!length(burnin.steps)) burnin.steps <- max ( (saved.steps * 7.5) / 100 , 3000 )
+
+ # Runjags options
+
+ # Disable redundant warnings
+ try( runjags::runjags.options( inits.warning=FALSE , rng.warning=FALSE ) )
+
+  # Set runjags method and number of chains
+  detect.cores <- parallel::detectCores()
+  if (!length(jags.method)) {
+    jags.method <- if ( !is.finite(detect.cores) || detect.cores < 4 ) "simple" else "parallel"
+  }
+  if (!length(jags.chains)) {
+    jags.chains <- if ( detect.cores >= 4 ) 4 else detect.cores
+  }
# Create name list
tmp.list <- list(
@@ -277,31 +239,12 @@ bfw <- function(y = NULL,
initial.list = initial.list,
saved.steps = saved.steps,
thinned.steps = thinned.steps,
- run.contrasts = run.contrasts,
- use.contrast = use.contrast,
- contrasts = contrasts,
- run.ppp = run.ppp,
n.data = n.data,
- credible.region = credible.region,
- ROPE = ROPE,
merge.MCMC = merge.MCMC,
run.diag = run.diag,
sep = sep,
- monochrome = monochrome,
- plot.colors = plot.colors,
- graphic.type = graphic.type,
- plot.size = plot.size,
- scaling = scaling,
- plot.aspect = plot.aspect,
save.data = save.data,
- vector.graphic = vector.graphic,
- point.size = point.size,
- font.type = font.type,
- one.file = one.file,
- ppi = 300,
- units = units,
- layout = layout,
- layout.inverse = layout.inverse)
+ ...)
# Run MCMC
if (silent) {
diff --git a/R/stats_bernoulli.R b/R/stats_bernoulli.R
index 9f4d581..75fb7af 100644
--- a/R/stats_bernoulli.R
+++ b/R/stats_bernoulli.R
@@ -68,11 +68,11 @@
#' @rdname StatsBernoulli
#' @export
#' @importFrom stats complete.cases
-StatsBernoulli <- function(x,
- x.names,
+StatsBernoulli <- function(x = NULL,
+ x.names = NULL,
DF,
- params,
- initial.list,
+ params = NULL,
+ initial.list = list(),
...
) {
diff --git a/R/stats_covariate.R b/R/stats_covariate.R
index 65f2853..52224db 100644
--- a/R/stats_covariate.R
+++ b/R/stats_covariate.R
@@ -6,6 +6,7 @@
#' @param x.names optional names for predictor variable(s), Default: NULL
#' @param DF data to analyze
#' @param params define parameters to observe, Default: NULL
+#' @param job.group for some hierarchical models with several layers of parameter names (e.g., latent and observed parameters), Default: NULL
#' @param initial.list initial values for analysis, Default: list()
#' @param jags.model specify which module to use
#' @param ... further arguments passed to or from other methods
@@ -105,13 +106,14 @@
#' @rdname StatsCovariate
#' @export
#' @importFrom stats complete.cases
-StatsCovariate <- function(y,
- y.names,
- x,
- x.names,
+StatsCovariate <- function(y = NULL,
+ y.names = NULL,
+ x = NULL,
+ x.names = NULL,
DF,
- params,
- initial.list,
+ params = NULL,
+ job.group = NULL,
+ initial.list = list(),
jags.model,
...
) {
@@ -181,22 +183,16 @@ StatsCovariate <- function(y,
if (length(x)) {
n.data <- data.frame(do.call(rbind,lapply(y.names, function (z) {
m <- expand.grid(z,x.names)
- data.frame(m,n)
+ data.frame(m,n, stringsAsFactors = FALSE)
})))
} else {
# Create n data for y variables
- n.data <- data.frame(t(combn(job.names, 2)),n)
+ n.data <- data.frame(t(combn(job.names, 2)),n, stringsAsFactors = FALSE)
}
-
+
# Paramter(s) of interest
params <- if(length(params)) TrimSplit(params) else c("cor")
-
- # Add Cronbach's alpha if requested
- if ("Alpha" %in% params) {
- alpha <- "Alpha <- q / (q - 1) * (1 - sum(diag[]) / (sum(cov)))"
- jags.model <- gsub("\\#ALPHA", alpha , jags.model)
- }
-
+
# Create data for Jags
data.list <- list(
n = n,
@@ -206,8 +202,25 @@ StatsCovariate <- function(y,
m2 = m2
)
+ # Define name group
+ if (is.null(job.group)) job.group <- list ( c("cov","cor") , c("Alpha") )
+
+ # Add Cronbach's alpha if requested
+ if ("Alpha" %in% params) {
+ alpha <- "Alpha <- q / (q - 1) * (1 - sum(diag[]) / (sum(cov)))"
+ jags.model <- gsub("\\#ALPHA", alpha , jags.model)
+ job.names <- list(list(job.names) , list("Tau-equivalent reliability"))
+ alpha.n <- c( rep("Alpha" , ncol(n.data)-1) ,
+ mean(n.data[ , ncol(n.data)]) )
+ n.data <- rbind(n.data , alpha.n)
+ }
+
+ # Make certain n column in n.data is numeric
+ n.data$n <- as.numeric(n.data$n)
+
# Create name list
name.list <- list(
+ job.group = job.group,
job.names = job.names
)
diff --git a/R/stats_fit.R b/R/stats_fit.R
index 3030b37..9ef5999 100644
--- a/R/stats_fit.R
+++ b/R/stats_fit.R
@@ -1,6 +1,6 @@
#' @title Fit Data
#' @description Apply latent or observed models to fit data (e.g., SEM, CFA, mediation)
-#' @param latent laten variables, Default: NULL
+#' @param latent latent variables, Default: NULL
#' @param latent.names optional names for for latent variables, Default: NULL
#' @param observed observed variable(s), Default: NULL
#' @param observed.names optional names for for observed variable(s), Default: NULL
@@ -21,21 +21,21 @@
#' @rdname StatsFit
#' @export
#' @importFrom stats complete.cases
-StatsFit <- function(latent,
- latent.names,
- observed,
- observed.names,
- additional,
- additional.names,
+StatsFit <- function(latent = NULL,
+ latent.names = NULL,
+ observed = NULL,
+ observed.names = NULL,
+ additional = NULL,
+ additional.names = NULL,
DF,
- params,
- job.group,
- initial.list,
+ params = NULL,
+ job.group = NULL,
+ initial.list = list(),
model.name,
jags.model,
- custom.model,
- run.ppp,
- run.robust,
+ custom.model = NULL,
+ run.ppp = FALSE,
+ run.robust = FALSE,
...
) {
@@ -75,6 +75,9 @@ StatsFit <- function(latent,
# Number of observed variables
lat <- length(name.stems)
+ # Latent variable permutations
+ m <- t(combn(1:lat, 2))
+
# Select appropriate model
if (!length(custom.model)) jags.model <- ReadFile( model.name , data.format = "txt" )
@@ -162,7 +165,7 @@ StatsFit <- function(latent,
params <- TrimSplit(params)
} else {
lambda <- if(length(observed)) c("lam", "error") else NULL
- params <- if (run.ppp) c( lambda , "cov") else c( lambda , "beta", "zbeta")
+ params <- if (run.ppp) c( lambda , "cov" , "cor") else c( lambda , "beta", "zbeta")
}
# Create data for Jags
@@ -177,7 +180,7 @@ StatsFit <- function(latent,
fl = fl)
if (run.ppp) {
- data.list <- c(data.list, list(psi.prec = psi.prec))
+ data.list <- c(data.list, list(psi.prec = psi.prec , m = m))
} else {
data.list <- c(data.list, list(b.priors = b.priors))
}
@@ -190,7 +193,7 @@ StatsFit <- function(latent,
}
# Define name group
- if (is.null(job.group)) job.group <- list ( c("lam","error") , c("cov","beta","zbeta") )
+ if (is.null(job.group)) job.group <- list ( c("lam","error") , c("cov","cor","beta","zbeta") )
# Add observed names if present
if(length(observed.names)) {
@@ -233,6 +236,7 @@ StatsFit <- function(latent,
name.list = name.list,
params = params,
jags.model = jags.model,
+ run.ppp = run.ppp,
n.data = as.matrix(n)
))
}
\ No newline at end of file
diff --git a/R/stats_kappa.R b/R/stats_kappa.R
index 6835b81..954e84a 100644
--- a/R/stats_kappa.R
+++ b/R/stats_kappa.R
@@ -35,11 +35,11 @@
#' @rdname StatsKappa
#' @export
#' @importFrom stats complete.cases
-StatsKappa <- function(x,
- x.names,
+StatsKappa <- function(x = NULL,
+ x.names = NULL,
DF,
- params,
- initial.list,
+ params = NULL,
+ initial.list = list(),
...
) {
@@ -47,10 +47,7 @@ StatsKappa <- function(x,
x <- TrimSplit(x)
# Exclude noncomplete observations
- DF <- DF[stats::complete.cases(DF[, x]), ]
-
- # If data is binary ( ones and zeros ) add one
- if (any(DF == 0)) DF <- DF + 1
+ DF <- DF[stats::complete.cases(DF[, x]), x]
# Create crosstable for x parameters
n.data <- as.data.frame(table(DF[, x]))
@@ -75,7 +72,7 @@ StatsKappa <- function(x,
# Paramter(s) of interest
params <- if(length(params)) TrimSplit(params) else c("Kappa")
-
+
# Create data for Jags
data.list <- list(
rater = rater,
diff --git a/R/stats_mean.R b/R/stats_mean.R
index e2e22e5..2f9146a 100644
--- a/R/stats_mean.R
+++ b/R/stats_mean.R
@@ -12,13 +12,13 @@
#' @rdname StatsMean
#' @export
-StatsMean <- function(y,
- y.names,
- x,
- x.names,
+StatsMean <- function(y = NULL,
+ y.names = NULL,
+ x = NULL,
+ x.names = NULL,
DF,
- params,
- initial.list,
+ params = NULL,
+ initial.list = list(),
...
) {
diff --git a/R/stats_metric.R b/R/stats_metric.R
index c834b81..2e783ee 100644
--- a/R/stats_metric.R
+++ b/R/stats_metric.R
@@ -22,18 +22,18 @@
#' @rdname StatsMetric
#' @export
#' @importFrom stats complete.cases sd aggregate median
-StatsMetric <- function(y,
- y.names,
- x,
- x.names,
+StatsMetric <- function(y = NULL,
+ y.names = NULL,
+ x = NULL,
+ x.names = NULL,
DF,
- params,
- job.group,
- initial.list,
+ params = NULL,
+ job.group = NULL,
+ initial.list = list(),
model.name,
jags.model,
- custom.model,
- run.robust,
+ custom.model = NULL,
+ run.robust = FALSE,
...
) {
diff --git a/R/stats_nominal.R b/R/stats_nominal.R
index fb4b2d0..c8db73e 100644
--- a/R/stats_nominal.R
+++ b/R/stats_nominal.R
@@ -53,15 +53,15 @@
#' @rdname StatsNominal
#' @export
-StatsNominal <- function(x,
- x.names,
+StatsNominal <- function(x = NULL,
+ x.names = NULL,
DF,
- params,
- job.group,
- initial.list,
+ params = NULL,
+ job.group = NULL,
+ initial.list = list(),
model.name,
jags.model,
- custom.model,
+ custom.model = NULL,
...
) {
diff --git a/R/stats_regression.R b/R/stats_regression.R
index c0823b9..f946540 100644
--- a/R/stats_regression.R
+++ b/R/stats_regression.R
@@ -16,16 +16,16 @@
#' @rdname StatsRegression
#' @export
#' @importFrom stats complete.cases
-StatsRegression <- function(y,
- y.names,
- x,
- x.names,
- x.steps,
- x.blocks,
+StatsRegression <- function(y = NULL,
+ y.names = NULL,
+ x = NULL,
+ x.names = NULL,
+ x.steps = NULL,
+ x.blocks = NULL,
DF,
- params,
- job.group,
- initial.list,
+ params = NULL,
+ job.group = NULL,
+ initial.list = list(),
...
) {
@@ -41,25 +41,29 @@ StatsRegression <- function(y,
# Number of datapoints
n <- dim(x.matrix)[1]
- # Number of blocks
- if (is.null(x.blocks)) x.blocks <- 1
+ # Number of steps
+ x.steps <- if (is.null(x.steps)) 1 else as.numeric(x.steps)
# Number of variables per block
- x.steps <- if (is.null(x.steps)) dim(x.matrix)[2] else as.numeric(TrimSplit(x.steps))
+ x.blocks <- if (is.null(x.blocks)) dim(x.matrix)[2] else as.numeric(TrimSplit(x.blocks))
# Create job.names
y.names <- if (!is.null(y.names)) TrimSplit(y.names) else CapWords(y)
x.names <- if (!is.null(x.names)) TrimSplit(x.names) else CapWords(x)
# Create job group
- if (is.null(job.group)) job.group <- list ( c("beta0","zbeta0") ,
- c("beta","zbeta","sigma","zsigma")
+ if (is.null(job.group)) job.group <- list ( c("beta0","zbeta0","sigma","zsigma") ,
+ c("beta","zbeta")
)
# Final name list
- job.names <- list(list("Intercept"),
- list(rep(y.names,x.blocks), x.names)
- )
+ if (x.steps > 1) {
+ job.names <- list(list(sprintf("Intercept (block: %s)", seq(x.steps))),
+ list(sprintf("%s (block: %s)", y.names, seq(x.steps)), x.names)
+ )
+ } else {
+ job.names <- list(list("Intercept"), list(y.names,x.names))
+ }
# Create crosstable for y parameters
n.data <- data.frame(t(combn(job.names, 2)),n)
@@ -72,8 +76,8 @@ StatsRegression <- function(y,
x = x.matrix,
y = y.matrix,
n = n,
- n.x = x.steps,
- q = x.blocks)
+ n.x = x.blocks,
+ q = x.steps)
# Create name list
name.list <- list(
diff --git a/R/stats_softmax.R b/R/stats_softmax.R
index 2b89a10..74cd13e 100644
--- a/R/stats_softmax.R
+++ b/R/stats_softmax.R
@@ -52,15 +52,15 @@
#' @rdname StatsSoftmax
#' @export
#' @importFrom stats complete.cases
-StatsSoftmax <- function(y,
- y.names,
- x,
- x.names,
+StatsSoftmax <- function(y = NULL,
+ y.names = NULL,
+ x = NULL,
+ x.names = NULL,
DF,
- params,
- job.group,
- initial.list,
- run.robust,
+ params = NULL,
+ job.group = NULL,
+                       initial.list = list(),
+ run.robust = FALSE,
...
) {
diff --git a/README.md b/README.md
index 7a65382..7bc07a0 100644
--- a/README.md
+++ b/README.md
@@ -7,16 +7,16 @@
-
+
-
+
What is *bfw*?
---------------
+-----------------------
The purpose of *`bfw`* is to establish a framework for conducting
Bayesian analysis in [R](https://www.r-project.org/), using
@@ -30,9 +30,7 @@ Derived from the excellent work of Kruschke (2015), the goal of the
framework is to easily estimate parameter values and the stability of
estimates from the *highest density interval* (HDI), make null value
assessment through *region of practical equivalence testing* (ROPE) and
-conduct convergence diagnostics (e.g., Gelman & Rubin, 1992). Though the
-initial version only support plotting mean data (including repeated
-measures), future releases will support other types of visualizations.
+conduct convergence diagnostics (e.g., Gelman & Rubin, 1992).
Users are encouraged to apply justified priors by modifying existing
JAGS models found in `extdata/models` or by adding custom models.
@@ -45,15 +43,23 @@ List of current modules
-----------------------
- Bernoulli trials
-- Covariate estimations (including correlation and Cronbach's alpha)
+- Covariate estimations (including correlation and Cronbach’s alpha)
- Fit observed and latent data (e.g., SEM, CFA, mediation models)
-- Bayesian equivalent of Cohen's kappa
+- Bayesian equivalent of Cohen’s kappa
- Mean and standard deviation estimations
- Predict metric values (cf., ANOVA)
- Predict nominal values (cf., chi-square test)
- Simple, multiple and hierarchical regression
- Softmax regression (i.e., multinomial logistic regression)
+List of current visualizations
+------------------------------
+
+- Plot density of parameter values (including ROPE)
+- Plot mean data (including repeated measures)
+- Plot nominal data (e.g., expected and observed values)
+- Plot circlize data (e.g., multiple response categories)
+
Prerequisites
-------------
@@ -193,7 +199,7 @@ Shamelessly adapted from
(credits to James Curran).
# Create a function for left-censored data
- custom.function <- function(DF) {
+ custom.function <- function(DF, ...) {
x <- as.vector(unlist(DF))
x[x < log(29)] = NA
@@ -260,11 +266,11 @@ The cost of conducting robust estimates
# Running time for normal distribution analyis
biased.mcmc$run.time[2] - biased.mcmc$run.time[1]
- #> Time difference of 9.83 secs
+ #> Time difference of 7.43 secs
# Running time for t-distribution analysis
biased.mcmc.robust$run.time[2] - biased.mcmc.robust$run.time[1]
- #> Time difference of 55.6 secs
+ #> Time difference of 34.1 secs
License
-------
@@ -286,8 +292,8 @@ References
Simulation Using Multiple Sequences. *Statistical Science*, *7*(4),
457-472.
- Kruschke, J. K. (2013). Posterior predictive checks can and should
- be Bayesian: Comment on Gelman and Shalizi, 'Philosophy and the
- practice of Bayesian statistics'. *British Journal of Mathematical
+ be Bayesian: Comment on Gelman and Shalizi, ‘Philosophy and the
+ practice of Bayesian statistics’. *British Journal of Mathematical
and Statistical Psychology*, *66*(1), 4556.
- Kruschke, J. K. (2015). *Doing Bayesian data analysis: a tutorial
diff --git a/TODO.md b/TODO.md
index efee1dc..b92a097 100644
--- a/TODO.md
+++ b/TODO.md
@@ -1,13 +1,10 @@
-# List of past and present TODO's
+# TODO
-## bfw 0.0.1 (Initial launch)
-
-- [x] Develop a more elegant system to generate models for jags
-- [] Write a more detailed manual/vignettes
+- [] Replace cumbersome diagnostics plot
+- [] Write more detailed manual/vignettes
- [] Add more visualizations (e.g., correlation and regression paths and SEM models)
- [] Add ROPE for more than one parameter
+- [] Develop a more elegant system to generate models for jags
-
-## bfw 0.0.0-1 (Pre-launch)
-
+# Archive
- [x] Merge modules for initial launch
\ No newline at end of file
diff --git a/inst/doc/plot_data.md b/inst/doc/plot_mean.md
similarity index 84%
rename from inst/doc/plot_data.md
rename to inst/doc/plot_mean.md
index 7fd7a00..0d58c53 100644
--- a/inst/doc/plot_data.md
+++ b/inst/doc/plot_mean.md
@@ -29,13 +29,14 @@ Enjoy this brief demonstration of the plot data module
#> sigma[1]: Before 1 1 1.000 49354 0.958 1.046 1000
#> sigma[2]: During 1 1 1.000 50000 0.957 1.045 1000
#> sigma[3]: After 1 1 0.997 50000 0.957 1.045 1000
- plot <- bfw::PlotData(mcmc, run.repeated = TRUE)
- ParsePlot(plot)
+ Plot <- bfw::PlotMean(mcmc,
+ run.repeated = TRUE)
+ ParsePlot(Plot)
### Plot the data as repeated measures
### Lets add some noise
set.seed(101)
@@ -58,13 +59,14 @@ Enjoy this brief demonstration of the plot data module
#> sigma[1]: Before 1.120 1.119 1.116 50000 1.072 1.170 1000
#> sigma[2]: During 1.116 1.116 1.112 50000 1.068 1.166 1000
#> sigma[3]: After 1.101 1.100 1.097 49233 1.054 1.151 1000
- plot <- bfw::PlotData(noise.mcmc, run.repeated = TRUE)
- ParsePlot(plot)
+ Plot <- bfw::PlotMean(noise.mcmc,
+ run.repeated = TRUE)
+ ParsePlot(Plot)
### Plot the noise as repeated measures
### Let’s add a group
combined.data <- as.data.frame(rbind(cbind(data,"Y"), cbind(noise,"X") ), stringsAsFactors=FALSE)
@@ -102,12 +104,16 @@ Enjoy this brief demonstration of the plot data module
#> sigma[7]: After 1.105 50000 1.072 1.140 2000
#> sigma[8]: After vs. Groups @ X 1.098 50000 1.054 1.150 1000
#> sigma[9]: After vs. Groups @ Y 1.000 50000 0.957 1.045 1000
+
# Let's also add some colors!
- plot <- bfw::PlotData(combined.data, run.split = TRUE, run.repeated = TRUE , monochrome = FALSE)
- ParsePlot(plot)
+ Plot <- bfw::PlotMean(combined.data,
+ run.split = TRUE,
+ run.repeated = TRUE,
+ monochrome = FALSE)
+ ParsePlot(Plot)
### Plot the split data
diff --git a/inst/extdata/models/stats_fit_cfa.txt b/inst/extdata/models/stats_fit_cfa.txt
index 9f27023..ef77280 100644
--- a/inst/extdata/models/stats_fit_cfa.txt
+++ b/inst/extdata/models/stats_fit_cfa.txt
@@ -40,4 +40,14 @@ model {
# Priors Wishart Distribution
psi[1:lat,1:lat] ~ dwish(psi.prec,lat)
cov[1:lat,1:lat] <- inverse(psi[1:lat,1:lat])
+
+ # Compute covariance matrix
+ for(k in 1:lat) {
+ cor[k , k] <- 1
+ }
+
+ for (k in 1:length(m[, 1])) {
+ cor[m[k, 1], m[k, 2]] <- cov[m[k, 1], m[k, 2]]/(sqrt(cov[m[k, 1], m[k, 1]])*sqrt(cov[m[k, 2], m[k, 2]]))
+ cor[m[k, 2], m[k, 1]] <- cor[ m[k, 1] , m[k, 2] ]
+ }
}
\ No newline at end of file
diff --git a/inst/extdata/models/stats_regression.txt b/inst/extdata/models/stats_regression.txt
index 0d194dc..712a025 100644
--- a/inst/extdata/models/stats_regression.txt
+++ b/inst/extdata/models/stats_regression.txt
@@ -1,4 +1,4 @@
-# Standardize the data:
+# Standardize the data
data {
y.m <- mean(y)
y.sd <- sd(y)
@@ -17,14 +17,14 @@ data {
}
}
}
-# Specify the model for standardized data:
+# Specify the model for standardized data
model {
for (j in 1:q) {
for (i in 1:n) {
zY[i,j] ~ dnorm(zbeta0[j] + sum(zbeta[j,1:n.x[j]] * zX[i, j, 1:n.x[j]]), 1 / zsigma[j] ^ 2)
}
}
- # Priors vague on standardized scale:
+ # Priors vague on standardized scale
for (k in 1:q) {
zbeta0[k] ~ dnorm(0 , 0.0001)
for (l in 1:n.x[k]) {
@@ -36,7 +36,7 @@ model {
zsigma.prec[m] ~ dgamma(0.0001,0.0001)
}
- # Transform to original scale:
+ # Transform to original scale
for (n in 1:q) {
beta[n, 1:n.x[n]] <- (zbeta[n, 1:n.x[n]] / x.sd[n, 1:n.x[n]]) * y.sd
beta0[n] <- zbeta0[n] * y.sd + y.m - sum(zbeta[n, 1:n.x[n]] * x.m[n, 1:n.x[n]] / x.sd[n, 1:n.x[n]]) * y.sd
diff --git a/inst/extdata/models/stats_regression_robust.txt b/inst/extdata/models/stats_regression_robust.txt
index 588857d..4f31845 100644
--- a/inst/extdata/models/stats_regression_robust.txt
+++ b/inst/extdata/models/stats_regression_robust.txt
@@ -1,4 +1,4 @@
-# Standardize the data:
+# Standardize the data
data {
y.m <- mean(y)
y.sd <- sd(y)
@@ -17,14 +17,14 @@ data {
}
}
}
-# Specify the model for standardized data:
+# Specify the model for standardized data
model {
for (j in 1:q) {
for (i in 1:n) {
zY[i,j] ~ dt(zbeta0[j] + sum(zbeta[j,1:n.x[j]] * zX[i, j, 1:n.x[j]]), 1 / zsigma[j] ^ 2, nu[j])
}
}
- # Priors vague on standardized scale:
+ # Priors vague on standardized scale
for (k in 1:q) {
zbeta0[k] ~ dnorm(0, 0.001)
for (l in 1:n.x[k]) {
@@ -38,7 +38,7 @@ model {
nu.prec[m] ~ dexp(1 / 29)
}
- # Transform to original scale:
+ # Transform to original scale
for (n in 1:q) {
beta[n, 1:n.x[n]] <- (zbeta[n, 1:n.x[n]] / x.sd[n, 1:n.x[n]]) * y.sd
beta0[n] <- zbeta0[n] * y.sd + y.m - sum(zbeta[n, 1:n.x[n]] * x.m[n, 1:n.x[n]] / x.sd[n, 1:n.x[n]]) * y.sd
diff --git a/inst/extdata/templates/apa.pptx b/inst/extdata/templates/apa.pptx
new file mode 100644
index 0000000000000000000000000000000000000000..92a3f6fef53ca51e91d15ba4a9216b8b7eecc318
GIT binary patch
literal 32693
zcma&Nb8u!~w=EpAV>{{Cwr$(C^Takfb~<)Cwr$&XI=1=c{hf2~Ik(PRb-t=+?pky0
z+W+kJj4{W=R*(h-Lj{5Wf&u~pA_B6`UdX=%0Rqy32LeL<-f?hnrgv~M1(@178#-Is
z+tIn(+MK5<+OIJn4sVlRaEth6Sr`omgCS928mBKv{%IiSk^Ld!vNz&NanW$q1u%js
zSX>C>Jt}Rwy7WukZXIE7Y2Nc0-7D4HQkL;_G-Q}+pAAj0$hR2t{l{=A{3@YDG
zu(oCUezKGfH}0jYhhjlJ2o%p5r{rfbKCC>sH0_hltJfW&t*|L2PxPqSe(2Pa$I*w#
z2NK>&tEr3(md6f^PZp0n3z^A6pKv1$ifV^#p*8ZAEfOelfBou~p0k-=o(wuNqoghf
zfKTr;w~qfZS#x+G?w&o`8=$^LMUoL`fhUGJUs%cZ2PgL9y4i-UF5G?9Hz!Bh6WM|V
z-hGHn#Zy8RHH;gGAQ~^9#aPO~vs@uU9;}r%OaS0=a(cUmQ|qvyjCohn>S006
zvxw#~LS=FsHnea*sS(0LGiQQHaK(5DAO~-t<-KDNJ^9do?F-9$CnzEg{Dy+1A*Bn2lZOO!G8
zJidZ>k&i={kKvNBM*9s<#oGhM&C*+r)W4>*{2b~8%zXqZSFH`2;wQtaNZtzQOIX(L@hdJ8kKxE3puZODNw
zB2>VmFmfpuVQ;N0|3Yy|#VuP@p)yVMq
zfkW?orRMAAMC;_zz&TbnW&zkbU(Ht_Up%Fzg~1x@V^doA$FIn0R|h06<##wzd>f
z(zDqn=%Dv_j}Tswn?CCW_mWM|#M*M_GJ$fSqXSR|ii1iK6<6gyHOJnr?4*t=N-ci<
zLeRaR0^p6+Q_(*g9e;YWUXWJJOY{NysN}*fX>B*I*Fl(c|BFv*uu6WvQ%|
z73yFU2QvLMe4niQld+`rfoa{C3zs%4fA!-8>?l>YPDj@q_U;WOi(AweRPUAt4BI4^
zcAgPsD!nF;Tbr=i@3zZ6*;bl&V?jMoX>soiA-}7FKG?UvS~+
z>NTa>rNHm+jp#Z_P(~}oBHG4&4;1jA_QSe5lP7W31XO(`i}q)WIJ{jr-~
znVBhsn;%z8)|4s{mJ1!`=y9D9r-Nv_#95QE{plT5yB)4x??w{!gbF*h+2^SiJ)Vbl
zkY?$G7Vms1&U(Pb!>_9X29|kI$xO1FD!)FKUx_5FcQGjE!=FiqU!0
z7G$glBd;CtVvVY^#7z5tAaBi74x6O_5bD=RV7NTp6$8~({d~7($Hf#-1{O-&V1`k<
zV$NP>+`I|V)Ww27c|f^^*%O6gLqI#_)$7tRTt$wO%OY8CmA075%yr8NXwex6#Z)?!
zx87dnI7G9fG~;!->zpj~B!