#' A3 Results for Arbitrary Model
#'
#' This function calculates the A3 results for an arbitrary model construction algorithm (e.g. Linear Regressions, Support Vector Machines or Random Forests). For linear regression models, you may use the \code{\link{a3.lm}} convenience function.
#'
#' @param formula the regression formula.
#' @param data a data frame containing the data to be used in the model fit.
#' @param model.fn the function to be used to build the model.
#' @param model.args a list of arguments passed to \code{model.fn}.
#' @param ... additional arguments passed to \code{\link{a3.base}}.
#' @return S3 \code{A3} object; see \code{\link{a3.base}} for details
#' @references Scott Fortmann-Roe (2015). Consistent and Clear Reporting of Results from Diverse Modeling Techniques: The A3 Method. Journal of Statistical Software, 66(7), 1-23. <http://www.jstatsoft.org/v66/i07/>
#' @examples
#' \donttest{
#' ## Standard linear regression results:
#'
#' summary(lm(rating ~ ., attitude))
#'
#' ## A3 Results for a Linear Regression model:
#'
#' # In practice, p.acc should be <= 0.01 in order
#' # to obtain finer grained p values.
#'
#' a3(rating ~ ., attitude, lm, p.acc = 0.1)
#'
#'
#' ## A3 Results for a Random Forest model:
#'
#' # It is important to include the "+0" in the formula
#' # to eliminate the constant term.
#'
#' require(randomForest)
#' a3(rating ~ .+0, attitude, randomForest, p.acc = 0.1)
#'
#' # Set the ntrees argument of the randomForest function to 100
#'
#' a3(rating ~ .+0, attitude, randomForest, p.acc = 0.1, model.args = list(ntree = 100))
#'
#' # Speed up the calculation by doing 5-fold cross-validation.
#' # This is faster and more conservative (i.e. it should over-estimate error)
#'
#' a3(rating ~ .+0, attitude, randomForest, n.folds = 5, p.acc = 0.1)
#'
#' # Use Leave One Out Cross Validation. The least biased approach,
#' # but, for large data sets, potentially very slow.
#'
#' a3(rating ~ .+0, attitude, randomForest, n.folds = 0, p.acc = 0.1)
#'
#' ## Use a Support Vector Machine algorithm.
#'
#' # Just calculate the slopes and R^2 values, do not calculate p values.
#'
#' require(e1071)
#' a3(rating ~ .+0, attitude, svm, p.acc = NULL)
#' }
a3 <- function(formula, data, model.fn, model.args = list(), ...){
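# Wrap the user's model function: features are renamed to x1..xn so that
# model fitting, prediction, and slope calculation all use consistent names.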
model.fn.w.args <- function(y, x){
dat <- data.frame(cbind(y, x))
names(dat) <- c("y", paste("x", 1:ncol(x), sep=""))
new.model.args = list(formula = y ~ . + 0, data = dat)
for(n in names(model.args)){
new.model.args[[n]] = model.args[[n]]
}
return(do.call(model.fn, new.model.args))
}
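# Fit the model on the training data and predict the held-out observations;
# the transpose handles new.x arriving as a single row (e.g. with LOOCV).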
simulate.fn <- function(y, x, new.x, ...){
reg <- model.fn.w.args(y, x)
new.data <- data.frame(new.x)
if(ncol(new.data) != ncol(x)){
new.data <- data.frame(t(new.data))
}
names(new.data) <- paste("x", 1:ncol(x), sep="")
return(predict(reg, new.data))
}
a3.base(formula, data, model.fn.w.args, simulate.fn, ...)
}
#' A3 for Linear Regressions
#'
#' This convenience function calculates the A3 results specifically for linear regressions. It uses R's \code{\link{glm}} function and so supports logistic regressions and other link functions using the \code{family} argument. For other forms of models you may use the more general \code{\link{a3}} function.
#'
#' @param formula the regression formula.
#' @param data a data frame containing the data to be used in the model fit.
#' @param family the regression family. Typically 'gaussian' for linear regressions.
#' @param ... additional arguments passed to \code{\link{a3.base}}.
#' @return S3 \code{A3} object; see \code{\link{a3.base}} for details
#' @examples
#' \donttest{
#' ## Standard linear regression results:
#'
#' summary(lm(rating ~ ., attitude))
#'
#' ## A3 linear regression results:
#'
#' # In practice, p.acc should be <= 0.01 in order
#' # to obtain fine grained p values.
#'
#' a3.lm(rating ~ ., attitude, p.acc = 0.1)
#'
#' # This is equivalent both to:
#'
#' a3(rating ~ ., attitude, glm, model.args = list(family = gaussian), p.acc = 0.1)
#'
#' # and also to:
#'
#' a3(rating ~ ., attitude, lm, p.acc = 0.1)
#' }
a3.lm <- function(formula, data, family = gaussian, ...){
a3(formula, data, model.fn = glm, model.args = list(family = family), ...)
}
#' Base A3 Results Calculation
#'
#' This function calculates the A3 results. Generally this function is not called directly. It is simpler to use \code{\link{a3}} (for arbitrary models) or \code{\link{a3.lm}} (specifically for linear regressions).
#'
#' @param formula the regression formula.
#' @param data a data frame containing the data to be used in the model fit.
#' @param model.fn function used to generate a model.
#' @param simulate.fn function used to create the model and generate predictions.
#' @param n.folds the number of folds used for cross-validation. Set to 0 to use Leave One Out Cross Validation.
#' @param data.generating.fn the function used to generate stochastic noise for calculation of exact p values.
#' @param p.acc the desired accuracy for the calculation of exact p values. The entire calculation process will be repeated \eqn{1/p.acc} times, so this can have a dramatic effect on the time required. Set to \code{NULL} to disable the calculation of p values.
#' @param features whether to calculate the average slopes, added \eqn{R^2} and p values for each of the features in addition to the overall model.
#' @param slope.sample if not NULL, the sample size to be used for calculating the average slopes (useful for very large data sets).
#' @param slope.displacement the amount of displacement to use when calculating the slopes. May be a single number, in which case the same displacement is applied to all features. May also be a named vector with one entry per feature.
#' @return S3 \code{A3} object containing:
#' \item{model.R2}{The cross validated \eqn{R^2} for the entire model.}
#' \item{feature.R2}{The cross validated \eqn{R^2}'s for the features (if calculated).}
#' \item{model.p}{The p value for the entire model (if calculated).}
#' \item{feature.p}{The p value for the features (if calculated).}
#' \item{all.R2}{The \eqn{R^2}'s for the model features, and any stochastic simulations for calculating exact p values.}
#' \item{observed}{The observed response for each observation.}
#' \item{predicted}{The predicted response for each observation.}
#' \item{slopes}{Average slopes for each of the features (if calculated).}
#' \item{all.slopes}{Slopes for each of the observations for each of the features (if calculated).}
#' \item{table}{The A3 results table.}
#'
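#' @examples
#' \donttest{
#' ## A minimal sketch of calling a3.base directly:
#'
#' # Normally a3 or a3.lm construct model.fn and simulate.fn for you.
#' # The wrappers below mirror the internals of a3: features are renamed
#' # to x1..xn so that fitting, prediction and slope calculation stay
#' # consistent. This sketch assumes the default multi-row folds
#' # (i.e. n.folds > 1).
#'
#' model.fn <- function(y, x){
#'   dat <- data.frame(cbind(y, x))
#'   names(dat) <- c("y", paste("x", 1:ncol(x), sep=""))
#'   lm(y ~ . + 0, dat)
#' }
#'
#' simulate.fn <- function(y, x, new.x, ...){
#'   reg <- model.fn(y, x)
#'   new.data <- data.frame(new.x)
#'   names(new.data) <- paste("x", 1:ncol(x), sep="")
#'   predict(reg, new.data)
#' }
#'
#' a3.base(rating ~ . + 0, attitude, model.fn, simulate.fn, p.acc = NULL)
#' }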
a3.base <- function(formula, data, model.fn, simulate.fn, n.folds = 10, data.generating.fn = replicate(ncol(x), a3.gen.default), p.acc = 0.01, features = TRUE, slope.sample = NULL, slope.displacement = 1){
if(! is.null(p.acc)){
if(p.acc <= 0 || p.acc >=1){
stop("p.acc must be between 0 and 1. Set p.acc to NULL to disable the calculation of p values.")
}
}
if(n.folds < 2 && n.folds != 0){
stop("n.folds must be >= 2. Set n.folds to 0 to use Leave One Out Cross Validation.")
}
n.reps <- 0
if(! is.null(p.acc)){
n.reps <- ceiling(1/p.acc)
}
res <- list()
mf <- model.frame(formula, data, drop.unused.levels = TRUE)
x <- model.matrix(formula, mf)
y <- model.response(mf)
if(length(data.generating.fn) != ncol(x)){
stop("data.generating.fn must be a list of functions one for each column in the model matrix")
}
if(n.folds == 0){
n.folds <- length(y)
}
my.apply <- lapply
if( ! is.null(p.acc) ){
# if( library(pbapply, quietly = TRUE, logical.return = TRUE) == TRUE ){ # not needed due to depends
my.apply <- pblapply # Show a progress bar if available
# }
}
# Calculate the groups for cross validation
cv.folds <- split(sample(1:length(y)), rep(1:n.folds, length = length(y)))
# Generate random data series for p values
new.data <- lapply(1:ncol(x), function(c){
data.generating.fn[[c]](x[,c], n.reps)
})
r2.formatter <- function(x){
signs <- sign(x)
x <- abs(x)*100
res <- paste(format(round(x, 1), digits = 3), "%")
signs <- sapply(signs, function(x){
if(x == -1){
return("- ")
}else{
return("+ ")
}
})
if(signs[1] == "+ "){
signs[1] <- " " # remove the plus sign for the overall model accuracy
}
res <- paste(signs, res, sep="")
return(res)
}
p.formatter <- function(x){
if(length(x) == 0){
return(c())
}
res <- format(x, digits = 4)
for(i in 1:length(x)){
if(x[i] == 0){
res[i] <- paste("<", p.acc)
}
}
return(res)
}
slope.formatter <- function(x){
if(length(x) == 0){
return(c())
}
format(x, digits = 3)
}
# Setup iterations
# "default" is initial simulation without any randomized data
# Each rep after that has some form of randomized data
iterations <- "default"
if(! is.null(p.acc)){
iterations <- c(iterations, 1:n.reps)
}
top <- 0
if(features){
top <- ncol(x)
}
# Iterate through each rep and the default
# outputs[[1]] will have the set of default cases in it
# outputs[[>1]] will have the randomized data cases
outputs <- my.apply(iterations, function(rep){
# Calculate R2's for the rep
# We calculate for the model (0) and then for each column of data by numerical index
out <- lapply(0:top, function(c){
new.x <- x
if(rep != "default"){
# If we aren't on the default case, we add some form of randomization
if(c==0){
# We are doing the overall model
# So randomize all the data
for(j in 1:ncol(x)){
new.x[,j] <- new.data[[j]][[as.numeric(rep)]]
}
}else{
# We're looking at a specific column, so just randomize that data
new.x[,c] <- new.data[[c]][[as.numeric(rep)]]
}
}
# Remove a column of data if we are at the un-randomized case
if((c != 0) && (rep == "default")){
if(top == 1){
return(list(R2=0)); # if there is only one feature column, its added R^2 equals the full-model R^2
}
new.x <- as.data.frame(new.x[,-c])
}
res <- a3.r2(y, new.x, simulate.fn, cv.folds)
return(res)
}
)
r2 <- sapply(out, function(x){x$R2})
return(list( R2 = r2, predicted = out[[1]]$predicted, observed = out[[1]]$observed ))
})
predicted <- outputs[[1]]$predicted
observed <- outputs[[1]]$observed
outputs <- lapply(outputs, function(x){x$R2})
if(features){
get.names <- function(formula, data){
#t <- terms(formula, data=data)
#l <- attr(t, "term.labels")
#if(attr(t, "intercept")==1){
# l <- c("(Intercept)",l)
#}
return(attr(x, "dimnames")[[2]])
}
entry.names <- c("-Full Model-", get.names(formula, data = data))
getSlopes <- function (reg, data){
slopes <- list()
for(col in 2:ncol(data)){
slopes[[as.character(col)]] <- c()
span <- range(data[,col])
span <- span[2] - span[1]
for(row in 1:nrow(data)){
point <- data[row,]
at.point <- predict(reg, point)
if(length(slope.displacement) == 1){
dist <- slope.displacement
}else{
dist <- slope.displacement[entry.names[col]]
}
slope <- 0
#while(TRUE){
above.point <- point
above.point[col] <- point[col] + dist
at.above <- predict(reg, above.point)[[1]]
below.point <- point
below.point[col] <- point[col] - dist
at.below <- predict(reg, below.point)[[1]]
new.slope <- (at.above - at.below)/(dist*2)
#
# if(new.slope == 0){
# dist <- dist * 2
# if(slope != 0){
# break
# }
# if(dist > span){
# break
# }
# }else{
# if(abs( (slope-new.slope) / new.slope) < epsilon){
# break
# }
# dist <- dist / 2
# }
slope <- new.slope
#}
slopes[[as.character(col)]] <- c(slopes[[as.character(col)]], slope)
}
}
slopes
}
# print(model.fn(y, x))
slope.data <- data.frame(cbind(y, x))
names(slope.data) <- c("y", paste("x", 1:ncol(x), sep=""))
if(! is.null(slope.sample)){
slope.data <- slope.data[sample(1:nrow(slope.data), slope.sample),]
}
res[["all.slopes"]] <- getSlopes(model.fn(y, x), slope.data)
res[["all.slopes"]] <- lapply(res[["all.slopes"]], function(x){ round(x, digits = 8)})
res[["slopes"]] = sapply(res[["all.slopes"]], function(x){
return(median(x))
# r <- unique(round(range(x), digits=8))
# if(length(r) == 1){
# return(r)
# }else{
# return(paste(r, collapse = " - "))
# }
} )
names(res[["all.slopes"]]) <- entry.names[-1]
names(res[["slopes"]]) <- entry.names[-1]
}else{
entry.names <- c("-Full Model-")
}
# Now take the data and calculate R2 and p values
res[["predicted"]] <- predicted
res[["observed"]] <- observed
if(! is.null(p.acc)){
# we did a set of repetitions so we should calculate p values
r2 <- c()
p.values <- c()
res$all.R2 <- list()
# item 1 is the overall model
# item > 1 is a specific column
for(i in 1:(top+1)){
# Get the R2 for the specific item
items <- sapply(outputs, function(x){x[i]})
# check if we are on a column item
if(i > 1){
# if so replace the first element of the items list with the value of the overall model R2
items[1] <- r2[1]
}
names(items) <- c("Base",paste("Rep", 1:n.reps))
res$all.R2[[paste(entry.names[i])]] <- items
# rank the items by R2
dist <- rank(items)
# find the rank of the observed R2 (the first item) among the R2's derived from randomized data
# this yields the empirical p value
p.values <- c(p.values, 1 - (dist[1]-1)/(length(dist)-1))
# the R2 of the model as generated with random data
new.null <- mean(items[-1])
if(i==1){
# the R2 of the model as generated with random data
new.null <- mean(items[-1])
# if the R2 with stochasticity is better than 0, we will use as a baseline to scale our R2
#if(new.null > 0){ # Adjust R^2 based on what was observed in stochastic series XXX reenable?
# r2 <- (items[1]-new.null) / (1-new.null)
#}else{
r2 <- items[1]
#}
}else{
# get data series again (we overwrote the item[1] position earlier)
items <- sapply(outputs, function(x){x[i]})
# see how the results improve compared to the baseline
r2 <- c(r2, r2[1] - items[1])#max(new.null, items[1])) # Adjust R^2 based on what was observed in stochastic series XXX reenable?
}
}
res$table <- data.frame(`Average Slope` = c("", slope.formatter(res$slopes)), `CV R^2` = r2.formatter(r2), `p value` = p.formatter(p.values), check.names=F)
res$model.R2 <- r2[1]
res$feature.R2 <- r2[-1]
res$model.p <- p.values[1]
names(res$model.p) <- entry.names[1]
res$feature.p <- p.values[-1]
names(res$feature.p) <- entry.names[-1]
}else{
# we didn't do repetitions so no p values
r2 <- outputs[[1]]
r2[-1] <- r2[1] - r2[-1] # get delta to overall model R2
res$table <- data.frame(`Average Slope` = c("", slope.formatter(res$slopes)), `CV R^2` = r2.formatter(r2), check.names=F)
res$all.R2 <- r2
res$model.R2 <- r2[1]
res$feature.R2 <- r2[-1]
}
names(res$model.R2) <- entry.names[1]
names(res$feature.R2) <- entry.names[-1]
row.names(res$table) <- entry.names
class(res) <- "A3"
return(res)
}
#' Plot A3 Results
#'
#' Plots an 'A3' object results. Displays predicted versus observed values for each observation along with the distribution of slopes measured for each feature.
#'
#' @param x an A3 object.
#' @param ... additional options provided to \code{\link{plotPredictions}}, \code{\link{plotSlopes}} and \code{\link{plot}} functions.
#' @method plot A3
#' @examples
#' \donttest{
#' data(housing)
#' res <- a3.lm(MED.VALUE ~ NOX + ROOMS + AGE + HIGHWAY + PUPIL.TEACHER, housing, p.acc = NULL)
#' plot(res)
#' }
plot.A3 <- function(x, ...){
if(!inherits(x, "A3")){
stop("'x' must be of class 'A3'.")
}
plotPredictions(x, ...)
old.par <- par(ask=T)
plotSlopes(x, ...)
par(old.par)
}
#' Plot Predicted versus Observed
#'
#' Plots an 'A3' object's values showing the predicted versus observed values for each observation.
#'
#' @param x an A3 object,
#' @param show.equality if true plot a line at 45-degrees.
#' @param xlab the x-axis label.
#' @param ylab the y-axis label.
#' @param main the plot title.
#' @param ... additional options provided to the \code{\link{plot}} function.
#'
#' @examples
#' data(multifunctionality)
#' x <- a3.lm(MUL ~ ., multifunctionality, p.acc = NULL, features = FALSE)
#' plotPredictions(x)
plotPredictions <- function(x, show.equality = TRUE, xlab = "Observed Value", ylab = "Predicted Value", main = "Predicted vs Observed", ...){
if(!inherits(x, "A3")){
stop("'x' must be of class 'A3'.")
}
plot(x$observed, x$predicted, xlab=xlab, ylab=ylab, main=main, ...)
abline(h=0, col="Gray"); abline(v=0, col="Gray");
if(show.equality){
abline(coef = c(0, 1), col = "Blue", lty = 2)
}
}
#' Plot Distribution of Slopes
#'
#' Plots an 'A3' object's distribution of slopes for each feature and observation. Uses Kernel Density Estimation to create an estimate of the distribution of slopes for a feature.
#'
#' @param x an A3 object.
#' @param ... additional options provided to the \code{\link{plot}} and \code{\link{density}} functions.
#'
#' @examples
#' \donttest{
#' require(randomForest)
#' data(housing)
#'
#' x <- a3(MED.VALUE ~ NOX + PUPIL.TEACHER + ROOMS + AGE + HIGHWAY + 0,
#' housing, randomForest, p.acc = NULL, n.folds = 2)
#'
#' plotSlopes(x)
#' }
plotSlopes <- function(x, ...){
if(!inherits(x, "A3")){
stop("'x' must be of class 'A3'.")
}
size <- length(x$slopes)
if(size == 0 ){
stop("no slopes to plot")
}
width <- ceiling(sqrt(size))
height <- floor(sqrt(size))
if(width*height < size){
width <- width+1
}
old.par <- par(mfrow = c(height, width), mar = .55*c(5, 4, 4, 2) + 0.2)
for(s in names(x$slopes)){
if(length(unique(x$all.slopes[[s]]))==1){
plot(x$slopes[[s]], 0, main = s)
}else{
plot(density(x$all.slopes[[s]],...), xlab = "", ylab="", main = s, ...)
rug(x$all.slopes[[s]], col="Blue")
}
abline(h=0, col="Gray")
abline(v=0, col="Gray")
}
par(old.par)
}
#' Print Fit Results
#'
#' Prints an 'A3' object results table.
#'
#' @param x an A3 object.
#' @param ... additional arguments passed to the \code{\link{print}} function.
#' @method print A3
#' @examples
#' x <- a3.lm(rating ~ ., attitude, p.acc = NULL)
#' print(x)
print.A3 <- function(x, ...){
if(!inherits(x, "A3")){
stop("'x' must be of class 'A3'.")
}
print(x$table, ...)
}
#' Nicely Formatted Fit Results
#'
#' Creates a LaTeX table of results. Depends on the \pkg{xtable} package.
#'
#' @param x an A3 object.
#' @param ... additional arguments passed to the \code{\link{print.xtable}} function.
#' @method xtable A3
#' @examples
#' x <- a3.lm(rating ~ ., attitude, p.acc = NULL)
#' xtable(x)
xtable.A3 <- function(x, ...){
# require(xtable) # not needed due to depends
if(!inherits(x, "A3")){
stop("'x' must be of class 'A3'.")
}
data <- x$table
names(data) <- gsub("p value","Pr(>R^2)", names(data), fixed=TRUE)
names(data) <- gsub("R^2","$R^2$", names(data), fixed=TRUE)
print(xtable(data, align=c("l","|", rep("r", ncol(data))), ...), sanitize.colnames.function = function(x){x}, sanitize.text.function = function(x){
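# wrap plain numbers, percentages, and "<" bounds in LaTeX math mode;
# escape underscores in all other cells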
return(sapply(x, function(x){
trimmed = gsub("(^ +)|( +$)", "", x)
if((! is.na(suppressWarnings(as.numeric(x)))) && suppressWarnings(as.numeric(x))==trimmed){
return(paste0("$", x, "$"))
}else if(substr(trimmed, nchar(trimmed), nchar(trimmed))=="%"){
return(paste0("$", gsub("%", "\\%", trimmed, fixed=T), "$"));
}else if(substr(trimmed, 1, 1)=="<"){
return(paste0("$", trimmed, "$"));
}else{
return (gsub("_", "\\_", x, fixed=T));
}
}))
})
}
#' Cross-Validated \eqn{R^2}
#'
#' Applies cross validation to obtain the cross-validated \eqn{R^2} for a model: the fraction of the squared error explained by the model compared to the null model (which is defined as the average response). A pseudo \eqn{R^2} is implemented for classification.
#'
#' @param y a vector of responses.
#' @param x a matrix of features.
#' @param simulate.fn a function object that creates a model and predicts y.
#' @param cv.folds the cross-validation folds.
#'
#' @return A list comprising of the following elements:
#' \item{R2}{the cross-validated \eqn{R^2}}
#' \item{predicted}{the predicted responses}
#' \item{observed}{the observed responses}
a3.r2 <- function(y, x, simulate.fn, cv.folds){
errors <- lapply(cv.folds, function(fold){
test.y <- y[fold]
test.x <- x[fold,]
train.y <- y[-fold]
train.x <- as.data.frame(x[-fold,])
new.y <- simulate.fn(train.y, train.x, test.x)
if(is.factor(new.y)){
# classification
return(list( type="classification", correct = (new.y == test.y), predicted = new.y, observed = test.y ))
}else{
# regression
y.null <- mean(train.y)
return(list( type="regression", ss.null = sum((test.y-y.null)^2), ss.model = sum((test.y-new.y)^2), predicted = new.y, observed = test.y ))
}
}
)
if(errors[[1]]$type == "regression"){
# regression
ss.model <- sum(unlist(sapply(errors, function(x){x$ss.model})))
ss.null <- sum(unlist(sapply(errors, function(x){x$ss.null})))
return( list(R2 = 1 - ss.model/ss.null, predicted = unlist(sapply(errors, function(x){x$predicted})), observed = unlist(sapply(errors, function(x){x$observed}))) )
}else{
# classification
corrects <- unlist(sapply(errors, function(x){x$correct}))
null.count <- max(table(as.factor(y))) # return number of observations in the largest class
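# pseudo R^2: 0 means no better than always guessing the largest class,
# 1 means perfect classification (values can be negative if performance is worse)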
return( list( R2 = (sum(corrects)-null.count) / (length(corrects)-null.count), predicted = unlist(sapply(errors, function(x){x$predicted})), observed = unlist(sapply(errors, function(x){x$observed}))) )
}
}
#' Stochastic Data Generators
#'
#' The stochastic data generators generate stochastic noise with (if specified correctly) the same properties as the observed data. By replicating the stochastic properties of the original data, we are able to obtain the exact calculation of p values.
#'
#' Generally these will not be called directly but will instead be passed to the \code{data.generating.fn} argument of \code{\link{a3.base}}.
#'
#' @name a3.gen.default
#' @aliases a3.gen.default a3.gen.bootstrap a3.gen.resample a3.gen.normal a3.gen.autocor
#'
#' @param x the original (observed) data series.
#' @param n.reps the number of stochastic repetitions to generate.
#'
#' @return A list of length \code{n.reps} of vectors of stochastic noise. There are a number of different methods of generating noise:
#' \item{a3.gen.default}{The default data generator. Uses \code{a3.gen.bootstrap}.}
#' \item{a3.gen.resample}{Reorders the original data series.}
#' \item{a3.gen.bootstrap}{Resamples the original data series with replacement.}
#' \item{a3.gen.normal}{Calculates the mean and standard deviation of the original series and generates a new series with that distribution.}
#' \item{a3.gen.autocor}{Assumes a first-order autocorrelation of the original series and generates a new series with the same properties.}
#'
#' @examples
#' \donttest{
#' # Calculate the A3 results assuming an auto-correlated set of observations.
#' # In usage p.acc should be <=0.01 in order to obtain more accurate p values.
#'
#' a3.lm(rating ~ ., attitude, p.acc = 0.1,
#' data.generating.fn = replicate(ncol(attitude), a3.gen.autocor))
#' }
#'
#' ## A general illustration:
#'
#' # Take x as a sample set of observations for a feature
#' x <- c(0.349, 1.845, 2.287, 1.921, 0.803, 0.855, 2.368, 3.023, 2.102, 4.648)
#'
#' # Generate three stochastic data series with the same autocorrelation properties as x
#' rand.x <- a3.gen.autocor(x, 3)
#'
#' plot(x, type="l")
#' for(i in 1:3) lines(rand.x[[i]], lwd = 0.2)
# Default generator, use bootstrap
a3.gen.default <- function(x, n.reps){
if(length(unique(x))==1){
#it's a constant, such as an intercept
return(a3.gen.normal(x, n.reps))
}
a3.gen.bootstrap(x, n.reps)
}
# Generates a bootstrap random data series
a3.gen.bootstrap <- function(x, n.reps){
res <- lapply(1:n.reps, function(r) {sample(x, length(x), replace=TRUE)})
res$default <- x
res
}
# Generates a resampled random data series
a3.gen.resample <- function(x, n.reps){
res <- lapply(1:n.reps, function(r) {sample(x, length(x), replace=FALSE)})
res$default <- x
res
}
# Generates a normally distributed random data series
a3.gen.normal <- function(x, n.reps){
mu <- mean(x)
sd <- sd(x)
if(sd == 0){
sd <- 1
}
res <- lapply(1:n.reps, function(r) {rnorm(length(x), mu, sd)})
res$default <- x
res
}
# Generates a first order autocorrelated random data series
a3.gen.autocor <- function(x, n.reps){
mu <- mean(x)
sd <- sd(x)
if(sd == 0){
r <- 1
}else{
r <- cor(x[-1], x[-length(x)])
}
res <- lapply(1:n.reps,
function(rep) {
dat <- rnorm(length(x), mu, sd)
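# mix each draw with its predecessor to induce first-order autocorrelation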
for(i in 2:length(x)){
dat[i] <- dat[i-1]*r + dat[i]*(1-r)
}
return(dat)
}
)
res$default <- x
res
}
# ---- end of file: A3/R/A3.R ----
#' Boston Housing Prices
#'
#' A dataset containing the prices of houses in the Boston region and a number of features.
#' The dataset and the following description are based on those provided by the UCI Machine Learning Repository (\url{http://archive.ics.uci.edu/ml/datasets/Housing}).
#'
#' \itemize{
#' \item CRIME: Per capita crime rate by town
#' \item ZN: Proportion of residential land zoned for lots over 25,000 sq.ft.
#' \item INDUS: Proportion of non-retail business acres per town
#' \item CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
#' \item NOX: Nitrogen oxides pollutant concentration (parts per 10 million)
#' \item ROOMS: Average number of rooms per dwelling
#' \item AGE: Proportion of owner-occupied units built prior to 1940
#' \item DISTANCE: Weighted distances to five Boston employment centres
#' \item HIGHWAY: Index of accessibility to radial highways
#' \item TAX: Full-value property-tax rate per ten thousand dollars
#' \item PUPIL.TEACHER: Pupil-teacher ratio by town
#' \item MINORITY: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
#' \item LSTAT: Percent lower status of the population
#' \item MED.VALUE: Median value of owner-occupied homes in thousands of dollars
#' }
#'
#' @docType data
#' @keywords datasets
#' @name housing
#' @usage data(housing)
#' @references
#' Frank, A. & Asuncion, A. (2010). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science.
#'
#' Harrison, D. and Rubinfeld, D.L. Hedonic prices and the demand for clean air, J. Environ. Economics & Management, vol.5, 81-102, 1978.
NULL
#' Ecosystem Multifunctionality
#'
#' This dataset relates multifunctionality to a number of different biotic and abiotic features in a global survey of drylands. The dataset was obtained from \url{http://www.sciencemag.org/content/335/6065/214/suppl/DC1}. The dataset contains the features listed below.
#'
#' \itemize{
#' \item ELE: Elevation of the site
#' \item LAT & LONG: Location of the site
#' \item SLO: Site slope
#' \item SAC: Soil sand content
#' \item PCA_C1, PCA_C2, PCA_C3, PCA_C4: Principal components of a set of 21 climatic features
#' \item SR: Species richness
#' \item MUL: Multifunctionality
#' }
#'
#' @docType data
#' @keywords datasets
#' @name multifunctionality
#' @usage data(multifunctionality)
#' @references
#' Maestre, F. T., Quero, J. L., Gotelli, N. J., Escudero, A., Ochoa, V., Delgado-Baquerizo, M., et al. (2012). Plant Species Richness and Ecosystem Multifunctionality in Global Drylands. Science, 335(6065), 214-218. doi:10.1126/science.1215442
NULL | /scratch/gouwar.j/cran-all/cranData/A3/R/A3.data.R |
#############################
# bootstrapped bias score computation
#' @title Compute bootstrapped approach-bias scores
#' @description Compute bootstrapped approach-bias scores with confidence intervals.
#' @param ds a longformat data.frame
#' @param subjvar Quoted name of the participant identifier column
#' @param pullvar Quoted name of the column indicating pull trials.
#' Pull trials should either be represented by 1, or by the second level of a factor.
#' @param targetvar Name of the column indicating trials featuring the target stimulus.
#' Target stimuli should either be represented by 1, or by the second level of a factor.
#' @param rtvar Name of the reaction time column.
#' @param iters Total number of desired iterations. At least 200 are required to get confidence intervals that make sense.
#' @param algorithm Function (without brackets or quotes) to be used to compute AAT scores. See \link{Algorithms} for a list of usable algorithms.
#' @param trialdropfunc Function (without brackets or quotes) to be used to exclude outlying trials in each half.
#' The way you handle outliers for the reliability computation should mimic the way you do it in your regular analyses.
#' It is recommended to exclude outlying trials when computing AAT scores using the mean double-dfference scores and regression scoring approaches,
#' but not when using d-scores or median double-difference scores.
#' \itemize{
#' \item \code{prune_nothing} excludes no trials (default)
#' \item \code{trial_prune_grubbs} applies a Grubbs' test to the data, removing one outlier at a time until the test is no longer significant.
#' \item \code{trial_prune_3SD} excludes trials deviating more than 3SD from the mean per participant.
#' \item \code{trial_prune_SD_dropcases} removes trials deviating more than a specific number of standard deviations from the participant's mean,
#' and removes participants with an excessive percentage of outliers.
#' Required arguments:
#' \itemize{
#' \item \code{trialsd} - trials deviating more than \code{trialsd} standard deviations from the participant's mean are excluded (optional; default is 3)
#' \item \code{maxoutliers} - participants with a higher percentage of outliers are removed from the data. (optional; default is .15)
#' }
#' \item \code{trial_recode_SD} recodes outlying reaction times to the nearest non-outlying value,
#' with outliers defined as reaction times deviating more than a certain number of standard deviations from the participant's mean. Required argument:
#' \itemize{
#' \item \code{trialsd} - trials deviating more than this many standard deviations from the mean are classified as outliers.
#' }
#' \item \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample} remove trials below and/or above certain percentiles,
#' on a subject-by-subject basis or sample-wide, respectively. The following arguments are available:
#' \itemize{
#' \item \code{lowerpercent} and \code{upperpercent} (optional; defaults are .01 and .99).
#' }
#' }
#' @param errortrialfunc Function (without brackets or quotes) to apply to an error trial.
#'
#' \itemize{
#' \item \code{prune_nothing} removes no errors (default).
#' \item \code{error_replace_blockmeanplus} replaces error trial reaction times with the block mean, plus an arbitrary extra quantity.
#' If used, the following additional arguments are required:
#' \itemize{
#' \item \code{blockvar} - Quoted name of the block variable (mandatory)
#' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory)
#' \item \code{errorbonus} - Amount to add to the reaction time of error trials. Default is 0.6 (recommended by Greenwald, Nosek, & Banaji, 2003)
#' }
#' \item \code{error_prune_dropcases} removes errors and drops participants if they have more errors than a given percentage. The following arguments are available:
#' \itemize{
#' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory)
#' \item \code{maxerrors} - participants with a higher percentage of errors are excluded from the dataset. Default is .15.
#' }
#' }
#' @param plot Plot the bias scores and their confidence intervals after computation is complete. This gives a good overview of the data.
#' @param include.raw logical indicating whether the raw bias scores from each bootstrap iteration should be included in the output object.
#' @param parallel If TRUE (default), will use parallel computing to compute results faster.
#' If a doParallel backend has not been registered beforehand,
#' this function will register a cluster and stop it after finishing, which takes some extra time.
#' @param ... Other arguments, to be passed on to the algorithm or outlier rejection functions (see arguments above)
#'
#'
#' @return A list, containing bootstrapped bias scores, their variance, bootstrapped 95 percent confidence intervals,
#' the number of iterations, and a matrix of bias scores for each iteration.
#'
#' @author Sercan Kahveci
#' @examples
#' # Compute 10 bootstrapped AAT scores.
#' boot<-aat_bootstrap(ds=erotica[erotica$is_irrelevant==0,], subjvar="subject",
#' pullvar="is_pull", targetvar="is_target",rtvar="RT",
#' iters=10,algorithm="aat_doublemediandiff",
#' trialdropfunc="trial_prune_3SD",
#' plot=FALSE, parallel=FALSE)
#' plot(boot)
#' print(boot)
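#'
#' # A hypothetical variant (not from the original examples): outlier-handling
#' # arguments can be passed through '...', here dropping trials beyond 2.5 SD
#' # and excluding participants with more than 25% outlying trials.
#' \donttest{
#' boot2<-aat_bootstrap(ds=erotica[erotica$is_irrelevant==0,], subjvar="subject",
#' pullvar="is_pull", targetvar="is_target", rtvar="RT",
#' iters=10, algorithm="aat_doublemeandiff",
#' trialdropfunc="trial_prune_SD_dropcases",
#' trialsd=2.5, maxoutliers=.25,
#' plot=FALSE, parallel=FALSE)
#' }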
#'
#' @export
aat_bootstrap<-function(ds,subjvar,pullvar,targetvar=NULL,rtvar,iters,
algorithm=c("aat_doublemeandiff","aat_doublemediandiff",
"aat_dscore","aat_dscore_multiblock",
"aat_regression","aat_standardregression",
"aat_singlemeandiff","aat_singlemediandiff"),
trialdropfunc=c("prune_nothing","trial_prune_3SD","trial_prune_3MAD",
"trial_prune_SD_dropcases","trial_recode_SD",
"trial_prune_percent_subject","trial_prune_percent_sample",
"trial_prune_grubbs"),
errortrialfunc=c("prune_nothing","error_replace_blockmeanplus","error_prune_dropcases"),
plot=TRUE,include.raw=FALSE,parallel=TRUE,...){
packs<-c("magrittr","dplyr","AATtools")
#Handle arguments
args<-list(...)
algorithm<-ifelse(is.function(algorithm),deparse(substitute(algorithm)),match.arg(algorithm))
if(!(algorithm %in% c("aat_singlemeandiff","aat_singlemediandiff","aat_regression","aat_standardregression")) & is.null(targetvar)){
stop("Argument targetvar missing but required for algorithm!")
}
trialdropfunc<-ifelse(is.function(trialdropfunc),deparse(substitute(trialdropfunc)),match.arg(trialdropfunc))
errortrialfunc<-ifelse(is.function(errortrialfunc),deparse(substitute(errortrialfunc)),match.arg(errortrialfunc))
errorpenalizefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus",errortrialfunc,"prune_nothing")
errorremovefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus","prune_nothing",errortrialfunc)
if(errortrialfunc=="error_replace_blockmeanplus"){
stopifnot(!is.null(args$blockvar),!is.null(args$errorvar))
if(is.null(args$errorbonus)){ args$errorbonus<- 0.6 }
if(is.null(args$blockvar)){ args$blockvar<- 0 }
if(is.null(args$errorvar)){ args$errorvar<- 0 }
}
stopifnot(!(algorithm=="aat_dscore_multiblock" & is.null(args$blockvar)))
if(algorithm %in% c("aat_regression","aat_standardregression")){
if(!("formula" %in% names(args))){
args$formula<-as.formula(paste0(rtvar,"~",pullvar,"*",targetvar))
warning("No formula provided. Defaulting to formula ",form2char(args$formula))
}else if(is.character(args$formula)){
args$formula<-as.formula(args$formula)
}
if(!("aatterm" %in% names(args))){
args$aatterm<-paste0(pullvar,":",targetvar)
warning("No AAT-term provided. Defaulting to AAT-term ",args$aatterm)
}
}
ds<-do.call(aat_preparedata,c(list(ds=ds,subjvar=subjvar,pullvar=pullvar,targetvar=targetvar,rtvar=rtvar),args)) %>% mutate(key=1)
#Prepare the cluster
if(parallel){
`%dofunc%` <- `%dopar%`
hasCluster<-getDoParRegistered()
if(!hasCluster){
cluster<-makeCluster(getOption("AATtools.workers"))
registerDoParallel(cluster)
on.exit(unregisterDoParallel(cluster))
}
}else{
`%dofunc%` <- `%do%`
}
#bootstrap loop
results<-
foreach(iter = seq_len(iters), .packages=packs, .combine=cbind) %dofunc% {
#Resample the data with replacement within each participant-condition cell
# iterds<-ds %>% group_by(!!sym(subjvar), !!sym(pullvar), !!sym(targetvar)) %>%
# sample_n(size=n(),replace=TRUE) %>% ungroup()
iterds<-ds[unlist(lapply(split(x=seq_len(nrow(ds)),f=ds[c(subjvar,pullvar,targetvar)]),
FUN=function(x){ x[sample.int(length(x),replace=T)] })),]
#Handle error removal
iterds<-do.call(errorremovefunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar)))
#Handle outlying trials
iterds<-do.call(trialdropfunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar)))
#Handle error penalization
iterds<-do.call(errorpenalizefunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar)))
abds<-do.call(algorithm,c(list(ds=iterds,subjvar=subjvar,pullvar=pullvar,
targetvar=targetvar,rtvar=rtvar),args))
#colnames(abds)<-c(subjvar,paste0("iter", formatC(iter, width = nchar(iters), format = "d", flag = "0")))
outvar<-abds$ab
names(outvar)<-abds[[subjvar]]
outvar
}
#results<-results[!is.na(rownames(results)),]
statset<-data.frame(ppidx=rownames(results),
bias=rowMeans(results,na.rm=TRUE),
var=apply(results,MARGIN = 1,FUN=var,na.rm=TRUE),
lowerci=apply(results,MARGIN=1,FUN=function(x){quantile(x,0.025,na.rm=TRUE)}),
upperci=apply(results,MARGIN=1,FUN=function(x){quantile(x,0.975,na.rm=TRUE)}),
stringsAsFactors=F)
statset$ci<-statset$upperci-statset$lowerci
#q-reliability
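# q = 1 - (mean within-participant bootstrap variance / between-participant
# variance of mean bias scores); higher values indicate that individual
# differences dominate resampling noise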
bv<-var(statset$bias,na.rm=TRUE)
wv<-mean(statset$var,na.rm=TRUE)
q<-1-wv/bv
output<-list(bias=statset,
reliability=q,
parameters=c(list(ds=ds,
subjvar=subjvar,
pullvar=pullvar,
targetvar=targetvar,
rtvar=rtvar,
iters=iters,
algorithm=algorithm,
trialdropfunc=trialdropfunc,
errortrialfunc=errortrialfunc),args)) %>%
structure(class = "aat_bootstrap")
if(include.raw){
output$iterdata<-results
}
if(plot){ plot(output) }
return(output)
}
#' @export
#' @rdname aat_bootstrap
#' @param x An \code{aat_bootstrap} object.
print.aat_bootstrap<-function(x,...){
cat("Bootstrapped bias scores and confidence intervals",
"\nMean bias score: ", mean(x$bias$bias,na.rm=TRUE),
"\nMean confidence interval: ",mean(x$bias$ci,na.rm=TRUE),
"\nreliability: q = ",x$reliability,
"\nNumber of iterations: ",x$parameters$iters,sep="")
}
#' @export
#' @rdname aat_bootstrap
#' @param x An \code{aat_bootstrap} object.
plot.aat_bootstrap <- function(x,...){
statset<-x$bias
statset<-statset[!is.na(statset$bias) & !is.na(statset$upperci) & !is.na(statset$lowerci),]
rank<-rank(statset$bias)
wideness<-max(statset$upperci) - min(statset$lowerci)
plot(x=statset$bias,y=rank,xlim=c(min(statset$lowerci)-0.01*wideness,max(statset$upperci)+0.01*wideness),
xlab="Bias score",main=paste0("Individual bias scores with 95%CI",
"\nEstimated reliability: q = ",x$reliability))
segments(x0=statset$lowerci,x1=statset$bias-0.005*wideness,y0=rank,y1=rank)
segments(x0=statset$bias+0.005*wideness,x1=statset$upperci,y0=rank,y1=rank)
abline(v=0)
#text(x=statset$bias,y=statset$rownr,labels=statset$ppidx,cex=0.5)
}
# ---- end of file: AATtools/R/aat_bootstrap.R ----
#' @title Compute simple AAT scores
#' @description Compute simple AAT scores, with optional outlier exclusion and error trial recoding.
#' @param ds a long-format data.frame
#' @param subjvar column name of subject variable
#' @param pullvar column name of pull/push indicator variable, must be numeric or logical (where pull is 1 or TRUE)
#' @param targetvar column name of target stimulus indicator, must be numeric or logical (where target is 1 or TRUE)
#' @param rtvar column name of reaction time variable
#' @param algorithm Function (without brackets or quotes) to be used to compute AAT scores. See \link{Algorithms} for a list of usable algorithms.
#' @param trialdropfunc Function (without brackets or quotes) to be used to exclude outlying trials in each half.
#' The way you handle outliers for the reliability computation should mimic the way you do it in your regular analyses.
#' It is recommended to exclude outlying trials when computing AAT scores using the mean double-difference scores and regression scoring approaches,
#' but not when using d-scores or median double-difference scores.
#' \itemize{
#' \item \code{prune_nothing} excludes no trials (default)
#' \item \code{trial_prune_3SD} excludes trials deviating more than 3SD from the mean per participant.
#' \item \code{trial_prune_grubbs} applies a Grubbs' test to the data, removing one outlier at a time until the test is no longer significant.
#' \item \code{trial_prune_SD_dropcases} removes trials deviating more than a specific number of standard deviations from the participant's mean,
#' and removes participants with an excessive percentage of outliers.
#' Required arguments:
#' \itemize{
#' \item \code{trialsd} - trials deviating more than \code{trialsd} standard deviations from the participant's mean are excluded (optional; default is 3)
#' \item \code{maxoutliers} - participants with a higher percentage of outliers are removed from the data. (optional; default is .15)
#' }
#' \item \code{trial_recode_SD} recodes outlying reaction times to the nearest non-outlying value,
#' with outliers defined as reaction times deviating more than a certain number of standard deviations from the participant's mean. Required argument:
#' \itemize{
#' \item \code{trialsd} - trials deviating more than this many standard deviations from the mean are classified as outliers.
#' }
#' \item \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample} remove trials below and/or above certain percentiles,
#' on a subject-by-subject basis or sample-wide, respectively. The following arguments are available:
#' \itemize{
#' \item \code{lowerpercent} and \code{upperpercent} (optional; defaults are .01 and .99).
#' }
#' }
#' @param errortrialfunc Function (without brackets or quotes) to apply to an error trial.
#'
#' \itemize{
#' \item \code{prune_nothing} removes no errors (default).
#' \item \code{error_replace_blockmeanplus} replaces error trial reaction times with the block mean, plus an arbitrary extra quantity.
#' If used, the following additional arguments are required:
#' \itemize{
#' \item \code{blockvar} - Quoted name of the block variable (mandatory)
#' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory)
#' \item \code{errorbonus} - Amount to add to the reaction time of error trials. Default is 0.6 (recommended by Greenwald, Nosek, & Banaji, 2003)
#' }
#' \item \code{error_prune_dropcases} removes errors and drops participants if they have more errors than a given percentage. The following arguments are available:
#' \itemize{
#' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory)
#' \item \code{maxerrors} - participants with a higher percentage of errors are excluded from the dataset. Default is .15.
#' }
#' }
#' @param ... Other arguments, to be passed on to the algorithm or outlier rejection functions (see arguments above)
#'
#' @export
#'
#' @examples
#' #Compute the correlation between relevant-feature and irrelevant-feature AAT scores
#' ds<-erotica[erotica$correct==1,]
#' relevant <- aat_compute(ds=ds[ds$is_irrelevant==0,],
#' pullvar="is_pull",targetvar="is_target",
#' rtvar="RT",subjvar="subject",
#' trialdropfunc="trial_prune_3SD",
#' algorithm="aat_doublemediandiff")
#'
#' irrelevant <- aat_compute(ds=ds[ds$is_irrelevant==1,],
#' pullvar="is_pull",targetvar="is_target",
#' rtvar="RT",subjvar="subject",
#' trialdropfunc="trial_prune_3SD",
#' algorithm="aat_doublemediandiff")
#'
#' comparison.df <- merge(relevant, irrelevant, by = "subject")
#' cor(comparison.df$ab.x, comparison.df$ab.y)
#' # 0.1145726
aat_compute<-function(ds,subjvar,pullvar,targetvar=NULL,rtvar,
algorithm=c("aat_doublemeandiff","aat_doublemediandiff",
"aat_dscore","aat_dscore_multiblock",
"aat_regression","aat_standardregression",
"aat_doublemeanquotient","aat_doublemedianquotient",
"aat_singlemeandiff","aat_singlemediandiff"),
trialdropfunc=c("prune_nothing","trial_prune_3SD","trial_prune_3MAD",
"trial_prune_SD_dropcases","trial_recode_SD",
"trial_prune_percent_subject","trial_prune_percent_sample",
"trial_prune_grubbs"),
errortrialfunc=c("prune_nothing","error_replace_blockmeanplus","error_prune_dropcases"),
...){
#Handle arguments
args<-list(...)
algorithm<-ifelse(is.function(algorithm),deparse(substitute(algorithm)),match.arg(algorithm))
if(!(algorithm %in% c("aat_singlemeandiff","aat_singlemediandiff","aat_regression","aat_standardregression")) & is.null(targetvar)){
stop("Argument targetvar missing but required for algorithm!")
}
trialdropfunc<-ifelse(is.function(trialdropfunc),deparse(substitute(trialdropfunc)),match.arg(trialdropfunc))
errortrialfunc<-ifelse(is.function(errortrialfunc),deparse(substitute(errortrialfunc)),match.arg(errortrialfunc))
errorpenalizefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus",errortrialfunc,"prune_nothing")
errorremovefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus","prune_nothing",errortrialfunc)
if(errortrialfunc=="error_replace_blockmeanplus"){
stopifnot(!is.null(args$blockvar),!is.null(args$errorvar))
if(is.null(args$errorbonus)){ args$errorbonus<- 0.6 }
if(is.null(args$blockvar)){ args$blockvar<- 0 }
if(is.null(args$errorvar)){ args$errorvar<- 0 }
}
stopifnot(!(algorithm=="aat_dscore_multiblock" & is.null(args$blockvar)))
if(algorithm %in% c("aat_regression","aat_standardregression")){
if(!("formula" %in% names(args))){
args$formula<-as.formula(paste0(rtvar,"~",pullvar,"*",targetvar))
warning("No formula provided. Defaulting to formula ",form2char(args$formula))
}else if(is.character(args$formula)){
args$formula<-as.formula(args$formula)
}
if(!("aatterm" %in% names(args))){
args$aatterm<-paste0(pullvar,":",targetvar)
warning("No AAT-term provided. Defaulting to AAT-term ",args$aatterm)
}
}
ds<-do.call(aat_preparedata,c(list(ds=ds,subjvar=subjvar,pullvar=pullvar,targetvar=targetvar,rtvar=rtvar),args)) %>% mutate(key=1)
#Handle error removal
ds<-do.call(errorremovefunc,c(args,list(ds=ds,subjvar=subjvar,rtvar=rtvar)))
#Handle outlying trials
ds<-do.call(trialdropfunc,c(args,list(ds=ds,subjvar=subjvar,rtvar=rtvar)))
#Handle error penalization
ds<-do.call(errorpenalizefunc,c(args,list(ds=ds,subjvar=subjvar,rtvar=rtvar)))
abds<-do.call(algorithm,c(list(ds=ds,subjvar=subjvar,pullvar=pullvar,
targetvar=targetvar,rtvar=rtvar),args))
abds <- merge(x=abds,by=subjvar,all=TRUE,y=ds %>% group_by(!!sym(subjvar)) %>% summarise(trials=n()))
return(abds)
}
# ---- end of file: AATtools/R/aat_compute.R ----
#' Compute stimulus-specific bias scores
#' Computes mean single-difference scores (push - pull) for each stimulus.
#'
#' @param ds the \code{data.frame} to use
#' @param subjvar Name of the subject-identifying variable
#' @param stimvar Name of the stimulus-identifying variable
#' @param pullvar Name of the movement-direction identifying variable
#' @param targetvar Optional. Name of the stimulus-category identifying variable
#' @param rtvar Name of the reaction-time identifying variable
#' @param aggfunc The function with which to aggregate the RTs before computing difference scores. Defaults to mean but can be changed to median.
#' @param iters If there are missing values (which is almost inevitable) then
#' multiple imputation will be used to complete the covariance matrix - this argument sets
#' the number of multiple imputations to be used.
#'
#' @return Exports a \code{list} containing
#' a \code{data.frame} with stimulus-specific bias scores, indicated in the column names,
#' a covariance matrix of that same data, and
#' a \code{data.frame} indicating to which stimulus category each stimulus belongs.
#' @export
#'
#' @examples
#' ds<-aat_simulate(biasfx_jitter=40,nstims=16)
#' ds$stim<-paste0(ds$stim,"-",ds$is_target)
#' aat_stimulusscores(ds,"subj","stim","is_pull","is_target","rt")
aat_stimulusscores<-function(ds,subjvar,stimvar,pullvar,targetvar=NULL,rtvar,aggfunc=c("mean","median"),iters=5){
ds<-aat_preparedata(ds,subjvar=subjvar,pullvar=pullvar,stimvar=stimvar,targetvar=targetvar,rtvar=rtvar)
pps<-unique(ds[[subjvar]])
stims<-unique(ds[[stimvar]])
if(!is.null(targetvar)){
stimcats<-distinct(ds[c(stimvar,targetvar)]) %>% setNames(c("stim","cat"))
}else{
stimcats<-data.frame(stim=stims,cat=0,stringsAsFactors=F)
}
aggfunc<-match.arg(aggfunc)
if(aggfunc=="median"){
scorefunc<-aat_singlemediandiff
}else{
scorefunc<-aat_singlemeandiff
}
biases<-list()
for(u in seq_along(pps)){
biases[[u]]<-
do.call(scorefunc,list(ds=ds[ds[[subjvar]]==pps[u],],
subjvar=stimvar,pullvar=pullvar,rtvar=rtvar)) %>%
setNames(c(stimvar, as.character(pps[u])))
}
biasset<-Reduce(function(x,y){merge(x,y,by=stimvar,all=T)},x=biases[-1],init=biases[1])
biasmat<-t(as.matrix(biasset[,-1]))
colnames(biasmat)<-biasset[[1]]
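# complete missing stimulus scores and obtain a full covariance matrix
# via multiple imputation; 'iters' sets the number of imputations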
unmissing<-covEM(biasmat,iters)
covmat<-unmissing$sigma
rownames(covmat)<-colnames(covmat)
dataset<-unmissing$data
out<-list(data=dataset,covmat=covmat,stimcats=stimcats)
return(out)
}
#' Compute a dataset's reliability from its covariance matrix
#'
#' This function computes mean single-difference scores (push minus pull) for individual stimuli,
#' and computes the reliability from that information.
#' Missing values are dealt with using multiple imputation.
#'
#' When only one stimulus category is indicated, one of the commonly known reliability algorithms
#' provided with the \code{algorithm} argument is used.
#' When two stimulus categories are indicated, this function uses Lord's (1963) algorithm to
#' compute the reliability of a double mean difference score, using the algorithms in \code{algorithm}
#' to estimate the reliability of individual stimulus categories.
#'
#' When one wants to compute the reliability of a double median difference score or D-score,
#' \code{aat_splithalf()} is recommended instead.
#'
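#' As a sketch of what is computed here: Lord's formula estimates the reliability of the
#' difference between the two category scores as
#' \deqn{r = \frac{s_1^2 r_{11} + s_2^2 r_{22} - 2 s_1 s_2 r_{12}}{s_1^2 + s_2^2 - 2 s_1 s_2 r_{12}}}
#' where \eqn{r_{11}} and \eqn{r_{22}} are the reliabilities of the two stimulus categories,
#' \eqn{r_{12}} is their intercorrelation, and \eqn{s_1} and \eqn{s_2} are the standard
#' deviations of the category mean scores.
#'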
#' @param ds the \code{data.frame} to use
#' @param subjvar Name of the subject-identifying variable
#' @param stimvar Name of the stimulus-identifying variable
#' @param pullvar Name of the movement-direction identifying variable
#' @param targetvar Optional. Name of the stimulus-category identifying variable
#' @param rtvar Name of the reaction-time identifying variable
#' @param aggfunc The function with which to aggregate the RTs before computing difference scores. Defaults to mean but can be changed to median.
#' @param algorithm The reliability formula to use. Defaults to Cronbach's alpha, but Guttman's Lambda-2 is recommended instead.
#' @param iters If there are missing values (which is almost inevitable) then
#' multiple imputation will be used to complete the covariance matrix - this option sets
#' the number of multiple imputations to be used.
#'
#' @return Returns an \code{aat_covreliability} object containing the reliability value
#' as well as the dataset and covariance matrix with replaced missing values. When
#' the argument \code{targetvar} is provided, the output also contains the reliability of the
#' individual stimulus categories and their intercorrelation.
#'
#' @export
#'
#' @references
#' Lord, F.M. (1963). "Elementary Models for Measuring Change",
#' in Problems in Measuring Change, C.W. Harris, ed. Madison, Wisconsin:
#' University of Wisconsin.
#'
#' @examples
#' #We generate a dataset with 16 stimuli in each category
#' ds<-aat_simulate(biasfx_jitter=40,nstims=16)
#' ds$stim<-paste0(ds$stim,"-",ds$is_target)
#'
#' # If Lord's formula and
#' # bootstrapped splithalf measure something similar,
#' # then the outcomes should be close to each other.
#' aat_covreliability(ds=ds,subjvar="subj",stimvar="stim",pullvar="is_pull",
#' targetvar="is_target",rtvar="rt")
#' aat_splithalf(ds=ds,subjvar="subj",pullvar="is_pull",targetvar="is_target",rtvar="rt",
#' algorithm="aat_doublemeandiff",iters=100,plot=FALSE)
#'
#' #Testing reliability for single-difference scores
#' ds<-ds[ds$is_target==1,]
#' aat_covreliability(ds=ds,subjvar="subj",stimvar="stim",pullvar="is_pull",rtvar="rt")
aat_covreliability<-function(ds,subjvar,stimvar,pullvar,targetvar=NULL,rtvar,aggfunc=c("mean","median"),
algorithm=c("calpha","lambda2","lambda4"),iters=5){
algorithm<-match.arg(algorithm)
aggfunc<-match.arg(aggfunc)
sc<-aat_stimulusscores(ds,subjvar=subjvar,stimvar=stimvar,pullvar=pullvar,targetvar=targetvar,
rtvar=rtvar,aggfunc=aggfunc,iters=iters)
if(!is.null(targetvar)){
dia<-diag(sc$covmat)
firstcat <-which(names(dia) %in% sc$stimcats$stim[sc$stimcats$cat==0])
secondcat<-which(names(dia) %in% sc$stimcats$stim[sc$stimcats$cat==1])
n1<-length(firstcat )
n2<-length(secondcat)
r11<-do.call(algorithm,list(covmat=sc$covmat[firstcat, firstcat ]))
r22<-do.call(algorithm,list(covmat=sc$covmat[secondcat,secondcat]))
# r12<-cor(x=rowSums(sc$dataset[,firstcat]),
# y=rowSums(sc$dataset[,secondcat]))
r12<-sum(sc$covmat[firstcat,secondcat])/sqrt(sum(sc$covmat[firstcat,firstcat])*sum(sc$covmat[secondcat,secondcat]))
s1<-sqrt(sum(sc$covmat[firstcat,firstcat]))/n1
s2<-sqrt(sum(sc$covmat[secondcat,secondcat]))/n2
rel<-(s1^2*r11+s2^2*r22-2*s1*s2*r12)/
(s1^2+s2^2-2*s1*s2*r12)
}else{
rel<-do.call(algorithm,list(covmat=sc$covmat))
}
out<-structure(list(rel=rel,data=sc$data,covmat=sc$covmat,algorithm=algorithm),
class="aat_covreliability")
if(!is.null(targetvar)){
out$components<-list(r11=r11,r22=r22,r12=r12,n1=n1,n2=n2,s1=s1,s2=s2)
}
return(out)
}
#' @export
#' @describeIn aat_covreliability Print an \code{aat_covreliability} object
print.aat_covreliability<-function(x,...){
cat(sep="","r = ",mf(x$rel),
"\nBased on ",ncol(x$data)," valid stimuli, ",
nrow(x$data)," valid participants, and the ",
x$algorithm," algorithm.\n")
if(any("components"==names(x))){
cat(sep="",
"Reliability of stimulus category 1: r = ",mf(x$components$r11),", n = ",x$components$n1,", sd = ",mf(x$components$s1),"\n",
"Reliability of stimulus category 2: r = ",mf(x$components$r22),", n = ",x$components$n2,", sd = ",mf(x$components$s2),"\n",
"Category intercorrelation: r = ",mf(x$components$r12),"\n")
}
}
#' @rdname aat_covreliability
#' @param holdout What should be removed from the data for computation of jackknife statistics?
#' "both" computes reliability when stimuli and participants are separately removed,
#' while "cross" computes reliability when stimuli and participants are simultaneously removed.
#' @description This function computes the reliability when stimuli and participants are removed,
#' allowing for the diagnosis of potential sources of unreliability within the data.
#' @export
#' @return \code{aat_covreliability_jackknife()} returns an \code{aat_covreliability_jackknife} object,
#' containing jackknife reliability statistics. If argument \code{holdout} was set to "cross",
#' then these statistics are provided in a matrix where rows represent participants and columns represent stimuli.
#' Otherwise, they are provided in \code{data.frame}s where the stimulus or participant is represented in a column
#' alongside the associated reliability value.
#' @examples
#' hh<-aat_simulate()
#' test<-aat_covreliability_jackknife(ds=hh,subjvar="subj",stimvar="stim",pullvar="is_pull",
#' targetvar="is_target",rtvar="rt",holdout="cross")
#' print(test)
#' plot(test)
aat_covreliability_jackknife<-function(ds,subjvar,stimvar,pullvar,targetvar=NULL,rtvar,
algorithm=c("calpha","lambda2","lambda4"),iters=5,
holdout=c("both","participant","stimulus","cross")){
algorithm<-match.arg(algorithm)
sc<-aat_stimulusscores(ds,subjvar=subjvar,stimvar=stimvar,pullvar=pullvar,targetvar=targetvar,
rtvar=rtvar,iters=iters)
cat1<-sc$stimcats$stim[sc$stimcats$cat==0]
cat2<-sc$stimcats$stim[sc$stimcats$cat==1]
#declare reliability computation functions
if(!is.null(targetvar)){
relfinder<-function(psc){
dia<-diag(psc)
firstcat <-which(names(dia) %in% cat1)
secondcat<-which(names(dia) %in% cat2)
n1<-length(firstcat)
n2<-length(secondcat)
r11<-do.call(algorithm,list(covmat=psc[firstcat, firstcat ]))
r22<-do.call(algorithm,list(covmat=psc[secondcat,secondcat]))
r12<-sum(psc[firstcat,secondcat])/sqrt(sum(psc[firstcat,firstcat])*sum(psc[secondcat,secondcat]))
s1<-sqrt(sum(psc[firstcat,firstcat]))/n1
s2<-sqrt(sum(psc[secondcat,secondcat]))/n2
rel<-(s1^2*r11+s2^2*r22-2*s1*s2*r12)/
(s1^2+s2^2-2*s1*s2*r12)
return(rel)
}
}else{
relfinder<-function(psc){
rel<-do.call(algorithm,list(covmat=psc))
return(rel)
}
}
output<-list(rel=relfinder(sc$covmat))
pps<-sort(unique(ds[[subjvar]]))
stims<-sort(unique(ds[[stimvar]]))
if(any(c("both","participant")==holdout)){
# Run jackknife over participants
ppset<-data.frame(pp=pps,rel=NA)
for(i in seq_along(ppset$pp)){
ppset$rel[i]<-relfinder(cov(sc$data[rownames(sc$data)!=ppset$pp[i],]))
}
output$pps<-ppset
}
if(any(c("both","stimulus")==holdout)){
#Run jackknife over stimuli
stimset<-data.frame(stim=stims,rel=NA)
for(i in seq_along(stimset$stim)){
stimset$rel[i]<-relfinder(sc$covmat[rownames(sc$covmat) != stimset$stim[i], colnames(sc$covmat) != stimset$stim[i]])
}
output$stims<-stimset
}
if("cross"==holdout){
#run jackknife over stimuli and participants simultaneously
relmat<-matrix(NA,nrow=length(pps),ncol=length(stims))
rownames(relmat)<-pps
colnames(relmat)<-stims
for(i in seq_len(nrow(relmat))){
itercov<-cov(sc$data[rownames(sc$data) != rownames(relmat)[i],])
for(j in seq_len(ncol(relmat))){
relmat[i,j]<-relfinder(itercov[rownames(itercov) != colnames(relmat)[j],colnames(itercov) != colnames(relmat)[j]])
}
}
output$cross<-relmat
}
output<-structure(c(output,list(data=sc$data,covmat=sc$covmat,algorithm=algorithm,holdout=holdout)),
class="aat_covreliability_jackknife")
return(output)
}
#' @export
#' @describeIn aat_covreliability Print an \code{aat_covreliability_jackknife} object
#' @param x Object to be printed
#' @param ... Ignored
print.aat_covreliability_jackknife<-function(x, ...){
cat("Reliability: r = ",mf(x$rel),"\n",sep="")
if(any("pps"==names(x))){
cmax<-which.max(x$pps$rel)
cat("Maximum achieveable reliability is with removal of participant ",as.character(x$pps$pp[cmax]),
": r = ",mf(x$pps$rel[cmax]),"\n",sep="")
}
if(any("stims"==names(x))){
cmax<-which.max(x$stims$rel)
cat("Maximum achieveable reliability is with removal of stimulus ",as.character(x$stims$stim[cmax]),
": r = ",mf(x$stims$rel[cmax]),"\n",sep="")
}
if(any("cross"==names(x))){
cmax<-which(x$cross==max(x$cross),arr.ind=T)
cat("Maximum achieveable reliability is with removal of stimulus ",colnames(x$cross)[cmax[2]],
" and participant ",rownames(x$cross)[cmax[1]],
": r = ",mf(x$cross[cmax[1],cmax[2]]),"\n",sep="")
}
}
#' @export
#' @describeIn aat_covreliability Plot an \code{aat_covreliability_jackknife} object
plot.aat_covreliability_jackknife<-function(x, ...){
prev.mfrow<-par("mfrow")
ncols<-sum(c("pps", "stims","cross") %in% names(x))
par(mfrow=c(1,ncols))
if(any("pps"==names(x))){
ord<-order(x$pps$rel)
plot(range(x$pps$rel), range(ord), bty = 'n', type = 'n',main="Participants",
xlab="Jackknife reliability",ylab="Rank")
abline(v=x$rel,col="#00000055")
text(x=x$pps$rel[ord],y=seq_along(x$pps$rel),label=as.character(x$pps$pp[ord]),cex=.7)
}
if(any("stims"==names(x))){
ord<-order(x$stims$rel)
plot(range(x$stims$rel), range(ord), bty = 'n', type = 'n',main="Stimuli",
xlab="Jackknife reliability",ylab="Rank")
abline(v=x$rel,col="#00000055")
text(x=x$stims$rel[ord],y=seq_along(x$stims$rel),label=as.character(x$stims$stim[ord]),cex=.7)
}
if(any("cross"==names(x))){
image(t(x$cross), xlab="Stimuli",ylab="Participants",axes=FALSE,main="Jackknife reliability")
axis(2,at=(seq_len(nrow(x$cross))-1)/(nrow(x$cross)-1),labels=rownames(x$cross))
axis(1,at=(seq_len(ncol(x$cross))-1)/(ncol(x$cross)-1),labels=colnames(x$cross))
}
par(mfrow=prev.mfrow)
}
# File: /scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_covreliability.R
#' Simulate AAT datasets and predict parameters
#'
#' \code{aat_simulate()} generates approach-avoidance task datasets.
#'
#' @param npps Number of participants
#' @param nstims Number of stimuli
#' @param stimreps Number of repetitions of each stimulus within each group
#' (i.e. within approach target, avoid target, approach control, avoid control)
#' @param meanrt Mean sample reaction time
#' @param meanrt_jitter Extent by which participants' mean RTs
#' deviate from mean sample RT.
#' @param sdrt Standard deviation of samplewide RTs,
#' ignoring effects of movement, stimulus, and approach bias.
#' In essence, this represents the amount of pure noise present in the data.
#' @param sdrt_jitter Extent by which standard deviations of individual participants' RTs
#' are larger or smaller than the samplewide SD.
#' @param pullfx size of the effect of approach-versus-avoidance, in milliseconds
#' @param pullfx_jitter Individual variation in the effect of approach-versus-avoidance
#' @param stimfx size of the effect of stimulus category, in milliseconds
#' @param stimfx_jitter Individual variation in the effect of stimulus category
#' @param biasfx Size of the approach bias effect, in milliseconds
#' @param biasfx_jitter Individual variation in the approach bias effect
#' @param empirical If TRUE, then effect sizes and standard deviations will be exact
#' @param ... Ignored.
#'
#' @return \code{aat_simulate()} returns a \code{data.frame} with the following columns:
#' subj (participant ID), stim (stimulus number), rep (stimulus repetition number),
#' is_pull (0 = avoid, 1 = approach), is_target (0 = control stimulus, 1 = target stimulus),
#' meanrt (participant's mean RT), sdrt (participant's residual standard deviation),
#' pullfx (participant approach-avoidance effect size in ms),
#' stimfx (participant stimulus category effect size in ms),
#' biasfx (participant approach bias effect size in ms),
#' and rt (trial reaction time).
#' Additionally, the data.frame has the attribute \code{population_reliability} which represents
#' the expected reliability of the data given the provided parameters.
#' @details Defaults of \code{aat_simulate()} are based on
#' Kahveci, Van Alebeek, Berking, & Blechert (2021).
#' @export
#'
#' @examples
#' ts<- aat_simulate(pullfx = 50, stimfx = 10, biasfx = 100)
#' mod<-lm(rt~is_pull*is_target,data=ts)
#' coef(mod) #these should be somewhat close to the provided coefficients
#'
#' # Here's how one might derive the parameters used in this function from a real dataset
#' \dontrun{
#' mod<-lmer(decisiontime ~ is_pull * is_food + (is_pull * is_food | subjectid),data=dsa)
#' fixef(mod) # from here, all the fx and mean RTs are derived
#' ranef(mod)$subjectid %>% apply(2,sd) #from here, all the fx jitters are derived
#' dsa %>% group_by(subjectid) %>% summarise(sd=sd(resid)) %>%
#' summarise(m=mean(sd),s=sd(sd)) # from here, sdrt_jitter is derived
#' }
aat_simulate<-function(npps=36,nstims=16,stimreps=4,
meanrt=632,meanrt_jitter=90.1,
sdrt=158,sdrt_jitter=49.9,
pullfx=-39.2,pullfx_jitter=40.5,
stimfx=-30.9,stimfx_jitter=32.5,
biasfx= 39.0,biasfx_jitter=60.1,
empirical=FALSE, ...){
cond.scale<-function(x,emp){ if(emp){vec.scale(x)}else{x} }
#set properties
subjprops<-data.frame(subj=1:npps,
meanrt=meanrt+meanrt_jitter*cond.scale(rnorm(npps),empirical),
sdrt=sdrt+sdrt_jitter*cond.scale(rgamma2(n=npps,shape=3),empirical),
pullfx=pullfx+pullfx_jitter*cond.scale(rnorm(npps),empirical),
stimfx=stimfx+stimfx_jitter*cond.scale(rnorm(npps),empirical),
biasfx=biasfx+biasfx_jitter*cond.scale(rnorm(npps),empirical))
#initialize dataset
ds<-expand.grid(subj=1:npps,stim=1:nstims,rep=1:stimreps,is_pull=0:1,is_target=0:1)
ds<-merge(ds,subjprops,by="subj",all.x=T)
#fix stimulus names
ds$stim<-paste0(ds$is_target,"-",ds$stim)
#Generate RTs
gshape<-3
gscale<-1
ds$rt<-rgamma2(n=nrow(ds),shape=gshape)
if(empirical){
ds$rt<-ave(ds$rt,ds[c("subj","is_pull","is_target")],FUN=vec.scale)
}
ds$rt<-ds$rt * ds$sdrt + ds$meanrt +
(ds$is_pull-.5)*ds$pullfx + (ds$is_target-.5) * ds$stimfx +
((ds$is_pull==ds$is_target)-.5)*-.5 * ds$biasfx
#compute true "population" reliability (Kahveci's Q)
# alt_q <- (biasfx_jitter^2)/(biasfx_jitter^2 + sdrt^2 /(nstims*stimreps) *4)
# attr(ds,"population_reliability")<-alt_q
#output
return(ds)
}
rgamma2<-function(n,shape,m=0,s=1){
  m + (rgamma(n=n,shape=shape,scale=1) - shape) * s/sqrt(shape)
}
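# rgamma2() draws standard-gamma noise re-centered to mean m and rescaled to
# SD s (a gamma with scale 1 has mean shape and SD sqrt(shape)). A quick
# sanity-check sketch:
# set.seed(1)
# x <- rgamma2(1e5, shape = 3, m = 100, s = 15)
# c(mean(x), sd(x))  # should come out close to 100 and 15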
aat_simulate_old<-function(npps=40,nstims=32,stimreps=2,
meanrt=743,meanrt_jitter=66,
sdrt=133,sdrt_jitter=38,
pullfx=25,pullfx_jitter=40,
stimfx=10,stimfx_jitter=35,
biasfx=35,biasfx_jitter=75){
#set properties
subjprops<-data.frame(subj=1:npps,
meanrt=meanrt+meanrt_jitter*rnorm(npps),
sdrt=sdrt+sdrt_jitter*rgamma2(n=npps,shape=3),
pullfx=pullfx+pullfx_jitter*rnorm(npps),
stimfx=stimfx+stimfx_jitter*rnorm(npps),
biasfx=biasfx+biasfx_jitter*rnorm(npps))
#initialize dataset
ds<-expand.grid(subj=1:npps,stim=1:nstims,rep=1:stimreps,is_pull=0:1,is_target=0:1)
ds<-merge(ds,subjprops,by="subj",all.x=T)
#Generate RTs
gshape<-3
gscale<-1
ds$rt<-(rgamma(n=nrow(ds),shape=gshape,scale=gscale)-gshape*gscale) *
ds$sdrt/(sqrt(gshape)*gscale) + ds$meanrt +
(ds$is_pull-.5)*ds$pullfx + (ds$is_target-.5)*ds$stimfx +
(ds$is_pull*ds$is_target-.25)*ds$biasfx
#compute true "population" reliability (Kahveci's Q)
alt_q <- (biasfx_jitter^2)/(biasfx_jitter^2 + sdrt^2 /(nstims*stimreps) *4)
attr(ds,"population_reliability")<-alt_q
#output
return(ds)
}
#' @description \code{aat_simulate2} offers defaults taken from different studies and allows inserting outliers.
#'
#' @param ... Any parameters of \code{aat_simulate} provided here will override the defaults
#' from the defaults parameter.
#' @param defaults Which set of default values should be used?
#' @param slowols Number of slow outliers to insert per participant
#' @param fastols Number of fast outliers to insert per participant
#' @param olsd Number of standard deviations by which (slow) outliers deviate
#'
#' @details "Lender2018" parameters are taken from the relevant-feature AAT of
#' Lender, Meule, Rinck, Brockmeyer, & Blechert (2018). "Kahveci2021" parameters
#' are taken from Kahveci, Van Alebeek, Berking, & Blechert (in review).
#'
#' Lender, A., Meule, A., Rinck, M., Brockmeyer, T., & Blechert, J. (2018).
#' Measurement of food-related approach–avoidance biases:
#' Larger biases when food stimuli are task relevant. Appetite, 125, 42-47.
#'
#' Kahveci, S., Van Alebeek, H., Berking, M., & Blechert, J. (in review).
#' Touchscreen based assessment of food approach biases: investigation of
#' reliability and stimulus-specific effects.
#' @export
#'
#' @examples
#' hist(aat_simulate2(defaults="Lender2018_relevant_raw",slowols=10,fastols=10)$rt)
#' @rdname aat_simulate
aat_simulate2<-function(..., defaults="none",
slowols=0,fastols=0,olsd=3){
override.args<-list(...)
if(defaults=="none"){
args<-override.args
}else{
chosenset<-match.arg(defaults,choices=dataprops$setname)
args<-as.list(dataprops[chosenset==dataprops$setname,])
}
args[names(override.args)]<-override.args
ds<-do.call(aat_simulate,args)
gshape<-3
gscale<-1
#slow OLs
ds<-ds%>%group_by(.data$subj)%>%mutate(rownum=1:n())%>%
mutate(rt = ifelse(!(.data$rownum %in% sample(.data$rownum,slowols)),.data$rt,
.data$rt+.data$sdrt*olsd))
#fast OLs
if(fastols>0){
ds<-ds%>%group_by(.data$subj)%>%mutate(rownum=1:n(),eligible=.data$rt>.data$sdrt*olsd)%>%
mutate(rt = ifelse(!(.data$rownum %in% sample(which(.data$eligible),fastols)),
.data$rt,.data$rt-.data$sdrt*olsd))
}
return(ds)
}
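# Outlier-insertion sketch: slow outliers are made by shifting olsd residual
# SDs upward on `slowols` randomly chosen trials per participant, which
# should visibly inflate the overall RT spread.
# set.seed(1)
# clean <- aat_simulate2(defaults = "none")
# noisy <- aat_simulate2(defaults = "none", slowols = 4, olsd = 5)
# c(sd(clean$rt), sd(noisy$rt))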
#experimental. Can currently only be used in datasets with approximately equal trials in all cells
aat_properties<-function(ds,subjvar,pullvar,targetvar,rtvar){
ds<-aat_preparedata(ds=ds,subjvar=subjvar,pullvar=pullvar,targetvar=targetvar,rtvar=rtvar)
ds%<>%group_by(!!sym(subjvar))%>%
mutate(pulldiff=mean(subset(!!sym(rtvar),!!sym(pullvar)==1)) - mean(subset(!!sym(rtvar),!!sym(pullvar)==0)),
targetdiff=mean(subset(!!sym(rtvar),!!sym(targetvar)==1)) - mean(subset(!!sym(rtvar),!!sym(targetvar)==0)),
doublediff=2*mean(subset(!!sym(rtvar),!!sym(pullvar)!=!!sym(targetvar))) -
2*mean(subset(!!sym(rtvar),!!sym(pullvar)==!!sym(targetvar))))
ds%<>%group_by(!!sym(subjvar)) %>%
mutate(.residrt=!!sym(rtvar)+
-(!!sym(pullvar)-mean(!!sym(pullvar)))*.data$pulldiff+
-(!!sym(targetvar)-mean(!!sym(targetvar)))*.data$targetdiff+
+.5*((!!sym(pullvar)==!!sym(targetvar))-mean(!!sym(pullvar)==!!sym(targetvar)))*.data$doublediff)
ppstats<-ds%>%group_by(!!sym(subjvar))%>%
summarise(.pullfx=first(.data$pulldiff),
.targetfx=first(.data$targetdiff),
.biasfx=first(.data$doublediff),
.meanrt=mean(!!sym(rtvar)),
.sdrt.full=sd(!!sym(rtvar)),
.sdrt.resid=sd(.data$.residrt),
ntrial=n(),
.groups="drop")
output<-ppstats %>% ungroup() %>%
summarise(pullfx=mean(.data$.pullfx),pullfx_jitter=sd(.data$.pullfx),
stimfx=mean(.data$.targetfx),stimfx_jitter=sd(.data$.targetfx),
biasfx=mean(.data$.biasfx),biasfx_jitter=sd(.data$.biasfx),
meanrt=mean(.data$.meanrt),meanrt_jitter=sd(.data$.meanrt),
sdrt.full=mean(.data$.sdrt.full),sdrt.full_jitter=sd(.data$.sdrt.full),
sdrt.resid=mean(.data$.sdrt.resid),sdrt.resid_jitter=sd(.data$.sdrt.resid),
ntrial=mean(.data$ntrial),
.groups="drop")
return(list(dataprops=as.list(output),subjectprops=ppstats,ds=ds))
}
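# Sketch of intended use for this experimental helper: recover the generating
# parameters from a simulated dataset.
# props <- aat_properties(aat_simulate(), subjvar = "subj", pullvar = "is_pull",
#                         targetvar = "is_target", rtvar = "rt")
# props$dataprops[c("pullfx", "stimfx", "biasfx")] # near the simulation defaults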
#' @rdname aat_simulate
#' @description \code{aat_getstudydata()} retrieves the properties of datasets from a number of pre-existing studies
#' @export
aat_getstudydata<-function(){
dataprops
}
# File: /scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_simulate.R
# splithalf engine ####
#multicore splithalf
#' @title Compute the bootstrapped split-half reliability for approach-avoidance task data
#' @description Compute bootstrapped split-half reliability for approach-avoidance task data.
#' @param ds a longformat data.frame
#' @param subjvar Quoted name of the participant identifier column
#' @param pullvar Quoted name of the column indicating pull trials.
#' Pull trials should either be represented by 1, or by the second level of a factor.
#' @param targetvar Name of the column indicating trials featuring the target stimulus.
#' Target stimuli should either be represented by 1, or by the second level of a factor.
#' @param rtvar Name of the reaction time column.
#' @param stratvars Names of additional variables to stratify splits by.
#' @param iters Total number of desired iterations. At least 6000 are recommended for reasonable estimates.
#' @param algorithm Function (without brackets or quotes) to be used to compute AAT scores. See \link{Algorithms} for a list of usable algorithms.
#' @param trialdropfunc Function (without brackets or quotes) to be used to exclude outlying trials in each half.
#' The way you handle outliers for the reliability computation should mimic the way you do it in your regular analyses.
#' It is recommended to exclude outlying trials when computing AAT scores using the mean double-difference and regression scoring approaches,
#' but not when using d-scores or median double-difference scores.
#' \itemize{
#' \item \code{prune_nothing} excludes no trials (default)
#' \item \code{trial_prune_grubbs} applies a Grubbs' test to the data, removing one outlier at a time until the test is no longer significant.
#' \item \code{trial_prune_3SD} excludes trials deviating more than 3SD from the mean per participant.
#' \item \code{trial_prune_SD_dropcases} removes trials deviating more than a specific number of standard deviations from the participant's mean,
#' and removes participants with an excessive percentage of outliers.
#' Required arguments:
#' \itemize{
#' \item \code{trialsd} - trials deviating more than \code{trialsd} standard deviations from the participant's mean are excluded (optional; default is 3)
#' \item \code{maxoutliers} - participants with a higher percentage of outliers are removed from the data. (optional; default is .15)
#' }
#' \item \code{trial_recode_SD} recodes outlying reaction times to the nearest non-outlying value,
#' with outliers defined as reaction times deviating more than a certain number of standard deviations from the participant's mean. Required argument:
#' \itemize{
#' \item \code{trialsd} - trials deviating more than this many standard deviations from the mean are classified as outliers.
#' }
#' \item \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample} remove trials below and/or above certain percentiles,
#' on a subject-by-subject basis or sample-wide, respectively. The following arguments are available:
#' \itemize{
#' \item \code{lowerpercent} and \code{upperpercent} (optional; defaults are .01 and .99).
#' }
#' }
#' @param errortrialfunc Function (without brackets or quotes) to apply to an error trial.
#'
#' \itemize{
#' \item \code{prune_nothing} removes no errors (default).
#' \item \code{error_replace_blockmeanplus} replaces error trial reaction times with the block mean, plus an arbitrary extra quantity.
#' If used, the following additional arguments are required:
#' \itemize{
#' \item \code{blockvar} - Quoted name of the block variable (mandatory)
#' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory)
#' \item \code{errorbonus} - Amount to add to the reaction time of error trials. Default is 0.6 (recommended by \code{Greenwald, Nosek, & Banaji, 2003})
#' }
#' \item \code{error_prune_dropcases} removes errors and drops participants if they have more errors than a given percentage. The following arguments are available:
#' \itemize{
#' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory)
#' \item \code{maxerrors} - participants with a higher percentage of errors are excluded from the dataset. Default is .15.
#' }
#' }
#' @param casedropfunc Function (without brackets or quotes) to be used to exclude outlying participant scores in each half.
#' The way you handle outliers here should mimic the way you do it in your regular analyses.
#' \itemize{
#' \item \code{prune_nothing} excludes no participants (default)
#' \item \code{case_prune_3SD} excludes participants deviating more than 3SD from the sample mean.
#' }
#' @param plot Create a scatterplot of the AAT scores computed from each half of the data from the last iteration.
#' This is highly recommended, as it helps to identify outliers that can inflate or diminish the reliability.
#' @param include.raw logical indicating whether raw split-half data should be included in the output object.
#' @param parallel If TRUE (default), will use parallel computing to compute results faster.
#' If a doParallel backend has not been registered beforehand,
#' this function will register a cluster and stop it after finishing, which takes some extra time.
#' @param ... Other arguments, to be passed on to the algorithm or outlier rejection functions (see arguments above)
#'
#' @return A list, containing the mean bootstrapped split-half reliability, bootstrapped 95% confidence intervals,
#' a list of data.frames used over each iteration, and a vector containing the split-half reliability of each iteration.
#'
#' @author Sercan Kahveci
#' @seealso \link{q_reliability}
#' @examples
#' split <- aat_splithalf(ds=erotica[erotica$is_irrelevant==0,],
#' subjvar="subject", pullvar="is_pull", targetvar="is_target",
#' rtvar="RT", stratvars="stimuluscode", iters=10,
#' trialdropfunc="trial_prune_3SD",
#' casedropfunc="case_prune_3SD", algorithm="aat_dscore",
#' plot=FALSE, parallel=FALSE)
#'
#' print(split)
#' #Mean reliability: 0.521959
#' #Spearman-Brown-corrected r: 0.6859041
#' #95%CI: [0.4167018, 0.6172474]
#'
#' plot(split)
#'
#' \donttest{
#' #Regression Splithalf
#' aat_splithalf(ds=erotica[erotica$is_irrelevant==0,],
#' subjvar="subject", pullvar="is_pull", targetvar="is_target",
#' rtvar="RT", iters=10, trialdropfunc="trial_prune_3SD",
#' casedropfunc="case_prune_3SD", algorithm="aat_regression",
#' formula = RT ~ is_pull * is_target, aatterm = "is_pull:is_target",
#' plot=FALSE, parallel=FALSE)
#' #Mean reliability: 0.5313939
#' #Spearman-Brown-corrected r: 0.6940003
#' #95%CI: [0.2687186, 0.6749176]
#' }
#' @export
aat_splithalf<-function(ds,subjvar,pullvar,targetvar=NULL,rtvar,stratvars=NULL,iters,
algorithm=c("aat_doublemeandiff","aat_doublemediandiff",
"aat_dscore","aat_dscore_multiblock",
"aat_regression","aat_standardregression",
"aat_singlemeandiff","aat_singlemediandiff"),
trialdropfunc=c("prune_nothing","trial_prune_3SD","trial_prune_3MAD",
"trial_prune_SD_dropcases",
"trial_recode_SD","trial_prune_percent_subject",
"trial_prune_percent_sample","trial_prune_grubbs"),
errortrialfunc=c("prune_nothing","error_replace_blockmeanplus",
"error_prune_dropcases"),
casedropfunc=c("prune_nothing","case_prune_3SD"),
plot=TRUE,include.raw=FALSE,parallel=TRUE,...){
packs<-c("magrittr","dplyr","AATtools")
#Handle arguments
args<-list(...)
algorithm<-match.arg(algorithm)
if(!(algorithm %in% c("aat_singlemeandiff","aat_singlemediandiff",
"aat_regression","aat_standardregression")) & is.null(targetvar)){
stop("Argument targetvar missing but required for algorithm!")
}
trialdropfunc<-match.arg(trialdropfunc)
casedropfunc<-match.arg(casedropfunc)
errortrialfunc<-match.arg(errortrialfunc)
errorpenalizefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus",errortrialfunc,"prune_nothing")
errorremovefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus","prune_nothing",errortrialfunc)
if(errortrialfunc=="error_replace_blockmeanplus"){
stopifnot(!is.null(args$blockvar),!is.null(args$errorvar))
if(is.null(args$errorbonus)){ args$errorbonus<- 0.6 }
if(is.null(args$blockvar)){ args$blockvar<- 0 }
if(is.null(args$errorvar)){ args$errorvar<- 0 }
}
stopifnot(!(algorithm=="aat_dscore_multiblock" & is.null(args$blockvar)))
if(algorithm %in% c("aat_regression","aat_standardregression")){
if(!("formula" %in% names(args))){
args$formula<-as.formula(paste0(rtvar,"~",pullvar,"*",targetvar))
warning("No formula provided. Defaulting to formula ",form2char(args$formula))
}else if(is.character(args$formula)){
args$formula<-as.formula(args$formula)
}
if(!("aatterm" %in% names(args))){
args$aatterm<-paste0(pullvar,":",targetvar)
warning("No AAT-term provided. Defaulting to AAT-term ",args$aatterm)
}
}
ds<-do.call(aat_preparedata,c(list(ds=ds,subjvar=subjvar,pullvar=pullvar,targetvar=targetvar,
rtvar=rtvar,stratvars=stratvars),args))
#Prepare the cluster
if(parallel){
`%dofunc%` <- `%dopar%`
hasCluster<-getDoParRegistered()
if(!hasCluster){
cluster<-makeCluster(getOption("AATtools.workers"))
registerDoParallel(cluster)
on.exit(unregisterDoParallel(cluster))
}
}else{
`%dofunc%` <- `%do%`
}
#splithalf loop
results<-
foreach(iter = seq_len(iters), .packages=packs) %dofunc% {
#Split data
# if(is.null(targetvar)){
# iterds<-ds%>%group_by(!! sym(subjvar), !! sym(pullvar))%>%
# mutate(key=sample(n())%%2)%>%ungroup()
# }else{
# # iterds<-ds%>%group_by(!! sym(subjvar), !! sym(pullvar), !! sym(targetvar))%>%
# # mutate(key=sample(n())%%2)%>%ungroup()
#
# h<-tapply(seq_len(nrow(ds)),ds[c(subjvar,pullvar,targetvar)],
# function(x){sample(x,size=round(length(x)/2))})%>%unlist()
# iterds<-ds
# iterds$key<-0
# iterds$key[h]<-1
# }
#Split data
iterds<-ds
iterds$key<-datasplitter(iterds[,c(subjvar,pullvar,targetvar,stratvars)])
#Handle error removal
iterds<-do.call(errorremovefunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar)))
#Handle outlying trials
iterds<-do.call(trialdropfunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar)))
#Handle error penalization
iterds<-do.call(errorpenalizefunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar)))
#intermediate prune of empty cases
iterds<-drop_empty_cases(iterds,subjvar)
# abds<-do.call(algorithm,c(list(iterds=iterds,subjvar=subjvar,pullvar=pullvar,
# targetvar=targetvar,rtvar=rtvar),args))
#Compute AB
half0set<-iterds[which(iterds$key==0),]
half1set<-iterds[which(iterds$key==1),]
abds<-merge(
do.call(algorithm,c(list(ds=half0set,subjvar=subjvar,pullvar=pullvar,
targetvar=targetvar,rtvar=rtvar),args)),
do.call(algorithm,c(list(ds=half1set,subjvar=subjvar,pullvar=pullvar,
targetvar=targetvar,rtvar=rtvar),args)),
by=subjvar,suffixes=c("half0","half1"))
#Remove outlying participants
abds<-do.call(casedropfunc,list(ds=abds))
#Compute reliability
currcorr<-cor(abds$abhalf0,abds$abhalf1,use="complete.obs")
frcorr<-FlanaganRulonStandard(abds$abhalf0,abds$abhalf1)
rjcorr<-RajuStandard(abds$abhalf0,abds$abhalf1,mean(iterds$key))
#produce output
out<-list(corr=currcorr,frcorr=frcorr,rjcorr=rjcorr,abds=abds)
if(include.raw){out$rawdata<-iterds}
out
}
#extract coefs from output
rjcors<-sapply(results,FUN=function(x){x$rjcorr}) %>% lim(-.9999,.9999)
cors<-sapply(results,FUN=function(x){x$corr})
sbcors<-SpearmanBrown(lim(cors,-.9999,.9999),fix.negative="none") %>% lim(-.9999,.9999)
frcorrs<-sapply(results,FUN=function(x){x$frcorr}) %>% lim(-.9999,.9999)
#get sample sizes (for averaging and significance testing)
counts<-sapply(results,function(x){ sum(!is.na(x$abds$abhalf0) & !is.na(x$abds$abhalf1)) })
avg_n<-mean(counts)
#sort the cors
ordering<-order(rjcors)
rjcors<-rjcors[ordering]
cors<-cors[ordering]
sbcors<-sbcors[ordering]
frcorrs<-frcorrs[ordering]
counts<-counts[ordering]
#assemble output
output<-list(uncorrected=list(r=cormean(cors,counts),
lowerci=quantile(cors,probs=.025),
upperci=quantile(cors,probs=.975),
pval=r2p(cormean(cors,counts),avg_n),
itercors=cors),
spearmanbrown=list(r=cormean(sbcors,counts),
lowerci=quantile(sbcors,probs=.025),
upperci=quantile(sbcors,probs=.975),
pval=r2p(cormean(sbcors,counts),avg_n),
itercors=sbcors),
flanaganrulon=list(r=cormean(frcorrs,counts),
lowerci=quantile(x=frcorrs,probs=.025),
upperci=quantile(x=frcorrs,probs=.975),
pval=r2p(cormean(frcorrs,counts),avg_n),
itercors=frcorrs),
raju=list(r=cormean(rjcors,counts),
lowerci=quantile(x=rjcors,probs=.025),
upperci=quantile(x=rjcors,probs=.975),
pval=r2p(cormean(rjcors,counts),avg_n),
itercors=rjcors),
avg_n=avg_n,
ordering=ordering,
parameters=c(list(ds=ds,
subjvar=subjvar,
pullvar=pullvar,
targetvar=targetvar,
rtvar=rtvar,
iters=iters,
algorithm=algorithm,
trialdropfunc=trialdropfunc,
errortrialfunc=errortrialfunc,
casedropfunc=casedropfunc),
args),
iterdata=lapply(results,function(x){ x$abds })[ordering]) %>%
structure(class = "aat_splithalf")
#include raw data if asked to (disabled by default, takes a lot of space)
if(include.raw){
output$rawiterdata<-lapply(results,function(x){ x$rawdata })[ordering]
}
#plot if asked to (default)
if(plot){ plot(output) }
#return output
return(output)
}
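# Note on the corrections applied above (sketch): for two halves, the
# textbook Spearman-Brown correction is sb = 2*r/(1+r), e.g. an observed
# half-half correlation of r = .5 yields a full-length estimate of
# 2*.5/(1+.5) ~ .67. (Assumption: the SpearmanBrown() helper used above,
# defined elsewhere in the package, implements this formula, with
# fix.negative controlling how negative correlations are treated.)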
#' @param coef Optional character argument,
#' indicating which reliability coefficient should be printed.
#' Defaults to Raju's beta.
#' @details The calculated split-half coefficients are described in Warrens (2016).
#' @references Warrens, M. J. (2016). A comparison of reliability coefficients for
#' psychometric tests that consist of two parts.
#' Advances in Data Analysis and Classification, 10(1), 71-84.
#' @export
#' @rdname aat_splithalf
print.aat_splithalf<-function(x,coef=c("SpearmanBrown","Raju","FlanaganRulon"),...){
coef<-match.arg(coef)
if(coef=="Raju"){
coefstr<-paste0("\nFull-length reliability (Raju's beta):\n",
"beta (",format(x$avg_n),") = ",mf(x$raju$r),
", 95%CI [", mf(x$raju$lowerci), ", ", mf(x$raju$upperci),"]",
", p = ",mf(x$raju$pval,digits=3),"\n")
}else if(coef=="FlanaganRulon"){
coefstr<-paste0("\nFull-length reliability (Flanagan-Rulon coefficient):\n",
"FR (",format(x$avg_n),") = ",mf(x$flanaganrulon$r),
", 95%CI [", mf(x$flanaganrulon$lowerci), ", ", mf(x$flanaganrulon$upperci),"]",
", p = ",mf(x$flanaganrulon$pval,digits=3),"\n")
}else if(coef=="SpearmanBrown"){
coefstr<-paste0("\nFull-length reliability (Spearman-Brown coefficient):\n",
"SB (",format(x$avg_n),") = ",mf(x$spearmanbrown$r),
", 95%CI [", mf(x$spearmanbrown$lowerci), ", ", mf(x$spearmanbrown$upperci),"]",
", p = ",mf(x$spearmanbrown$pval,digits=3),"\n")
}
cat(coefstr,
"\nUncorrected, average split-half correlation:\n",
"r (",format(x$avg_n),") = ",mf(x$uncorrected$r),
", 95%CI [", mf(x$uncorrected$lowerci), ", ", mf(x$uncorrected$upperci),"]",
", p = ",mf(x$uncorrected$pval,digits=3),"\n",
sep="")
}
#' @title Plot split-half scatterplots
#'
#' @param x an \code{aat_splithalf} object
#' @param type Character argument indicating which iteration should be chosen. Must be an abbreviation of
#' \code{"median"} (default), \code{"minimum"}, \code{"maximum"}, or \code{"random"}.
#'
#' @export
#' @rdname aat_splithalf
plot.aat_splithalf<-function(x,type=c("median","minimum","maximum","random"),...){
type<-match.arg(type)
if(type=="median"){
title<-"Split-half Scatterplot for Iteration with Median Reliability"
idx<-ceiling(x$parameters$iters/2)
}else if(type=="minimum"){
title<-"Split-half Scatterplot for Iteration with the Lowest Reliability"
idx<-1
}else if(type=="maximum"){
title<-"Split-half Scatterplot for Iteration with the Highest Reliability"
idx<-x$parameters$iters
}else if(type=="random"){
title<-"Split-half Scatterplot for Random Iteration"
idx<-sample(1:x$parameters$iters,1)
}
abds<-x$iterdata[[idx]]
plot(abds$abhalf0,abds$abhalf1,pch=20,main=
paste0(title,"\n(Uncorrected r = ", round(x$uncorrected$itercors[idx],digits=2),")"),
xlab="Half 1 computed bias",ylab="Half 2 computed bias")
text(abds$abhalf0,abds$abhalf1,abds[,1],cex= 0.7, pos=3, offset=0.3)
}
# File: /scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_splithalf.R
subtraction.matrix<-function(avec,bvec){
na<-length(avec)
nb<-length(bvec)
out<-matrix(NA,nrow=na,ncol=nb)
for(i in seq_len(na)){
out[i,]<-avec[i]-bvec
}
return(out)
}
meanpercentile<-function(sample,population){
sample %>% sapply(function(x) mean(x<population,na.rm=T)) %>% mean(na.rm=T)
}
#' Compute stimulus-rest correlations of double-difference scores
#'
#' This function provides a statistic that can give an indication of how deviant
#' the responses to specific stimuli are, in comparison to the rest of the stimulus set.
#' The algorithm computes stimulus-rest correlations of stimulus-specific double-difference scores.
#' It takes single-difference approach-avoidance scores for each stimulus, and computes
#' every possible subtraction between individual stimuli from both stimulus categories.
#' It then computes correlations between every such subtraction of stimuli on one hand, and
#' the mean double difference score of all other stimuli. Stimulus-rest correlations are then
#' computed by averaging every such subtraction-rest correlation involving a specific stimulus.
#'
#' @param ds a \code{data.frame}
#' @param subjvar the label of the participant identifier variable
#' @param stimvar the label of the stimulus identifier variable
#' @param pullvar the label of the movement direction identifier variable
#' @param targetvar the label of the stimulus category identifier variable
#' @param rtvar the label of the reaction time variable
#' @param method Optional, the correlation method to be used (pearson, spearman, kendall)
#'
#' @return Returns a \code{aat_stimulus_rest} object containing statistics for each stimulus.
#' Stats include the average stimulus-rest correlation (mcor); the standard deviation of
#' dyad-rest correlations for this stimulus (sdcor);
#' the number of valid correlations involved in these statistic (n);
#' the average percentile of dyad-rest correlations involving the stimulus within
#' the distribution of all other dyad-rest correlations (restpercentile);
#' as well as z-scores (zpercentile) and p-values for this percentile (pval).
#'
#' @export
#'
#' @examples
#'
#' ds<-aat_simulate()
#' stimrest<-aat_stimulus_rest(ds,subjvar="subj",stimvar="stim",pullvar="is_pull",
#' targetvar="is_target",rtvar="rt")
#' plot(stimrest)
#' print(stimrest)
aat_stimulus_rest<-function(ds,subjvar,stimvar,pullvar,targetvar,rtvar,method=c("pearson","spearman","kendall")){
method<-match.arg(method)
# check data
ds<-aat_preparedata(ds,subjvar,pullvar,targetvar,rtvar,stimvar=stimvar)
#compute single-difference scores
biasset<-ds%>%group_by(!!sym(subjvar),!!sym(stimvar),!!sym(targetvar))%>%
summarise(bias=mean(subset(!!sym(rtvar),!!sym(pullvar)==0),na.rm=T)-
mean(subset(!!sym(rtvar),!!sym(pullvar)==1),na.rm=T),.groups="drop")
stimset<-biasset%>%select(!!sym(stimvar),!!sym(targetvar))%>%distinct()
stimset$mcor<-NA
for(i in seq_len(nrow(stimset))){
iterset<-biasset%>%group_by(!!sym(subjvar))%>%
summarise(stimbias=.data$bias[which(!!sym(stimvar)==stimset[[stimvar]][i])],
restbias=mean(.data$bias[!!sym(stimvar) != stimset[[stimvar]][i] &
!!sym(targetvar) == stimset[[targetvar]][i] ]),
counterbias=mean(.data$bias[!!sym(targetvar) != stimset[[targetvar]][i] ]),
.groups="drop")
stimset$mcor[i]<-cor(iterset$stimbias-iterset$counterbias,iterset$restbias-iterset$counterbias,
use="complete.obs",method=method)
}
return(structure(stimset,class=c("aat_stimulus_rest","data.frame")))
}
#' @rdname aat_stimulus_rest
#' @param x an \code{aat_stimulus_rest} object
#' @param ... Ignored.
#' @export
plot.aat_stimulus_rest<-function(x,...){
x<-x[!is.na(x$mcor),]
ranks<-rank(x$mcor)
wideness<-max(x$mcor)-min(x$mcor)
plot(x=x$mcor,y=ranks,
xlim=c(min(x$mcor)-.5*wideness*strwidth(s=x$mcor[min(ranks)],cex=.5,font=2,units="figure"),
max(x$mcor)+.5*wideness*strwidth(s=x$mcor[max(ranks)],cex=.5,font=2,units="figure")),
xlab="Stimulus-rest correlation",main=paste0("Stimulus-rest correlations"),
yaxt="n")
segments(x0=mean(x$mcor),x1=x$mcor,y0=ranks,y1=ranks)
  text(x=x$mcor,y=ranks,labels=x[[1]], #first column holds the stimulus identifiers
pos=3+sign(x$mcor-mean(x$mcor)),offset=0.5,cex=.5,font=2)
abline(v=mean(x$mcor))
  axis(2, labels=x[[1]],at=ranks,las=1,cex.axis=.5) #first column holds the stimulus identifiers
}
# File: /scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_stimulus_rest.R
# Score computation algorithms ####
#' @title AAT score computation algorithms
#' @name Algorithms
#' @description AAT score computation algorithms
#' @param ds A long-format data.frame
#' @param subjvar Column name of the participant identifier variable
#' @param pullvar Column name of the movement variable (0: avoid; 1: approach)
#' @param targetvar Column name of the stimulus category variable (0: control stimulus; 1: target stimulus)
#' @param rtvar Column name of the reaction time variable
#' @param ... Other arguments passed on by functions (ignored)
#'
#' @return A data.frame containing participant number and computed AAT score.
NULL
#' @describeIn Algorithms computes a mean-based double-difference score:
#' \code{(mean(push_target) - mean(pull_target)) - (mean(push_control) - mean(pull_control))}
#'
#' @export
aat_doublemeandiff<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[targetvar]],ds[[pullvar]]),
mean.default,na.rm=TRUE)
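  # a is a 3-d array indexed as [subject, target, pull]; in the apply() call
  # below, x[2,1] - x[2,2] is mean(push_target) - mean(pull_target) and
  # x[1,1] - x[1,2] is mean(push_control) - mean(pull_control)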
b<-apply(a,1,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) })
setNames(data.frame(id=names(b),ab=b,stringsAsFactors=F),
c(subjvar,"ab"))
}
aat_doublemeandiff_old<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
idx1<-which(ds[[pullvar]]==0 & ds[[targetvar]]==1)
idx2<-which(ds[[pullvar]]==1 & ds[[targetvar]]==1)
idx3<-which(ds[[pullvar]]==0 & ds[[targetvar]]==0)
idx4<-which(ds[[pullvar]]==1 & ds[[targetvar]]==0)
ab<-(tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],mean.default,na.rm=TRUE) -
tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],mean.default,na.rm=TRUE))-
(tapply(ds[[rtvar]][idx3],ds[[subjvar]][idx3],mean.default,na.rm=TRUE) -
tapply(ds[[rtvar]][idx4],ds[[subjvar]][idx4],mean.default,na.rm=TRUE))
setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab"))
}
aat_doublemeandiff_older<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
group_by(ds,!!sym(subjvar)) %>%
summarise(ab=(mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) -
mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) -
(mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) -
mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE)))
}
#' @export
#' @describeIn Algorithms computes a median-based double-difference score:
#' \code{(median(push_target) - median(pull_target)) - (median(push_control) - median(pull_control))}
aat_doublemediandiff<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[targetvar]],ds[[pullvar]]),
median.default,na.rm=TRUE)
b<-apply(a,1,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) })
setNames(data.frame(id=names(b),ab=b,stringsAsFactors=F),
c(subjvar,"ab"))
}
aat_doublemediandiff_old<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
idx1<-which(ds[[pullvar]]==0 & ds[[targetvar]]==1)
idx2<-which(ds[[pullvar]]==1 & ds[[targetvar]]==1)
idx3<-which(ds[[pullvar]]==0 & ds[[targetvar]]==0)
idx4<-which(ds[[pullvar]]==1 & ds[[targetvar]]==0)
ab<-(tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],median.default,na.rm=TRUE) -
tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],median.default,na.rm=TRUE))-
(tapply(ds[[rtvar]][idx3],ds[[subjvar]][idx3],median.default,na.rm=TRUE) -
tapply(ds[[rtvar]][idx4],ds[[subjvar]][idx4],median.default,na.rm=TRUE))
setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab"))
}
aat_doublemediandiff_older<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
group_by(ds,!!sym(subjvar)) %>%
summarise(ab=(median(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) -
median(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) -
(median(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) -
median(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE)))
}
#' @export
#' @describeIn Algorithms computes D-scores for a 2-block design (see Greenwald, Nosek, and Banaji, 2003):
#' \code{((mean(push_target) - mean(pull_target)) - (mean(push_control) - mean(pull_control))) / sd(participant_reaction_times)}
aat_dscore<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[targetvar]],ds[[pullvar]]),
mean.default,na.rm=TRUE)
b<-apply(a,1,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) })
sds<-tapply(ds[[rtvar]],ds[[subjvar]],vec.sd,na.rm=TRUE)
c<-b/sds
setNames(data.frame(id=names(c),ab=c,stringsAsFactors=F),
c(subjvar,"ab"))
}
aat_dscore_old<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
idx1<-which(ds[[pullvar]]==0 & ds[[targetvar]]==1)
idx2<-which(ds[[pullvar]]==1 & ds[[targetvar]]==1)
idx3<-which(ds[[pullvar]]==0 & ds[[targetvar]]==0)
idx4<-which(ds[[pullvar]]==1 & ds[[targetvar]]==0)
ab<-((tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],mean.default,na.rm=TRUE) -
tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],mean.default,na.rm=TRUE))-
(tapply(ds[[rtvar]][idx3],ds[[subjvar]][idx3],mean.default,na.rm=TRUE) -
tapply(ds[[rtvar]][idx4],ds[[subjvar]][idx4],mean.default,na.rm=TRUE)))/
tapply(ds[[rtvar]],ds[[subjvar]],sd,na.rm=TRUE)
setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab"))
}
aat_dscore_older<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
group_by(ds,!!sym(subjvar)) %>%
summarise(ab=((mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) -
mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) -
(mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) -
mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE))) /
sd(!!sym(rtvar),na.rm=TRUE))
}
#' @export
#' @describeIn Algorithms computes a double-difference score using medians,
#' and divides it by the median absolute deviation of the participant's overall reaction times:
#' \code{((median(push_target) - median(pull_target)) - (median(push_control) - median(pull_control))) / mad(participant_reaction_times)}
aat_mediandscore<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[targetvar]],ds[[pullvar]]),
median.default,na.rm=TRUE)
b<-apply(a,1,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) })
sds<-tapply(ds[[rtvar]],ds[[subjvar]],mad,na.rm=TRUE)
c<-b/sds
setNames(data.frame(id=names(c),ab=c,stringsAsFactors=F),
c(subjvar,"ab"))
}
aat_mediandscore_old<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
idx1<-which(ds[[pullvar]]==0 & ds[[targetvar]]==1)
idx2<-which(ds[[pullvar]]==1 & ds[[targetvar]]==1)
idx3<-which(ds[[pullvar]]==0 & ds[[targetvar]]==0)
idx4<-which(ds[[pullvar]]==1 & ds[[targetvar]]==0)
ab<-((tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],median.default,na.rm=TRUE) -
tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],median.default,na.rm=TRUE))-
(tapply(ds[[rtvar]][idx3],ds[[subjvar]][idx3],median.default,na.rm=TRUE) -
tapply(ds[[rtvar]][idx4],ds[[subjvar]][idx4],median.default,na.rm=TRUE)))/
tapply(ds[[rtvar]],ds[[subjvar]],mad,na.rm=TRUE)
setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab"))
}
aat_mediandscore_older<-function(ds,subjvar,pullvar,targetvar,rtvar,...){
group_by(ds,!!sym(subjvar)) %>%
summarise(ab=((median(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) -
median(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) -
(median(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) -
median(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE))) /
mad(!!sym(rtvar),na.rm=TRUE))
}
#' @param blockvar name of the variable indicating block number
#' @export
#' @describeIn Algorithms computes D-scores for pairs of sequential blocks
#' and averages the resulting score (see Greenwald, Nosek, and Banaji, 2003).
#' Requires extra \code{blockvar} argument, indicating the name of the block variable.
#note: this matches sequential blocks with one another.
aat_dscore_multiblock<-function(ds,subjvar,pullvar,targetvar,rtvar,blockvar,...){
ds$.blockset<-floor((ds[[blockvar]]-min(ds[[blockvar]]))/2)
a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds$.blockset,ds[[targetvar]],ds[[pullvar]]),
mean.default,na.rm=TRUE)
b<-apply(a,1:2,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) })
sds<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds$.blockset),vec.sd,na.rm=TRUE)
c<-rowMeans(b/sds)
setNames(data.frame(id=names(c),ab=c,stringsAsFactors=F),
c(subjvar,"ab"))
}
aat_dscore_multiblock_old<-function(ds,subjvar,pullvar,targetvar,rtvar,blockvar,...){
ds %>% mutate(.blockset = floor((!!sym(blockvar) - min(!!sym(blockvar)))/2) ) %>%
group_by(!!sym(subjvar),.data$.blockset) %>%
summarise(ab=((mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) -
mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) -
(mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) -
mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE))) /
sd(!!sym(rtvar),na.rm=TRUE)) %>%
group_by(!!sym(subjvar)) %>% summarise(ab=mean(ab,na.rm=TRUE))
}
#' @param formula A regression formula to fit to the data to compute an AAT score
#' @param aatterm A character naming the formula term representing the approach bias.
#' Usually this is the interaction of the movement-direction and stimulus-category terms.
#' @export
#' @describeIn Algorithms \code{aat_regression} and \code{aat_standardregression} fit regression models to participants' reaction times and extract a term that serves as AAT score.
#' \code{aat_regression} extracts the raw coefficient, equivalent to a mean difference score.
#' \code{aat_standardregression} extracts the t-score of the coefficient, standardized on the basis of the variability of the participant's reaction times.
#' These algorithms can be used to regress nuisance variables out of the data before computing AAT scores.
#' When using these functions, additional arguments must be provided:
#' \itemize{
#' \item \code{formula} - a formula to fit to the data
#' \item \code{aatterm} - the term within the formula that indicates the approach bias; this is usually the interaction of the pull and target terms.
#' }
aat_regression<-function(ds,subjvar,formula,aatterm,...){
output<-data.frame(pp=unique(ds[[subjvar]]),ab=NA,var=NA)
for(i in seq_len(nrow(output))){
mod<-coef(summary(lm(formula,data=ds[ds[[subjvar]]==output[i,"pp"],])))
if(aatterm %in% rownames(mod)){
output[i,"ab"]<- -mod[rownames(mod)==aatterm,1]
output[i,"var"]<- mod[rownames(mod)==aatterm,2]
}
}
colnames(output)[colnames(output)=="pp"]<-subjvar
return(output)
}
#' @export
#' @describeIn Algorithms See above
aat_standardregression<-function(ds,subjvar,formula,aatterm,...){
output<-data.frame(pp=unique(ds[[subjvar]]),ab=NA,var=NA)
for(i in seq_len(nrow(output))){
mod<-coef(summary(lm(formula,data=ds[ds[[subjvar]]==output[i,"pp"],])))
if(aatterm %in% rownames(mod)){
output[i,"ab"]<- -mod[rownames(mod)==aatterm,1]
output[i,"var"]<- mod[rownames(mod)==aatterm,2]
}
}
colnames(output)[colnames(output)=="pp"]<-subjvar
output$ab<-output$ab/output$var
return(output)
}
#' @export
#' @describeIn Algorithms subtracts the mean approach reaction time from the mean avoidance reaction time.
#' Using this algorithm is only sensible if the supplied data contain a single stimulus category.
aat_singlemeandiff<-function(ds,subjvar,pullvar,rtvar,...){
a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[pullvar]]),mean.default,na.rm=T)
b<-apply(a,1,function(x){ x[1]-x[2] })
setNames(data.frame(id=names(b),ab=b,stringsAsFactors=F),
c(subjvar,"ab"))
}
aat_singlemeandiff_old<-function(ds,subjvar,pullvar,rtvar,...){
idx1<-which(ds[[pullvar]]==0)
idx2<-which(ds[[pullvar]]==1)
ab<-(tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],mean.default,na.rm=T) -
tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],mean.default,na.rm=T))
setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab"))
}
aat_singlemeandiff_older<-function(ds,subjvar,pullvar,rtvar,...){
group_by(ds,!!sym(subjvar))%>%
summarise(ab=mean(subset(!!sym(rtvar),!!sym(pullvar)==1)) -
mean(subset(!!sym(rtvar),!!sym(pullvar)==0)))
}
#' @export
#' @describeIn Algorithms subtracts the median approach reaction time from the median avoidance reaction time.
#' Using this algorithm is only sensible if the supplied data contain a single stimulus category.
aat_singlemediandiff<-function(ds,subjvar,pullvar,rtvar,...){
a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[pullvar]]),median.default,na.rm=T)
b<-apply(a,1,function(x){ x[1]-x[2] })
setNames(data.frame(id=names(b),ab=b,stringsAsFactors=F),
c(subjvar,"ab"))
}
aat_singlemediandiff_old<-function(ds,subjvar,pullvar,rtvar,...){
idx1<-which(ds[[pullvar]]==0)
idx2<-which(ds[[pullvar]]==1)
ab<-(tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],median.default,na.rm=T) -
tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],median.default,na.rm=T))
setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab"))
}
aat_singlemediandiff_older<-function(ds,subjvar,pullvar,rtvar,...){
group_by(ds,!!sym(subjvar))%>%
summarise(ab=median(subset(!!sym(rtvar),!!sym(pullvar)==1)) -
median(subset(!!sym(rtvar),!!sym(pullvar)==0)))
}
# File: /scratch/gouwar.j/cran-all/cranData/AATtools/R/algorithms.R
#' @name correlation-tools
#' @title Correlation tools
#' @description Helper functions to compute important statistics from correlation coefficients.
#' @param r,r1,r2 a correlation value
#' @param z a Z-score
#' @param n,n1,n2 sample sizes
#' @param alpha the significance level to use
#' @seealso \link{cormean}, \link{multiple.cor}, \link{partial.cor}
#' @examples
#' z <- r2z(.5)
#' r <- z2r(z)
#' t<-r2t(r,30)
#' r2p(r,30)
#' print(rconfint(r,30))
#' print(compcorr(.5,.7,20,20))
NULL
#' @export
#' @describeIn correlation-tools converts correlation coefficients to z-scores
r2z<-function(r){
z<-.5 * (log(1+r) - log(1-r))
return(z)
}
#' @export
#' @describeIn correlation-tools converts z-scores to correlation coefficients
z2r<-function(z){
r<-(exp(2*z)-1)/(exp(2*z)+1)
rma<-which(is.nan(r))
r[rma]<-ifelse(z[rma]>0,1,-1)
return(r)
}
#' @export
#' @describeIn correlation-tools Converts correlation coefficients to t-scores
r2t<-function(r,n){ (r*sqrt(n-2))/sqrt(1-r^2) }
t2r<-function(t,n){ t/sqrt(t^2+n-2) } #inverse of r2t; preserves the sign of t
#' @export
#' @describeIn correlation-tools Computes the two-sided p-value for a given correlation
r2p<-function(r,n){ 2*pt(abs(r2t(r,n)),n-2,lower.tail=FALSE) }
#' @export
#' @describeIn correlation-tools Computes confidence intervals for a given correlation coefficient
rconfint<-function(r,n,alpha=.05){
z<-r2z(r)
zint<-qnorm(1-alpha/2) * sqrt(1/(n-3))
confints<-c(z2r(z-zint),z2r(z+zint))
  return(confints)
}
#' @export
#' @describeIn correlation-tools computes the significance of the difference between two correlation coefficients
compcorr<-function(r1,r2,n1,n2){
zval<-abs(r2z(r1)-r2z(r2)) / sqrt((1/(n1-3)) + (1/(n2-3)))
pval<-min(1,pnorm(abs(zval),lower.tail=F)*2)
return(structure(list(zscore=zval,pvalue=pval),class="compcorr"))
}
print.compcorr<-function(x,...){
cat("Two-tailed Z-test for the difference between two correlation coefficients.",
"\nZ =",x$zscore,"\np =",x$pvalue,"\n")
}
#' Compute a minimally biased average of correlation values
#'
#' This function computes a minimally biased average of correlation values.
#' This is needed because simple averaging of correlations is negatively biased,
#' and the often used z-transformation method of averaging correlations is positively biased.
#' The algorithm was developed by Olkin & Pratt (1958).
#'
#' @param r a vector containing correlation values
#' @param n a single value or vector containing sample sizes
#' @param wts Character. How should the correlations be weighted?
#' \code{none} leads to no weighting, \code{n} weights by sample size, \code{df} weights by sample size minus one.
#' @param type Character. Determines which averaging algorithm to use, with "OP5" being the most accurate.
#' @param na.rm Logical. Should missing values be removed?
#'
#' @return An average correlation.
#' @name cormean
#' @export
#'
#' @references
#' Olkin, I., & Pratt, J. (1958). Unbiased estimation of certain correlation coefficients.
#' The Annals of Mathematical Statistics, 29. https://doi.org/10.1214/aoms/1177706717
#'
#' Shieh, G. (2010). Estimation of the simple correlation coefficient. Behavior Research Methods,
#' 42(4), 906-917. https://doi.org/10.3758/BRM.42.4.906
#'
#' @examples
#' cormean(c(0,.3,.5),c(30,30,60))
cormean<-function(r,n,wts=c("none","n","df"),type=c("OP5","OPK","OP2"),na.rm=F){
type<-match.arg(type)
wts<-match.arg(wts)
if(na.rm){
missing<-which(is.na(r) | is.na(n))
if(length(missing)>0){
r<-r[-missing]
n<-n[-missing]
}
}
weight<-list(rep(1,times=length(n)),n,n-1)[[1+(wts=="n")+2*(wts=="df")]]
if(length(r)!=length(n)){
stop("Length of r and n not equal!")
}
if(type=="OP5"){
sizevec<-unique(n)
gammalist<-sapply(sizevec,function(nr) (gamma(.5+1:5)^2 * gamma(nr/2-1))/
(gamma(.5)^2 * gamma(nr/2-1+1:5)))
rmean<-weighted.mean(x= sapply(seq_along(r),
function(i)
r[i]*(1+ sum(gammalist[,match(n[i],sizevec)] *
(1-r[i]^2)^(1:5)/factorial(1:5)))),
w= weight)
}else if(type=="OPK"){
rmean<-weighted.mean(x= r*(1+(1-r^2)/(2*(n-(9*sqrt(2)-7)/2))),
w= weight)
}else if(type=="OP2"){
rmean<-weighted.mean(x= r*(1+ (1-r^2)/(2*(n-2)) +
(9*(1-r^2)^2)/(8*n*(n-2))),
w= weight)
}
return(rmean)
}
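# Bias illustration (sketch; requires the MASS package): average many sample
# correlations drawn from a population with rho = .5. The naive mean should
# underestimate rho, the Fisher-z average should overestimate it, and the
# Olkin-Pratt estimate should land closest.
# set.seed(1)
# rs <- replicate(2000, cor(MASS::mvrnorm(20, c(0, 0),
#                                         matrix(c(1, .5, .5, 1), 2)))[1, 2])
# c(naive = mean(rs), fisherz = z2r(mean(r2z(rs))),
#   olkinpratt = cormean(rs, rep(20, length(rs))))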
#' Partial correlation
#' Compute the correlation between x and y while controlling for z.
#' @param x,y,z x and y will be correlated while controlling for z
#' @param use optional character indicating how to handle missing values (see \link{cor})
#' @export
#' @examples
#' partial.cor(mtcars$mpg,mtcars$cyl,mtcars$disp)
partial.cor<-function(x,y,z,use=c("complete.obs","everything")){
use<-match.arg(use)
if(use=="complete.obs"){
key<- !is.na(x) & !is.na(y) & !is.na(z)
x<-x[key]
y<-y[key]
z<-z[key]
}
xy<-cor(x,y)
xz<-cor(x,z)
yz<-cor(y,z)
return((xy-xz*yz)/sqrt((1-xz^2)*(1-yz^2)))
}
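# Verification sketch: a first-order partial correlation equals the
# correlation between the residuals of x and y after regressing each on z.
# with(mtcars, partial.cor(mpg, cyl, disp))
# cor(resid(lm(mpg ~ disp, mtcars)), resid(lm(cyl ~ disp, mtcars)))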
#ref: https://www.tse-fr.eu/sites/default/files/medias/stories/SEMIN_09_10/STATISTIQUE/croux.pdf
# devlin, 1975
cor.influence<-function(x,y){
x<-x-mean(x)
y<-y-mean(y)
x*y-(x^2+y^2)/2*cor(x,y)
}
#' Multiple correlation
#' Computes the \href{https://en.wikipedia.org/wiki/Multiple_correlation}{multiple correlation coefficient}
#' of variables in \code{ymat} with the variable \code{x}
#' @param x Either a matrix of variables whose multiple correlation with each other is to be estimated; or a vector of which the multiple correlation with variables in \code{ymat} is to be estimated
#' @param ymat a matrix or data.frame of variables of which the multiple correlation with \code{x} is to be estimated
#' @param use optional character indicating how to handle missing values (see \link{cor})
#'
#' @return The multiple correlation coefficient
#' @export
#' @seealso https://www.personality-project.org/r/book/chapter5.pdf
#'
#' @examples
#' multiple.cor(mtcars[,1],mtcars[,2:4])
multiple.cor<-function(x,ymat,use="everything"){
if(missing(ymat)){
cv<-cor(x,use=use)
corvec<-numeric(ncol(x))
for(i in seq_along(corvec)){
gfvec<-cv[(1:nrow(cv))[-i],i]
dcm<-cv[(1:nrow(cv))[-i],(1:ncol(cv))[-i]]
rsq<-t(gfvec) %*% solve(dcm) %*% gfvec
corvec[i]<-sqrt(as.vector(rsq))
}
names(corvec)<-colnames(cv)
return(corvec)
}else{
cv<-cor(cbind(x,ymat),use=use)
gfvec<-cv[2:nrow(cv),1]
dcm<-cv[2:nrow(cv),2:ncol(cv)]
rsq<-t(gfvec) %*% solve(dcm) %*% gfvec
return(sqrt(as.vector(rsq)))
}
}
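# Verification sketch: the multiple correlation of mpg with cyl, disp and hp
# should equal the square root of R^2 from the corresponding linear model.
# multiple.cor(mtcars[, 1], mtcars[, 2:4])
# sqrt(summary(lm(mpg ~ cyl + disp + hp, mtcars))$r.squared)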
#negative reliability in split-half aat occurs when the subtracted components correlate too positively with each other
#' Covariance matrix computation with multiple imputation
#'
#' This function computes a covariance matrix from data with some values missing at random.
#' The code was written by Eric from StackExchange. https://stats.stackexchange.com/questions/182718/ml-covariance-estimation-from-expectation-maximization-with-missing-data
#' @param dat_missing a matrix with missing values
#' @param iters the number of iterations to perform to estimate missing values
#' @references Beale, E. M. L., & Little, R. J. A.. (1975). Missing Values in Multivariate Analysis. Journal of the Royal Statistical Society. Series B (methodological), 37(1), 129–145.
#' @export
#' @examples
#' # make data with missing values
#' missing_mtcars <- mtcars
#' for(i in 1:20){
#' missing_mtcars[sample(1:nrow(mtcars),1),sample(1:ncol(mtcars),1)]<-NA
#' }
#' covmat<-covEM(as.matrix(missing_mtcars))$sigma
#' calpha(covmat)
covEM<-function(dat_missing,iters=1000){
if(!anyNA(dat_missing)){
return(list(sigma=cov(dat_missing),data=dat_missing))
}
n <- nrow(dat_missing)
nvar <- ncol(dat_missing)
is_na <- apply(dat_missing,2,is.na) # index if NAs
dat_impute <- dat_missing # data matrix for imputation
# set initial estimates to means from available data
for(i in 1:ncol(dat_impute)){
dat_impute[is_na[,i],i] <- colMeans(dat_missing,na.rm = TRUE)[i]
}
# starting values for EM
means <- colMeans(dat_impute)
# NOTE: multiplying by (nrow-1)/(nrow) to get ML estimate
sigma <- cov(dat_impute)*(nrow(dat_impute)-1)/nrow(dat_impute)
  # carry out EM over the requested number of iterations
for(j in 1:iters){
bias <- matrix(0,nvar,nvar)
for(i in 1:n){
row_dat <- dat_missing[i,]
miss <- which(is.na(row_dat))
if(length(miss)>0){
bias[miss,miss] <- bias[miss,miss] + sigma[miss,miss] -
sigma[miss,-miss] %*% solve(sigma[-miss,-miss]) %*% sigma[-miss,miss]
dat_impute[i,miss] <- means[miss] +
(sigma[miss,-miss] %*% solve(sigma[-miss,-miss])) %*%
(row_dat[-miss]-means[-miss])
}
}
# get updated means and covariance matrix
means <- colMeans(dat_impute)
biased_sigma <- cov(dat_impute)*(n-1)/n
# correct for bias in covariance matrix
sigma <- biased_sigma + bias/n
}
return(list(sigma=sigma,data=dat_impute))
}
# File: /scratch/gouwar.j/cran-all/cranData/AATtools/R/cortools.R
#' AAT examining approach bias for erotic stimuli
#'
#' AAT
#'
#' @docType data
#'
#' @usage erotica
#'
#' @format An object of class \code{"data.frame"}
#'
#' @keywords datasets
#'
#' @references Kahveci, S., Van Bockstaele, B.D., & Wiers, R.W. (in preparation).
#' Pulling for Pleasure? Erotic Approach-Bias Associated With Porn Use, Not Problems. DOI:10.17605/OSF.IO/6H2RJ
#'
#' @source \href{https://osf.io/6h2rj/}{osf.io repository}
#'
"erotica"
# erotica<-read.csv("./../data/erotica.csv")
# erotica$subject%<>%as.factor()
# erotica%>%dplyr::group_by(subject)%>%dplyr::summarise(meanrt=mean(RT),sdrt=sd(RT),ct=n())
# erotica%<>%dplyr::filter(!(subject %in% c(13, 42,40,32,55)))
# save(erotica,file="./data/erotica.RData")
# File: /scratch/gouwar.j/cran-all/cranData/AATtools/R/data.R
balancedrandombinary<-function(n){
keys<-rep(c(0,1),floor(n/2))
if(n%%2){
keys<-c(keys,NA)
}
keys[sample.int(length(keys))]
}
splitsweep<-function(currsplitset){
h<-tapply(seq_len(nrow(currsplitset)),currsplitset,function(x){
cbind(x,balancedrandombinary(length(x)))
},simplify=F)
h<-do.call(rbind,h)
currkey<-numeric(nrow(h))
currkey[h[,1]]<-h[,2]
currkey
}
datasplitter<-function(splitset){
validcols<-ncol(splitset)
key<-splitsweep(splitset)
while(anyNA(key) & validcols>0){
whichna<-is.na(key)
key[whichna]<-splitsweep(as.data.frame(splitset[whichna,1:validcols]))
validcols<-validcols-1
}
key[is.na(key)]<-sample( (seq_len(sum(is.na(key)))+sample(0:1,1)) %%2)
key
}
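# Usage sketch for these internal helpers: datasplitter() returns a 0/1 key
# that is balanced within each stratification cell, falling back to fewer
# stratification columns (and finally to random assignment) for cells with
# an odd number of trials.
# ds <- aat_simulate()
# ds$key <- datasplitter(ds[, c("subj", "is_pull", "is_target")])
# table(ds$key, ds$is_pull) # halves should be (near-)balanced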
# File: /scratch/gouwar.j/cran-all/cranData/AATtools/R/datasplitter.R
serr<-function(x,na.rm=T){sqrt(var(x,na.rm=na.rm)/sum(!is.na(x)))}
FlanaganRulonBilateral<-function(x1,x2){
key<-!is.na(x1) & !is.na(x2)
x1<-x1[key]
x2<-x2[key]
fr<-(1-var(x1-x2)/var(x1+x2))
return(fr/max(1, 1-fr))
}
RajuBilateral<-function(x1,x2,prop){
covar<-cov(x1,x2)
sumvar<-var(x1)+var(x2)+2*abs(covar)
raju<-covar / (prop * (1-prop) * sumvar)
return(raju)
}
FlanaganRulonStandard<-function(x1,x2){
(1-var(x1-x2)/var(x1+x2))
}
RajuStandard<-function(x1,x2,prop){
covar<-cov(x1,x2)
sumvar<-var(x1)+var(x2)+2*covar
covar / (prop * (1-prop) * sumvar)
}
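# Sketch: for an even split (prop = .5), the Raju coefficient reduces
# algebraically to the Flanagan-Rulon coefficient, as both then equal
# 4*cov(x1, x2) / var(x1 + x2).
# set.seed(1)
# true <- rnorm(100); x1 <- true + rnorm(100); x2 <- true + rnorm(100)
# FlanaganRulonStandard(x1, x2)
# RajuStandard(x1, x2, prop = .5) # identical to the line above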
vec.sd<-function(x,na.rm=F){
if(na.rm){x<-na.omit(x)}
sqrt(sum((x-mean.default(x))^2) / (length(x)-1))
}
vec.scale<-function(x){
xt<-na.omit(x)
m<-mean.default(xt)
(x-m)/sqrt((sum((xt-m)^2)/(length(xt)-1)))
}
vec.madscale<-function(x){
(x-median.default(x,na.rm=T))/mad(x,na.rm=T)
}
val_between<-function(x,lb,ub){x>lb & x<ub}
lim<-function(x,minx,maxx){ x[x<minx]<-minx; x[x>maxx]<-maxx; x }
drop_empty_cases<-function(iterds,subjvar){
ids<-vapply(split(iterds$key,iterds[[subjvar]]),
FUN=function(x){any(x==1)&any(x==0)},
FUN.VALUE=FALSE)
outds<-iterds[which(iterds[[subjvar]] %in% names(ids)[ids]),]
outds[[subjvar]]<-droplevels(outds[[subjvar]])
outds
}
form2char<-function(x){
if(is.character(x)){ return(x) }
fs<-as.character(x)
fs<-paste(fs[2],fs[1],fs[3])
return(fs)
}
is.formula <- function(x){
inherits(x,"formula")
}
mf<-function(x,digits=2){
s<-format(round(x,digits=digits),
digits=digits,drop0trailing=T,scientific=F,nsmall=digits)
s<-gsub("^0\\.","\\.",s)
return(s)
}
r_check_limit_cores <- function() {
Rcheck <- tolower(Sys.getenv("_R_CHECK_LIMIT_CORES_", ""))
return((nchar(Rcheck[1]) > 0) & (Rcheck != "false"))
}
unregisterDoParallel <- function(cluster) {
stopCluster(cluster)
registerDoSEQ()
#env <- foreach:::.foreachGlobals
#rm(list=ls(name=env), pos=env)
}
aat_preparedata<-function(ds,subjvar,pullvar,targetvar=NULL,rtvar,stratvars=NULL,...){
args<-list(...)
cols<-c(subjvar,pullvar,targetvar,rtvar,stratvars,args$errorvar,args$blockvar,args$stimvar)
if("formula" %in% names(args)){
formterms <- args$formula %>% as.formula() %>% terms() %>%
attr("variables") %>% as.character()
formterms <- formterms[-1]
if(any(!(formterms %in% colnames(ds)))){
stop("Formula term(s) ",paste(formterms[!(formterms %in% colnames(ds))],collapse=", ")," missing from dataset")
}
cols <- c(cols,formterms)
}
missingcols<-!(cols %in% colnames(ds))
if(any(missingcols)){
stop("Missing column(s) in dataset: ",paste0(cols[missingcols],collapse=" "))
}
ds<-ds[,cols]
ds[[subjvar]]%<>%as.factor()
if(is.logical(ds[,pullvar])){
warning("Recoded ",pullvar," from logical to numeric. Please make sure that FALSE ",
"represents push trials and TRUE represents pull trials")
ds[,pullvar]%<>%as.numeric()
}
if(is.factor(ds[,pullvar])){
warning("Recoded ",pullvar," from factor to numeric. Please make sure that ",
levels(ds[,pullvar])[1], " represents push trials and ",levels(ds[,pullvar])[2],
" represents pull trials")
ds[,pullvar]<-as.numeric(ds[,pullvar])-1
}
if(!is.null(targetvar)){
if(is.logical(ds[,targetvar])){
warning("Recoded ",targetvar," from logical to numeric. Please make sure that FALSE ",
"represents control/neutral stimuli and TRUE represents target stimuli")
ds[,targetvar]%<>%as.numeric()
}
if(is.factor(ds[,targetvar])){
warning("Recoded ",targetvar," from factor to numeric. Please make sure that ",
levels(ds[,targetvar])[1], " represents control/neutral stimuli and ",
levels(ds[,targetvar])[2], " represents target stimuli")
ds[,targetvar]<-as.numeric(ds[,targetvar])-1
}
}
rmindices <- ds[,cols] %>% lapply(FUN=is.na) %>% as.data.frame %>%
apply(MARGIN=1,FUN=any) %>% which
if(length(rmindices)>0){
ds<-ds[-rmindices,]
warning("Removed ",length(rmindices),
" rows due to presence of NA in critical variable(s)")
}
return(ds)
}
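# Example (not run): validate and coerce the packaged erotica dataset
# aat_preparedata(erotica,subjvar="subject",pullvar="is_pull",
#                 targetvar="is_target",rtvar="RT")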
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/AATtools/R/helpers.R ----
# Outlier removing algorithms ####
#' @title Pre-processing rules
#' @description These are pre-processing rules that can be used in \link{aat_splithalf}, \link{aat_bootstrap}, and \link{aat_compute}.
#'
#' \itemize{
#' \item The following rules are to be used for the \code{trialdropfunc} argument.
#' The way you handle outliers for the reliability computation and bootstrapping more broadly
#' should mimic the way you do it in your regular analyses.
#' It is recommended to exclude outlying trials when computing AAT scores using the mean double-difference scores and regression scoring approaches,
#' but not when using d-scores or median double-difference scores.
#' \itemize{
#' \item \code{prune_nothing} excludes no trials (default)
#' \item \code{trial_prune_3SD} excludes trials deviating more than 3SD from the mean per participant.
#' \item \code{trial_prune_3MAD} excludes trials deviating more than 3 median absolute deviations from the median per participant.
#' \item \code{trial_prune_grubbs} applies a Grubbs' test to the data, removing one outlier at a time until the test is no longer significant.
#' \item \code{trial_prune_SD_dropcases} removes trials deviating more than a specific number of standard deviations from the participant's mean,
#' and removes participants with an excessive percentage of outliers.
#' Required arguments:
#' \itemize{
#' \item \code{trialsd} - trials deviating more than \code{trialsd} standard deviations from the participant's mean are excluded (optional; default is 3)
#' \item \code{maxoutliers} - participants with a higher percentage of outliers are removed from the data. (optional; default is .15)
#' }
#' \item \code{trial_recode_SD} recodes outlying reaction times to the nearest non-outlying value,
#' with outliers defined as reaction times deviating more than a certain number of standard deviations from the participant's mean. Optional argument:
#' \itemize{
#' \item \code{trialsd} - trials deviating more than this many standard deviations from the mean are classified as outliers (optional; default is 3).
#' }
#' \item \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample} remove trials below and/or above certain percentiles,
#' on a subject-by-subject basis or sample-wide, respectively. The following arguments are available:
#' \itemize{
#' \item \code{lowerpercent} and \code{upperpercent} (optional; defaults are .01 and .99).
#' }
#' }
#' \item The following pre-processing rules are to be used for the \code{errortrialfunc} argument.
#' They determine how error trials are handled: removed or recoded.
#'
#' \itemize{
#' \item \code{prune_nothing} removes no errors (default).
#' \item \code{error_replace_blockmeanplus} replaces error trial reaction times with the block mean, plus an arbitrary extra quantity.
#' If used, the following additional arguments are required:
#' \itemize{
#' \item \code{blockvar} - Quoted name of the block variable (mandatory)
#' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory)
#' \item \code{errorbonus} - Amount to add to the reaction time of error trials. Default is 0.6 (recommended by Greenwald, Nosek, & Banaji, 2003)
#' }
#' \item \code{error_prune_dropcases} removes errors and drops participants if they have more errors than a given percentage. The following arguments are available:
#' \itemize{
#' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory)
#' \item \code{maxerrors} - participants with a higher percentage of errors are excluded from the dataset. Default is .15.
#' }
#' }
#' \item These are pre-processing rules to be used for the \code{casedropfunc} argument.
#' The way you handle outliers here should mimic the way you do it in your regular analyses.
#' \itemize{
#' \item \code{prune_nothing} excludes no participants (default)
#' \item \code{case_prune_3SD} excludes participants deviating more than 3SD from the sample mean.
#' }
#' }
#' @param ds A data.frame.
#' @param subjvar The name of the subject variable.
#' @param rtvar The name of the reaction time variable.
#' @param blockvar The name of the block variable.
#' @param errorvar The name of the error variable.
#' @param lowerpercent,upperpercent for \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample},
#' the lower and upper proportions beyond which trials are considered outliers and removed (defaults to .01 and .99).
#' @param trialsd The amount of deviation from the participant mean (in SD) after which a trial is considered an outlier and excluded (defaults to 3).
#' @param maxoutliers for \code{trial_prune_SD_dropcases}, the maximum percentage of outliers, after which a participant is excluded from the data.
#' @param errorbonus for \code{error_replace_blockmeanplus}, the amount of seconds to add to the block mean
#' and use as a replacement for error trial reaction times (default is 0.6).
#' @param maxerrors for \code{error_prune_dropcases}, the maximum percentage of errors, after which a participant is excluded from the data.
#' @param ... Other arguments (ignored).
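#' @examples
#' # A minimal sketch of calling the rules directly. They are normally applied
#' # internally by aat_splithalf(), aat_bootstrap() and aat_compute(), which
#' # create the split-half "key" column the rules group by; here we add a
#' # constant key by hand.
#' ds <- erotica
#' ds$key <- 1
#' pruned <- trial_prune_3SD(ds, subjvar = "subject", rtvar = "RT")
#' nrow(ds) - nrow(pruned) # trials excluded as within-person >3SD outliers
#' recoded <- trial_recode_SD(ds, subjvar = "subject", rtvar = "RT", trialsd = 3)
#' range(recoded$RT) # outlying RTs are clamped to each participant's mean +/- 3SD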
#' @name Preprocessing
NULL
#' @export
#' @rdname Preprocessing
prune_nothing<-function(ds,...){
ds
}
#' @export
#' @rdname Preprocessing
trial_prune_percent_subject<-function(ds,subjvar,rtvar,lowerpercent=.01,upperpercent=.99,...){
ds %>% group_by(!!sym(subjvar),key) %>%
mutate(percentile=(rank(!!sym(rtvar))-1)/(n()-1)) %>%
filter(.data$percentile > lowerpercent & .data$percentile< upperpercent) %>% ungroup()
}
trial_prune_percent_subject_alt<-function(ds,subjvar,rtvar,lowerpercent=.01,upperpercent=.99,...){
ds$percentile <- ave(ds[[rtvar]],ds[[subjvar]],ds[["key"]],FUN=function(x){ (rank(x)-1)/(length(x)-1) })
ds[ds$percentile > lowerpercent & ds$percentile < upperpercent,]
}
#' @export
#' @rdname Preprocessing
trial_prune_percent_sample<-function(ds,rtvar,lowerpercent=.01,upperpercent=.99,...){
ds %>% group_by(key) %>% mutate(percentile=(rank(!!sym(rtvar))-1)/(n()-1)) %>%
filter(.data$percentile > lowerpercent & .data$percentile< upperpercent) %>% ungroup()
}
trial_prune_percent_sample_alt<-function(ds,rtvar,lowerpercent=.01,upperpercent=.99,...){
ds$percentile <- ave(ds[[rtvar]],ds[["key"]],FUN=function(x){ (rank(x)-1)/(length(x)-1) })
ds[(ds$percentile > lowerpercent & ds$percentile < upperpercent),]
}
#' @export
#' @rdname Preprocessing
trial_prune_3SD<-function(ds,subjvar,rtvar,...){
ds %>% group_by(!!sym(subjvar),key) %>% filter(abs(vec.scale(!!sym(rtvar))) <3) %>% ungroup()
}
#' @export
#' @rdname Preprocessing
trial_prune_3MAD<-function(ds,subjvar,rtvar,...){
ds %>% group_by(!!sym(subjvar),key) %>% filter(abs(vec.madscale(!!sym(rtvar))) <3) %>% ungroup()
}
trial_prune_3SD_alt<-function(ds,subjvar,rtvar,...){
h<-ave(ds[[rtvar]],ds[[subjvar]],ds[["key"]],FUN=vec.scale)
ds[which(abs(h)<3),]
}
#' @export
#' @rdname Preprocessing
trial_prune_SD_dropcases<-function(ds,subjvar,rtvar,trialsd=3,maxoutliers=.15,...){
ds %>% group_by(!!sym(subjvar),key) %>%
mutate(is.ol=as.numeric(abs(vec.scale(!!sym(rtvar))) >=trialsd),
avg.ol=mean.default(.data$is.ol)) %>%
ungroup() %>% filter(.data$is.ol==0 & .data$avg.ol<maxoutliers)
}
#' @export
#' @rdname Preprocessing
trial_recode_SD<-function(ds,subjvar,rtvar,trialsd=3,...){
dsa<- ds %>% group_by(!!sym(subjvar),key) %>%
mutate(ol.z.score=vec.scale(!!sym(rtvar)),
ol.type=(.data$ol.z.score >= trialsd) - (.data$ol.z.score <= -trialsd),
is.ol=abs(.data$ol.type),
ol.max.rt=mean.default(!!sym(rtvar))+vec.sd(!!sym(rtvar))*trialsd,
ol.min.rt=mean.default(!!sym(rtvar))-vec.sd(!!sym(rtvar))*trialsd)
dsa[which(dsa$is.ol!=0),rtvar]<-ifelse(dsa[which(dsa$is.ol!=0),]$ol.type==1,
dsa[which(dsa$is.ol!=0),]$ol.max.rt,
dsa[which(dsa$is.ol!=0),]$ol.min.rt)
#dsa %>% dplyr::select(-.data$ol.type,-.data$ol.max.rt,-.data$ol.min.rt,-.data$ol.z.score)
dsa$ol.type<-dsa$ol.max.rt<-dsa$ol.min.rt<-dsa$ol.z.score<-NULL
return(dsa)
}
trial_recode_SD_alt<-function(ds,subjvar,rtvar,trialsd=3,...){
ds$ol.grmean<-ave(ds[[rtvar]],ds[[subjvar]],ds$key,FUN=mean.default)
ds$ol.grsd<-ave(ds[[rtvar]],ds[[subjvar]],ds$key,FUN=vec.sd)
ds$ol.z.score<-(ds[[rtvar]]-ds$ol.grmean)/ds$ol.grsd
ds$is.ol<-abs(ds$ol.z.score)>=trialsd
ds[[rtvar]]<-(!ds$is.ol)*ds[[rtvar]] + ds$is.ol*(ds$ol.grmean+sign(ds$ol.z.score)*ds$ol.grsd*trialsd)
ds$ol.grmean<-ds$ol.grsd<-ds$ol.z.score<-ds$ol.type<-NULL
ds
}
#' @export
#' @rdname Preprocessing
trial_prune_grubbs<-function(ds,subjvar,rtvar,...){
ds %>% group_by(!!sym(subjvar)) %>% filter(!grubbsFilter(!!sym(rtvar))) %>% ungroup()
}
grubbsFilter<-function(x,alphalevel=.05){
pval<-0
is.ol<-rep(F,length(x))
while(pval<alphalevel & sum(is.ol) < length(x)){
scaled<-vec.scale(x[!is.ol])
biggest<-which.max(abs(scaled))
pval<-pgrubbs(scaled[biggest],sum(!is.ol))
if(pval<alphalevel)
is.ol[!is.ol][biggest]<-T
}
is.ol
}
# Borrowed from the {outliers} package: one-sided p-value for the Grubbs statistic p at sample size n
pgrubbs<-function(p,n){
s <- (p^2 * n * (2 - n))/(p^2 * n - (n - 1)^2)
t <- sqrt(s)
if (is.nan(t)) {
res <- 0
}
else {
res <- n * (1 - pt(t, n - 2))
res[res > 1] <- 1
}
return(res)
}
#' @export
#' @rdname Preprocessing
case_prune_3SD<-function(ds,...){
ds[which(abs(vec.scale(ds$abhalf0))<3 & abs(vec.scale(ds$abhalf1))<3),]
}
#Replace error trial latencies with correct block mean RT + 600
#' @export
#' @rdname Preprocessing
error_replace_blockmeanplus<-function(ds,subjvar,rtvar,blockvar,errorvar,errorbonus=0.6, ...){
if(!("is.ol" %in% colnames(ds))){ ds$is.ol<-0 }
ds%<>%group_by(!!sym(subjvar),!!sym(blockvar), key)%>%
mutate(newrt=mean.default((!!sym(rtvar))[!(!!sym(errorvar)) & .data$is.ol==0])+errorbonus)%>%ungroup()
errids<-which(ds[[errorvar]]==1)
ds[[rtvar]][errids]<-ds$newrt[errids]
ds$newrt<-NULL
ds
}
error_replace_blockmeanplus_alt<-function(ds,subjvar,rtvar,blockvar,errorvar,errorbonus=0.6, ...){
  if(!("is.ol" %in% colnames(ds))){ ds$is.ol<-0 }
  ds$.corrmean<-ave(ds[[rtvar]]+ifelse(!ds[[errorvar]] & !ds$is.ol,0,NA),
                    ds[[subjvar]],ds[[blockvar]],ds[["key"]],
                    FUN=function(x){mean.default(x[!is.na(x)])})
ds[[rtvar]][ds[[errorvar]]==TRUE]<-ds$.corrmean[ds[[errorvar]]==TRUE]+errorbonus
ds$.corrmean<-NULL
ds
}
#' @export
#' @rdname Preprocessing
error_prune_dropcases<-function(ds,subjvar, errorvar, maxerrors = .15, ...){
  ds%>%group_by(!!sym(subjvar), key)%>%
    filter(mean.default(!!sym(errorvar))<maxerrors & !!sym(errorvar) == FALSE)%>%ungroup()
}
error_prune_dropcases_alt<-function(ds,subjvar, errorvar, maxerrors = .15, ...){
ds$merr<-ave(ds[[errorvar]],ds[[subjvar]],ds$key,FUN=mean.default)
ds[which(ds$merr<maxerrors & !ds[[errorvar]]),]
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/AATtools/R/outlierhandlers.R ----
#' Compute psychological experiment reliability
#' @description This function can be used to compute an exact reliability score for a psychological task whose results involve a difference score.
#' The resulting intraclass correlation coefficient is equivalent to the average all possible split-half reliability scores.
#' It ranges from -1 to 1, with -1 implying that all variance in the data is explained by within-subjects variability,
#' 1 implying that all variance is explained by between-subjects variability,
#' and 0 implying that within-subjects and between-subjects variability contribute equally to the total variance in the sample.
#' @param ds a long-format data.frame
#' @param subjvar name of the subject variable
#' @param formula a formula predicting the participant's reaction time using trial-level variables such as movement direction and stimulus category
#' @param aatterm a string denoting the term in the formula that contains the participant's approach bias
#'
#' @return a qreliability object, containing the reliability coefficient,
#' and a data.frame with participants' bias scores and score variance.
#'
#' Please note that the valence of the bias scores may or may not correspond with
#' approach and avoidance. If you plan to use these scores in your analyses,
#' always verify that they are in the right direction by correlating them with
#' independently calculated bias scores, for example using \code{aat_compute()}.
#'
#' @export
#' @author Sercan Kahveci
#' @examples
#' # Double-difference score reliability
#' q_reliability(ds=erotica,subjvar="subject",
#' formula= RT ~ is_pull * is_target, aatterm = "is_pull:is_target")
#'
#' # Single-difference reliability for target stimuli
#' q_reliability(ds=erotica[erotica$is_target ==1,],subjvar="subject",
#' formula= RT ~ is_pull, aatterm = "is_pull")
#'
#' # Reliability of the mean reaction time of approaching target stimuli (no difference score)
#' q_reliability(ds=erotica[erotica$is_target ==1 & erotica$is_pull ==1,],subjvar="subject",
#' formula= RT ~ 1, aatterm = "1")
#'
q_reliability<-function(ds,subjvar,formula,aatterm=NA){
# argument checks
cols<-c(subjvar,as.character(attr(terms(formula),"variables"))[-1])
stopifnot(all(cols %in% colnames(ds)))
ds<-ds[apply(!is.na(ds[,cols]),MARGIN=1,FUN=all),]
  if(!is.na(aatterm) && aatterm=="1"){ aatterm<-NA }
# functional part
coefs<-data.frame(pp=unique(ds[[subjvar]]),ab=NA,var=NA)
for(u in 1:nrow(coefs)){
iterset<-ds[ds[[subjvar]]==coefs[u,]$pp,]
mod<-lm(formula,data=iterset)
coefs[u,]$ab <- -coef(mod)[ifelse(is.na(aatterm),length(coef(mod)),aatterm)]
coefs[u,]$var <- (diag(vcov(mod)))[ifelse(is.na(aatterm),length(coef(mod)),aatterm)] # squared standard error
}
bv<-var(coefs$ab,na.rm=TRUE)
wv<-mean(coefs$var,na.rm=TRUE)
q<-(bv-wv)/(bv)
return(structure(list(q=q,coefs=coefs),class="qreliability"))
}
#' @rdname q_reliability
#' @param splitvars Vector of column names over which to split the data
#' to compute difference scores. This can be used to compute the
#' reliability of single, double, or even triple difference scores.
#' @param rtvar Column name of the variable containing reaction times
#' @param dscore If true, reliability will be computed for a difference score
#' that is divided by the subject's standard deviation (as in D-scores)
#' @param na.rm If true, remove rows with missing values from the data
#' @export
#' @examples
#' q_reliability2(ds=erotica,subjvar="subject",
#' splitvars=c("is_pull", "is_target"),rtvar="RT")
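#' # D-score variant: each participant's RTs are divided by their SD
#' # before the double mean difference is computed
#' q_reliability2(ds=erotica,subjvar="subject",
#'                splitvars=c("is_pull", "is_target"),rtvar="RT",dscore=TRUE)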
q_reliability2<-function(ds,subjvar,splitvars,rtvar,dscore=F,na.rm=F){
#remove missing
if(na.rm){
ds<-ds[,c(subjvar,rtvar,splitvars)]
ds<-ds[rowSums(is.na(ds))<1,]
}
#divide RTs by person-specific SD to make it possible to compute D-score by simply
# doing a double mean difference
if(dscore){
sds<-tapply(ds[[rtvar]],ds[[subjvar]],sd)
ds[[rtvar]]<-ds[[rtvar]]/sds[as.character(ds[[subjvar]])]
}
#scores
sc<-tapply(X=ds[[rtvar]],
INDEX = ds[,c(splitvars,subjvar)],
FUN=mean)
if(!all(dim(sc)[-length(dim(sc))]==2)){
stop("Not all split variables consist of only 2 values.")
}
while(length(dim(sc))>1){
sc<-arrextract(sc,1,1)-arrextract(sc,1,2)
}
#variances
variances<-tapply(X=ds[[rtvar]],
INDEX = ds[,c(splitvars,subjvar)],
FUN=function(x){var(x)/length(x)}) %>%
apply(X=.,MARGIN=length(dim(.)),sum)
#remove missing
unmissing<-which(!is.na(variances) & !is.na(sc))
sc<-sc[unmissing]
variances<-variances[unmissing]
#rel
bv<-var(sc)
wv<-mean(variances)
rel<-(bv-wv)/(bv)
#output
return(structure(list(q=rel,coefs=data.frame(pp=names(sc),
bias=sc,
var=variances)),
class="qreliability"))
}
# Borrowed from Stack Overflow: extract the .value-th slice of array A along dimension .dim
arrextract <- function(A, .dim, .value){
idx.list <- lapply(dim(A), seq_len)
idx.list[[.dim]] <- .value
do.call(`[`, c(list(A), idx.list))
}
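# e.g. arrextract(array(1:8,dim=c(2,2,2)), .dim=1, .value=2) returns A[2, , ]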
#' @export
#' @rdname q_reliability
#' @param x a \code{qreliability} object
#' @param ... Other arguments passed to the generic \code{print} and \code{plot} functions.
print.qreliability<-function(x,...){
cat("q = ",x$q,"\n",sep="")
}
#' @export
#' @rdname q_reliability
plot.qreliability<-function(x,...){
bv<-var(x$coefs$ab,na.rm=TRUE) / nrow(x$coefs)*1.96 *2
wv<-mean(x$coefs$var,na.rm=TRUE) / nrow(x$coefs)*1.96 *2
plotset<-data.frame(x=mean(x$coefs$ab) + cos(0:100 / 100 * 2*pi)*bv * 1/2*sqrt(2) - sin(0:100 / 100 * 2*pi)*wv * 1/2*sqrt(2),
y=mean(x$coefs$ab) + cos(0:100 / 100 * 2*pi)*bv * 1/2*sqrt(2) + sin(0:100 / 100 * 2*pi)*wv * 1/2*sqrt(2))
plot(plotset$x,plotset$y,type="l",main=paste0("Reliability\n","q = ",round(x$q,digits=2)),xlab="Participants' scores",ylab="Participants' scores")
points(x$coefs$ab,x$coefs$ab)
dispval<-(bv+wv)/100
plotset<-data.frame(xstart=c(x$coefs$ab+dispval,x$coefs$ab-dispval),
ystart=c(x$coefs$ab-dispval,x$coefs$ab+dispval),
xend=c(x$coefs$ab+sqrt(x$coefs$var) *1/2*sqrt(2),
x$coefs$ab-sqrt(x$coefs$var) *1/2*sqrt(2)),
yend=c(x$coefs$ab-sqrt(x$coefs$var) *1/2*sqrt(2),
x$coefs$ab+sqrt(x$coefs$var) *1/2*sqrt(2)))
segments(plotset$xstart,plotset$ystart,plotset$xend,plotset$yend)
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/AATtools/R/q_reliability.R ----
# utils ####
#' @name splitrel
#' @title Split Half-Based Reliability Coefficients
#' @seealso \link{covrel}
NULL
#' @describeIn splitrel Perform a Spearman-Brown correction on the provided correlation score.
#'
#' @param corr To-be-corrected correlation coefficient
#' @param ntests An integer indicating how many times longer the full test is than the test underlying \code{corr}; the corrected correlation coefficient is computed for a test of this relative length.
#' When \code{ntests=2}, the formula will compute what the correlation coefficient would be if the test were twice as long.
#' @param fix.negative Determines how to deal with a negative value. "nullify" sets it to zero,
#' "bilateral" applies the correction as if it were a positive number, and then sets it to negative.
#' "none" gives the raw value. It should be noted that negative values are not supposed to occur,
#' and there is no commonly accepted way to deal with them when they do occur.
#' @return Spearman-Brown-corrected correlation coefficient.
#' @export
#'
#' @examples
#'
#' SpearmanBrown(.5)
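#' # corrected coefficient for a test three times as long
#' SpearmanBrown(.5, ntests = 3)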
SpearmanBrown<-function(corr,ntests=2,fix.negative=c("none","nullify","bilateral")){
fix.negative<-match.arg(fix.negative)
if(fix.negative=="bilateral"){
s<-sign(corr)
corr<-abs(corr)
sb<-ntests*corr / (1+(ntests-1)*corr)
return(s*sb)
}else{
sb<-ntests*corr / (1+(ntests-1)*corr)
if(fix.negative=="nullify"){
return(ifelse(sb<0,0,sb))
}else{
return(sb)
}
}
}
#' @describeIn splitrel Compute the true reliability using the Flanagan-Rulon formula,
#' which takes into account unequal variances between split halves.
#' @param x1 scores from half 1
#' @param x2 scores from half 2
#' @export
#'
#' @examples
#' FlanaganRulon(a<-rnorm(50),rnorm(50)+a*.5,fix.negative="bilateral")
FlanaganRulon<-function(x1,x2,fix.negative=c("none","nullify","bilateral")){
fix.negative<-match.arg(fix.negative)
d<-var(x1-x2)
k<-var(x1+x2)
if(fix.negative=="none"){
return(1-d/k)
}else if(fix.negative=="bilateral"){
fr<-(1-d/k)
#fr<-ifelse(fr>0,fr,fr / (1-fr))
fr<-fr/max(1, 1-fr)
return(fr)
}else if(fix.negative=="nullify"){
fr<-1-d/k
return(ifelse(fr>0,fr,0))
}
}
#' @describeIn splitrel Compute split-half reliability using the Raju formula,
#' which takes into account unequal split-halves and variances.
#'
#' @param prop Proportion of the first half to the complete sample
#'
#' @export
#'
#' @examples
#' a<-rnorm(50)
#' b<-rnorm(50)+a*.5
#' RajuCoefficient(a,b,prop=.4,fix.negative="bilateral")
RajuCoefficient<-function(x1,x2,prop,fix.negative=c("none","nullify","bilateral")){
fix.negative<-match.arg(fix.negative)
covar<-cov(x1,x2)
if(fix.negative=="bilateral"){
sumvar<-var(x1)+var(x2)+2*abs(covar)
}else{
sumvar<-var(x1)+var(x2)+2*covar
}
raju<-covar / (prop * (1-prop) * sumvar)
return(ifelse(fix.negative=="nullify" & raju<0,0,raju))
}
#' @name covrel
#' @title Covariance Matrix-Based Reliability Coefficients
#' @description These functions allow for the computation of the reliability of a dataset
#' from the covariance matrix of its variables.
#' @seealso \link{splitrel}
#' @examples
#' # compute reliability from covariance
#' h<-cov(iris[,1:4])
#' calpha(h)
#' lambda2(h)
#' lambda4(h)
#' # Lambda-2 and Lambda-4 are significantly larger because
#' # some of the variables in the iris dataset are negatively correlated.
NULL
#' @describeIn covrel Cronbach's alpha
#' @param covmat a covariance matrix
#' @export
calpha<-function(covmat){
(nrow(covmat)/(nrow(covmat)-1))*(1 - sum(diag(covmat))/sum(covmat))
}
#' @describeIn covrel Guttman's Lambda-2
#' @export
lambda2<-function(covmat){
offs<-covmat[upper.tri(covmat)]
covs<-2*sum(offs)
sqcov<-2*sum(offs^2)
sums<-sum(covmat)
n<-dim(covmat)[1]
covs/sums + sqrt(n/(n-1)*sqcov)/sums
}
#' @describeIn covrel Guttman's Lambda-4. This algorithm tries to attain the highest possible reliability by iteratively flipping the sign of individual variables, keeping each flip that increases Cronbach's alpha, until no further improvement is possible.
#' @export
lambda4<-function(covmat){
flip<-rep(1,ncol(covmat))
itermaxid<- -1
itermax<- -1
while(itermaxid != 0){
itermaxid<-0
for(i in seq_along(flip)){
key<-rep(1,ncol(covmat))
key[i]<- -1
itera<-calpha(t(t(covmat*flip*key)*flip*key))
if(itera>itermax){
itermax<-itera
itermaxid<-i
}
}
if(itermaxid>0){
flip[itermaxid]<- -flip[itermaxid]
}
}
calpha(t(t(covmat*flip)*flip))
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/AATtools/R/relcorrections.R ----
#' @import dplyr
#' @import magrittr
#' @import doParallel
#' @import foreach
#' @importFrom magrittr %>% %<>% %$%
#' @importFrom dplyr group_by ungroup mutate summarise sample_n n filter select
#' @importFrom parallel detectCores makeCluster stopCluster
#' @importFrom foreach getDoParRegistered registerDoSEQ
#' @importFrom stats var median mad sd lm vcov terms as.formula coef cor cov setNames quantile
#' pt rnorm rgamma pnorm qnorm ave median.default na.omit weighted.mean
#' @importFrom graphics abline points segments text plot par axis strwidth image
.onLoad<-function(libname, pkgname){
#avoid CRAN errors
utils::globalVariables(c("abhalf0","abhalf1","ab","key","."),"AATtools")
#register generic functions
registerS3method("print",class="aat_splithalf",method=print.aat_splithalf)
registerS3method("plot",class="aat_splithalf",method=plot.aat_splithalf)
registerS3method("print",class="aat_bootstrap",method=print.aat_bootstrap)
registerS3method("plot",class="aat_bootstrap",method=plot.aat_bootstrap)
registerS3method("print",class="qreliability",method=print.qreliability)
registerS3method("plot",class="qreliability",method=plot.qreliability)
registerS3method("print",class="aat_covreliability",method=print.aat_covreliability)
registerS3method("print",class="aat_covreliability_jackknife",method=print.aat_covreliability_jackknife)
registerS3method("plot",class="aat_covreliability_jackknife",method=plot.aat_covreliability_jackknife)
#set max number of cores to use
  if (r_check_limit_cores()) {
    num_workers <- 2L
  } else {
    # detectCores() can return NA on some platforms; fall back to a single worker
    num_workers <- max(parallel::detectCores(), 1L, na.rm = TRUE)
  }
options(AATtools.workers=num_workers)
#greet user
#packageStartupMessage("Thank you for loading AATtools v0.0.1")
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/AATtools/R/zzz.R ----
#' Shiny App to Demonstrate Analysis of Variance
#'
#' @name shiny_anova
#' @aliases shiny_anova
#' @description An interactive Shiny app to demonstrate Analysis of Variance.
#' @usage shiny_anova()
#'
#' @details The interactive Shiny app demonstrates the principles of Analysis of Variance.
#' The true parameter values are provided by the user.
#' The user changes sample characteristics, distribution function and simulation features
#' and explores the influence of these changes on the hypothesis testing using principles of analysis of variance.
#'
#' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters},
#' \strong{Sample Characteristics}, and \strong{Distribution Function}.
#' To use the app at first instance, just click the \code{Update} button.
#' To alter the input values, edit the text box or move the point on the slider and
#' explore the changes in different tabs (see below).
#'
#' To obtain identical outcomes in a separate run of the app,
#' set a common seed value at the bottom of the left panel and click \code{Update}.
#' All subsequent updates will produce identical results provided other inputs are identical.
#' The seed value is ignored when the option \code{check the box to update instantly} is selected.
#'
#' @return The outcomes are presented in several tabs.
#' \item{Population}{contains the density plots of three populations and
#' rug plots of the sample units randomly drawn from these populations.
#' It also shows the population parameter values chosen by the user.}
#' \item{Sample}{contains the dot plots and box plots of three samples drawn
#' randomly from the three populations and rug plots of the sample units.
#' It also includes the estimates of mean and standard deviation of three samples.}
#' \item{SS & MS}{contains the bar plots showing the between and within sum of squares (SS)
#' and mean squares (MS) as well as the proportion of between and within SS over total SS.}
#' \item{Test Statistic}{contains the plots showing the mean difference between groups
#' and corresponding 95\% confidence intervals (CI).
#' The tab also contains the distribution of the test statistic \code{F},
#' the observed value of the test statistic and probabilities
#' under the given value of the Type 1 error.}
#' \item{Summary}{includes the summary of the sampled data and outcomes
#' from the one-way analysis of variance. Different sections are:
#' (1) Hypothesis, highlighting the null and alternative hypothesis;
#' (2) Sample, tabulating the full sampled data;
#' (3) Summary Statistics, summarising the summary information of three samples;
#' (4) Model Outputs, the outputs from fitting the analysis of variance model.
#' The section also presents the multiple comparisons of means using
#' Tukey's Honest Significant Differences test.
#' This section represents standard R outputs based on fitting an \code{\link{lm}} function.}
#'
#' @note \url{https://shiny.abdn.ac.uk/Stats/apps/}
#'
#' @author Mintu Nath
#'
#' @seealso Function in base R for normal distribution, F distribution and fitting linear model including
#' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}},
#' \code{\link{df}}, \code{\link{pf}}, \code{\link{qf}}, \code{\link{rf}},
#' \code{\link{lm}}, \code{\link{aov}},
#' \code{\link{anova.lm}}, \code{\link{summary.lm}},
#' \code{\link{summary.aov}}, \code{\link{model.tables}}.
#'
#' @examples
#' if(interactive()){
#' library(ggplot2)
#' library(shiny)
#' library(ABACUS)
#' # Run shiny app
#' shiny_anova()
#' }
#'
#' @import shiny
#' @import ggplot2
#' @export
# Function
shiny_anova <- function() {
shiny::runApp(appDir = system.file("app_anova", package = "ABACUS"), launch.browser = TRUE)
Sys.setenv("shiny_anova" = "")
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_anova.R ----
#' Shiny App to Explore Properties of the Normal Distribution
#'
#' @name shiny_dnorm
#' @aliases shiny_dnorm
#' @description An interactive Shiny app to demonstrate properties of the Normal distribution.
#' @usage shiny_dnorm()
#'
#' @details The interactive Shiny app demonstrates the properties of Normal distribution.
#' The app considers parameters (mean and standard deviation) of the Normal distribution and captures its
#' properties using different graphical outputs.
#' The user changes the population parameter values, sample characteristics, distribution function and
#' simulation features and explores the influence of these changes on the properties of the distribution.
#'
#' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters},
#' \strong{Sample Characteristics}, and \strong{Distribution Function}.
#' To use the app at first instance, just click the \code{Update} button.
#' To alter the input values, edit the text box or move the point on the slider and
#' explore the changes in different tabs (see below).
#'
#' To obtain identical outcomes in a separate run of the app,
#' set a common seed value at the bottom of the left panel and click \code{Update}.
#' All subsequent updates will produce identical results provided other inputs are identical.
#' The seed value is ignored when the option \code{check the box to update instantly} is selected.
#'
#' @return The outcomes are presented in several tabs.
#' \item{Sample}{contains the histogram of sampling units randomly drawn from the given population.
#' Increasing the sample size and the number of bins creates the shape of the Normal distribution.
#' It also creates the normal density plot based on empirical data and
#' theoretical normal distribution given the parameter values}
#' \item{Distribution}{contains the plot for the probability density function of the Normal distribution
#' with given parameter values.
#' The user can also explore centring and scaling effect on the probability density function.}
#' \item{Probability & Quantile}{contains the plots for the probability density function and
#' cumulative probability density function. The user can explore the relationship between the
#' cumulative probability and quantile corresponding to tails of the distribution.}
#'
#'
#' @note \url{https://shiny.abdn.ac.uk/Stats/apps/}
#'
#' @author Mintu Nath
#'
#' @seealso Function in base R for normal distribution, including
#' \code{\link{dnorm}}, \code{\link{pnorm}},
#' \code{\link{qnorm}}, \code{\link{rnorm}}
#'
#' @examples
#' if(interactive()){
#' library(ggplot2)
#' library(shiny)
#' library(ABACUS)
#' # Run shiny app
#' shiny_dnorm()
#' }
#'
#' @import shiny
#' @import ggplot2
#' @export
# Function
shiny_dnorm <- function() {
shiny::runApp(appDir = system.file("app_dnorm", package = "ABACUS"), launch.browser = TRUE)
Sys.setenv("shiny_dnorm" = "")
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_dnorm.R ----
#' Shiny App to Explore Properties of Normal and Student's t Distributions
#'
#' @name shiny_dnorm_dt
#' @aliases shiny_dnorm_dt
#' @description An interactive Shiny app to demonstrate Normal and Student's t distributions.
#' @usage shiny_dnorm_dt()
#'
#' @details The interactive Shiny app demonstrates the properties of Normal and Student's t distributions.
#' The app considers parameters (mean and standard deviation) of the standard Normal distribution
#' along with Student's t distribution given degrees of freedom.
#'
#' The left panel includes the user inputs for
#' \strong{Parameters} of standard Normal distribution (mean = 0, sd = 1) and
#' Student's \code{t} distribution (degrees of freedom), and
#' \strong{Probability} with options to change cumulative probability and tails of probability.
#' To alter the input values, move the point on the slider for the degrees of freedom
#' of \code{t} distribution and explore the changes in different tabs (see below).
#'
#'
#' @return The outcomes are presented in two tabs.
#' \item{Probability Density Function}{displays the probability density function of
#' the standard Normal distribution (red) and \code{t} distribution (blue).}
#' \item{Distribution & Probability}{contains the plot for the probability density function of
#' the standard Normal distribution and Student's \code{t} distribution with given degrees of freedom.}
#'
#'
#' @note \url{https://shiny.abdn.ac.uk/Stats/apps/}
#'
#' @author Mintu Nath
#'
#' @seealso Functions in base R for the normal and t distributions, including
#' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}},
#' \code{\link{dt}}, \code{\link{pt}}, \code{\link{qt}}, \code{\link{rt}}.
#'
#' @examples
#' if(interactive()){
#' library(ggplot2)
#' library(shiny)
#' library(ABACUS)
#' # Run shiny app
#' shiny_dnorm_dt()
#' }
#'
#' @import shiny
#' @import ggplot2
#' @export
# Function
shiny_dnorm_dt <- function() {
shiny::runApp(appDir = system.file("app_dnorm_dt", package = "ABACUS"), launch.browser = TRUE)
Sys.setenv("shiny_dnorm_dt" = "")
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_dnorm_dt.R ----
#' Shiny App to Demonstrate One-Sample Student's t-Test
#'
#' @name shiny_onesampt
#' @aliases shiny_onesampt
#' @description An interactive Shiny app to demonstrate one-sample Student's t-test.
#' @usage shiny_onesampt()
#'
#' @details The interactive Shiny app demonstrates the principles of the hypothesis testing of means
#' in a one-sample design where the population variance is unknown.
#' The true population parameters are provided by the user.
#' The user changes the hypothesised population mean and other features and explores
#' how Student's t-test compares the hypothesised mean
#' with the mean of the sample randomly drawn from the population.
#'
#' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters},
#' \strong{Sample Characteristics}, and \strong{Distribution Function}.
#' To use the app at first instance, just click the \code{Update} button.
#' To alter the input values, edit the text box or move the point on the slider and
#' explore the changes in different tabs (see below).
#'
#' To obtain identical outcomes in a separate run of the app,
#' set a common seed value at the bottom of the left panel and click \code{Update}.
#' All subsequent updates will produce identical results provided other inputs are identical.
#' The seed value is ignored when the option \code{check the box to update instantly} is selected.
#'
#' @return The outcomes are presented in several tabs.
#' \item{Population}{contains the density plots of the population and
#' rug plots of the sample units randomly drawn from the population.
#' It also includes the population parameter values chosen by the user.}
#' \item{Sample}{contains the dot plot and box plot of the sample drawn
#' randomly from the population and rug plot of the sample units.
#' It also includes the mean and standard deviation of the random sample.}
#' \item{Test Statistic}{presents the plot showing the mean difference
#' between the sample mean and hypothesised mean and corresponding 95\% confidence intervals (CI).
#' The tab also contains the distribution of the test statistic \code{t}
#' with the observed value of the test statistic and probabilities under the given value of the Type 1 error.}
#' \item{Summary}{includes the summary of the sampled data and outcomes
#' from the one-sample Student's t-test. Different sections are:
#' (1) Hypothesis, highlighting the null and alternative hypothesis;
#' (2) Sample, tabulating the full sampled data;
#' (3) Summary Statistics, summarising the summary information of the sample;
#' (4) Test Statistic, presenting the outputs from the one-sample Student's t-test.
#' (5) Confidence Interval, highlighting the mean difference and corresponding 95\% confidence intervals (CI).}
#'
#' @note \url{https://shiny.abdn.ac.uk/Stats/apps/}
#'
#' @author Mintu Nath
#'
#' @seealso Function in base R for normal distribution and t distribution including
#' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}},
#' \code{\link{dt}}, \code{\link{pt}}, \code{\link{qt}}, \code{\link{rt}}.
#' The app \code{\link{shiny_onesampz}} performs the hypothesis testing of mean
#' when the population variance is known.
#'
#' @examples
#' if(interactive()){
#' library(ggplot2)
#' library(shiny)
#' library(ABACUS)
#' # Run shiny app
#' shiny_onesampt()
#' }
#'
#' @import shiny
#' @import ggplot2
#' @export
# Function
shiny_onesampt <- function() {
shiny::runApp(appDir = system.file("app_onesampt", package = "ABACUS"), launch.browser = TRUE)
Sys.setenv("shiny_onesampt" = "")
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_onesampt.R ----
#' Shiny App to Demonstrate One-Sample Z-Test
#'
#' @name shiny_onesampz
#' @aliases shiny_onesampz
#' @description An interactive Shiny app to demonstrate one-sample Z-test.
#' @usage shiny_onesampz()
#'
#' @details The interactive Shiny app demonstrates the principles of the hypothesis testing of means
#' in a one-sample design where the population variance is known.
#' The true population parameters are provided by the user.
#' The user changes the hypothesised population mean and other features and explores
#' how the Z-test compares the hypothesised mean
#' with the mean of the sample randomly drawn from the population.
#'
#' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters},
#' \strong{Sample Characteristics}, and \strong{Distribution Function}.
#' To use the app at first instance, just click the \code{Update} button.
#' To alter the input values, edit the text box or move the point on the slider and
#' explore the changes in different tabs (see below).
#'
#' To obtain identical outcomes in a separate run of the app,
#' set a common seed value at the bottom of the left panel and click \code{Update}.
#' All subsequent updates will produce identical results provided other inputs are identical.
#' The seed value is ignored when the option \code{check the box to update instantly} is selected.
#'
#' @return The outcomes are presented in several tabs.
#' \item{Population}{contains the density plots of the population and
#' rug plots of the sample units randomly drawn from the population.
#' It also includes the population parameter values chosen by the user.}
#' \item{Sample}{contains the dot plot and box plot of the sample drawn
#' randomly from the population and rug plot of the sample units.
#' It also includes the mean and standard deviation of the random sample.}
#' \item{Test Statistic}{contains the plot showing the mean difference
#' between the sample mean and hypothesised mean and corresponding 95\% confidence intervals (CI).
#' The tab also contains the distribution of the test statistic \code{z}
#' with the observed value of the test statistic and probabilities under the given value of the Type 1 error.}
#' \item{Summary}{includes the summary of the sampled data and outcomes
#' from the one-sample Z-test. Different sections are:
#' (1) Hypothesis, highlighting the null and alternative hypothesis;
#' (2) Sample, tabulating the full sampled data;
#' (3) Summary Statistics, summarising the summary information of the sample;
#' (4) Test Statistic, presenting the outputs from the one-sample Z-test.
#' (5) Confidence Interval, highlighting the mean difference and corresponding 95\% confidence intervals (CI).}
#'
#' @note \url{https://shiny.abdn.ac.uk/Stats/apps/}
#'
#' @author Mintu Nath
#' @seealso Function in base R for normal distribution including
#' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}}.
#' The app \code{\link{shiny_onesampt}} performs the hypothesis testing of mean
#' when the population variance is unknown.
#'
#' @examples
#' if(interactive()){
#' library(ggplot2)
#' library(shiny)
#' library(ABACUS)
#' # Run shiny app
#' shiny_onesampz()
#' }
#'
#' @import shiny
#' @import ggplot2
#' @export
# Function
shiny_onesampz <- function() {
shiny::runApp(appDir = system.file("app_onesampz", package = "ABACUS"), launch.browser = TRUE)
Sys.setenv("shiny_onesampz" = "")
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_onesampz.R ----
#' Shiny App to Explore Properties of Sampling Distributions
#'
#' @name shiny_sampling
#' @aliases shiny_sampling
#' @description An interactive Shiny app to demonstrate properties of the sampling distributions.
#' @usage shiny_sampling()
#'
#' @details The interactive Shiny app demonstrates the properties of the sampling distribution.
#' The true population parameter values of the Normal distribution are provided by the user.
#' The user draws many samples from the population with the given sample characteristics
#' and explore the variability of sample means.
#' The app also includes the construction of 95\% confidence interval for all samples.
#' Altering the population and sample characteristics, the user can explore
#' the influence of these changes on the sampling distribution.
#'
#' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters},
#' \strong{Sample Characteristics} and \strong{Distribution Function}.
#' To use the app at first instance, just click the \code{Update} button.
#' To alter the input values, edit the text box or move the point on the slider and
#' explore the changes in different tabs (see below).
#'
#' To obtain identical outcomes in a separate run of the app,
#' set a common seed value at the bottom of the left panel and click \code{Update}.
#' All subsequent updates will produce identical results provided other inputs are identical.
#' The seed value is ignored when the option \code{check the box to update instantly} is selected.
#'
#' @return The outcomes are presented in several tabs.
#' \item{Population & Sample}{contains the density plots of the population and
#' dot plot of the sample units for the first sample randomly drawn from the population.
#' It also includes the population parameter values chosen by the user as well as
#' estimates of sample mean and standard deviation based on the first sample.}
#' \item{Sampling Distribution}{contains a panel of 8 dot plots, each based on a sample drawn
#' randomly from the population with given parameters.
#' Each plot depicts the mean and standard deviation of the corresponding random sample.}
#' \item{Sample Estimators}{contains the histogram of the observed sample means and
#' the empirical distribution of sample means. It also includes the rug plot of all sample means.}
#' \item{Confidence Interval}{contains the plot showing the 95\% confidence intervals (CI) of all samples.
#' The plot shows the true population mean as a red horizontal line.
#' It also provides the exact number of these estimated CI that include the true population mean.}
#' \item{Summary}{includes the summary of the sampled data and
#' of the sampling distribution of the mean. Different sections are:
#' (1) Sample, tabulating the full sampled data;
#' (2) Sample Distribution, highlighting the expectation of the sample mean and sample standard deviation
#' as well as the standard error of the mean;
#' (3) Confidence Interval, showing the concept of 95\% confidence intervals (CI) of the mean.}
#'
#' @note \url{https://shiny.abdn.ac.uk/Stats/apps/}
#'
#' Also note that, by the central limit theorem, the distribution of the sample means will be approximately
#' normal for sufficiently large samples, whatever the distribution of the variable in the population.
#'
#' @author Mintu Nath
#' @seealso Function in base R for normal distribution including
#' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}}, \code{\link{sample}}.
#'
#' @examples
#' if(interactive()){
#' library(ggplot2)
#' library(shiny)
#' library(ABACUS)
#' # Run shiny app
#' shiny_sampling()
#' }
#'
#' @import shiny
#' @import ggplot2
#' @export
# Funcition
shiny_sampling <- function() {
shiny::runApp(appDir = system.file("app_sampling", package = "ABACUS"), launch.browser = TRUE)
Sys.setenv("shiny_sampling" = "")
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_sampling.R ----
#' Shiny App to Demonstrate Two-Sample Independent (Unpaired) Student's t-Test
#'
#' @name shiny_twosampt
#' @aliases shiny_twosampt
#' @description An interactive Shiny app to demonstrate two-sample independent (unpaired) Student's t-test.
#' @usage shiny_twosampt()
#'
#' @details The interactive Shiny app demonstrates the principles of the hypothesis testing of means
#' in a two-sample independent (unpaired) design where the population variances are equal but unknown.
#' The true parameter values are provided by the user.
#' The user changes sample characteristics, distribution function and simulation features
#' and explores the influence of these changes on the hypothesis testing using Student's t-test.
#'
#' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters},
#' \strong{Sample Characteristics}, and \strong{Distribution Function}.
#' To use the app at first instance, just click the \code{Update} button.
#' To alter the input values, edit the text box or move the point on the slider and
#' explore the changes in different tabs (see below).
#'
#' To obtain identical outcomes in a separate run of the app,
#' set a common seed value at the bottom of the left panel and click \code{Update}.
#' All subsequent updates will produce identical results provided other inputs are identical.
#' The seed value is ignored when the option \code{check the box to update instantly} is selected.
#'
#' @return The outcomes are presented in several tabs.
#' \item{Population}{contains the density plots of two populations and
#' rug plots of the sample units randomly drawn from these populations.
#' It also includes the population parameter values chosen by the user.}
#' \item{Sample}{contains the dot plots and box plots of two samples drawn
#' randomly from the two populations and rug plots of the sample units.
#' It also includes the mean and standard deviation of two random samples.}
#' \item{Test Statistic}{contains the plots showing the mean difference between two groups
#' and corresponding 95\% confidence intervals (CI).
#' The tab also contains a panel of the distribution of the test statistic \code{t}
#' with the observed value of the test statistic and probabilities under the given value of the Type 1 error.}
#' \item{Summary}{includes the summary of the sampled data and outcomes
#' from the independent two-sample Student's t-test. Different sections are:
#' (1) Hypothesis, highlighting the null and alternative hypothesis;
#' (2) Sample, tabulating the full sampled data;
#' (3) Summary Statistics, summarising the summary information of two samples;
#' (4) Test Statistic, presenting the outputs from independent two-sample Student's t-test.
#' (5) Confidence Interval, highlighting the mean difference and corresponding 95\% confidence intervals (CI).}
#'
#' @note \url{https://shiny.abdn.ac.uk/Stats/apps/}
#'
#' @author Mintu Nath
#'
#' @seealso Function in base R for normal distribution and t distribution including
#' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}},
#' \code{\link{dt}}, \code{\link{pt}}, \code{\link{qt}}, \code{\link{rt}}
#'
#' @examples
#' if(interactive()){
#' library(ggplot2)
#' library(shiny)
#' library(ABACUS)
#' # Run shiny app
#' shiny_twosampt()
#' }
#'
#' @import shiny
#' @import ggplot2
#' @export
# Function
shiny_twosampt <- function() {
shiny::runApp(appDir = system.file("app_twosampt", package = "ABACUS"), launch.browser = TRUE)
Sys.setenv("shiny_twosampt" = "")
}
# ---- end of file: /scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_twosampt.R ----
# Shiny global functions for ANOVA
library(shiny)
library(ggplot2)
#_________________________________________________________________________________________
# Function to fit Model
fn_InputData <- function(pmean1, pmean2, pmean3,
psd, n1, n2, n3,
p, p_tail){
xrs1 <- round(rnorm(n = n1, mean = pmean1, sd = psd), digits = 1)
smean1 <- round(mean(xrs1, na.rm = TRUE), 2)
ssd1 <- round(sd(xrs1, na.rm = TRUE), 2)
sse1 <- round(ssd1 / sqrt(n1), 2)
xrs2 <- round(rnorm(n = n2, mean = pmean2, sd = psd), digits = 1)
smean2 <- round(mean(xrs2, na.rm = TRUE), 2)
ssd2 <- round(sd(xrs2, na.rm = TRUE), 2)
sse2 <- round(ssd2 / sqrt(n2), 2)
xrs3 <- round(rnorm(n = n3, mean = pmean3, sd = psd), digits = 1)
smean3 <- round(mean(xrs3, na.rm = TRUE), 2)
ssd3 <- round(sd(xrs3, na.rm = TRUE), 2)
sse3 <- round(ssd3 / sqrt(n3), 2)
gr <- c(rep(x = 'Group 1', length = n1),
rep(x = 'Group 2', length = n2),
rep(x = 'Group 3', length = n3))
sDF <- data.frame(Group = gr, xrs = c(xrs1, xrs2, xrs3)) # density not required
sstat <- data.frame(Group = c('Group 1', 'Group 2', 'Group 3'),
n = c(n1, n2, n3),
pmean = c(pmean1, pmean2, pmean3),
psd = c(psd, psd, psd),
smean = c(smean1, smean2, smean3),
ssd = c(ssd1, ssd2, ssd3),
sse = c(sse1, sse2, sse3))
sstat$lower <- sstat$smean - 1.96*sstat$sse
sstat$upper <- sstat$smean + 1.96*sstat$sse
fm <- lm(xrs ~ Group, data = sDF)
afm <- anova(fm)
df1 <- afm$Df[1]
df2 <- afm$Df[2]
fcal <- round(afm$`F value`[1], digits = 2)
pcal <- round(afm$`Pr(>F)`[1], digits = 4)
bMS <- afm$`Mean Sq`[1]
wMS <- afm$`Mean Sq`[2]
df <- c(df1, df2, n1+n2+n3-1)
vSS = round(c(afm$`Sum Sq`, sum(afm$`Sum Sq`)), digits = 2)
vMS <- round(c(bMS, wMS, NA), digits = 2)
SS <- data.frame(Source = c('Between', 'Within', 'Total'), df = df, SS = vSS, MS = vMS)
afm <- aov(xrs ~ Group, data = sDF)
mean_diff <- as.data.frame(TukeyHSD(afm)$Group)
names(mean_diff) <- c('mean_diff', 'lower', 'upper')
mean_diff$comp <- c('G2 vs G1', 'G3 vs G1', 'G3 vs G2')
  xmin <- min(pmean1, pmean2, pmean3) - 3.5*psd
  xmax <- max(pmean1, pmean2, pmean3) + 3.5*psd
  norm_xlim <- c(xmin, xmax)
fr <- rf(n = 10000, df1 = df1, df2 = df2)
f_xlim <- c(min(fr), max(fr))
rm(fr)
q_out <- switch(EXPR = p_tail,
lower = qf(p = p, df1 = df1, df2 = df2, lower.tail = TRUE),
upper = qf(p = p, df1 = df1, df2 = df2, lower.tail = FALSE))
fstat <- c(fcal = fcal,
pcal = pcal,
df1 = df1, df2 = df2,
p = p, q_out = round(q_out, 2))
xpos1 <- fstat['q_out']
tail <- c(p_tail = p_tail)
  qText <- paste0( ' p = ', round(p, 2), '; q = ', round(q_out, 2) )
annotateDF <- data.frame(
xpos = c(xpos1),
ypos = c(Inf),
annotateText = c(qText),
hjustvar = c(0) ,
vjustvar = c(2)) #<- adjust
  hTitle <- paste0('H0: tau1 = tau2 = tau3; H1: At least one tau[k] <> 0')
dTitle <- paste0( 'Population: Mean1 = ', round(pmean1,2),
', Mean2 = ', round(pmean2,2),
', Mean3 = ', round(pmean3,2),
', SD = ', round(psd,2) )
  rTitle <- paste0( 'Sample: Mean1 = ', round(smean1,2),
                    '; Mean2 = ', round(smean2,2),
                    '; Mean3 = ', round(smean3,2),
                    '; Overall Mean = ', round(mean(c(xrs1, xrs2, xrs3)),2))
  ssTitle <- paste0('Between and Within Mean Squares ',
                    '(df = (', df1, ', ', df2, '))')
fTitle <- paste0('Mean Squares (',
'Between = ', SS$MS[1],
'; Within = ', SS$MS[2],
'); F-statistic = ', round(fcal,2),
'; df = (', df1, ', ', df2, ')',
'; p-value = ', sprintf('%1.2e', pcal))
txtTitle <- c(hTitle = hTitle, dTitle = dTitle, rTitle = rTitle,
ssTitle = ssTitle, fTitle = fTitle)
out <- list(sDF = sDF, sstat = sstat, fstat = fstat, tail = tail,
mean_diff = mean_diff, SS = SS,
norm_xlim = norm_xlim, f_xlim = f_xlim,
annotateDF = annotateDF,
txtTitle = txtTitle)
return(out)
}
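# Example call (not run by the app; a quick sketch for interactive testing):
# out <- fn_InputData(pmean1 = 10, pmean2 = 12, pmean3 = 14, psd = 3,
#                     n1 = 20, n2 = 20, n3 = 20, p = 0.05, p_tail = 'upper')
# out$SS        # ANOVA table: df, SS and MS per source
# fn_dnorm(out) # population density plot with sample rug plots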
#_________________________________________________________________________________________
# Population density
fn_dnorm <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean1 <- sstat$pmean[1]
pmean2 <- sstat$pmean[2]
pmean3 <- sstat$pmean[3]
psd <- sstat$psd[1]
dTitle1 <- bquote( 'Population Mean & SD: ' ~
mu[1] == .(pmean1) ~ ', ' ~
sigma[1] == .(psd) ~ '; ' ~
mu[2] == .(pmean2) ~ ', ' ~
sigma[2] == .(psd) ~ ', ' ~
mu[3] == .(pmean3) ~ ', ' ~
sigma[3] == .(psd))
dTitle2 <- 'Rugplots represent the random samples drawn from three populations'
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean1, sd = psd),
xlim = norm_xlim, fill = '#F8766D', alpha = 0.3)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean2, sd = psd),
xlim = norm_xlim, fill = '#009933', alpha = 0.3)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean3, sd = psd),
xlim = norm_xlim, fill = '#00BFC4', alpha = 0.3)
g <- g + geom_rug(data = sDF[(sDF$Group == 'Group 1'),],
mapping = aes(x = xrs),
colour = '#F8766D', sides = 'b')
g <- g + geom_rug(data = sDF[(sDF$Group == 'Group 2'),],
mapping = aes(x = xrs),
colour = '#009933', sides = 'b')
g <- g + geom_rug(data = sDF[(sDF$Group == 'Group 3'),],
mapping = aes(x = xrs),
colour = '#00BFC4', sides = 'b')
g <- g + geom_vline(xintercept = pmean1, size = 1, linetype = 2, colour = 'darkred')
g <- g + geom_vline(xintercept = pmean2, size = 1, linetype = 2, colour = 'green')
g <- g + geom_vline(xintercept = pmean3, size = 1, linetype = 2, colour = 'blue')
g <- g + labs(title = dTitle1, subtitle = dTitle2,
x = 'Populations: X (unit)', y = 'Density')
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = norm_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: dotplot
fn_dotplot <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
type <- 'freq'
xmean1 <- sstat$smean[1]
xmean2 <- sstat$smean[2]
xmean3 <- sstat$smean[3]
xsd1 <- sstat$ssd[1]
xsd2 <- sstat$ssd[2]
xsd3 <- sstat$ssd[3]
rTitle <- bquote( 'Sample Mean & SD: ' ~
bar(x[1]) == .(round(xmean1,2)) ~ ', ' ~
s[1] == .(round(xsd1,2)) ~ '; ' ~
bar(x[2]) == .(round(xmean2,2)) ~ ', ' ~
s[2] == .(round(xsd2,2)) ~ '; ' ~
bar(x[3]) == .(round(xmean3,2)) ~ ', ' ~
s[3] == .(round(xsd3,2)) )
xmean <- mean(sDF$xrs, na.rm = TRUE)
scale_factor <- (norm_xlim[2] - norm_xlim[1])/100
g <- ggplot(data = sDF, aes(x = xrs, fill = Group))
g <- g + geom_dotplot(method = 'dotdensity',
binwidth = scale_factor, # dotsize = 0.3,
stackdir = 'centerwhole', stackratio = 0.7, alpha = 0.7)
g <- g + scale_y_continuous(NULL, breaks = NULL)
g <- g + geom_rug(mapping = aes(colour = Group))
g <- g + geom_vline(xintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + geom_vline(xintercept = xmean1, size = 1, linetype = 2, colour = 'darkred')
g <- g + geom_vline(xintercept = xmean2, size = 1, linetype = 2, colour = 'green')
g <- g + geom_vline(xintercept = xmean3, size = 1, linetype = 2, colour = 'blue')
  g <- g + labs(title = rTitle, x = 'Sample: X (unit)')
  g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
g <- g + theme(legend.position = 'bottom')
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: boxplot
fn_boxplot <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
xmean1 <- sstat$smean[1]
xmean2 <- sstat$smean[2]
xmean3 <- sstat$smean[3]
xsd1 <- sstat$ssd[1]
xsd2 <- sstat$ssd[2]
xsd3 <- sstat$ssd[3]
rTitle <- bquote( 'Sample Mean & SD: ' ~
bar(x[1]) == .(round(xmean1,2)) ~ ', ' ~
s[1] == .(round(xsd1,2)) ~ '; ' ~
bar(x[2]) == .(round(xmean2,2)) ~ ', ' ~
s[2] == .(round(xsd2,2)) ~ '; ' ~
bar(x[3]) == .(round(xmean3,2)) ~ ', ' ~
s[3] == .(round(xsd3,2)) )
xmean <- mean(sDF$xrs, na.rm = TRUE)
  g <- ggplot(data = sDF, aes(x = Group, y = xrs))
g <- g + geom_boxplot(mapping = aes(colour = factor(Group), fill = factor(Group)),
alpha = 0.4, size = 1.0)
g <- g + geom_jitter(mapping=aes(colour = factor(Group)),
width = 0.25, height = 0.001,
shape = 16, size=5, alpha = 0.9)
g <- g + geom_rug(mapping = aes(colour = factor(Group)), sides = 'b')
g <- g + geom_hline(yintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + geom_hline(yintercept = xmean1, size = 1, linetype = 2, colour = 'darkred')
g <- g + geom_hline(yintercept = xmean2, size = 1, linetype = 2, colour = 'green')
g <- g + geom_hline(yintercept = xmean3, size = 1, linetype = 2, colour = 'blue')
g <- g + labs(title = rTitle, x = 'Group', y = 'Variable (unit)')
yscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length = 21)
yscale <- round(yscale, digits = 1)
g <- g + scale_y_continuous(breaks = yscale, limits = norm_xlim)
g <- g + coord_flip()
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
g <- g + theme(legend.position = 'none')
print(g)
}
#_________________________________________________________________________________________
# Standard F Density: Plot1 with Type 1 error
fn_df_plot1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
p_tail <- unname(tail['p_tail'])
p <- unname(fstat['p'])
q_out <- unname(fstat['q_out'])
fcal <- unname(fstat['fcal'])
df1 <- unname(fstat['df1'])
df2 <- unname(fstat['df2'])
hTitle <- bquote(H[0] ~ ':' ~ 'All k Group effects ' ~ tau[k] == 0 ~ '; '
~ H[A] ~ ':' ~ 'At least one Group effect ' ~ tau[k] != 0)
fTitle <- unname(txtTitle['fTitle'])
g <- ggplot(data = NULL, mapping = aes(f_xlim))
if(p_tail == 'lower'){
f_xlim1 <- c(f_xlim[1], q_out)
f_xlim2 <- c(q_out, f_xlim[2])
g <- g + geom_area(stat = 'function', fun = df,
args = list(df1 = df1, df2 = df2), colour = 'darkred',
xlim = f_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = df,
args = list(df1 = df1, df2 = df2), colour = 'darkred',
xlim = f_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
f_xlim1 <- c(f_xlim[1], q_out)
f_xlim2 <- c(q_out, f_xlim[2])
g <- g + geom_area(stat = 'function', fun = df,
args = list(df1 = df1, df2 = df2), colour = 'darkred',
xlim = f_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = df,
args = list(df1 = df1, df2 = df2), colour = 'darkred',
xlim = f_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
g <- g + geom_vline(xintercept = fcal, size = 2, linetype = 1, colour = 'red')
g <- g + labs(title = hTitle, subtitle = fTitle, x = 'Test Statistic: F', y = 'Density')
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = annotateText),
colour = c('blue'), size = 4)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
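# The shaded rejection region above is the usual F-test cut-off. A minimal
# sketch (illustrative values, not app state) of how the critical value and
# p-value relate through qf()/pf():
if (FALSE) {
  df1 <- 2; df2 <- 47; alpha <- 0.05               # hypothetical degrees of freedom
  fcrit <- qf(alpha, df1 = df1, df2 = df2, lower.tail = FALSE)  # critical F
  fcal  <- 4.2                                      # hypothetical observed F
  pval  <- pf(fcal, df1 = df1, df2 = df2, lower.tail = FALSE)   # upper-tail p-value
  fcal > fcrit   # TRUE iff H0 is rejected at level alpha
}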
#_________________________________________________________________________________________
# Mean & CI
fn_mean <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
mDF <- sstat
xmean <- mean(sDF$xrs, na.rm = TRUE)
g <- ggplot(data = mDF, mapping=aes(x = smean, y = Group, colour = Group))
g <- g + geom_point(size = 20, shape = 15, colour = 'blue')
g <- g + geom_errorbarh(aes(xmin = lower, xmax = upper), size = 1.5, colour = 'darkred')
g <- g + labs(title = '', subtitle = 'Group Means & 95% CI',
x = 'Mean & 95% CI (unit)',
y = 'Group')
g <- g + geom_vline(xintercept = xmean, size = 1.5, linetype = 2, colour = 'purple')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
# axis.title.y = element_blank(),
# axis.text.y = element_blank(),
# axis.ticks.y = element_blank(),
# axis.ticks.length = unit(0, "pt"),
axis.line = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(g)
}
#_________________________________________________________________________________________
# Sum of Squares
fn_SS <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
sDF <- SS
sDF$Source <- factor(sDF$Source,
levels = c('Between', 'Within', 'Total'),
labels = c('Between', 'Within', 'Total'))
g <- ggplot(data = sDF, mapping=aes(x = factor(Source), label = SS))
g <- g + geom_bar(mapping = aes(weight=SS), position='dodge',
fill = c('#ffbf00', '#00bfff', '#669900'))
g <- g + geom_text(mapping = aes(y = SS), size = 10, position = position_stack(vjust = 0.5))
g <- g + labs(title = '', subtitle = 'Between, Within and Total Sum of Squares ',
x = 'Source',
y = 'Sum of Squares')
g <- g + coord_flip()
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 10, angle = 0, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
axis.ticks.y = element_blank(),
axis.line = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
g <- g + theme(legend.position = 'bottom')
print(g)
}
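# The three bars above obey the one-way ANOVA identity SST = SSB + SSW.
# A minimal hand computation on hypothetical toy data:
if (FALSE) {
  y <- c(5, 7, 6, 9, 11, 10); g <- factor(rep(c('A', 'B'), each = 3))
  grand <- mean(y); gm <- tapply(y, g, mean)
  SSB <- sum(table(g) * (gm - grand)^2)     # between-group sum of squares
  SSW <- sum((y - gm[as.character(g)])^2)   # within-group sum of squares
  SST <- sum((y - grand)^2)                 # total sum of squares
  all.equal(SST, SSB + SSW)                 # TRUE
}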
#_________________________________________________________________________________________
# Sum of Squares as Stacked: proportion
fn_SS_stack <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
sDF <- SS[1:2,]
sDF$Source <- as.character(sDF$Source)
sDF$Source <- factor(sDF$Source,
levels = c('Between', 'Within'),
labels = c('Between', 'Within'))
sDF$pSS <- round(sDF$SS / sum(sDF$SS), 2)
g <- ggplot(data = sDF, mapping=aes(x = factor('SS'), y = pSS, fill = Source, label = pSS))
g <- g + geom_col(width = 0.3)
g <- g + geom_text(mapping = aes(y = pSS), size = 10, position = position_stack(vjust = 0.5))
g <- g + scale_fill_manual(values = c('#ffbf00', '#00bfff'))
g <- g + labs(title = '', subtitle = 'Between and Within Sum of Squares ',
x = 'Source',
y = 'Sum of Squares')
g <- g + coord_flip()
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 10, angle = 0, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
axis.line = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
g <- g + theme(legend.position = 'bottom')
print(g)
}
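# The 'Between' share of the stacked bar is the proportion of total variation
# explained by Group, i.e. it equals R-squared (eta-squared) from the fitted
# model. A minimal check on hypothetical toy data:
if (FALSE) {
  y <- c(5, 7, 6, 9, 11, 10); g <- factor(rep(c('A', 'B'), each = 3))
  a <- anova(lm(y ~ g))
  a$`Sum Sq`[1] / sum(a$`Sum Sq`)   # Between / Total
  summary(lm(y ~ g))$r.squared      # same value
}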
#_________________________________________________________________________________________
# Mean Squares
fn_MS <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
sDF <- SS[1:2,]
sDF$Source <- as.character(sDF$Source)
sDF$Source <- factor(sDF$Source,
levels = c('Between', 'Within'),
labels = c('Between', 'Within'))
fTitle <- unname(txtTitle['fTitle'])
g <- ggplot(data = sDF, mapping=aes(x = factor(Source), label = MS))
g <- g + geom_bar(mapping = aes(weight=MS), position='dodge',
fill = c('#ffbf00', '#00bfff'))
g <- g + geom_text(mapping = aes(y = MS), size = 10, position = position_stack(vjust = 0.5))
# g <- g + geom_text(data = sDF, mapping = aes(x = Source, y = 0),
# label = levels(sDF$Source), position = position_stack(vjust = 0.5), size = 4)
g <- g + labs(title = '', subtitle = fTitle,
x = 'Source',
y = 'Mean Squares')
g <- g + coord_flip()
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 10, angle = 0, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
axis.ticks.y = element_blank(),
axis.line = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
g <- g + theme(legend.position = 'bottom')
print(g)
}
#_________________________________________________________________________________________
# Mean Difference & CI
fn_mean_diff <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
mDF <- mean_diff
g <- ggplot(data = mDF, mapping=aes(x = mean_diff, y = comp, colour = comp))
g <- g + geom_errorbarh(aes(xmin = lower, xmax = upper), size = 1.5, colour = '#0000cc')
g <- g + geom_point(size = 20, shape = 15, colour = '#ff9966')
g <- g + labs(title = '', subtitle = 'Mean Difference between Groups & 95% CI',
x = 'Mean difference & 95% CI (unit)',
y = 'Comparisons')
g <- g + geom_vline(xintercept = 0, size = 1.5, linetype = 2, colour = 'purple')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
axis.line = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(g)
}
#_________________________________________________________________________________________
# Report preparation
fn_Report <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
H <- h4(HTML("Hypothesis:
H<sub>0</sub>: τ <sub>1</sub> =
τ <sub>2</sub> =
τ <sub>3</sub>
H<sub>1</sub>: At least one τ <sub>k</sub> ≠ 0"), style="color:blue")
pval <- paste0('Probability = ', fstat['p'], '; Tail: ', unname(tail))
names(sDF) <- c('Group', 'X')
sDF$SampleID <- 1:nrow(sDF)
sDF <- sDF[, c('SampleID', 'Group', 'X')]
sstat <- sstat[, 1:7]
names(sstat) <- c('Group', 'N', 'Population Mean', 'Population SD',
'Sample Mean', 'Sample SD', 'SE')
# Change Group level for presentation
nDF <- sDF
nDF$Group <- factor(nDF$Group,
levels = c('Group 1', 'Group 2', 'Group 3'),
labels = c('1', '2', '3'))
fm1 <- lm(X ~ Group, data = nDF)
fm2 <- aov(X ~ Group, data = nDF)
afm <- anova(fm1)
sfm <- summary(fm1)
hsd <- as.data.frame(round(TukeyHSD(fm2)$Group, 4))
names(hsd) <- c('Mean Diff', '95% LCL', '95% UCL', 'Adj P-value')
row.names(hsd) <- c('Group 2-Group 1', 'Group 3-Group 1', 'Group 3-Group 2')
rst <- list(ANOVA = afm, SUMMARY = sfm, `Tukey's Honest Significant Differences` = hsd)
rpt <- list(H = H, sDF = sDF, sstat = sstat, rst = rst)
}
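# fn_Report expects the list produced by fn_InputData (sDF, sstat, fstat, tail, ...).
# A minimal sketch of its model-fitting core on hypothetical data: lm() and aov()
# give the same ANOVA table, and TukeyHSD() requires the aov object.
if (FALSE) {
  d <- data.frame(X = rnorm(30, rep(c(20, 18, 19), each = 10), 2),
                  Group = factor(rep(1:3, each = 10)))
  anova(lm(X ~ Group, data = d))      # one-way ANOVA table
  TukeyHSD(aov(X ~ Group, data = d))  # pairwise mean differences with 95% CIs
}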
#_________________________________________________________________________________________
# Source file: ABACUS/inst/app_anova/global.R
# Shiny server for ANOVA
#_________________________________________________________________________________________
shinyServer(function(input, output) {
#_________________________________________________________________________________________
# eventReactive function
# Reactive
inputVal_eventReactive <- eventReactive(input$cmdUpdate, {
iseed <- input$numRN + as.integer(input$cmdUpdate)
pmean1 <- input$pmean1
pmean2 <- input$pmean2
pmean3 <- input$pmean3
psd <- input$psd
n1 <- input$n1
n2 <- input$n2
n3 <- input$n3
p <- input$p # user probability
p_tail <- input$p_tail
fn_InputData_repeatable <- repeatable(rngfunc = fn_InputData, seed = iseed)
fn_InputData_repeatable(pmean1 = pmean1,
pmean2 = pmean2,
pmean3 = pmean3,
psd = psd,
n1 = n1, n2 = n2, n3 = n3,
p = p, p_tail = p_tail)
})
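# Note: shiny::repeatable() wraps fn_InputData so that every call replays the
# random-number generator from a fixed seed. Offsetting the user seed by the
# Update click count (input$cmdUpdate increments on each click) draws a fresh
# but fully reproducible sample per click, while re-entering the same seed
# value reproduces the same data set.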
#_________________________________________________________________________________________
# Reactive function
inputVal_reactive <- reactive({
pmean1 <- input$pmean1
pmean2 <- input$pmean2
pmean3 <- input$pmean3
psd <- input$psd
n1 <- input$n1
n2 <- input$n2
n3 <- input$n3
p <- input$p # user probability
p_tail <- input$p_tail
fn_InputData(pmean1 = pmean1,
pmean2 = pmean2,
pmean3 = pmean3,
psd = psd,
n1 = n1, n2 = n2, n3 = n3,
p = p, p_tail = p_tail)
})
#_________________________________________________________________________________________
# Output
output$dnorm_plot <- renderPlot({
if(input$chkUpdate){
fn_dnorm(inputVal_reactive())
} else {
fn_dnorm(inputVal_eventReactive())
}
})
output$dotplot <- renderPlot({
if(input$chkUpdate){
fn_dotplot(inputVal_reactive())
} else {
fn_dotplot(inputVal_eventReactive())
}
})
output$boxplot <- renderPlot({
if(input$chkUpdate){
fn_boxplot(inputVal_reactive())
} else {
fn_boxplot(inputVal_eventReactive())
}
})
output$ss_plot <-renderPlot({
if(input$chkUpdate){
fn_SS(inputVal_reactive())
} else {
fn_SS(inputVal_eventReactive())
}
})
output$ss_stack <- renderPlot({
if(input$chkUpdate){
fn_SS_stack(inputVal_reactive())
} else {
fn_SS_stack(inputVal_eventReactive())
}
})
output$ms_plot <-renderPlot({
if(input$chkUpdate){
fn_MS(inputVal_reactive())
} else {
fn_MS(inputVal_eventReactive())
}
})
output$mdiff_plot1 <- renderPlot({
if(input$chkUpdate){
fn_mean_diff(inputVal_reactive())
} else {
fn_mean_diff(inputVal_eventReactive())
}
})
output$df_plot1 <- renderPlot({
if(input$chkUpdate){
fn_df_plot1(inputVal_reactive())
} else {
fn_df_plot1(inputVal_eventReactive())
}
})
output$H <- renderUI({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['H']]
} else {
fn_Report(inputVal_eventReactive())[['H']]
}
})
dt_sample <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sDF']]
} else {
fn_Report(inputVal_eventReactive())[['sDF']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$dt_sample <- renderUI(fluidPage(dt_sample, style="overflow-y:scroll; height: 300px"))
output$dt_sstat <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sstat']]
} else {
fn_Report(inputVal_eventReactive())[['sstat']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$txt_rst <- renderPrint({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['rst']]
} else {
fn_Report(inputVal_eventReactive())[['rst']]
}
})
#_________________________________________________________________________________________
})
#_________________________________________________________________________________________
# Source file: ABACUS/inst/app_anova/server.R
# Shiny ui for ANOVA
library(shiny)
library(ggplot2)
source('global.R')
# Define UI for the application
shinyUI(pageWithSidebar(
# Application title
headerPanel(title = div(img(src='abacus.png', align = 'left'),
"Hypothesis Testing: One-way Analysis of Variance"),
windowTitle = "Hypothesis Testing: One-way Analysis of Variance"),
sidebarPanel(
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Simulation Features")),
tags$hr(style="border-color: purple;"),
checkboxInput(inputId = 'chkUpdate',
label = tags$strong('Check the box to update instantly',
style="color:darkblue"),
value = FALSE, width = '100%'),
actionButton(inputId = 'cmdUpdate', label = 'Update'),
tags$hr(style="border-color: green;"),
numericInput(inputId = 'numRN',
label = tags$p('Seed value for generating the random number',
style="color:darkblue"),
value = 12345, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Population Parameters")),
tags$hr(style="border-color: purple;"),
numericInput(inputId = 'pmean1',
label = tags$strong('True Population 1 Mean: ',
HTML("μ<sub>1"), style="color:darkblue"),
value = 20),
numericInput(inputId = 'pmean2',
label = tags$strong('True Population 2 Mean: ',
HTML("μ<sub>2"), style="color:darkblue"),
value = 18),
numericInput(inputId = 'pmean3',
label = tags$strong('True Population 3 Mean: ',
HTML("μ<sub>3"), style="color:darkblue"),
value = 19),
tags$hr(style='border-color: green;'),
numericInput(inputId = 'psd',
label = tags$strong('True Population Standard Deviation: ',
HTML("σ"), style="color:darkblue"),
value = 2, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Sample Characteristics")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'n1',
label = tags$strong('Sample 1 (Group 1) Size: ',
HTML("n<sub>1"), style="color:darkblue"),
min = 10, max = 50, value = 15, step = 1),
sliderInput(inputId = 'n2',
label = tags$strong('Sample 2 (Group 2) Size: ',
HTML("n<sub>2"), style="color:darkblue"),
min = 10, max = 50, value = 20, step = 1),
sliderInput(inputId = 'n3',
label = tags$strong('Sample 3 (Group 3) Size: ',
HTML("n<sub>3"), style="color:darkblue"),
min = 10, max = 50, value = 20, step = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Distribution Function")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'p',
label = tags$strong('Type 1 Error', style="color:darkblue"),
value = 0.05, min = 0.01, max = 0.10, step = 0.01),
radioButtons(inputId = 'p_tail',
label = tags$strong('Probability Tail', style="color:darkblue"),
choices = c('Lower tail (Left tail)' = 'lower',
'Upper tail (Right tail)' = 'upper'),
selected = 'upper'),
tags$hr(style="border-color: purple;")
),
mainPanel(
tabsetPanel(
tabPanel(title = 'Population',
plotOutput(outputId = 'dnorm_plot', height = '800')),
tabPanel(title = 'Sample',
plotOutput(outputId = 'dotplot', height = '400'),
plotOutput(outputId = 'boxplot', height = '400')),
tabPanel(title = 'SS & MS',
plotOutput(outputId = 'ss_plot', height = '300'),
plotOutput(outputId = 'ss_stack', height = '300'),
plotOutput(outputId = 'ms_plot', height = '200')),
tabPanel(title = 'Test Statistic',
plotOutput(outputId = 'mdiff_plot1', height = '200'),
plotOutput(outputId = 'df_plot1', height = '600')),
tabPanel(title = 'Summary',
h1("Hypothesis"),
uiOutput('H', height = '20px'),
tags$hr(style="border-color: purple;"),
h2("Sample"),
tableOutput(outputId = 'dt_sample'),
tags$hr(style="border-color: purple;"),
tags$hr(style="border-color: purple;"),
h2("Summary Statistics"),
tableOutput(outputId = 'dt_sstat'),
tags$hr(style="border-color: purple;"),
h2("Model Outputs"),
verbatimTextOutput(outputId = 'txt_rst'),
tags$head(tags$style("#txt_rst{overflow-y:scroll; height: 300px;}")),
tags$hr(style="border-color: purple;"))
)
)
))
# Source file: ABACUS/inst/app_anova/ui.R
# Shiny global function for Normal distribution
library(shiny)
library(ggplot2)
#_________________________________________________________________________________________
# Function to fit Model
fn_InputData <- function(pmean, psd, n, bins, type,
p, p_tail, cs_xscale){
xrs <- rnorm(n = n, mean = pmean, sd = psd)
smean <- round(mean(xrs, na.rm = TRUE), 2)
ssd <- round(sd(xrs, na.rm = TRUE), 2)
sse <- round(ssd / sqrt(n), 2)
sDF <- data.frame(xrs = xrs) # density not required
sstat <- data.frame(n = n,
pmean = pmean,
psd = psd,
bins = bins,
smean = smean,
ssd = ssd,
sse = sse)
sstat$lower <- sstat$smean - 1.96*sstat$sse
sstat$upper <- sstat$smean + 1.96*sstat$sse
xmin <- pmean - 3.5*psd
xmax <- pmean + 3.5*psd
norm_xlim <- c(xmin, xmax)
q_out <- switch(EXPR = p_tail,
lower = qnorm(p = p, mean = pmean, sd = psd, lower.tail = TRUE),
upper = qnorm(p = p, mean = pmean, sd = psd, lower.tail = FALSE),
both = c(qnorm(p = p/2, mean = pmean, sd = psd, lower.tail = TRUE),
qnorm(p = p/2, mean = pmean, sd = psd, lower.tail = FALSE)))
zstat <- c(p = p, q_out = round(q_out, 2))
tail <- c(p_tail = p_tail)
if(p_tail == 'lower' | p_tail == 'upper'){
q_out_txt <- paste0(' p = ', zstat['p'], '; q = ', zstat['q_out'] )
xpos1 <- zstat['q_out']
} else {
q_out_txt <- paste0(' p = ', zstat['p'],
'; q = ', round(q_out[1], 2), ', ', round(q_out[2], 2) )
xpos1 <- zstat['q_out1']
}
qText = q_out_txt
annotateText <- c('pText')
annotateDF <- data.frame(
xpos = c(xpos1),
ypos = c(Inf),
annotateText = c(qText),
hjustvar = c(0) ,
vjustvar = c(2)) #<- adjust
dTitle <- paste0( 'Population: Mean = ', round(pmean,2), ', SD = ', round(psd,2) )
rTitle <- paste0( 'Sample: Mean = ', round(smean,2), ', SD = ', round(ssd,2) )
txtTitle <- c(dTitle = dTitle, rTitle = rTitle)
out <- list(sDF = sDF, sstat = sstat, zstat = zstat,
type = type, tail = tail,
norm_xlim = norm_xlim,
annotateDF = annotateDF,
txtTitle = txtTitle,
cs_xscale = cs_xscale)
return(out)
}
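# The switch() above returns one cut-off for a one-tailed probability and a
# pair of cut-offs (p/2 in each tail) for the two-tailed case. A minimal
# sketch with the standard normal (illustrative values, not app state):
if (FALSE) {
  p <- 0.05
  qnorm(p, lower.tail = TRUE)                      # lower-tail cut-off
  qnorm(p, lower.tail = FALSE)                     # upper-tail cut-off
  c(qnorm(p/2), qnorm(p/2, lower.tail = FALSE))    # two-tailed pair, approx -1.96, 1.96
}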
#_________________________________________________________________________________________
# Sample distribution: Histogram and Density plot
fn_rnorm <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
smean <- sstat$smean[1]
ssd <- sstat$ssd[1]
bins <- sstat$bins[1]
rTitle <- bquote( 'Population Mean & SD: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) ~ '; ' ~
'Sample Mean & SD: ' ~
bar(x) == .(smean) ~ ', ' ~
s == .(ssd) )
g <- ggplot(data = sDF, aes(x = xrs))
if(type == 'freq'){
g <- g + geom_histogram(bins = bins,
colour = 'purple', fill = 'darkolivegreen1')
} else {
g <- g + geom_histogram(mapping = aes(x = xrs, y = ..density..),
bins = bins, colour = 'purple', fill = 'darkolivegreen1')
g <- g + geom_density(mapping = aes(x = xrs, y =..density.., colour = 'Empirical Distribution'),
n = 1000, size = 1)
g <- g + stat_function(fun = dnorm, mapping = aes(colour = 'Theoretical Normal Distribution'),
args = list(mean = pmean, sd = psd),
xlim = norm_xlim, n = 1000, geom = 'line', size = 1)
g <- g + scale_colour_manual(name = 'Density', values = c('red', 'blue'))
}
if(type == 'freq'){
g <- g + labs(title = rTitle, x = 'X', y = 'Frequency')
} else {
g <- g + labs(title = rTitle, x = 'X', y = 'Density')
}
g <- g + geom_rug(colour = '#F8766D', sides = 'b')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
g <- g + theme(legend.position = 'bottom')
print(g)
}
#_________________________________________________________________________________________
# Normal distribution: Density plot
fn_dnorm1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
smean <- sstat$smean[1]
ssd <- sstat$ssd[1]
p <- unname(zstat['p'])
p_tail <- unname(tail['p_tail'])
if(p_tail == 'both'){
q_out <- unname(c(zstat['q_out1'], zstat['q_out2']))
} else {
q_out <- unname(zstat['q_out'])
}
p_out <- unname(zstat['p_out'])
dTitle <- bquote( 'Population Mean & SD: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) )
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
if(p_tail == 'lower'){
norm_xlim1 <- c(norm_xlim[1], q_out)
norm_xlim2 <- c(q_out, norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
norm_xlim1 <- c(norm_xlim[1], q_out)
norm_xlim2 <- c(q_out, norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
norm_xlim1 <- c(norm_xlim[1], q_out[1])
norm_xlim2 <- c(q_out[1], q_out[2])
norm_xlim3 <- c(q_out[2], norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim3, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + labs(title = dTitle, x = 'Populations: X (unit)', y = 'Density')
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = annotateText),
colour = c('blue'), size = 4)
if(pmean == 0 & psd == 1){
g <- g + labs(title = dTitle, x = 'z', y = 'Density')
} else {
g <- g + labs(title = dTitle, x = 'X', y = 'Density')
}
yval <- mean(dnorm(x = pmean, mean = pmean, sd = psd))/2
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = norm_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
print(g)
}
#_________________________________________________________________________________________
# Normal distribution: Density plot (Center & Scale)
fn_dnorm2 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
smean <- sstat$smean[1]
ssd <- sstat$ssd[1]
p <- unname(zstat['p'])
p_tail <- unname(tail['p_tail'])
if(p_tail == 'both'){
q_out <- unname(c(zstat['q_out1'], zstat['q_out2']))
} else {
q_out <- unname(zstat['q_out'])
}
p_out <- unname(zstat['p_out'])
dTitle <- bquote( 'Population Mean & SD: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) )
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
if(p_tail == 'lower'){
norm_xlim1 <- c(norm_xlim[1], q_out)
norm_xlim2 <- c(q_out, norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
norm_xlim1 <- c(norm_xlim[1], q_out)
norm_xlim2 <- c(q_out, norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
norm_xlim1 <- c(norm_xlim[1], q_out[1])
norm_xlim2 <- c(q_out[1], q_out[2])
norm_xlim3 <- c(q_out[2], norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim3, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + labs(title = dTitle, x = 'Populations: X (unit)', y = 'Density')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = annotateText),
colour = c('blue'), size = 4)
if(pmean == 0 & psd == 1){
g <- g + labs(title = dTitle, x = 'z', y = 'Density')
} else {
g <- g + labs(title = dTitle, x = 'X', y = 'Density')
}
yval <- mean(dnorm(x = pmean, mean = pmean, sd = psd))/2
g <- g + xlim(cs_xscale)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
print(g)
}
#_________________________________________________________________________________________
# Normal distribution: Density plot (No shading)
fn_dnorm3 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
smean <- sstat$smean[1]
ssd <- sstat$ssd[1]
p <- unname(zstat['p'])
p_tail <- unname(tail['p_tail'])
if(p_tail == 'both'){
q_out <- unname(c(zstat['q_out1'], zstat['q_out2']))
} else {
q_out <- unname(zstat['q_out'])
}
p_out <- unname(zstat['p_out'])
dTitle <- bquote( 'Population Mean & SD: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) )
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
if(p_tail == 'lower'){
norm_xlim1 <- c(norm_xlim[1], q_out)
norm_xlim2 <- c(q_out, norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
norm_xlim1 <- c(norm_xlim[1], q_out)
norm_xlim2 <- c(q_out, norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
norm_xlim1 <- c(norm_xlim[1], q_out[1])
norm_xlim2 <- c(q_out[1], q_out[2])
norm_xlim3 <- c(q_out[2], norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ffffff', alpha = 0.7) # No fill
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim3, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + labs(title = dTitle, x = 'Populations: X (unit)', y = 'Density')
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = annotateText),
colour = c('blue'), size = 4)
if(pmean == 0 & psd == 1){
g <- g + labs(title = dTitle, x = 'z', y = 'Density')
} else {
g <- g + labs(title = dTitle, x = 'X', y = 'Density')
}
yval <- mean(dnorm(x = pmean, mean = pmean, sd = psd))/2
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = norm_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
print(g)
}
#_________________________________________________________________________________________
# Normal distribution: Cumulative probability distribution plot
fn_pnorm <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
p <- unname(zstat['p'])
p_tail <- unname(tail['p_tail'])
if(p_tail == 'both'){
q_out <- unname(c(zstat['q_out1'], zstat['q_out2']))
} else {
q_out <- unname(zstat['q_out'])
}
p_out <- unname(zstat['p_out'])
dTitle <- bquote( 'Population Mean & SD: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) )
DF <- data.frame(xr = rnorm(n = 10000, mean = pmean, sd = psd))
g <- ggplot(data = DF, aes(x = xr))
g <- g + stat_function(fun = pnorm,
args = list(mean = pmean, sd = psd, lower.tail = TRUE),
xlim = norm_xlim, geom = 'line',
color = 'darkred', size = 1)
if(p_tail == 'lower' | p_tail == 'upper'){
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = 4,
label = annotateText),
colour = c('blue'), size = 4)
if(pmean == 0 & psd == 1){
g <- g + labs(title = dTitle, x = 'z', y = 'Cumulative Probability')
} else {
g <- g + labs(title = dTitle, x = 'X', y = 'Cumulative Probability')
}
pexp <- ''
g <- g + geom_text(data = annotateDF[1,],
aes(x = pmean, y = 0.8),
label = pexp, parse = TRUE, size = 8, colour = 'blue')
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = norm_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
print(g)
}
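# pnorm() and qnorm() are inverses: the cumulative curve drawn above maps a
# quantile to a probability, and the dashed line marks qnorm() of the chosen p.
# Minimal numeric check:
if (FALSE) {
  p <- 0.05
  pnorm(qnorm(p))        # recovers 0.05
  qnorm(pnorm(1.6449))   # recovers ~1.6449
}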
#_________________________________________________________________________________________
# Source file: ABACUS/inst/app_dnorm/global.R
# Shiny server for Normal distribution
#_________________________________________________________________________________________
shinyServer(function(input, output) {
#_________________________________________________________________________________________
# Reactive
inputVal_eventReactive <- eventReactive(input$cmdUpdate, {
iseed <- input$numRN + as.integer(input$cmdUpdate)
pmean <- input$pmean
psd <- input$psd
n <- input$n
type <- input$type
bins <- input$bins
p <- input$p # user probability
p_tail <- input$p_tail
cs_xscale <- input$cs_xscale
fn_InputData_repeatable <- repeatable(rngfunc = fn_InputData, seed = iseed)
fn_InputData_repeatable(pmean = pmean, psd = psd,
n = n, bins = bins,
type = type,
p = p, p_tail = p_tail,
cs_xscale = cs_xscale)
})
inputVal_reactive <- reactive({
pmean <- input$pmean
psd <- input$psd
n <- input$n
type <- input$type
bins <- input$bins
p <- input$p # user probability
p_tail <- input$p_tail
cs_xscale <- input$cs_xscale
fn_InputData(pmean = pmean, psd = psd,
n = n, bins = bins,
type = type,
p = p, p_tail = p_tail,
cs_xscale = cs_xscale)
})
#_________________________________________________________________________________________
output$rnorm_plot <- renderPlot({
if(input$chkUpdate){
fn_rnorm(inputVal_reactive())
} else {
fn_rnorm(inputVal_eventReactive())
}
})
output$dnorm1_plot <- renderPlot({
if(input$chkUpdate){
fn_dnorm1(inputVal_reactive())
} else {
fn_dnorm1(inputVal_eventReactive())
}
})
output$dnorm2_plot <- renderPlot({
if(input$chkUpdate){
fn_dnorm2(inputVal_reactive())
} else {
fn_dnorm2(inputVal_eventReactive())
}
})
output$pnorm_plot <- renderPlot({
if(input$chkUpdate){
fn_pnorm(inputVal_reactive())
} else {
fn_pnorm(inputVal_eventReactive())
}
})
output$dnorm3_plot <- renderPlot({
if(input$chkUpdate){
fn_dnorm3(inputVal_reactive())
} else {
fn_dnorm3(inputVal_eventReactive())
}
})
#_________________________________________________________________________________________
})
#_________________________________________________________________________________________
# Source file: ABACUS/inst/app_dnorm/server.R
# Shiny ui for Normal distribution
source('global.R')
# Define UI for the application
shinyUI(pageWithSidebar(
headerPanel(title = div(img(src='abacus.png', align = 'left'),
"Normal Distribution"),
windowTitle = "Normal Distribution"),
sidebarPanel(
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Simulation Features")),
tags$hr(style="border-color: purple;"),
checkboxInput(inputId = 'chkUpdate',
label = tags$strong('Check the box to update instantly', style="color:darkblue"),
value = FALSE, width = '100%'),
actionButton(inputId = 'cmdUpdate', label = 'Update'),
tags$hr(style="border-color: purple;"),
numericInput(inputId = 'numRN',
label = tags$p('Seed value for generating the random number', style="color:darkblue"),
value = 12345, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Population Parameters")),
tags$hr(style="border-color: purple;"),
numericInput(inputId = 'pmean',
label = tags$strong('True Population Mean: ', HTML("μ"), style="color:darkblue"),
value = 20),
numericInput(inputId = 'psd',
label = tags$strong('True Population Standard Deviation: ', HTML("σ"), style="color:darkblue"),
value = 4, min = 1),
sliderInput(inputId = 'cs_xscale',
label = tags$p('X-axis scale for the center and scale effect', style="color:darkblue"),
min = -20, max = 1000, value = c(-20,60), step = 20),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Sample Characteristics")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'n',
label = tags$strong('Sample: Number of observations', style="color:darkblue"),
value = 50, min = 0, max = 10000),
sliderInput(inputId = 'bins',
label = tags$strong('Number of bins', style="color:darkblue"),
value = 20, min = 1, max = 1000),
radioButtons(inputId = 'type',
label = tags$strong('Plot type:', style="color:darkblue"),
choices = c('Frequency Distribution' = 'freq',
'Overlay Normal Density' = 'density')),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Distribution Function")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'p',
label = tags$strong('Cumulative probability', style="color:darkblue"),
min = 0.01, max = 1, value = 0.05, step = 0.01),
radioButtons(inputId = 'p_tail',
label = tags$strong('Probability Tail', style="color:darkblue"),
choices = c('Lower tail (Left tail)' = 'lower',
'Upper tail (Right tail) ' = 'upper',
'Both tails (Two-tailed)' = 'both'),
selected = 'both'),
tags$hr(style="border-color: purple;")
),
mainPanel(
tabsetPanel(
tabPanel(title = 'Sample',
plotOutput(outputId = 'rnorm_plot', height = '800')),
tabPanel(title = 'Distribution',
plotOutput(outputId = 'dnorm1_plot', height = '400'),
plotOutput(outputId = 'dnorm2_plot', height = '400')),
tabPanel(title = 'Probability & Quantile',
plotOutput(outputId = 'pnorm_plot', height = '400'),
plotOutput(outputId = 'dnorm3_plot', height = '400'))
)
)
))
# Source file: ABACUS/inst/app_dnorm/ui.R
# Shiny global function for Normal distribution
library(shiny)
library(ggplot2)
#_________________________________________________________________________________________
# Function to fit Model
fn_InputData <- function(pmean, psd, df1, p, p_tail){
sstat <- data.frame(pmean = pmean, psd = psd)
xmin <- -5
xmax <- 5
norm_xlim <- c(xmin, xmax)
tr <- rt(n = 10000, df = df1)
t_xlim <- c(min(tr), max(tr))
if(max(abs(t_xlim)) > 15) t_xlim <- c(-15, 15)
rm(tr)
z_q_out <- switch(EXPR = p_tail,
lower = qnorm(p = p, mean = pmean, sd = psd, lower.tail = TRUE),
upper = qnorm(p = p, mean = pmean, sd = psd, lower.tail = FALSE),
both = c(qnorm(p = p/2, mean = pmean, sd = psd, lower.tail = TRUE),
qnorm(p = p/2, mean = pmean, sd = psd, lower.tail = FALSE)))
zstat <- c(p = p, q_out = round(z_q_out, 2))
tail <- c(p_tail = p_tail)
t_q_out <- switch(EXPR = p_tail,
lower = qt(p = p, df = df1, lower.tail = TRUE),
upper = qt(p = p, df = df1, lower.tail = FALSE),
both = c(qt(p = p/2, df = df1, lower.tail = TRUE),
qt(p = p/2, df = df1, lower.tail = FALSE)))
tstat <- c(df1 = df1, p = p, q_out = round(t_q_out, 2))
if(p_tail == 'lower' | p_tail == 'upper'){
q_out_txt <- paste0(' p = ', zstat['p'], '; q = ', zstat['q_out'] )
xpos1 <- zstat['q_out']
} else {
q_out_txt <- paste0(' p = ', zstat['p'],
'; q = ', round(z_q_out[1], 2), ', ', round(z_q_out[2], 2) )
xpos1 <- zstat['q_out1']
}
qText = q_out_txt
annotateText <- c('pText')
z_annotateDF <- data.frame(
xpos = c(xpos1),
ypos = c(Inf),
annotateText = c(qText),
hjustvar = c(0) ,
vjustvar = c(2)) #<- adjust
if(p_tail == 'lower' | p_tail == 'upper'){
q_out_txt <- paste0(' p = ', tstat['p'], '; q = ', tstat['q_out'] )
xpos1 <- tstat['q_out']
} else {
q_out_txt <- paste0(' p = ', tstat['p'],
'; q = ', round(t_q_out[1], 2), ', ', round(t_q_out[2], 2) )
xpos1 <- tstat['q_out1']
}
qText = q_out_txt
annotateText <- c('pText')
t_annotateDF <- data.frame(
xpos = c(xpos1),
ypos = c(Inf),
annotateText = c(qText),
hjustvar = c(0) ,
vjustvar = c(2)) #<- adjust
dTitle <- paste0( 'Standard Normal Distribution: Mean = ', round(pmean,2), ', SD = ', round(psd,2) )
tTitle <- paste0( "Student's t Distribution ", ', df = ', df1 )
txtTitle <- c(dTitle = dTitle, tTitle = tTitle)
out <- list(sstat = sstat, tail = tail,
zstat = zstat, tstat = tstat,
norm_xlim = norm_xlim, t_xlim = t_xlim,
z_annotateDF = z_annotateDF,
t_annotateDF = t_annotateDF,
txtTitle = txtTitle)
return(out)
}
#_________________________________________________________________________________________
# Density plot: Combined Normal & t distribution
fn_dnorm_dt <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
df1 <- unname(tstat['df1'])
dTitle <- bquote( 'Standard Normal Distribution: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) ~ '; ' ~
"Student's t Distribution: df = " ~ .(df1))
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd),
xlim = norm_xlim, fill = '#F8766D', alpha = 0.3)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1),
xlim = norm_xlim, fill = '#00BFC4', alpha = 0.3)
g <- g + geom_vline(xintercept = 0, size = 1, linetype = 2, colour = 'darkred')
g <- g + labs(title = dTitle, x = 'X', y = 'P(X)')
aDF <- data.frame(xpos = 0, ypos = Inf,
txt = c('Standard Normal Distribution', "Student's t Distribution"),
hjustvar = c(0, 0), vjustvar = c(2, 4))
g <- g + geom_text(data = aDF,
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = txt),
colour = c('#F8766D','#00BFC4'), size = 6)
xscale <- seq(from = t_xlim[1], to = t_xlim[2], length.out = 15)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
print(g)
}
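# As df grows, the t density drawn above converges to the standard normal,
# which is the point of overlaying the two curves. Minimal numeric check:
if (FALSE) {
  qt(0.975, df = 5)      # ~2.57: heavier tails at small df
  qt(0.975, df = 500)    # ~1.965
  qnorm(0.975)           # ~1.96: the limiting value
}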
#_________________________________________________________________________________________
# Normal distribution: Density plot
fn_dnorm <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
p_tail <- unname(tail['p_tail'])
p <- unname(zstat['p'])
annotateDF <- z_annotateDF
if(p_tail == 'both'){
q_out <- unname(c(zstat['q_out1'], zstat['q_out2']))
} else {
q_out <- unname(zstat['q_out'])
}
dTitle <- bquote( 'Standard Normal Distribution: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) )
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
if(p_tail == 'lower'){
norm_xlim1 <- c(norm_xlim[1], q_out)
norm_xlim2 <- c(q_out, norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
norm_xlim1 <- c(norm_xlim[1], q_out)
norm_xlim2 <- c(q_out, norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
norm_xlim1 <- c(norm_xlim[1], q_out[1])
norm_xlim2 <- c(q_out[1], q_out[2])
norm_xlim3 <- c(q_out[2], norm_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim3, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + geom_vline(xintercept = pmean, size = 1, linetype = 1, colour = 'blue')
g <- g + labs(title = dTitle, x = 'z', y = 'Density')
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = annotateText),
colour = c('blue'), size = 4)
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], by = 2)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = norm_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Standard t Density: Plot1 with Type 1 error
fn_dt <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
annotateDF <- t_annotateDF
p_tail <- unname(tail['p_tail'])
p <- unname(tstat['p'])
if(p_tail == 'both'){
q_out <- unname(c(tstat['q_out1'], tstat['q_out2']))
} else {
q_out <- unname(tstat['q_out'])
}
df1 <- unname(tstat['df1'])
tTitle <- unname(txtTitle['tTitle'])
g <- ggplot(data = NULL, mapping = aes(t_xlim))
if(p_tail == 'lower'){
t_xlim1 <- c(t_xlim[1], q_out)
t_xlim2 <- c(q_out, t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
t_xlim1 <- c(t_xlim[1], q_out)
t_xlim2 <- c(q_out, t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
t_xlim1 <- c(t_xlim[1], q_out[1])
t_xlim2 <- c(q_out[1], q_out[2])
t_xlim3 <- c(q_out[2], t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim3, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + geom_vline(xintercept = 0, size = 1, linetype = 1, colour = 'blue')
g <- g + labs(title = tTitle, x = 't', y = 'Density')
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = annotateText),
colour = c('blue'), size = 4)
xscale <- seq(from = t_xlim[1], to = t_xlim[2], by = 1)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = t_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Source file: ABACUS/inst/app_dnorm_dt/global.R
# Shiny server for Normal distribution
#_________________________________________________________________________________________
shinyServer(function(input, output) {
#_________________________________________________________________________________________
# Reactive
inputVal_reactive <- reactive({
pmean <- 0
psd <- 1
df1 <- input$df
p <- input$p # user probability
p_tail <- input$p_tail
fn_InputData(pmean = pmean, psd = psd,
df1 = df1,
p = p, p_tail = p_tail)
})
#_________________________________________________________________________________________
output$dnorm_dt_plot <- renderPlot({
fn_dnorm_dt(inputVal_reactive())
})
output$dnorm_plot <- renderPlot({
fn_dnorm(inputVal_reactive())
})
output$dt_plot <- renderPlot({
fn_dt(inputVal_reactive())
})
#_________________________________________________________________________________________
})
#_________________________________________________________________________________________
# Source file: ABACUS/inst/app_dnorm_dt/server.R
# Shiny ui for Normal & t distributions
source('global.R')
# Define UI for the application
shinyUI(pageWithSidebar(
headerPanel(title = div(img(src='abacus.png', align = 'left'),
"Properties of Standard Normal and Student's t Distributions"),
windowTitle = "Properties of Standard Normal and Student's t Distributions"),
sidebarPanel(
tags$br(),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Standard Normal Distribution: Parameters")),
tags$hr(style="border-color: purple;"),
tags$br(),
tags$div(
tags$span(style="color:darkred",
tags$strong(
HTML("Mean ("),
HTML("μ"),
HTML(") = 0")
)
)
),
tags$br(),
tags$div(
tags$span(style="color:darkred",
tags$strong(
HTML("Standard deviation ("),
HTML("σ"),
HTML(") = 1")
)
)
),
tags$br(),
tags$br(),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Student's t Distribution: Parameter")),
tags$hr(style="border-color: purple;"),
tags$br(),
sliderInput(inputId = 'df',
label = tags$strong('Degrees of freedom: df', style="color:darkblue"),
value = 1, min = 1, max = 500, step = 1),
tags$br(),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Probability")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'p',
tags$strong('Cumulative probability', style="color:darkblue"),
min = 0.05, max = 1, value = 0.05, step = 0.01),
radioButtons(inputId = 'p_tail',
label = tags$strong('Probability Tail:', style="color:darkblue"),
choices = c('Lower tail (Left tail)' = 'lower',
'Upper tail (Right tail)' = 'upper',
'Both tails (Two-tailed)' = 'both'),
selected = 'both'),
tags$hr(style="border-color: blue;")
),
mainPanel(
tabsetPanel(
tabPanel(title = 'Probability Density Function',
plotOutput(outputId = 'dnorm_dt_plot', height = '800')),
tabPanel(title = 'Distribution & Probability',
plotOutput(outputId = 'dnorm_plot', height = '400'),
plotOutput(outputId = 'dt_plot', height = '400'))
)
)
))
# Source file: ABACUS/inst/app_dnorm_dt/ui.R
# Shiny global function for One-sample t-test
library(shiny)
library(ggplot2)
#_________________________________________________________________________________________
# Function to fit Model
fn_InputData <- function(pmean, hpmean, psd, n,
p, p_tail){
xrs <- rnorm(n = n, mean = pmean, sd = psd)
smean <- round(mean(xrs, na.rm = TRUE), 2)
ssd <- round(sd(xrs, na.rm = TRUE), 2)
sse <- round(ssd / sqrt(n), 2)
sDF <- data.frame(Group = 'Group 1', xrs = xrs) # density not required
sstat <- data.frame(Group = 'Group 1',
n = n,
pmean = pmean,
hpmean = hpmean,
psd = psd,
smean = smean,
ssd = ssd,
sse = sse)
sstat$lower <- sstat$smean - 1.96*sstat$sse
sstat$upper <- sstat$smean + 1.96*sstat$sse
se <- psd / sqrt(n)
alt <- switch(p_tail,
'lower' = 'less',
'upper' = 'greater',
'both' = 'two.sided')
fm <- t.test(x = sDF$xrs,
alternative = alt,
mu = hpmean, paired = FALSE,
var.equal = TRUE,
conf.level = 0.95)
tcal <- unname(round(fm$statistic, digits = 4))
df1 <- unname(fm$parameter)
pcal <- round(fm$p.value, digits = 4)
sed <- round(fm$stderr, digits = 4)
if(p_tail == 'lower' | p_tail == 'upper'){
pcal <- pt(q = abs(tcal), df = df1, lower.tail = FALSE)
ttab <- qt(p = 0.05, df = df1, lower.tail = FALSE)
} else {
pcal <- pt(q = abs(tcal), df = df1, lower.tail = FALSE) * 2
ttab <- qt(p = 0.025, df = df1, lower.tail = FALSE)
}
mean_diff <- round((smean - hpmean), digits = 2)
ci_mean_diff <- round(c(mean_diff - ttab*se, mean_diff + ttab*se), 2)
xmin <- pmean - 3.5*psd
xmax <- pmean + 3.5*psd
norm_xlim <- c(xmin, xmax)
tr <- rt(n = 10000, df = df1)
t_xlim <- c(min(tr), max(tr))
rm(tr)
q_out <- switch(EXPR = p_tail,
lower = qt(p = p, df = df1, lower.tail = TRUE),
upper = qt(p = p, df = df1, lower.tail = FALSE),
both = c(qt(p = p/2, df = df1, lower.tail = TRUE),
qt(p = p/2, df = df1, lower.tail = FALSE)))
tstat <- c(tcal = round(tcal, 4),
df1 = round(df1, 4),
pcal = round(pcal, 4),
p = round(p, 4),
q_out = round(q_out, 4))
tail <- c(p_tail = p_tail)
if(p_tail == 'lower' | p_tail == 'upper'){
q_out_txt <- paste0('p = ', tstat['p'], '; q = ', tstat['q_out'] )
xpos1 <- tstat['q_out']
} else {
q_out_txt <- paste0('p = ', tstat['p'],
'; q = ', round(q_out[1], 2), ', ',
round(q_out[2], 2) )
xpos1 <- tstat['q_out2']
}
qText = q_out_txt
annotateText <- c('pText')
annotateDF <- data.frame(
xpos = c(xpos1),
ypos = c(Inf),
annotateText = c(qText),
hjustvar = c(0) ,
vjustvar = c(2)) #<- adjust
hTitle <- paste0('H0: mu = ', hpmean, '; H1: mu != ', hpmean)
dTitle <- paste0( 'True Population: dnorm(x = X', ', mean = ', pmean, ', sd = ', psd, ')' )
rTitle <- paste0( 'Sample: Mean = ', round(smean,2), ', SD = ', round(ssd,2) )
tTitle1 <- paste0( 'Calculated t-statistic: ', round(tcal, 4),
', df = ', df1, ', p-value = ', round(pcal, 4) )
tTitle2 <- paste0('Difference = ', round(mean_diff,2),
'; 95% CI = ', ci_mean_diff[1], ', ', ci_mean_diff[2])
txtTitle <- c(hTitle = hTitle, dTitle = dTitle, rTitle = rTitle,
tTitle1 = tTitle1, tTitle2 = tTitle2)
out <- list(sDF = sDF,
sstat = sstat, tstat = tstat, tail = tail,
mean_diff = mean_diff, ci_mean_diff = ci_mean_diff,
norm_xlim = norm_xlim, t_xlim = t_xlim,
annotateDF = annotateDF,
txtTitle = txtTitle)
return(out)
}
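# The t statistic reported by t.test() above is (xbar - mu0) / (s / sqrt(n)).
# A minimal check on hypothetical data:
if (FALSE) {
  x <- rnorm(25, mean = 21, sd = 4); mu0 <- 20
  tmanual <- (mean(x) - mu0) / (sd(x) / sqrt(length(x)))
  fm <- t.test(x, mu = mu0, alternative = 'two.sided')
  all.equal(unname(fm$statistic), tmanual)   # TRUE
}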
#_________________________________________________________________________________________
# Population density
fn_dnorm1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
dTitle1 <- bquote( 'True Population Mean & SD: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) )
dTitle2 <- 'Rugplot represents the random samples drawn from the population'
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim, fill = '#ffffff', alpha = 0.0)
g <- g + geom_rug(data = sDF, mapping = aes(x = xrs),
colour = 'blue', sides = 'b')
g <- g + geom_vline(xintercept = pmean, size = 1, linetype = 2, colour = 'blue')
g <- g + labs(title = dTitle1, subtitle = dTitle2,
x = 'Populations: X (unit)', y = 'Density')
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length.out = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = norm_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: dotplot
fn_dotplot <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
xmean <- sstat$smean[1]
xsd <- sstat$ssd[1]
rTitle <- bquote( 'Sample Mean & SD: ' ~
bar(x[1]) == .(round(xmean,2)) ~ ', ' ~
s[1] == .(round(xsd,2)) )
scale_factor <- (norm_xlim[2] - norm_xlim[1])/100
g <- ggplot(data = sDF, aes(x = xrs))
g <- g + geom_dotplot(fill = 'cyan', method = 'dotdensity',
binwidth = scale_factor, # dotsize = 0.5,
stackdir = 'center', stackratio = 0.9, alpha = 0.7)
g <- g + scale_y_continuous(NULL, breaks = NULL)
g <- g + geom_rug(colour = 'blue')
g <- g + geom_vline(xintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + labs(title = rTitle, x = 'Variable (unit)', y = 'Density')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: boxplot
fn_boxplot <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
xmean <- sstat$smean[1]
xsd <- sstat$ssd[1]
rTitle <- bquote( 'Sample Mean & SD: ' ~
bar(x[1]) == .(round(xmean,2)) ~ ', ' ~
s[1] == .(round(xsd,2)) )
g <- ggplot(data = sDF, aes(y = xrs, x = 1))
g <- g + geom_boxplot(alpha = 0.4, size = 1.0, colour = '#ff9966', varwidth = TRUE)
g <- g + geom_jitter(fill = 'cyan', width = 0.25, height = 0.001,
shape = 21, size = 10, alpha = 0.7)
g <- g + geom_hline(yintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + labs(title = rTitle, x = '', y = 'Variable (unit)')
yscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length.out = 21)
yscale <- round(yscale, digits = 1)
g <- g + scale_y_continuous(breaks = yscale, limits = norm_xlim)
g <- g + coord_flip()
g <- g + geom_rug(colour = 'blue', sides = 'b')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
g <- g + theme(legend.position = 'none')
print(g)
}
#_________________________________________________________________________________________
# Standard t Density: Plot1 with Type 1 error
fn_dt_plot1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
hpmean <- unname(sstat$hpmean[1])
p <- unname(tstat['p'])
p_tail <- unname(tail['p_tail'])
if(p_tail == 'both'){
q_out <- unname(c(tstat['q_out1'], tstat['q_out2']))
} else {
q_out <- unname(tstat['q_out'])
}
tcal <- unname(tstat['tcal'])
df1 <- unname(tstat['df1'])
  hTitle <- switch(p_tail,
                   lower = bquote(H[0] ~ ':' ~ mu == .(hpmean) ~ '; ' ~ H[A] ~ ':' ~ mu < .(hpmean)),
                   upper = bquote(H[0] ~ ':' ~ mu == .(hpmean) ~ '; ' ~ H[A] ~ ':' ~ mu > .(hpmean)),
                   both = bquote(H[0] ~ ':' ~ mu == .(hpmean) ~ '; ' ~ H[A] ~ ':' ~ mu != .(hpmean)))
tTitle1 <- unname(txtTitle['tTitle1'])
g <- ggplot(data = NULL, mapping = aes(t_xlim))
if(p_tail == 'lower'){
t_xlim1 <- c(t_xlim[1], q_out)
t_xlim2 <- c(q_out, t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
t_xlim1 <- c(t_xlim[1], q_out)
t_xlim2 <- c(q_out, t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
t_xlim1 <- c(t_xlim[1], q_out[1])
t_xlim2 <- c(q_out[1], q_out[2])
t_xlim3 <- c(q_out[2], t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim3, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + geom_vline(xintercept = tcal, size = 2, linetype = 1, colour = 'red')
g <- g + labs(title = hTitle, subtitle = tTitle1, x = 'Test Statistic: t', y = 'Density')
  g <- g + geom_text(data = annotateDF[1,],
                     aes(x = xpos, y = ypos,
                         hjust = hjustvar, vjust = vjustvar,
                         label = annotateText),
                     colour = c('blue'), size = 4)
xscale <- seq(from = -5, to = 5, by = 0.5)
g <- g + scale_x_continuous(breaks = xscale)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Mean & CI
fn_mean_diff <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
tTitle2 <- unname(txtTitle['tTitle2'])
mDF <- data.frame(mean_diff = mean_diff, lower = ci_mean_diff[1], upper = ci_mean_diff[2], y = 0)
g <- ggplot(data = mDF, mapping=aes(x = mean_diff, y = y))
g <- g + geom_errorbarh(aes(xmin = lower, xmax = upper), size = 1.5, colour = '#0000cc')
g <- g + geom_point(size = 20, shape = 15, colour = '#ff9966')
g <- g + labs(title = '', subtitle = tTitle2,
x = 'Difference between Sample Mean & Hypothesised Mean with 95% CI', y = NULL)
g <- g + scale_y_continuous(expand = c(0,0))
g <- g + geom_vline(xintercept = 0, size = 1.5, linetype = 2, colour = 'purple')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.title.x = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.ticks.length = unit(0, "pt"),
axis.line = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(g)
}
#_________________________________________________________________________________________
# Report preparation
fn_Report <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
p_tail <- unname(tail['p_tail'])
  txtH <- switch(EXPR = p_tail,
                 lower = paste0("Hypothesis:<br/>H<sub>0</sub>: μ = ", sstat$hpmean[1],
                                "<br/>H<sub>1</sub>: μ < ", sstat$hpmean[1]),
                 upper = paste0("Hypothesis:<br/>H<sub>0</sub>: μ = ", sstat$hpmean[1],
                                "<br/>H<sub>1</sub>: μ > ", sstat$hpmean[1]),
                 both = paste0("Hypothesis:<br/>H<sub>0</sub>: μ = ", sstat$hpmean[1],
                               "<br/>H<sub>1</sub>: μ ≠ ", sstat$hpmean[1]) )
H <- tags$h3(HTML(txtH), style="color:blue")
names(sDF) <- c('Group', 'X')
sDF$SampleID <- 1:nrow(sDF)
sDF <- sDF[, c('SampleID', 'Group', 'X')]
  sstat <- sstat[, 1:8]
  names(sstat) <- c('Group', 'N', 'Population Mean', 'Hypothesised Mean',
                    'Population SD', 'Sample Mean', 'Sample SD', 'SE')
tstat <- as.data.frame(t(tstat))
tstat <- tstat[,1:5]
tstat[,5] <- abs(tstat[,5]) # only take absolute Tabulated t
tstat$tcal = sprintf('%.4f', tstat$tcal)
tstat$pcal= sprintf('%1.4f', tstat$pcal)
names(tstat) <- c('Cal t', 'DF', 'Pr(>|t|)', 'Type 1 Error', 'Tabulated |t|')
txtCI <- paste0('Mean difference & 95% CI: ', mean_diff,
' (', ci_mean_diff[1], ', ', ci_mean_diff[2], ')')
txtCI <- tags$h4(HTML(txtCI), style="color:blue")
rpt <- list(H = H, sDF = sDF, sstat = sstat, tstat = tstat, txtCI = txtCI)
}
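#_________________________________________________________________________________________
# Usage sketch (commented out; illustrative values): the report list can be
# inspected without Shiny rendering it.
# rpt <- fn_Report(fn_InputData(pmean = 20, hpmean = 21, psd = 4, n = 15,
#                               p = 0.05, p_tail = 'both'))
# rpt$sstat   # summary-statistics table shown in the Summary tab
# rpt$tstat   # test-statistic table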
#_________________________________________________________________________________________
# === End of file: ABACUS/inst/app_onesampt/global.R ===
# Shiny server for One-sample t-test
#_________________________________________________________________________________________
shinyServer(function(input, output) {
#_________________________________________________________________________________________
# eventReactive----
inputVal_eventReactive <- eventReactive(input$btnUpdate, {
iseed <- input$numRN + as.integer(input$btnUpdate)
pmean <- input$pmean
hpmean <- input$hpmean
psd <- input$psd
n <- input$n
p <- input$p # user probability
p_tail <- input$p_tail
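    # repeatable() wraps fn_InputData with a fixed seed so that re-rendering
    # reuses the same random sample; adding the button count to the seed above
    # makes each press of 'Update' draw a fresh, reproducible sample.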
fn_InputData_repeatable <- repeatable(rngfunc = fn_InputData, seed = iseed)
fn_InputData_repeatable(pmean = pmean, hpmean = hpmean,
psd = psd, n = n,
p = p, p_tail = p_tail)
})
#_________________________________________________________________________________________
# reactive----
inputVal_reactive <- reactive({
pmean <- input$pmean
hpmean <- input$hpmean
psd <- input$psd
n <- input$n
p <- input$p # user probability
p_tail <- input$p_tail
fn_InputData(pmean = pmean, hpmean = hpmean,
psd = psd, n = n,
p = p, p_tail = p_tail)
})
#_________________________________________________________________________________________
output$dnorm_plot <- renderPlot({
if(input$chkUpdate){
fn_dnorm1(inputVal_reactive())
} else {
fn_dnorm1(inputVal_eventReactive())
}
})
output$dotplot <- renderPlot({
if(input$chkUpdate){
fn_dotplot(inputVal_reactive())
} else {
fn_dotplot(inputVal_eventReactive())
}
})
output$boxplot <- renderPlot({
if(input$chkUpdate){
fn_boxplot(inputVal_reactive())
} else {
fn_boxplot(inputVal_eventReactive())
}
})
output$mdiff_plot1 <- renderPlot({
if(input$chkUpdate){
fn_mean_diff(inputVal_reactive())
} else {
fn_mean_diff(inputVal_eventReactive())
}
})
output$dt_plot1 <- renderPlot({
if(input$chkUpdate){
fn_dt_plot1(inputVal_reactive())
} else {
fn_dt_plot1(inputVal_eventReactive())
}
})
#_________________________________________________________________________________________
output$H <- renderUI({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['H']]
} else {
fn_Report(inputVal_eventReactive())[['H']]
}
})
samp <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sDF']]
} else {
fn_Report(inputVal_eventReactive())[['sDF']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$samp <- renderUI(fluidPage(samp, style="overflow-y:scroll; height: 300px"))
output$sstat <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sstat']]
} else {
fn_Report(inputVal_eventReactive())[['sstat']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$tstat <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['tstat']]
} else {
fn_Report(inputVal_eventReactive())[['tstat']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$CI <- renderUI(
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['txtCI']]
} else {
fn_Report(inputVal_eventReactive())[['txtCI']]
}
)
#_________________________________________________________________________________________
})
# === End of file: ABACUS/inst/app_onesampt/server.R ===
# Shiny ui for One-sample t-test
shinyUI(pageWithSidebar(
headerPanel(title = div(img(src='abacus.png', align = 'left'),
"Hypothesis Testing: One Sample, Student's t-Test"),
windowTitle = "Hypothesis Testing: One Sample, Student's t-Test"),
sidebarPanel(
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Simulation Features")),
tags$hr(style="border-color: purple;"),
checkboxInput(inputId = 'chkUpdate',
label = tags$strong('Check the box to update instantly', style="color:darkblue"),
value = FALSE, width = '100%'),
actionButton(inputId = 'btnUpdate', label = 'Update'),
tags$hr(style="border-color: purple;"),
numericInput(inputId = 'numRN',
label = tags$p('Seed value for generating the random number', style="color:darkblue"),
value = 12345, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Population Parameters")),
tags$hr(style="border-color: purple;"),
numericInput(inputId = 'pmean',
label = tags$strong('True Population Mean: ', HTML("μ"), style="color:darkblue"),
value = 20),
numericInput(inputId = 'hpmean',
label = tags$strong('Hypothesised Population Mean: ', HTML("μ<sub>0"), style="color:darkblue"),
value = 21),
tags$hr(style='border-color: purple;'),
numericInput(inputId = 'psd',
label = tags$strong('True Population Standard Deviation: ', HTML("σ"), style="color:darkblue"),
value=4, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Sample Characteristics")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'n',
label = tags$strong('Sample Size', style="color:darkblue"),
min = 10, max = 50, value = 15, step = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Distribution Function")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'p',
label = tags$strong('Type 1 Error', style="color:darkblue"),
min = 0.01, max = 0.10, value = 0.05, step = 0.01),
radioButtons(inputId = 'p_tail',
label = tags$strong('Probability Tail', style="color:darkblue"),
choices = c('Lower tail (Left tail)' = 'lower',
'Upper tail (Right tail) ' = 'upper',
'Both tails (Two-tailed)' = 'both'),
selected = 'both'),
tags$hr(style="border-color: purple;")
),
mainPanel(
tabsetPanel(
tabPanel(title = 'Population',
plotOutput(outputId = 'dnorm_plot', height = '1000px')),
tabPanel(title = 'Sample',
plotOutput(outputId = 'dotplot', height = '500px'),
plotOutput(outputId = 'boxplot', height = '500px')),
tabPanel(title = 'Test Statistic',
plotOutput(outputId = 'mdiff_plot1', height = '250px'),
plotOutput(outputId = 'dt_plot1', height = '750px')),
tabPanel(title = 'Summary',
h1("Hypothesis"),
uiOutput('H', height = '20px'),
tags$hr(style="border-color: purple;"),
h2("Sample"),
tableOutput(outputId = 'samp'),
tags$hr(style="border-color: purple;"),
h2("Summary Statistics"),
tableOutput(outputId = 'sstat'),
tags$hr(style="border-color: purple;"),
h2("Test Statistic"),
tableOutput(outputId = 'tstat'),
tags$hr(style="border-color: purple;"),
h2("Confidence Interval"),
uiOutput('CI', height = '50'))
)
)
))
# === End of file: ABACUS/inst/app_onesampt/ui.R ===
# Shiny global function for One-sample z-test
library(shiny)
library(ggplot2)
#_________________________________________________________________________________________
# Function to fit Model
fn_InputData <- function(pmean, hpmean, psd, n,
p, p_tail){
xrs <- rnorm(n = n, mean = pmean, sd = psd)
smean <- round(mean(xrs, na.rm = TRUE), 2)
ssd <- round(sd(xrs, na.rm = TRUE), 2)
sse <- round(ssd / sqrt(n), 2)
sDF <- data.frame(Group = 'Group 1', xrs = xrs) # density not required
sstat <- data.frame(Group = 'Group 1',
n = n,
pmean = pmean,
hpmean = hpmean,
psd = psd,
smean = smean,
ssd = ssd,
sse = sse)
sstat$lower <- sstat$smean - 1.96*sstat$sse
sstat$upper <- sstat$smean + 1.96*sstat$sse
xmin <- pmean - 3.5*psd
xmax <- pmean + 3.5*psd
norm_xlim <- c(xmin, xmax)
se <- psd / sqrt(n)
zcal <- (smean - hpmean)/se
if(p_tail == 'lower' | p_tail == 'upper'){
pcal <- pnorm(q = abs(zcal), mean = 0, sd = 1, lower.tail = FALSE)
z <- qnorm(p = 0.05, mean = 0, sd = 1, lower.tail = FALSE)
} else {
pcal <- pnorm(q = abs(zcal), mean = 0, sd = 1, lower.tail = FALSE) * 2
z <- qnorm(p = 0.025, mean = 0, sd = 1, lower.tail = FALSE)
}
mean_diff <- round((smean - hpmean), digits = 2)
ci_mean_diff <- round(c(mean_diff - z*se, mean_diff + z*se), 2)
q_out <- switch(EXPR = p_tail,
lower = qnorm(p = p, mean = 0, sd = 1, lower.tail = TRUE),
upper = qnorm(p = p, mean = 0, sd = 1, lower.tail = FALSE),
both = c(qnorm(p = p/2, mean = 0, sd = 1, lower.tail = TRUE),
qnorm(p = p/2, mean = 0, sd = 1, lower.tail = FALSE)))
zstat <- c(zcal = round(zcal, 4),
pcal = round(pcal, 4),
p = round(p, 4),
q_out = round(q_out, 2))
tail <- c(p_tail = p_tail)
if(p_tail == 'lower' | p_tail == 'upper'){
q_out_txt <- paste0(' p = ', zstat['p'], '; q = ', zstat['q_out'] )
xpos1 <- zstat['q_out']
} else {
q_out_txt <- paste0(' p = ', zstat['p'],
'; q = ', round(q_out[1], 2), ', ', round(q_out[2], 2) )
xpos1 <- zstat['q_out2']
}
  qText <- q_out_txt
annotateDF <- data.frame(
xpos = c(xpos1),
ypos = c(Inf),
annotateText = c(qText),
hjustvar = c(0) ,
vjustvar = c(2)) #<- adjust
dTitle <- paste0( 'True Population: Mean = ', round(pmean,2), ', SD = ', round(psd,2) )
rTitle <- paste0( 'Sample: Mean = ', round(smean,2), ', SD = ', round(ssd,2) )
zTitle1 <- paste0( 'Calculated z-statistic: ',
round(zcal, 4), '; p-value = ', round(pcal, 4) )
zTitle2 <- paste0('Difference = ', mean_diff, '; 95% CI = ', ci_mean_diff[1], ', ', ci_mean_diff[2])
txtTitle <- c(dTitle = dTitle, rTitle = rTitle,
zTitle1 = zTitle1, zTitle2 = zTitle2)
out <- list(sDF = sDF,
sstat = sstat, zstat = zstat, tail = tail,
mean_diff = mean_diff, ci_mean_diff = ci_mean_diff,
norm_xlim = norm_xlim,
annotateDF = annotateDF,
txtTitle = txtTitle)
return(out)
}
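#_________________________________________________________________________________________
# Hand-check sketch (commented out; illustrative values): the z statistic is
# simply (sample mean - hypothesised mean) / (sigma / sqrt(n)).
# res <- fn_InputData(pmean = 20, hpmean = 21, psd = 4, n = 100,
#                     p = 0.05, p_tail = 'both')
# with(res$sstat, (smean - hpmean) / (psd / sqrt(n)))  # ~ res$zstat['zcal'] up to rounding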
#_________________________________________________________________________________________
# Population density
fn_dnorm1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- sstat$pmean[1]
psd <- sstat$psd[1]
dTitle1 <- bquote( 'True Population Mean & SD: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd) )
dTitle2 <- 'Rugplot represents the random samples drawn from the population'
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd), colour = 'darkred',
xlim = norm_xlim, fill = '#ffffff', alpha = 0.5)
g <- g + geom_rug(data = sDF, mapping = aes(x = xrs),
colour = 'blue', sides = 'b')
g <- g + geom_vline(xintercept = pmean, size = 1, linetype = 2, colour = 'blue')
g <- g + labs(title = dTitle1, subtitle = dTitle2,
x = 'Populations: X (unit)', y = 'Density')
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length.out = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = norm_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: dotplot
fn_dotplot <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
xmean <- sstat$smean[1]
xsd <- sstat$ssd[1]
rTitle <- bquote( 'Sample Mean & SD: ' ~
bar(x[1]) == .(round(xmean,2)) ~ ', ' ~
s[1] == .(round(xsd,2)) )
  scale_factor <- (norm_xlim[2] - norm_xlim[1])/100
g <- ggplot(data = sDF, aes(x = xrs))
g <- g + geom_dotplot(fill = 'cyan', method = 'dotdensity',
binwidth = scale_factor, # dotsize = 0.4,
stackdir = 'center', stackratio = 0.9, alpha = 0.7)
g <- g + scale_y_continuous(NULL, breaks = NULL)
g <- g + geom_rug(colour = 'blue')
g <- g + geom_vline(xintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + labs(title = rTitle, x = 'Variable (unit)', y = 'Density')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: boxplot
fn_boxplot <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
xmean <- sstat$smean[1]
xsd <- sstat$ssd[1]
rTitle <- bquote( 'Sample Mean & SD: ' ~
bar(x[1]) == .(round(xmean,2)) ~ ', ' ~
s[1] == .(round(xsd,2)) )
g <- ggplot(data = sDF, aes(y = xrs, x = 1))
g <- g + geom_boxplot(alpha = 0.4, size = 1.0, colour = '#ff9966', varwidth = TRUE)
g <- g + geom_jitter(fill = 'cyan', width = 0.25, height = 0.001,
shape = 21, size = 10, alpha = 0.7)
g <- g + geom_hline(yintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + labs(title = rTitle, x = '', y = 'Variable (unit)')
yscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length.out = 21)
yscale <- round(yscale, digits = 1)
g <- g + scale_y_continuous(breaks = yscale, limits = norm_xlim)
g <- g + coord_flip()
g <- g + geom_rug(colour = 'blue', sides = 'b')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
g <- g + theme(legend.position = 'none')
print(g)
}
#_________________________________________________________________________________________
# Standard Normal Density: With Type1 error
fn_dnorm_z_plot1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
hpmean <- unname(sstat$hpmean[1])
p <- unname(zstat['p'])
p_tail <- unname(tail['p_tail'])
zcal <- unname(zstat['zcal'])
pcal <- unname(zstat['pcal'])
if(p_tail == 'both'){
q_out <- unname(c(zstat['q_out1'], zstat['q_out2']))
} else {
q_out <- unname(zstat['q_out'])
}
  hTitle <- switch(p_tail,
                   lower = bquote(H[0] ~ ':' ~ mu == .(hpmean) ~ '; ' ~ H[A] ~ ':' ~ mu < .(hpmean)),
                   upper = bquote(H[0] ~ ':' ~ mu == .(hpmean) ~ '; ' ~ H[A] ~ ':' ~ mu > .(hpmean)),
                   both = bquote(H[0] ~ ':' ~ mu == .(hpmean) ~ '; ' ~ H[A] ~ ':' ~ mu != .(hpmean)))
zTitle1 <- unname(txtTitle['zTitle1'])
z_xlim <- c(-3.5, 3.5)
g <- ggplot(data = NULL, mapping = aes(z_xlim))
if(p_tail == 'lower'){
z_xlim1 <- c(z_xlim[1], q_out)
z_xlim2 <- c(q_out, z_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = 0, sd = 1), colour = 'darkred',
xlim = z_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = 0, sd = 1), colour = 'darkred',
xlim = z_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
z_xlim1 <- c(z_xlim[1], q_out)
z_xlim2 <- c(q_out, z_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = 0, sd = 1), colour = 'darkred',
xlim = z_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = 0, sd = 1), colour = 'darkred',
xlim = z_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
z_xlim1 <- c(z_xlim[1], q_out[1])
z_xlim2 <- c(q_out[1], q_out[2])
z_xlim3 <- c(q_out[2], z_xlim[2])
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = 0, sd = 1), colour = 'darkred',
xlim = z_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = 0, sd = 1), colour = 'darkred',
xlim = z_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = 0, sd = 1), colour = 'darkred',
xlim = z_xlim3, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + geom_vline(xintercept = zcal, size = 2, linetype = 1, colour = 'red')
g <- g + labs(title = hTitle, subtitle = zTitle1, x = 'Test Statistic: z', y = 'Density')
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = annotateText),
colour = c('blue'), size = 4)
xscale <- seq(from = -3.5, to = 3.5, by = 0.5)
g <- g + scale_x_continuous(breaks = xscale)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Mean & CI
fn_mean_diff <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
zTitle2 <- unname(txtTitle['zTitle2'])
mDF <- data.frame(mean_diff = mean_diff, lower = ci_mean_diff[1], upper = ci_mean_diff[2], y = 0)
g <- ggplot(data = mDF, mapping=aes(x = mean_diff, y = y))
g <- g + geom_errorbarh(aes(xmin = lower, xmax = upper), size = 1.5, colour = '#0000cc')
g <- g + geom_point(size = 20, shape = 15, colour = '#ff9966')
g <- g + labs(title = '', subtitle = zTitle2,
x = 'Difference between Sample Mean & Hypothesised Mean with 95% CI', y = NULL)
g <- g + scale_y_continuous(expand = c(0,0))
g <- g + geom_vline(xintercept = 0, size = 1.5, linetype = 2, colour = 'purple')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.title.x = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.ticks.length = unit(0, "pt"),
axis.line = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(g)
}
#_________________________________________________________________________________________
# Report preparation
fn_Report <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
p_tail <- unname(tail['p_tail'])
  txtH <- switch(EXPR = p_tail,
                 lower = paste0("Hypothesis:<br/>H<sub>0</sub>: μ = ", sstat$hpmean[1],
                                "<br/>H<sub>1</sub>: μ < ", sstat$hpmean[1]),
                 upper = paste0("Hypothesis:<br/>H<sub>0</sub>: μ = ", sstat$hpmean[1],
                                "<br/>H<sub>1</sub>: μ > ", sstat$hpmean[1]),
                 both = paste0("Hypothesis:<br/>H<sub>0</sub>: μ = ", sstat$hpmean[1],
                               "<br/>H<sub>1</sub>: μ ≠ ", sstat$hpmean[1]) )
H <- tags$h3(HTML(txtH), style="color:blue")
names(sDF) <- c('Group', 'X')
sDF$SampleID <- 1:nrow(sDF)
sDF <- sDF[, c('SampleID', 'Group', 'X')]
  sstat <- sstat[, 1:8]
  names(sstat) <- c('Group', 'N', 'Population Mean', 'Hypothesised Mean',
                    'Population SD', 'Sample Mean', 'Sample SD', 'SE')
zstat <- as.data.frame(t(zstat))
zstat <- zstat[,1:4]
zstat[,4] <- abs(zstat[,4]) # only take absolute Tabulated z
zstat$zcal = sprintf('%.4f', zstat$zcal)
zstat$pcal= sprintf('%1.4f', zstat$pcal)
names(zstat) <- c('Cal z', 'Pr(>|z|)', 'Type 1 Error', 'Tabulated |z|')
txtCI <- paste0('Mean difference & 95% CI: ', mean_diff,
' (', ci_mean_diff[1], ', ', ci_mean_diff[2], ')')
txtCI <- tags$h4(HTML(txtCI), style="color:blue")
rpt <- list(H = H, sDF = sDF, sstat = sstat, zstat = zstat, txtCI = txtCI)
}
#_________________________________________________________________________________________
# === End of file: ABACUS/inst/app_onesampz/global.R ===
# Shiny server for One-sample z-test
#_________________________________________________________________________________________
shinyServer(function(input, output) {
#_________________________________________________________________________________________
# eventReactive----
inputVal_eventReactive <- eventReactive(input$cmdUpdate, {
iseed <- input$numRN + as.integer(input$cmdUpdate)
pmean <- input$pmean
hpmean <- input$hpmean
psd <- input$psd
n <- input$n
p <- input$p # user probability
p_tail <- input$p_tail
fn_InputData_repeatable <- repeatable(rngfunc = fn_InputData, seed = iseed)
fn_InputData_repeatable(pmean = pmean, hpmean = hpmean,
psd = psd, n = n,
p = p, p_tail = p_tail)
})
#_________________________________________________________________________________________
# reactive----
inputVal_reactive <- reactive({
pmean <- input$pmean
hpmean <- input$hpmean
psd <- input$psd
n <- input$n
p <- input$p # user probability
p_tail <- input$p_tail
fn_InputData(pmean = pmean, hpmean = hpmean,
psd = psd, n = n,
p = p, p_tail = p_tail)
})
#_________________________________________________________________________________________
output$dnorm_plot <- renderPlot({
if(input$chkUpdate){
fn_dnorm1(inputVal_reactive())
} else {
fn_dnorm1(inputVal_eventReactive())
}
})
output$dotplot <- renderPlot({
if(input$chkUpdate){
fn_dotplot(inputVal_reactive())
} else {
fn_dotplot(inputVal_eventReactive())
}
})
output$boxplot <- renderPlot({
if(input$chkUpdate){
fn_boxplot(inputVal_reactive())
} else {
fn_boxplot(inputVal_eventReactive())
}
})
output$mdiff_plot1 <- renderPlot({
if(input$chkUpdate){
fn_mean_diff(inputVal_reactive())
} else {
fn_mean_diff(inputVal_eventReactive())
}
})
output$dnorm_z_plot1 <- renderPlot({
if(input$chkUpdate){
fn_dnorm_z_plot1(inputVal_reactive())
} else {
fn_dnorm_z_plot1(inputVal_eventReactive())
}
})
#_________________________________________________________________________________________
output$H <- renderUI({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['H']]
} else {
fn_Report(inputVal_eventReactive())[['H']]
}
})
sample <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sDF']]
} else {
fn_Report(inputVal_eventReactive())[['sDF']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$sample <- renderUI(fluidPage(sample, style="overflow-y:scroll; height: 300px"))
output$sstat <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sstat']]
} else {
fn_Report(inputVal_eventReactive())[['sstat']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$zstat <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['zstat']]
} else {
fn_Report(inputVal_eventReactive())[['zstat']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$CI <- renderUI(
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['txtCI']]
} else {
fn_Report(inputVal_eventReactive())[['txtCI']]
}
)
#_________________________________________________________________________________________
#_________________________________________________________________________________________
})
# === End of file: ABACUS/inst/app_onesampz/server.R ===
# Shiny ui for One-sample z-test
# Define UI for the application
shinyUI(pageWithSidebar(
headerPanel(title = div(img(src='abacus.png', align = 'left'),
"Hypothesis Testing: One Sample, Z Test"),
windowTitle = "Hypothesis Testing: One Sample, Z Test"),
sidebarPanel(
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Simulation Features")),
tags$hr(style="border-color: purple;"),
checkboxInput(inputId = 'chkUpdate',
label = tags$strong('Check the box to update instantly', style="color:darkblue"),
value = FALSE, width = '100%'),
actionButton(inputId = 'cmdUpdate', label = 'Update'),
tags$hr(style="border-color: green;"),
numericInput(inputId = 'numRN',
label = tags$p('Seed value for generating the random number', style="color:darkblue"),
value = 12345, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Population Parameters")),
tags$hr(style="border-color: purple;"),
numericInput(inputId = 'pmean',
label = tags$strong('True Population Mean: ', HTML("μ"), style="color:darkblue"),
value = 20),
numericInput(inputId = 'hpmean',
label = tags$strong('Hypothesised Population Mean: ', HTML("μ<sub>0"), style="color:darkblue"),
value = 21),
tags$hr(style='border-color: green;'),
numericInput(inputId = 'psd',
label = tags$strong('True Population Standard Deviation: ', HTML("σ"), style="color:darkblue"),
value=4, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Sample Characteristics")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'n',
label = tags$strong('Sample Size', style="color:darkblue"),
min = 50, max = 500, value = 100, step = 10),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Distribution Function")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'p',
label = tags$strong('Type 1 Error', style="color:darkblue"),
min = 0.01, max = 0.10, value = 0.05, step = 0.01),
radioButtons(inputId = 'p_tail',
label = tags$strong('Probability Tail', style="color:darkblue"),
choices = c('Lower tail ' = 'lower',
'Upper tail ' = 'upper',
'Both tails ' = 'both'),
selected = 'both'),
tags$hr(style="border-color: purple;")
),
mainPanel(
tabsetPanel(
tabPanel(title = 'Population',
plotOutput(outputId = 'dnorm_plot', height = '1000px')),
tabPanel(title = 'Sample',
plotOutput(outputId = 'dotplot', height = '500px'),
plotOutput(outputId = 'boxplot', height = '500px')),
tabPanel(title = 'Test Statistic',
plotOutput(outputId = 'mdiff_plot1', height = '250px'),
plotOutput(outputId = 'dnorm_z_plot1', height = '750px')),
tabPanel(title = 'Summary',
h1("Hypothesis"),
uiOutput('H', height = '20px'),
tags$hr(style="border-color: purple;"),
h2("Sample"),
tableOutput(outputId = 'sample'),
tags$hr(style="border-color: purple;"),
h2("Summary Statistics"),
tableOutput(outputId = 'sstat'),
tags$hr(style="border-color: purple;"),
h2("Test Statistic"),
tableOutput(outputId = 'zstat'),
tags$hr(style="border-color: purple;"),
h2("Confidence Interval"),
uiOutput('CI', height = '50'))
)
)
))
# === End of file: ABACUS/inst/app_onesampz/ui.R ===
# Global code for Sampling
library(shiny)
library(ggplot2)
#_________________________________________________________________________________________
# Function to fit Model
fn_InputData <- function(pmean, psd, n, k){
pop <- rnorm(n = 1e05, mean = pmean, sd = psd)
xrs <- replicate(k, sample(pop, n, replace = TRUE))
sDF <- data.frame(xrs = xrs) # density not required
row.names(sDF) <- paste0('ID', 1:nrow(sDF))
colnames(sDF) <- paste0('Sample ', 1:ncol(sDF))
pstat <- data.frame(n = n, k = k, pmean = pmean, psd = psd)
smean <- apply(sDF, MARGIN = 2, FUN = 'mean', na.rm = TRUE)
ssd <- apply(sDF, MARGIN = 2, FUN = 'sd', na.rm = TRUE)
sstat <- data.frame(n = n, smean = round(smean, 2), ssd = round(ssd, 2))
sstat$Sample <- 1:nrow(sstat)
sstat$se <- round(sstat$ssd / sqrt(n), 2)
tcal <- qt(p = 0.025, df = (n-1), lower.tail = FALSE)
sstat$lcl <- round(sstat$smean - tcal * sstat$se, 2)
sstat$ucl <- round(sstat$smean + tcal * sstat$se, 2)
xmin <- pmean - 3.5*psd
xmax <- pmean + 3.5*psd
norm_xlim <- c(xmin, xmax)
dTitle <- paste0( 'Population: Mean = ', round(pmean,2), ', SD = ', round(psd,2) )
sTitle1 <- paste0( 'Distribution of ', k, ' samples each with ', n, ' observations')
sTitle2 <- paste0( 'Distribution of means of ', k, ' samples each with ', n, ' observations')
txtTitle <- c(dTitle = dTitle, sTitle1 = sTitle1, sTitle2 = sTitle2)
  out <- list(sDF = sDF,
pstat = pstat, sstat = sstat,
norm_xlim = norm_xlim,
txtTitle = txtTitle)
return(out)
}
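#_________________________________________________________________________________________
# Usage sketch (commented out; illustrative values): each column of sDF is one
# random sample, and each row of sstat gives that sample's mean, SD, SE and 95% CI.
# res <- fn_InputData(pmean = 20, psd = 4, n = 50, k = 100)
# str(res$sstat)
# mean(res$sstat$lcl <= 20 & res$sstat$ucl >= 20)  # empirical CI coverage, ~0.95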
#_________________________________________________________________________________________
# Normal distribution: Density plot
fn_dnorm <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- pstat$pmean[1]
psd <- pstat$psd[1]
dTitle <- bquote( 'Normal Distribution: ' ~
mu == .(pmean) ~ ', ' ~
sigma == .(psd))
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean, sd = psd),
xlim = norm_xlim, fill = '#ffff00', alpha = 0.3)
g <- g + geom_vline(xintercept = pmean, size = 1, linetype = 2, colour = 'darkred')
g <- g + labs(title = dTitle, x = 'Variable X (unit)', y = 'Density')
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length.out = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Sample distribution (Single Sample): dotplot
fn_dotplot1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
n <- pstat$n[1]
xmean <- sstat$smean[1]
xsd <- sstat$ssd[1]
sDF <- sDF[,1, drop=FALSE]
names(sDF) <- 'Sample'
scale_factor <- (norm_xlim[2] - norm_xlim[1])/100
sTitle <- bquote('Sample 1: ' ~
bar(x[1]) == .(round(xmean,2)) ~ ', ' ~
s[1] == .(round(xsd,2)) )
g <- ggplot(data = sDF[,1, drop=FALSE], aes_string(x = 'Sample'))
g <- g + geom_dotplot(fill = 'cyan', method = 'dotdensity',
binwidth = scale_factor, # dotsize = xdotsize,
stackratio = 0.7, alpha = 0.3)
g <- g + scale_y_continuous(NULL, breaks = NULL)
g <- g + geom_rug(colour = 'blue')
g <- g + geom_vline(xintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + labs(title = sTitle, x = 'Variable X (unit)', y = 'Density')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Sample distribution (8 Samples): dotplot
fn_dotplot2 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
n <- pstat$n[1]
sDF <- sDF[,1:8]
sstat <- sstat[1:8,]
sstat$Sample <- row.names(sstat)
sampID <- names(sDF)
sTitle <- unname(txtTitle['sTitle1'])
DF <- reshape(data = sDF,
varying = sampID,
v.names = 'x',
times = sampID,
timevar = 'Sample',
idvar = c('ID'),
drop = NULL,
direction = 'long',
new.row.names = 1:(length(sampID)*nrow(sDF)))
xmean <- sstat$smean
xmin <- min(DF$x)
txt <- paste0('mean = ', sstat$smean, '\nsd = ', sstat$ssd)
aDF <- data.frame(
Sample = sampID,
x = xmin, y = Inf,
aText = txt,
hjustvar = 0 ,
vjustvar = 2
)
scale_factor <- (norm_xlim[2] - norm_xlim[1])/50
g <- ggplot(data = DF, aes(x = x))
g <- g + geom_dotplot(fill = 'cyan', method = 'dotdensity',
binwidth = scale_factor, # dotsize = 0.8,
stackratio = 0.7, alpha = 0.3)
g <- g + scale_y_continuous(NULL, breaks = NULL)
g <- g + geom_rug(colour = 'blue')
g <- g + facet_wrap( ~ Sample, nrow = 2)
g <- g + geom_text(data = aDF,
aes(x = x, y = y,
hjust = hjustvar, vjust = vjustvar,
label = aText),
size = 4, colour = 'blue')
g <- g + geom_vline(data = sstat,
mapping = aes(xintercept = smean),
size = 1, linetype = 1, colour = 'purple')
g <- g + labs(title = sTitle, x = 'Variable X (unit)', y = 'Density')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Distribution of Sample Mean: Histogram and Density plot
fn_distn_mean <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
  k <- pstat$k[1]
  bins <- ceiling(1 + 3.322*log10(k))  # Sturges' rule on the k sample means plotted below
sstat$xrs <- sstat$smean
mean_xbar <- round(mean(sstat$smean), 2)
sd_xbar <- round(sd(sstat$smean), 2)
mean_ssd <- round(mean(sstat$ssd), 2)
s_xlim <- c(floor(min(sstat$smean)), ceiling(max(sstat$smean)))
rTitle <- bquote( 'Mean of Sample Mean (' ~
bar(x) ~')' == .(mean_xbar) ~ ', ' ~
'SD of ' ~
bar(x) == .(sd_xbar) ~ ', ' ~
'Mean of Sample SD (s)'
== .(mean_ssd) )
g <- ggplot(data = sstat, aes(x = xrs))
g <- g + geom_histogram(mapping = aes(x = xrs, y =..density..),
bins = bins, colour = 'purple', fill = 'orange')
g <- g + geom_density(mapping = aes(x = xrs,
y =..density..),
n = 1000, size = 1, colour = 'red')
g <- g + geom_rug(colour = 'blue')
g <- g + labs(title = rTitle, x = 'Sample Mean (unit)', y = 'Density')
xscale <- seq(from = s_xlim[1], to = s_xlim[2], length.out = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = s_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
g <- g + theme(legend.position = 'bottom')
print(g)
}
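#_________________________________________________________________________________________
# The histogram above illustrates the central limit theorem: the SD of the k
# sample means should approach the theoretical standard error sigma/sqrt(n).
# Quick check (commented out; illustrative values):
# res <- fn_InputData(pmean = 20, psd = 4, n = 50, k = 1000)
# sd(res$sstat$smean)   # empirical SD of the sample means
# 4 / sqrt(50)          # theoretical SE, about 0.57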
#_________________________________________________________________________________________
# Sample distribution: Confidence Interval
fn_CI_1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- pstat$pmean[1]
n <- pstat$n[1]
k <- pstat$k[1]
index <- which(sstat$lcl > pmean | sstat$ucl < pmean)
pCI <- round(100*(k - length(index))/k, digits = 2)
  rTitle <- paste0(pCI, '% of the estimated 95% CIs of the mean, obtained from ', k,
                   ' random samples of size ', n,
                   ', include the True Population Mean ', pmean)
g <- ggplot(data = sstat, aes(x=Sample, y=smean))
g <- g + geom_point(shape=16, size=3, colour='blue')
g <- g + geom_errorbar(aes(ymin=lcl, ymax=ucl), width=0.5, colour='purple')
g <- g + geom_hline(yintercept = pmean, colour='red', size=1.5)
xscale <- seq(from = 1, to = nrow(sstat), by = 10)
g <- g + scale_x_continuous(breaks = xscale)
g <- g + labs(title = rTitle, x = 'Sample', y = 'Mean')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, hjust = 1),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
g <- g + theme(legend.position = 'bottom')
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: ordered Confidence Interval
fn_CI_2 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean <- pstat$pmean[1]
n <- pstat$n[1]
k <- pstat$k[1]
index <- order(sstat$smean, decreasing = FALSE)
sstat <- sstat[index,]
sstat$Sample <- 1:nrow(sstat)
index <- which(sstat$lcl > pmean | sstat$ucl < pmean)
pCI <- round(100*(k - length(index))/k, digits = 2)
  rTitle <- paste0(pCI, '% of the estimated 95% CIs of the mean, obtained from ', k,
                   ' random samples of size ', n,
                   ', include the True Population Mean ', pmean)
g <- ggplot(data = sstat, aes(x=Sample, y=smean))
g <- g + geom_point(shape=16, size=3, colour='blue')
g <- g + geom_errorbar(aes(ymin=lcl, ymax=ucl), width=0.5, colour='purple')
g <- g + geom_hline(yintercept = pmean, colour='red', size=1.5)
xscale <- seq(from = 1, to = nrow(sstat), by = 10)
g <- g + scale_x_continuous(breaks = xscale)
g <- g + labs(title = rTitle, x = 'Ordered Sample', y = 'Mean')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, hjust = 1),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'))
g <- g + theme(legend.position = 'bottom')
print(g)
}
#_________________________________________________________________________________________
# Report preparation
fn_Report <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
sstat <- sstat[, c('Sample','n','smean','ssd','se','lcl','ucl')]
sstat$Sample <- paste0('Sample ', sstat$Sample)
mean_xbar <- round(mean(sstat$smean), 2)
sd_xbar <- round(sd(sstat$smean), 2)
mean_ssd <- round(mean(sstat$ssd), 2)
txtMean <- paste0("Mean of Sample Mean (x̄): ", mean_xbar,
"; SD of x̄: ", sd_xbar,
"; Mean of Sample SD (s): ", mean_ssd)
txtMean <- h4(HTML(txtMean), style="color:blue")
pmean <- pstat$pmean[1]
n <- pstat$n[1]
k <- pstat$k[1]
index <- which(sstat$lcl > pmean | sstat$ucl < pmean)
pCI <- round(100*(k - length(index))/k, digits = 2)
  txtCI <- paste0(pCI, '% of the estimated 95% CIs of the mean, obtained from ', k,
                  ' random samples of size ', n,
                  ', include the True Population Mean ', pmean)
txtCI <- h4(HTML(txtCI), style="color:blue")
names(sstat) <- c('Sample ID', 'Sample Size',
'Sample Mean', 'Sample SD',
'SE', '95% LCL', '95% UCL')
rpt <- list(sstat = sstat, txtMean = txtMean, txtCI = txtCI)
}
#_________________________________________________________________________________________
#_________________________________________________________________________________________
# === End of file: ABACUS/inst/app_sampling/global.R ===
# Shiny server: Sampling
#_________________________________________________________________________________________
shinyServer(function(input, output) {
#_________________________________________________________________________________________
# eventReactive----
inputVal_eventReactive <- eventReactive(input$cmdUpdate, {
iseed <- input$numRN + (as.integer(input$cmdUpdate)-1)
pmean <- input$pmean
psd <- input$psd
n <- input$n
k <- input$k
fn_InputData_repeatable <- repeatable(rngfunc = fn_InputData, seed = iseed)
fn_InputData_repeatable(pmean = pmean, psd = psd,
n = n, k = k)
})
#_________________________________________________________________________________________
# reactive----
inputVal_reactive <- reactive({
pmean <- input$pmean
psd <- input$psd
n <- input$n
k <- input$k
fn_InputData(pmean = pmean, psd = psd,
n = n, k = k)
})
#_________________________________________________________________________________________
# Output ----
output$dnorm_plot <- renderPlot({
if(input$chkUpdate){
fn_dnorm(inputVal_reactive())
} else {
fn_dnorm(inputVal_eventReactive())
}
})
output$dotplot1 <- renderPlot({
if(input$chkUpdate){
fn_dotplot1(inputVal_reactive())
} else {
fn_dotplot1(inputVal_eventReactive())
}
})
output$dotplot2 <- renderPlot({
if(input$chkUpdate){
fn_dotplot2(inputVal_reactive())
} else {
fn_dotplot2(inputVal_eventReactive())
}
})
output$txt1 <- renderText({
k = input$k
paste0("... continuing to Sample ", k,".")
})
output$distn_mean <-renderPlot({
if(input$chkUpdate){
fn_distn_mean(inputVal_reactive())
} else {
fn_distn_mean(inputVal_eventReactive())
}
})
  output$txt2 <- renderText({
    distname <- "population following a normal distribution"
    k <- input$k
    n <- input$n
    paste("Distribution of means of", k, "random samples, each consisting of",
          n, "observations from a", distname)
  })
output$CI_1 <- renderPlot({
if(input$chkUpdate){
fn_CI_1(inputVal_reactive())
} else {
fn_CI_1(inputVal_eventReactive())
}
})
output$CI_2 <- renderPlot({
if(input$chkUpdate){
fn_CI_2(inputVal_reactive())
} else {
fn_CI_2(inputVal_eventReactive())
}
})
  output$txtCI <- renderText({
    distname <- "population following a normal distribution"
    k <- input$k
    n <- input$n
    paste("95% Confidence Intervals of means of", k, "random samples, each consisting of",
          n, "observations from a", distname)
  })
samp <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sstat']]
} else {
fn_Report(inputVal_eventReactive())[['sstat']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$samp <- renderUI(fluidPage(samp, style="overflow-y:scroll; height: 300px"))
output$smean <- renderUI(
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['txtMean']]
} else {
fn_Report(inputVal_eventReactive())[['txtMean']]
}
)
output$CI <- renderUI(
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['txtCI']]
} else {
fn_Report(inputVal_eventReactive())[['txtCI']]
}
)
#_________________________________________________________________________________________
})
#_________________________________________________________________________________________
# === End of file: ABACUS/inst/app_sampling/server.R ===
# Shiny ui: Sampling
shinyUI(pageWithSidebar(
headerPanel(title = div(img(src='abacus.png', align = 'left'),
"Sampling Distribution"),
windowTitle = "Sampling Distribution"),
sidebarPanel(
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Simulation features")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'k',
label = tags$strong('Number of samples', style="color:darkblue"),
min=10, max=1000, value=100, step = 10),
tags$hr(style="border-color: green;"),
checkboxInput(inputId = 'chkUpdate',
label = tags$strong('Check the box to update instantly', style="color:darkblue"),
value = FALSE, width = '100%'),
actionButton(inputId = 'cmdUpdate', label = 'Update'),
tags$hr(style="border-color: green;"),
tags$br(),
numericInput(inputId = 'numRN',
label = tags$p('Seed value for generating the random number', style="color:darkblue"),
value = 12345, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Population Parameters")),
tags$hr(style="border-color: purple;"),
numericInput(inputId = 'pmean',
label = tags$strong('True Population Mean: ', HTML("μ"), style="color:darkblue"),
value = 20),
numericInput(inputId = 'psd',
label = tags$strong('True Population Standard Deviation: ', HTML("σ"), style="color:darkblue"),
value = 4, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Sample Characteristics")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'n',
label = tags$strong('Sample size', style="color:darkblue"),
min=2, max=500, value=50, step = 1),
tags$hr(style="border-color: purple;")
),
mainPanel(
tabsetPanel(
      tabPanel(title = 'Population & Sample',
               plotOutput(outputId = 'dnorm_plot', height = '400px'),
               plotOutput(outputId = 'dotplot1', height = '400px')),
      tabPanel(title = 'Sample',
               plotOutput(outputId = 'dotplot2', height = '800px'),
               textOutput(outputId = 'txt1')),
      tabPanel(title = 'Sample Estimator',
               plotOutput(outputId = 'distn_mean', height = '800px'),
               textOutput(outputId = 'txt2')),
      tabPanel(title = 'Confidence Interval',
               plotOutput(outputId = 'CI_1', height = '400px'),
               plotOutput(outputId = 'CI_2', height = '400px'),
               textOutput(outputId = 'txtCI')),
tabPanel(title = 'Summary',
h2("Sample"),
tableOutput(outputId = 'samp'),
tags$hr(style="border-color: purple;"),
h2("Sample Distribution"),
uiOutput('smean', height = '50'),
tags$hr(style="border-color: purple;"),
h2("Confidence Interval"),
uiOutput('CI', height = '50'),
tags$hr(style="border-color: purple;"))
)
)
))
# === End of file: ABACUS/inst/app_sampling/ui.R ===
# Hypothesis Testing of Means: Two Samples, Unknown Equal Variance
# Global code for Two-sample t-test
library(shiny)
library(ggplot2)
#_________________________________________________________________________________________
# Function to fit Model
fn_InputData <- function(pmean1, pmean2,
psd, n1, n2, p, p_tail){
xrs1 <- round(rnorm(n = n1, mean = pmean1, sd = psd), digits = 1)
smean1 <- round(mean(xrs1, na.rm = TRUE), 2)
ssd1 <- round(sd(xrs1, na.rm = TRUE), 2)
sse1 <- round(ssd1 / sqrt(n1), 2)
xrs2 <- round(rnorm(n = n2, mean = pmean2, sd = psd), digits = 1)
smean2 <- round(mean(xrs2, na.rm = TRUE), 2)
ssd2 <- round(sd(xrs2, na.rm = TRUE), 2)
sse2 <- round(ssd2 / sqrt(n2), 2)
gr <- c(rep(x = 'Group 1', length = n1), rep(x = 'Group 2', length = n2))
sDF <- data.frame(Group = gr, xrs = c(xrs1, xrs2)) # density not required
sstat <- data.frame(Group = c('Group 1', 'Group 2'),
n = c(n1, n2),
pmean = c(pmean1, pmean2),
psd = c(psd, psd),
smean = c(smean1, smean2),
ssd = c(ssd1, ssd2),
sse = c(sse1, sse2))
sstat$lower <- sstat$smean - 1.96*sstat$sse
sstat$upper <- sstat$smean + 1.96*sstat$sse
alt <- switch(p_tail,
'lower' = 'less',
'upper' = 'greater',
'both' = 'two.sided')
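  # var.equal = TRUE requests the pooled-variance (Student) two-sample t-test;
  # both groups are simulated with the same psd above, so the equal-variance
  # assumption matches the data-generating process.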
fm <- t.test(xrs ~ Group, data = sDF,
alternative = alt,
mu = 0, paired = FALSE,
var.equal = TRUE,
conf.level = 0.95)
tcal <- unname(round(fm$statistic, digits = 4))
df1 <- unname(fm$parameter)
pcal <- round(fm$p.value, digits = 4)
sed <- round(fm$stderr, digits = 4)
mean_diff <- round((smean1 - smean2), digits = 2)
ci_mean_diff <- round(fm$conf.int, digits = 2)
xmin <- min(pmean1 - 3.5*psd, pmean2 - 3.5*psd)
xmax <- max(pmean1 + 3.5*psd, pmean2 + 3.5*psd)
norm_xlim <- c(xmin, xmax)
tr <- rt(n = 10000, df = df1)
t_xlim <- c(min(tr), max(tr))
rm(tr)
q_out <- switch(EXPR = p_tail,
lower = qt(p = p, df = df1, lower.tail = TRUE),
upper = qt(p = p, df = df1, lower.tail = FALSE),
both = c(qt(p = p/2, df = df1, lower.tail = TRUE),
qt(p = p/2, df = df1, lower.tail = FALSE)))
tstat <- c(tcal = round(tcal, 4),
df1 = round(df1, 4),
pcal = round(pcal, 4),
p = round(p, 4),
q_out = round(q_out, 4))
tail <- c(p_tail = p_tail)
if(p_tail == 'lower' | p_tail == 'upper'){
q_out_txt <- paste0('p = ', tstat['p'], '; q = ', tstat['q_out'] )
xpos1 <- tstat['q_out']
} else {
q_out_txt <- paste0('p = ', tstat['p'], '; q = ', round(q_out[1], 2), ', ', round(q_out[2], 2) )
xpos1 <- tstat['q_out2']
}
  qText <- q_out_txt
annotateDF <- data.frame(
xpos = c(xpos1),
ypos = c(Inf),
annotateText = c(qText),
hjustvar = c(0) ,
vjustvar = c(2)) #<- adjust
  hTitle <- 'H0: mu1 = mu2; H1: mu1 != mu2'
dTitle <- paste0( 'Population: Mean1 = ', round(pmean1,2),
', Mean2 = ', round(pmean2,2),
', SD = ', round(psd,2) )
rTitle <- paste0( 'Sample: Mean1 = ', round(smean1,2),
', SD1 = ', round(ssd1,2),
'; Mean2 = ', round(smean2,2),
', SD2 = ', round(ssd2,2) )
tTitle1 <- paste0( 'Calculated t-statistic: ',
round(tcal, 4),
', df = ', df1,
', p-value = ', round(pcal, 4) )
tTitle2 <- paste0('Mean difference = ', round(mean_diff,2),
'; 95% CI = ', ci_mean_diff[1], ', ', ci_mean_diff[2])
txtTitle <- c(hTitle = hTitle, dTitle = dTitle, rTitle = rTitle,
tTitle1 = tTitle1, tTitle2 = tTitle2)
out <- list(sDF = sDF,
sstat = sstat, tstat = tstat, tail = tail,
mean_diff = mean_diff, ci_mean_diff = ci_mean_diff,
norm_xlim = norm_xlim, t_xlim = t_xlim,
annotateDF = annotateDF,
txtTitle = txtTitle)
return(out)
}
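#_________________________________________________________________________________________
# Hand-check sketch (commented out; illustrative values): the pooled two-sample
# t statistic is (x1bar - x2bar) / (sp * sqrt(1/n1 + 1/n2)), with sp^2 the
# pooled variance.
# res <- fn_InputData(pmean1 = 20, pmean2 = 22, psd = 4, n1 = 15, n2 = 15,
#                     p = 0.05, p_tail = 'both')
# with(res$sstat, {
#   sp <- sqrt(((n[1]-1)*ssd[1]^2 + (n[2]-1)*ssd[2]^2) / (n[1]+n[2]-2))
#   (smean[1] - smean[2]) / (sp * sqrt(1/n[1] + 1/n[2]))  # ~ res$tstat['tcal']
# })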
#_________________________________________________________________________________________
# Population density: Density plot
fn_dnorm <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
pmean1 <- sstat$pmean[1]
pmean2 <- sstat$pmean[2]
psd <- sstat$psd[1]
dTitle1 <- bquote( 'Population Mean & SD: ' ~
mu[1] == .(pmean1) ~ ', ' ~
sigma[1] == .(psd) ~ '; ' ~
mu[2] == .(pmean2) ~ ', ' ~
sigma[2] == .(psd) )
dTitle2 <- 'Rugplots represent the random samples drawn from two populations'
g <- ggplot(data = NULL, mapping = aes(norm_xlim))
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean1, sd = psd),
xlim = norm_xlim, fill = '#F8766D', alpha = 0.3)
g <- g + geom_area(stat = 'function', fun = dnorm,
args = list(mean = pmean2, sd = psd),
xlim = norm_xlim, fill = '#00BFC4', alpha = 0.3)
g <- g + geom_rug(data = sDF[(sDF$Group == 'Group 1'),],
mapping = aes(x = xrs),
colour = '#F8766D', sides = 'b')
g <- g + geom_rug(data = sDF[(sDF$Group == 'Group 2'),],
mapping = aes(x = xrs),
colour = '#00BFC4', sides = 'b')
g <- g + geom_vline(xintercept = pmean1, size = 1, linetype = 2, colour = 'darkred')
g <- g + geom_vline(xintercept = pmean2, size = 1, linetype = 2, colour = 'blue')
g <- g + labs(title = dTitle1, subtitle = dTitle2,
x = 'Populations: X (unit)', y = 'Density')
xscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length.out = 21)
xscale <- round(xscale, digits = 1)
g <- g + scale_x_continuous(breaks = xscale, limits = norm_xlim)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: dotplot
fn_dotplot <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
xmean1 <- sstat$smean[1]
xmean2 <- sstat$smean[2]
xsd1 <- sstat$ssd[1]
xsd2 <- sstat$ssd[2]
rTitle <- bquote( 'Sample Mean & SD: ' ~
bar(x[1]) == .(round(xmean1,2)) ~ ', ' ~
s[1] == .(round(xsd1,2)) ~ '; ' ~
bar(x[2]) == .(round(xmean2,2)) ~ ', ' ~
s[2] == .(round(xsd2,2)) )
xmean <- mean(sDF$xrs, na.rm = TRUE)
scale_factor <- (norm_xlim[2] - norm_xlim[1])/100
g <- ggplot(data = sDF, aes(x = xrs, fill = Group))
g <- g + labs(title = rTitle, x = 'Variable (unit)')
g <- g + geom_dotplot(method = 'dotdensity',
binwidth = scale_factor, # dotsize = 0.3,
stackdir = 'center', stackratio = 0.9, alpha = 0.7)
g <- g + scale_y_continuous(NULL, breaks = NULL)
g <- g + geom_rug(mapping = aes(colour = Group))
g <- g + geom_vline(xintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + geom_vline(xintercept = xmean1, size = 1, linetype = 2, colour = 'darkred')
g <- g + geom_vline(xintercept = xmean2, size = 1, linetype = 2, colour = 'blue')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
g <- g + theme(legend.position = 'bottom')
print(g)
}
#_________________________________________________________________________________________
# Sample distribution: boxplot
fn_boxplot <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
xmean1 <- sstat$smean[1]
xmean2 <- sstat$smean[2]
xsd1 <- sstat$ssd[1]
xsd2 <- sstat$ssd[2]
rTitle <- bquote( 'Sample Mean & SD: ' ~
bar(x[1]) == .(round(xmean1,2)) ~ ', ' ~
s[1] == .(round(xsd1,2)) ~ '; ' ~
bar(x[2]) == .(round(xmean2,2)) ~ ', ' ~
s[2] == .(round(xsd2,2)) )
xmean <- mean(sDF$xrs, na.rm = TRUE)
g <- ggplot(data = sDF, aes(x = Group, y = xrs, fill = Group))
g <- g + geom_boxplot(mapping = aes(colour = factor(Group), fill = factor(Group)),
alpha = 0.4, size = 1.0)
g <- g + geom_jitter(mapping=aes(fill = factor(Group)),
width = 0.25, height = 0.001,
shape = 21, size=10, alpha = 0.7)
g <- g + geom_rug(mapping = aes(colour = factor(Group)), sides = 'b')
g <- g + geom_hline(yintercept = xmean, size = 1, linetype = 1, colour = 'purple')
g <- g + geom_hline(yintercept = xmean1, size = 1, linetype = 2, colour = 'darkred')
g <- g + geom_hline(yintercept = xmean2, size = 1, linetype = 2, colour = 'blue')
g <- g + labs(title = rTitle, x = 'Group', y = 'Variable (unit)')
yscale <- seq(from = norm_xlim[1], to = norm_xlim[2], length.out = 21)
yscale <- round(yscale, digits = 1)
g <- g + scale_y_continuous(breaks = yscale, limits = norm_xlim)
g <- g + coord_flip()
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
g <- g + theme(legend.position = 'none')
print(g)
}
#_________________________________________________________________________________________
# Standard t Density: Plot1 with Type 1 error
fn_dt_plot1 <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
  p_tail <- unname(tail['p_tail'])
p <- unname(tstat['p'])
if(p_tail == 'both'){
q_out <- unname(c(tstat['q_out1'], tstat['q_out2']))
} else {
q_out <- unname(tstat['q_out'])
}
tcal <- unname(tstat['tcal'])
df1 <- unname(tstat['df1'])
  hTitle <- switch(p_tail,
                   lower = bquote(H[0] ~ ':' ~ mu[1] == mu[2] ~ '; ' ~ H[A] ~ ':' ~ mu[1] < mu[2]),
                   upper = bquote(H[0] ~ ':' ~ mu[1] == mu[2] ~ '; ' ~ H[A] ~ ':' ~ mu[1] > mu[2]),
                   both = bquote(H[0] ~ ':' ~ mu[1] == mu[2] ~ '; ' ~ H[A] ~ ':' ~ mu[1] != mu[2]))
tTitle1 <- unname(txtTitle['tTitle1'])
g <- ggplot(data = NULL, mapping = aes(t_xlim))
# p-value
if(p_tail == 'lower'){
t_xlim1 <- c(t_xlim[1], q_out)
t_xlim2 <- c(q_out, t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'upper'){
t_xlim1 <- c(t_xlim[1], q_out)
t_xlim2 <- c(q_out, t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out, size = 1, linetype = 2, colour = 'orange')
}
if(p_tail == 'both'){
t_xlim1 <- c(t_xlim[1], q_out[1])
t_xlim2 <- c(q_out[1], q_out[2])
t_xlim3 <- c(q_out[2], t_xlim[2])
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim1, fill = '#ff0000', alpha = 0.5)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim2, fill = '#ffff00', alpha = 0.7)
g <- g + geom_area(stat = 'function', fun = dt,
args = list(df = df1), colour = 'darkred',
xlim = t_xlim3, fill = '#ff0000', alpha = 0.5)
g <- g + geom_vline(xintercept = q_out[1], size = 1, linetype = 2, colour = 'orange')
g <- g + geom_vline(xintercept = q_out[2], size = 1, linetype = 2, colour = 'orange')
}
g <- g + geom_vline(xintercept = tcal, size = 2, linetype = 1, colour = 'red')
g <- g + labs(title = hTitle, subtitle = tTitle1, x = 'Test Statistic: t', y = 'Density')
g <- g + geom_text(data = annotateDF[1,],
aes(x = xpos, y = ypos,
hjust = hjustvar, vjust = vjustvar,
label = annotateText),
colour = c('blue'), size = 4)
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.text.y = element_text(face = 'plain', color = 'blue',
size = 14, angle = 90, vjust = 0.5),
axis.title.x = element_text(size = 16, colour = 'purple'),
axis.title.y = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0))
print(g)
}
#_________________________________________________________________________________________
# Mean & CI
fn_mean_diff <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
tTitle2 <- unname(txtTitle['tTitle2'])
mDF <- data.frame(mean_diff = mean_diff, lower = ci_mean_diff[1], upper = ci_mean_diff[2], y = 0)
g <- ggplot(data = mDF, mapping=aes(x = mean_diff, y = y))
g <- g + geom_errorbarh(aes(xmin = lower, xmax = upper), size = 1.5, colour = '#0000cc')
g <- g + geom_point(size = 20, shape = 15, colour = '#ff9966')
g <- g + labs(title = '', subtitle = tTitle2,
x = 'Mean difference & 95% CI: Mean1 - Mean2 (unit)', y = NULL)
g <- g + scale_y_continuous(expand = c(0,0))
g <- g + geom_vline(xintercept = 0, size = 1.5, linetype = 1, colour = 'orange')
g <- g + theme_bw()
g <- g + theme(axis.text.x = element_text(face = 'plain', color = 'blue',
size = 14, angle = 0),
axis.title.x = element_text(size = 16, colour = 'purple'),
title = element_text(face = 'plain', color = 'blue',
size = 16, angle = 0),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
axis.ticks.length = unit(0, "pt"),
axis.line = element_blank(),
panel.grid.major = element_blank(), panel.grid.minor = element_blank())
print(g)
}
#_________________________________________________________________________________________
# Report preparation
fn_Report <- function(inputData){
list2env(inputData, envir = environment())
rm(inputData)
p_tail <- unname(tail['p_tail'])
txtH <- switch(EXPR = p_tail,
lower = paste0("Hypothesis:
H<sub>0</sub>: μ <sub>1</sub> = μ <sub>2</sub>
H<sub>1</sub>: μ <sub>1</sub> - μ <sub>2</sub> < 0"),
upper = paste0("Hypothesis:
H<sub>0</sub>: μ <sub>1</sub> = μ <sub>2</sub>
H<sub>1</sub>: μ <sub>1</sub> - μ <sub>2</sub> > 0"),
both = ("Hypothesis:
H<sub>0</sub>: μ <sub>1</sub> = μ <sub>2</sub>
H<sub>1</sub>: μ <sub>1</sub> ≠ μ <sub>2</sub>") )
H <- tags$h3(HTML(txtH), style="color:blue")
pval <- paste0('Probability = ', tstat['p'], '; Tail: ', unname(tail))
names(sDF) <- c('Group', 'X')
sDF$SampleID <- 1:nrow(sDF)
sDF <- sDF[, c('SampleID', 'Group', 'X')]
sstat <- sstat[, 1:7]
names(sstat) <- c('Group', 'N', 'Population Mean', 'Population SD', 'Sample Mean', 'Sample SD', 'SE')
tstat <- as.data.frame(t(tstat))
tstat <- tstat[,1:5]
tstat[,5] <- abs(tstat[,5]) # only take absolute Tabulated t
  tstat$tcal <- sprintf('%.4f', tstat$tcal)
  tstat$pcal <- sprintf('%1.4f', tstat$pcal)
names(tstat) <- c('Cal t', 'DF', 'Pr(>|t|)', 'Type 1 Error', 'Tabulated |t|')
txtCI <- paste0('Mean difference & 95% CI: ', mean_diff,
' (', ci_mean_diff[1], ', ', ci_mean_diff[2], ')')
txtCI <- h4(HTML(txtCI), style="color:blue")
  rpt <- list(H = H, sDF = sDF, sstat = sstat, tstat = tstat, txtCI = txtCI)
  return(rpt)
}
#_________________________________________________________________________________________
| /scratch/gouwar.j/cran-all/cranData/ABACUS/inst/app_twosampt/global.R |
# Hypothesis Testing of Means: Two Samples, Unknown Equal Variance
# Shiny server for Two-sample t-test
shinyServer(function(input, output) {
inputVal_eventReactive <- eventReactive(input$cmdUpdate, {
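    # Seed advances by one with each click of 'Update', so every click draws a new yet replicable sample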
iseed <- input$numRN + (as.integer(input$cmdUpdate) - 1)
pmean1 <- input$pmean1
pmean2 <- input$pmean2
psd <- input$psd
n1 <- input$n1
n2 <- input$n2
p <- input$p # user probability
p_tail <- input$p_prob_type
fn_InputData_repeatable <- repeatable(rngfunc = fn_InputData, seed = iseed)
fn_InputData_repeatable(pmean1 = pmean1, pmean2 = pmean2,
psd = psd,
n1 = n1, n2 = n2,
p = p, p_tail = p_tail)
})
inputVal_reactive <- reactive({
pmean1 <- input$pmean1
pmean2 <- input$pmean2
psd <- input$psd
n1 <- input$n1
n2 <- input$n2
p <- input$p # user probability
p_tail <- input$p_prob_type
fn_InputData(pmean1 = pmean1, pmean2 = pmean2,
psd = psd,
n1 = n1, n2 = n2,
p = p, p_tail = p_tail)
})
output$dnorm_plot <- renderPlot({
if(input$chkUpdate){
fn_dnorm(inputVal_reactive())
} else {
fn_dnorm(inputVal_eventReactive())
}
})
output$dotplot <- renderPlot({
if(input$chkUpdate){
fn_dotplot(inputVal_reactive())
} else {
fn_dotplot(inputVal_eventReactive())
}
})
output$boxplot <- renderPlot({
if(input$chkUpdate){
fn_boxplot(inputVal_reactive())
} else {
fn_boxplot(inputVal_eventReactive())
}
})
output$mdiff_plot1 <- renderPlot({
if(input$chkUpdate){
fn_mean_diff(inputVal_reactive())
} else {
fn_mean_diff(inputVal_eventReactive())
}
})
output$dt_plot1 <- renderPlot({
if(input$chkUpdate){
fn_dt_plot1(inputVal_reactive())
} else {
fn_dt_plot1(inputVal_eventReactive())
}
})
output$H <- renderUI({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['H']]
} else {
fn_Report(inputVal_eventReactive())[['H']]
}
})
dt_sample <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sDF']]
} else {
fn_Report(inputVal_eventReactive())[['sDF']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$dt_sample <- renderUI(fluidPage(dt_sample, style="overflow-y:scroll; height: 300px"))
output$dt_sstat <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['sstat']]
} else {
fn_Report(inputVal_eventReactive())[['sstat']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$dt_tstat <- renderTable({
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['tstat']]
} else {
fn_Report(inputVal_eventReactive())[['tstat']]
}
}, striped = TRUE, hover = TRUE, bordered = TRUE, align = 'c')
output$CI <- renderUI(
if(input$chkUpdate){
fn_Report(inputVal_reactive())[['txtCI']]
} else {
fn_Report(inputVal_eventReactive())[['txtCI']]
}
)
})
| /scratch/gouwar.j/cran-all/cranData/ABACUS/inst/app_twosampt/server.R |
# Hypothesis Testing of Means: Two Samples, Unknown Equal Variance
# Shiny ui for Two-sample t-test
# Define UI for the application
shinyUI(pageWithSidebar(
headerPanel(title = div(img(src='abacus.png', align = 'left'),
"Hypothesis Testing: Two Samples, Student's t-Test"),
windowTitle = "Hypothesis Testing: Two Samples, Student's t-Test"),
sidebarPanel(
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Simulation Features")),
tags$hr(style="border-color: purple;"),
checkboxInput(inputId = 'chkUpdate',
label = tags$strong('Check the box to update instantly', style="color:darkblue"),
value = FALSE, width = '100%'),
actionButton(inputId = 'cmdUpdate', label = 'Update'),
tags$hr(style="border-color: green;"),
numericInput(inputId = 'numRN',
label = tags$p('Seed value for generating the random number', style="color:darkblue"),
value = 12345, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Population Parameters")),
tags$hr(style="border-color: purple;"),
numericInput(inputId = 'pmean1',
label = tags$strong('True Population 1 Mean: ', HTML("μ<sub>1"), style="color:darkblue"),
value = 20),
numericInput(inputId = 'pmean2',
label = tags$strong('True Population 2 Mean: ', HTML("μ<sub>2"), style="color:darkblue"),
value = 21),
tags$hr(style="border-color: green;"),
numericInput(inputId = 'psd',
label = tags$strong('True Population Standard Deviation: ', HTML("σ"), style="color:darkblue"),
value = 2, min = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Sample Characteristics")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'n1',
label = tags$strong('Sample 1 (Group 1) Size: ', HTML("n<sub>1"), style="color:darkblue"),
min = 10, max = 50, value = 15, step = 1),
sliderInput(inputId = 'n2',
label = tags$strong('Sample 2 (Group 2) Size: ', HTML("n<sub>2"), style="color:darkblue"),
min = 10, max = 50, value = 20, step = 1),
tags$hr(style="border-color: purple;"),
tags$p(style="color:blue", tags$strong("Distribution Function")),
tags$hr(style="border-color: purple;"),
sliderInput(inputId = 'p',
label = tags$strong('Type 1 Error', style="color:darkblue"),
value = 0.05, min = 0.01, max = 0.10, step = 0.01),
radioButtons(inputId = 'p_prob_type',
label = tags$strong('Probability Tail', style="color:darkblue"),
choices = c('Lower tail (Left tail)' = 'lower',
'Upper tail (Right tail)' = 'upper',
'Both tails (Two-tailed)' = 'both'),
selected = 'both'),
tags$hr(style="border-color: purple;")
), # sidebarpanel
mainPanel(
tabsetPanel(
tabPanel(title = 'Population',
plotOutput(outputId = 'dnorm_plot', height = '1000px')),
tabPanel(title = 'Sample',
plotOutput(outputId = 'dotplot', height = '500px'),
plotOutput(outputId = 'boxplot', height = '500px')),
tabPanel(title = 'Test Statistic',
plotOutput(outputId = 'mdiff_plot1', height = '250px'),
plotOutput(outputId = 'dt_plot1', height = '750px')),
tabPanel(title = 'Summary',
h1("Hypothesis"),
uiOutput('H', height = '20px'),
tags$hr(style="border-color: purple;"),
h2("Sample"),
tableOutput(outputId = 'dt_sample'),
tags$hr(style="border-color: purple;"),
tags$hr(style="border-color: purple;"),
h2("Summary Statistics"),
tableOutput(outputId = 'dt_sstat'),
tags$hr(style="border-color: purple;"),
h2("Test Statistic"),
tableOutput(outputId = 'dt_tstat'),
tags$hr(style="border-color: purple;"),
h2("Confidence Interval"),
uiOutput('CI', height = '50px'))
)
)
))
| /scratch/gouwar.j/cran-all/cranData/ABACUS/inst/app_twosampt/ui.R |
---
title: "Using ABACUS"
subtitle: "ABACUS: Apps Based Activities for Communicating and Understanding Statistics"
author: "Mintu Nath"
date: "`r Sys.Date()`"
vignette: >
%\VignetteIndexEntry{ABACUS Introduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
output:
knitr:::html_vignette:
toc: yes
---
----------------------------------
# Main Features
- Narrative of statistical principles and theories in a simple, user-friendly and interactive interface
- Extensive use of simulation to capture statistical theories when the TRUTH is known
- Graphical representation of statistical concepts intertwined with the statistical outputs
- Captures frequentist view of the inferential framework
----------------------------------
<br>
# A Quick Start Guide to ABACUS
There are TWO options to explore ABACUS. A brief outline is given here. See the INPUTS and OUTPUTS sections for further details.
## Option 1: Replicable simulation given a seed value
- Change the input values in the left panel
- Click the **Update** button
- Explore the outcomes in different tabs
If you are using ABACUS for the first time, consider the default input values and just click the **Update** button. Explore the outcomes in different tabs.
## Option 2: Non-replicable instant simulation
- Select the option: **Check the box to update instantly**
- Change the input values by editing the input box or moving the points in the slider bar
- The app will react to these changes
- Explore the outcomes in different tabs
<br>
# INPUTS
Here is a quick guideline on how to use the apps effectively. Different apps may need different types of inputs and display different outputs, but the general framework remains the same.
The options to provide INPUT values are on the left panel of the apps. All inputs are set at appropriate default values, but one can change the input values if needed.
The required INPUT values will depend on the specific app. These may include:
- **Population Parameters**
- **Sample Characteristics**
- **Distribution Function**
- **Simulation Features**
To use the app at first instance, accept the default INPUT values and click the **Update** button. The app will produce the outcomes in different tabs. Explore the outcomes to understand the apps in the first instance.
To alter the input values, provide the inputs for the **Population Parameters** (for example, $\mu$ and $\sigma$) in the text box and alter the other input values by moving the slider bar within the given range. Depending on the **Simulation Feature** option, the outputs in different tabs will change with altered inputs.
To fine-tune the INPUT values in the slider to the required value, you may select and drag the point nearer the desired value and then press left or right arrow keys to fine-tune.
<br>
## Simulation Features
There are two options to run repeated random sampling:
<br>
### Option 1: Replicable simulation given a seed value
When you press the **Update** button the first time, it will draw a random sample based on the INPUT values. The random sample will be drawn given the **seed value** included in the text box for seed value (default is given as 12345).
In the next step, you may wish to alter the inputs again OR keep the inputs unaltered and press the **Update** button again. This will conduct a second random sampling conditional on the given INPUT values.
For example, reload the app for Normal distribution. Keep all the INPUT values at its default values including the seed value as 12345. Click the **Update** button THREE times. Note the Sample Mean and SD displayed in the plot on the right (see the app). Three successive values displayed should be as follows:
- On the first click of 'Update': $\bar{x} = 20.07$, $s = 4.55$
- On the second click of 'Update': $\bar{x} = 18.13$, $s = 3.88$
- On the third click of 'Update': $\bar{x} = 19.43$, $s = 4.09$
Since we generated the sample from a pre-specified seed value (here 12345, but you can change it to any value), these outcomes are replicable. That means if you reload the app and do it again, you will get the identical three values of $\bar{x}$ and $s$ of the sample in successive three clicks. In other words, if you and your friend run the app together with the identical seed value and other input values, both will get identical estimates.
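As a minimal sketch of the same idea outside the app (plain `rnorm()` calls rather than the app's own sampling routine, so the numbers differ from those quoted above):

```r
# Re-using the same seed reproduces the same random sample exactly
set.seed(12345)
x1 <- rnorm(n = 30, mean = 20, sd = 4)
set.seed(12345)
x2 <- rnorm(n = 30, mean = 20, sd = 4)
identical(x1, x2) # TRUE
```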
<br>
### Option 2: Non-replicable instant simulation
If you wish to explore many scenarios and do not necessarily wish to replicate your outputs, then select the option: **Check the box to update instantly**. The important difference here is that you cannot obtain multiple outcomes for the same input values as in Option 1. You have to change the input values (at least a minor change) to trigger the app to sample data again. In other words, the app in this option only 'reacts' when you change the INPUT values. Also, note that the **Update** button _will not respond_ in this option and the random sampling will not be conducted using the seed value (as in Option 1). In Option 2, the seed value is instantly set based on the current time and process id, therefore, outputs are not replicable.
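The following sketch illustrates how such an instant seed can be formed; it shows the general idea only and is not necessarily the app's exact code:

```r
# A seed built from the current time and process id differs on every run,
# so samples generated this way are not replicable
iseed <- as.integer(Sys.time()) + Sys.getpid()
set.seed(iseed)
rnorm(n = 5, mean = 20, sd = 4)
```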
<br>
## App-specific Inputs
### Normal Distribution
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population Mean: $\mu$
- True Population Standard Deviation: $\sigma$
- X-axis scale for the center and scale effect
- Sample Characteristics
- Sample: Number of observations
- Number of bins
- Plot type: Frequency Distribution; Overlay Normal Density
- Distribution Function
- Cumulative probability
- Probability Tail
### Sampling Distribution
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population Mean: $\mu$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample size
### Hypothesis Testing: One-Sample Z-Test
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population Mean: $\mu$
- Hypothesised Population Mean: $\mu_0$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample size
- Distribution Function
- Type 1 Error
- Probability Tail: Lower tail; Upper tail; Both tails
### Hypothesis Testing: One-Sample Student's t-Test
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population Mean: $\mu$
- Hypothesised Population Mean: $\mu_0$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample size
- Distribution Function
- Type 1 Error
- Probability Tail: Lower tail; Upper tail; Both tails
### Hypothesis Testing: Two-Sample Independent (Unpaired) t-Test
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population 1 Mean: $\mu_1$
- True Population 2 Mean: $\mu_2$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample 1 (Group 1) Size: $n_1$
- Sample 2 (Group 2) Size: $n_2$
- Distribution Function
- Type 1 Error
- Probability Tail: Lower (Left) tail; Upper (Right) tail; Both (Two) tails
### Hypothesis Testing: One-way Analysis of Variance
- Population Parameters
- True Population 1 Mean: $\mu_1$
- True Population 2 Mean: $\mu_2$
- True Population 3 Mean: $\mu_3$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample 1 (Group 1) Size: $n_1$
- Sample 2 (Group 2) Size: $n_2$
- Sample 3 (Group 3) Size: $n_3$
- Distribution Function
- Type 1 Error
- Probability Tail: Lower (Left) tail; Upper (Right) tail; Both (Two) tails
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
<br>
<br>
# OUTPUTS
The outcomes are presented in several tabs depending on the app.
## App-specific Outputs
### Normal Distribution
- Sample
- Distribution
- Probability & Quantile
### Sampling Distribution
- Population & Sample
- Sample
- Sample Estimators
- Confidence Interval
- Summary
### Hypothesis Testing: One-Sample Z-Test
- Population
- Sample
- Test Statistic
- Summary
### Hypothesis Testing: One-Sample Student's t-Test
- Population
- Sample
- Test Statistic
- Summary
### Hypothesis Testing: Two-Sample Independent (Unpaired) t-Test
- Population
- Sample
- Test Statistic
- Summary
### Hypothesis Testing: One-way Analysis of Variance
- Population
- Sample
- SS & MS (Sum of Squares & Mean Squares)
- Test Statistic
- Summary
<br>
<br>
| /scratch/gouwar.j/cran-all/cranData/ABACUS/inst/doc/ABACUS.Rmd |
---
title: "ABACUS Activities"
subtitle: "ABACUS: Apps Based Activities for Communicating and Understanding Statistics"
author: "Mintu Nath"
date: "`r Sys.Date()`"
vignette: >
%\VignetteIndexEntry{ABACUS Activities}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
output:
knitr:::html_vignette:
toc: yes
toc_depth: 2
---
----------------------------------
# ABACUS
----------------------------------
The basic premise of ABACUS is to explore, understand and assess the concepts and theories of simple statistical techniques and tools using simulation and graphics when the TRUTH is known. It creates an environment for communicating statistical concepts to a wider audience without complex mathematical derivation or elaborate programming. ABACUS conveys these concepts in two ways: first, it harnesses statistical simulation to generate data under wide-ranging sampling scenarios; second, it uses graphical interfaces to visualise the statistical concepts.
In the following sections, a list of Activities is suggested. Teachers can implement and integrate these activities with the current lectures and practical classes. Please send your suggestions and ideas for further improvement of these activities.
<br>
<br>
----------------------------------
# Normal Distribution
----------------------------------
<br>
## Intended Learning Outcomes
- Explain the probability density function and cumulative distribution function of Normal distribution and Standard normal distribution.
- Understand and create a histogram using different bin size and identify the shape of the Normal distribution.
- Demonstrate the concept and effect of centring and scaling of a variable.
- Describe properties of Normal distribution and Standard normal distribution.
- Describe the concepts of cumulative probability, probability tail and quantile and the relationship between these terms.
- Recognise the concept of statistical simulation and the importance of seed value in a computer simulation.
- Explain and generalise the statistical concepts and implement in your area of research.
<br>
## Activity
For each of the following activity:
- Describe the problem
- Identify the assumptions (if any)
- Outline the successive steps of calculation supported by appropriate formulae
- Conduct the apps-based experiment
- Summarise the outputs
- Interpret the results
- Draw valid conclusions
- Generalise the problem in your research area
<br>
### Activity 1
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Sample" and note the shape of the sample data.
- Increase the sample size to >1000; describe the change of shape of the distribution.
- Increase the sample size further and describe the shape.
- Change the number of bins to >300. Explain the shape.
- Overlay the data with the Normal density function by selecting the appropriate option.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Distribution".
- Increase or decrease the value of $\mu$; this is analogous to centring effect.
- Increase or decrease the value of $\sigma$; this is analogous to scale effect.
- Explain the Probability Density function due to the centring and scaling of the data.
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Change the point on the slider of 'Cumulative probability'.
- Select different options for the probability tails.
- Describe the concepts of cumulative probability, probability tail and quantile and the relationship between these terms.
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Using the plots for probability density function and cumulative density function as well as the input values, prove that for a Normal distribution: Mean = Median = Mode.
- Explain that Normal distribution is unimodal and symmetric around the point $x = \mu$.
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Given that the variable follows a Normal distribution with the population parameters as $\mu = 20$ and $\sigma = 4$, find out the following:
- The minimum value of top 5%, 10%, 20%, 50% of the population
- The maximum value of bottom 5%, 10%, 20%, 50% of the population
- The minimum value of top 97.5% of the population
- The maximum value of the bottom 2.5% of the population
Note: To fine-tune the point on the slider, move the slider nearer the value and then use left or right keys to fine-tune it.
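If you wish to check your answers outside the app, `qnorm()` returns these quantiles directly (a sketch, not part of the app):

```r
qnorm(p = 0.05, mean = 20, sd = 4, lower.tail = FALSE) # minimum value of the top 5%
qnorm(p = 0.05, mean = 20, sd = 4, lower.tail = TRUE)  # maximum value of the bottom 5%
qnorm(p = 0.025, mean = 20, sd = 4)                    # maximum value of the bottom 2.5%
```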
<br>
### Activity 6
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Given that the variable follows a Normal distribution with the population parameters as $\mu = 20$ and $\sigma = 4$, find out the following:
- What proportion of the population is greater than 26.58?
- What proportion of the population is less than 26.58?
- What proportion of the population is greater than 13.42?
- What proportion of the population is less than 13.42?
- What proportion of the population is between 12.16 and 27.84?
- Explain the area under the curve.
- What is the total probability under the Normal distribution curve and over the x-axis?
Note: To fine-tune the point on the slider, move the slider nearer the value and then use left or right keys to fine-tune it.
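You can verify these proportions outside the app with `pnorm()` (a sketch, not part of the app):

```r
pnorm(q = 26.58, mean = 20, sd = 4, lower.tail = FALSE) # proportion greater than 26.58
pnorm(q = 13.42, mean = 20, sd = 4)                     # proportion less than 13.42
pnorm(q = 27.84, mean = 20, sd = 4) -
  pnorm(q = 12.16, mean = 20, sd = 4)                   # proportion between 12.16 and 27.84
```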
<br>
### Activity 7
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Create the probability density function and cumulative density function of the Standard Normal distribution.
- Given that the variable ($z$) follows a Standard Normal distribution, find out the following:
- The value of $z$ with the lower-tail cumulative probability of 0.05
- The value of $z$ with the upper-tail cumulative probability of 0.05
- The value of $z$ with the lower-tail cumulative probability of 0.95
- The value of $z$ with the upper-tail cumulative probability of 0.95
- The value of $z$ with the upper-tail cumulative probability of 0.50
- The value of $z$ with the two-tailed cumulative probability of 0.05
- The value of $z$ with the two-tailed cumulative probability of 0.01
Note: To fine-tune the point on the slider, move the slider nearer the value and then use left or right keys to fine-tune it.
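For the Standard Normal distribution, the same quantiles follow from `qnorm()` with its defaults `mean = 0, sd = 1` (a sketch, not part of the app):

```r
qnorm(p = 0.05, lower.tail = TRUE)    # z with lower-tail cumulative probability 0.05 (about -1.645)
qnorm(p = 0.05, lower.tail = FALSE)   # z with upper-tail cumulative probability 0.05 (about 1.645)
qnorm(p = 0.05/2, lower.tail = FALSE) # |z| for a two-tailed probability of 0.05 (about 1.96)
```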
<br>
### Activity 8
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area that follows Normal distribution.
- Enter the true population mean and true population standard deviation for the variable (based on literature or prior available data).
- Explore sample characteristics and distribution functions; explain the outcomes and associated plots.
- Go to the 'Sample' tab.
- Press 'Update' button multiple times and explore the random sampling scenarios.
- Explain the concept of statistical simulation and the importance of seed value.
<br>
<br>
----------------------------------
# Sampling Distributions
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the concept of random sampling and sampling units.
- Recognise the concept of parameters and estimates.
- Demonstrate and generalise the distribution of sample means.
- Summarise the expected value of the sample mean and sample standard deviation.
- Interpret the standard error of a sample mean and its importance in the context of random sampling.
- Construct the confidence interval and appraise the concept relevant to sampling.
- Describe the concepts of cumulative probability, probability tail and quantile and the relationship between them.
- Explain and generalise the statistical theory underlying sampling distribution and implement in own area of research.
<br>
## Activity
For each of the following activity:
- Describe the problem
- Identify the assumptions (if any)
- Outline the successive steps of calculation supported by appropriate formulae
- Conduct the apps-based experiment
- Summarise the outputs
- Interpret the results
- Draw valid conclusions
- Generalise the problem in your research area
<br>
### Activity 1
- Load the app
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Sample". Note the spread of the sampled data as well as the mean and sd of each sample.
- Increase the sample size to >200. Describe how the spread of the sampled data is changing.
- Select the tab "Sample Estimator". Note the mean of sample means, mean of sample standard deviations (SD).
- Explain the meaning of the expected value of sample means and sample standard deviations (SD).
- Note the standard deviation of sample means. What does it signify?
- What is the standard error of the sample mean?
- What is the distribution of the sample mean?
- Select the tab "Confidence Interval"; explain the concept of a 95% confidence interval.
- Explain the concept of sampling distribution under the Frequentist inferential framework.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Investigate the effect of altering the following input values on the standard error of mean and 95% confidence interval of mean:
- Increase and decrease $\mu$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Explore the 'central limit theorem' and explain how the theorem generalises to the theory of sampling distribution.
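A short simulation sketch of the theorem in action (assumed values $\mu = 20$ and $\sigma = 4$; not part of the app):

```r
# The SD of sample means (the standard error) shrinks as the sample size grows
set.seed(1)
means_n10 <- replicate(1000, mean(rnorm(n = 10, mean = 20, sd = 4)))
means_n40 <- replicate(1000, mean(rnorm(n = 40, mean = 20, sd = 4)))
sd(means_n10) # close to 4 / sqrt(10), i.e. about 1.26
sd(means_n40) # close to 4 / sqrt(40), i.e. about 0.63
```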
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area that follows the Normal distribution.
- Enter the true population mean and true population standard deviation for the variable (based on literature or prior available data).
- Explore sample characteristics and distribution functions; explain the outcomes and associated plots in the context of the sampling distribution.
- Illustrate your results and identify how different sample characteristics may affect your experiment.
- Explain the concept of statistical simulation and the importance of seed value.
<br>
<br>
----------------------------------
# Hypothesis Testing: One-Sample Z-Test
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the steps of null hypotheses significance testing of the mean for one sample when the population variance is known.
- Summarise the data and identify the inputs for conducting the hypothesis testing.
- Conduct hypothesis testing, evaluate the outcomes, interpret the results and draw valid conclusions.
- Explain and generalise the statistical concepts and implement in own area of research.
<br>
## Activity
Consider the following statements along with specific instructions given for each activity:
- Describe the steps of null hypotheses significance testing and associated assumptions
- State null and alternative hypotheses.
- Record the statistical significance level (Type 1 error) and probability tails
- Identify the appropriate test statistic.
- Descriptive statistics of the observed sample data.
- Explain the test statistic, p-value, quantile under type 1 error, 95% confidence interval.
- Present the summary of the data, interpret the results and draw appropriate conclusions.
- Assess that the conclusion conforms with test statistic, p-value, quantile under type 1 error and estimated 95% confidence interval.
<br>
### Activity 1
- Accept all default values; click the 'Update' button ONCE.
- Note the z-statistic, p-value, difference, 95% CI of difference.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values AND select 'lower-tail probability'
- Click the 'Update' button ONCE.
- Note the z-statistic, p-value, difference, 95% CI of difference.
- Which values got changed compared to Activity 1 and why?
- What are the interpretations? Do you expect these results?
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values; click the 'Update' button FIVE times.
- Note that the first update should produce the identical output as Activity 1.
- Note the z-statistic and p-value on each simulation (each click of Update button).
- Do you have different interpretations at each simulation stage? Can you explain the results?
- Do you have any instance when you cannot reject the null hypotheses? What could be the reasons?
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select "Check the box to update instantly".
- Investigate the effect of the following:
- Increase and decrease $\mu_0$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Increase and decrease type 1 error
- Change the probability tail to lower/upper or both
- Explain and interpret the outcomes for each scenario.
- Do you have any instance when the outcomes are different from what you expected given the TRUTH is known?
- What could be the reasons?
- While explaining, keep in mind that ABACUS is simulating the data at each instance aligning with the Frequentist inferential framework. When in doubt, deselect 'check the box to update instantly' and click the 'Update' button multiple times and explore each outcome.
- Also, note that the statistical power of a test depends on other essential components. Can you explain these components based on your observations with different scenarios above?
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area.
- Enter the true population mean and true population standard deviation for the variable (based on literature or prior available data).
- Explore the sampling scenarios and explain the outcomes due to changes of mu0, sigma, sample size, type 1 error and probability tail.
<br>
<br>
----------------------------------
# Hypothesis Testing: One-Sample Student's t-Test
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the steps of null hypotheses significance testing.
- Test the null hypothesis that the sample data are from a population with a hypothesised mean when the population variance is known.
- Summarise the data and identify the inputs for conducting the hypothesis testing and associated assumptions.
- Conduct hypothesis testing, evaluate the outcomes, interpret the results and draw valid conclusions.
- Explain and generalise the statistical concepts and implement in own area of research.
<br>
## Activity
Consider the following statements along with specific instruction given for each activity:
- Describe the steps of null hypotheses significance testing
- State null and alternative hypotheses
- Record the statistical significance level (Type 1 error) and probability tails
- Identify the appropriate test statistic
- Descriptive statistics of the observed sample data
- Explain the test statistic, p-value, quantile under type 1 error, 95% confidence interval
- Present the summary of the data, interpret the results and draw appropriate conclusions
- Assess that the conclusion conforms with test statistic, p-value, quantile under type 1 error, 95% confidence interval
<br>
### Activity 1
- Accept all default values; click the 'Update' button ONCE.
- Note the t-statistic, p-value, difference and 95% CI of difference.
- Do you expect these results given the TRUTH is known? Explain.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values and ENTER: $\sigma = 3$, sample size = 50
- Click the 'Update' button ONCE
- Note the t-statistic, p-value, difference and 95% CI of difference
- What are the interpretations? Do you expect these results?
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values; click the 'Update' button FIVE times.
- Note that the first update should produce the identical output as Activity 1.
- Note the t-statistic and p-value on each simulation (each click of Update button)
- Do you have different interpretations at each simulation stage? Can you explain the results?
- Do you have any instance when you cannot reject the null hypotheses? What could be the reasons?
- How do these outcomes compare with those from the similar activity that you conducted for 'Hypothesis Testing: One Sample, Known Variance'?
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Check the box to update instantly.
- Investigate the following scenarios and explain each instance:
- Increase and decrease $\mu_0$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Increase and decrease type 1 error
- Change the probability tail to lower/upper or both
- Explain and interpret the outcomes for each scenario.
- Do you have any instance when the outcomes are different from what you expected given the TRUTH is known?
- What could be the reasons?
- While explaining, keep in mind that ABACUS is simulating the data at each instance aligning with the Frequentist inferential framework. When in doubt, deselect 'check the box to update instantly' and click the 'Update' button multiple times and explore each outcome.
- Also, note that the statistical power of a test depends on other essential components. Can you explain these components based on your observations with different scenarios above?
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area.
- Enter the true population mean and true population standard deviation for the variable (based on literature or prior available data).
- Explore the sampling scenarios and explain the outcomes due to changes of mu0, sigma, sample size, type 1 error and probability tail.
<br>
<br>
----------------------------------
# Hypothesis Testing: Two-Sample Independent (Unpaired) t-Test
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the steps of null hypotheses significance testing.
- Test the null hypothesis that two independent groups of observations are sampled from the same population.
- Summarise the data and identify the inputs for conducting the hypothesis testing and associated assumptions.
- Conduct hypothesis testing, evaluate the outcomes, interpret the results and draw valid conclusions.
- Explain and generalise the statistical concepts and implement in own area of research.
<br>
## Activity
Consider the following statements along with specific instruction given for each activity:
- Describe the steps of null hypotheses significance testing
- State null and alternative hypotheses
- Record the statistical significance level (Type 1 error) and probability tails
- Identify the appropriate test statistic
- Descriptive statistics of the observed sample data
- Explain the test statistic, p-value, quantile under type 1 error, 95% confidence interval
- Present the summary of the data, interpret the results and draw appropriate conclusions
- Evaluate the assumptions of the test
- Assess that the conclusion conforms with test statistic, p-value, quantile under type 1 error and 95% confidence interval
<br>
### Activity 1
- Accept all default values; click the 'Update' button ONCE.
- Note the t-statistic, p-value, mean difference, 95% CI of the mean difference.
- Do you expect these results given the TRUTH is known? Explain.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values AND enter sample size for Group 2 = 15.
- Click the 'Update' button ONCE.
- Note the t-statistic, p-value, difference and 95% CI of difference.
- What are the interpretations? Do you expect these results?
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values; click the 'Update' button FIVE times.
- Note that the first update should produce the identical output as Activity 1.
- Note the t-statistic and p-value on each simulation (each click of Update button).
- Do you have different interpretations at each simulation stage? Can you explain the results?
- Do you have any instance when you cannot reject the null hypotheses? What could be the reasons?
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Check the box to update instantly.
- Investigate the following scenarios and explain each instance:
- Increase and decrease $\mu_1$ or $\mu_2$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Increase and decrease type 1 error
- Change the probability tail to lower/upper or both
- Explain and interpret the outcomes for each scenario.
- Do you have any instance when the outcomes are different from what you expect given the TRUTH is known?
- What could be the reasons?
- While explaining, keep in mind that ABACUS is simulating the data at each instance aligning with the Frequentist inferential framework. When in doubt, deselect 'check the box to update instantly' and click the 'Update' button multiple times and explore each outcome.
- Also, note that the statistical power of a test depends on other essential components. Can you explain these components based on your observations with different scenarios above?
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area.
- Enter the true population means (equal as well as unequal) and true population standard deviation for the variable (based on literature or prior available data).
- Explore the sampling scenarios and explain the outcomes due to changes of $\mu_0$, $\sigma$, sample size, type 1 error and probability tail.
<br>
<br>
----------------------------------
# Hypothesis Testing: One-way Analysis of Variance
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the steps of null hypotheses significance testing.
- Test the null hypothesis that three independent groups of observations are sampled from the same population.
- Summarise the data and identify the inputs for conducting the hypothesis testing and associated assumptions.
- Conduct hypothesis testing, evaluate the outcomes, interpret the results and draw valid conclusions.
- Explain and generalise the statistical concepts and implement in own area of research.
<br>
## Activity
Consider the following statements along with specific instruction given for each activity:
- Describe the steps of null hypotheses significance testing.
- State null and alternative hypotheses.
- Record the statistical significance level (Type 1 error) and probability tails
- Identify the appropriate test statistic.
- Present the descriptive statistics of the observed sample data.
- Explain the test statistic, p-value, quantile under type 1 error and 95% confidence interval.
- Present the summary of the data, interpret the results and draw appropriate conclusions.
- Evaluate the assumptions of the test.
- Explain the changes in between and within sum of squares and mean squares due to changes in inputs.
- Assess that the conclusion conforms with test statistic, p-value, quantile under type 1 error and 95% confidence interval.
<br>
### Activity 1
- Accept all default values; click the 'Update' button ONCE.
- Note the sum of squares, mean squares, F-statistic, p-value, quantile, mean difference and 95% CI of difference.
- Do you expect these results given the TRUTH is known? Explain.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values AND enter sample size for all groups = 10.
- Click the 'Update' button ONCE.
- Note the sum of squares, mean squares, F-statistic, p-value, quantile, mean difference and 95% CI of difference.
- What are the interpretations? Do you expect these results?
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values; click the 'Update' button FIVE times.
- Note that the first update should produce the identical output as Activity 1.
- Note the sum of squares, mean squares, F-statistic, p-value, quantile, mean difference and 95% CI of difference for each click of the 'Update' button.
- Do you have different interpretations at each simulation stage? Can you explain the results?
- Do you have any instance when you cannot reject the null hypotheses? What could be the reasons?
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser)
- Check the box to update instantly
- Investigate the following scenarios and explain each instance:
- Increase and decrease $\mu_1$ / $\mu_2$ / $\mu_3$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Increase and decrease type 1 error
- Change the probability tail to lower/upper or both
- Explain and interpret the outcomes for each scenario
- Do you have any instance when the outcomes are different from what you expect given the TRUTH is known?
- What could be the reasons?
- While explaining, keep in mind that ABACUS is simulating the data at each instance aligning with the Frequentist inferential framework. When in doubt, deselect 'check the box to update instantly' and click the 'Update' button multiple times and explore each outcome.
- Also, note that the statistical power of a test depends on other essential components. Can you explain these components based on your observations with different scenarios above?
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area.
- Enter the true population means (equal as well as unequal) and true population standard deviation for the variable (based on literature or prior available data).
- Explore the sampling scenarios and explain the outcomes due to changes of $\mu$, $\sigma$, sample size, type 1 error and probability tail.
<br>
<br>
| /scratch/gouwar.j/cran-all/cranData/ABACUS/inst/doc/ABACUS_activities.Rmd |
---
title: "Using ABACUS"
subtitle: "ABACUS: Apps Based Activities for Communicating and Understanding Statistics"
author: "Mintu Nath"
date: "`r Sys.Date()`"
vignette: >
%\VignetteIndexEntry{ABACUS Introduction}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
output:
knitr:::html_vignette:
toc: yes
---
----------------------------------
# Main Features
- Narrative of statistical principles and theories in a simple, user-friendly and interactive interface
- Extensive use of simulation to capture statistical theories when the TRUTH is known
- Graphical representation of statistical concepts intertwined with the statistical outputs
- Captures frequentist view of the inferential framework
----------------------------------
<br>
# A Quick Start Guide to ABACUS
There are TWO options to explore ABACUS. A brief outline is given here. See the INPUTS and OUTPUTS sections for further details.
## Option 1: Replicable simulation given a seed value
- Change the input values in the left panel
- Click the **Update** button
- Explore the outcomes in different tabs
If you are using ABACUS for the first time, consider the default input values and just click the **Update** button. Explore the outcomes in different tabs.
## Option 2: Non-replicable instant simulation
- Select the option: **Check the box to update instantly**
- Change the input values by editing the input box or moving the points in the slider bar
- The app will react to these changes
- Explore the outcomes in different tabs
<br>
# INPUTS
Here is a quick guideline on how to use the apps effectively. Different apps may need different types of inputs and display different outputs, but the general framework remains the same.
The options to provide INPUT values are on the left panel of the apps. All inputs are set at appropriate default values, but one can change the input values if needed.
The required INPUT values will depend on the specific app. These may include:
- **Population Parameters**
- **Sample Characteristics**
- **Distribution Function**
- **Simulation Features**
To use the app at first instance, accept the default INPUT values and click the **Update** button. The app will produce the outcomes in different tabs. Explore the outcomes to understand the apps in the first instance.
To alter the input values, provide the inputs for the **Population Parameters** (for example, $\mu$ and $\sigma$) in the text box and alter the other input values by moving the slider bar within the given range. Depending on the **Simulation Feature** option, the outputs in different tabs will change with altered inputs.
To fine-tune the INPUT values in the slider to the required value, you may select and drag the point nearer the desired value and then press left or right arrow keys to fine-tune.
<br>
## Simulation Features
There are two options to run repeated random sampling:
<br>
### Option 1: Replicable simulation given a seed value
When you press the **Update** button the first time, it will draw a random sample based on the INPUT values. The random sample will be drawn given the **seed value** included in the text box for seed value (default is given as 12345).
In the next step, you may wish to alter the inputs again OR keep the inputs unaltered and press the **Update** button again. This will conduct a second random sampling conditional on the given INPUT values.
For example, reload the app for Normal distribution. Keep all the INPUT values at its default values including the seed value as 12345. Click the **Update** button THREE times. Note the Sample Mean and SD displayed in the plot on the right (see the app). Three successive values displayed should be as follows:
- On the first click of 'Update': $\bar{x} = 20.07$, $s = 4.55$
- On the second click of 'Update': $\bar{x} = 18.13$, $s = 3.88$
- on the third click of 'Update': $\bar{x} = 19.43$, $s = 4.09$
Since we generated the sample from a pre-specified seed value (here 12345, but you can change it to any value), these outcomes are replicable. That means if you reload the app and do it again, you will get the identical three values of $\bar{x}$ and $s$ of the sample in successive three clicks. In other words, if you and your friend run the app together with the identical seed value and other input values, both will get identical estimates.
<br>
### Option 2: Non-replicable instant simulation
If you wish to explore many scenarios and do not need to replicate your outputs, select the option: **Check the box to update instantly**. The important difference here is that you cannot obtain multiple outcomes for the same input values as in Option 1; you have to change the input values (at least slightly) to trigger the app to sample data again. In other words, in this option the app only 'reacts' when you change the INPUT values. Also, note that the **Update** button _will not respond_ in this option, and the random sampling will not use the seed value (as in Option 1). In Option 2, the seed value is set from the current time and process id; therefore, outputs are not replicable.
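A common R idiom for deriving such a non-replicable seed is sketched below; the app's exact scheme may differ:

```r
# Derive a seed from the current time and the process id (illustrative only)
seed <- as.integer(Sys.time()) %% 100000L + Sys.getpid()
set.seed(seed)
rnorm(5)   # different on every run, so the results are not replicable
```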
<br>
## App-specific Inputs
### Normal Distribution
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population Mean: $\mu$
- True Population Standard Deviation: $\sigma$
- X-axis scale for the center and scale effect
- Sample Characteristics
- Sample: Number of observations
- Number of bins
- Plot type: Frequency Distribution; Overlay Normal Density
- Distribution Function
- Cumulative probability
- Probability Tail
### Sampling Distribution
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population Mean: $\mu$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample size
### Hypothesis Testing: One-Sample Z-Test
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population Mean: $\mu$
- Hypothesised Population Mean: $\mu_0$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample size
- Distribution Function
- Type 1 Error
- Probability Tail: Lower tail; Upper tail; Both tails
### Hypothesis Testing: One-Sample Student's t-Test
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population Mean: $\mu$
- Hypothesised Population Mean: $\mu_0$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample size
- Distribution Function
- Type 1 Error
- Probability Tail: Lower tail; Upper tail; Both tails
### Hypothesis Testing: Two-Sample Independent (Unpaired) t-Test
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
- Population Parameters
- True Population 1 Mean: $\mu_1$
- True Population 2 Mean: $\mu_2$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample 1 (Group 1) Size: $n_1$
- Sample 2 (Group 2) Size: $n_2$
- Distribution Function
- Type 1 Error
- Probability Tail: Lower (Left) tail; Upper (Right) tail; Both (Two) tails
### Hypothesis Testing: One-way Analysis of Variance
- Population Parameters
- True Population 1 Mean: $\mu_1$
- True Population 2 Mean: $\mu_2$
- True Population 3 Mean: $\mu_3$
- True Population Standard Deviation: $\sigma$
- Sample Characteristics
- Sample 1 (Group 1) Size: $n_1$
- Sample 2 (Group 2) Size: $n_2$
- Sample 3 (Group 3) Size: $n_3$
- Distribution Function
- Type 1 Error
- Probability Tail: Lower (Left) tail; Upper (Right) tail; Both (Two) tails
- Simulation Features
- Check the box to update instantly
- Update
- Seed value for generating the random number
<br>
<br>
# OUTPUTS
The outcomes are presented in several tabs depending on the app.
## App-specific Outputs
### Normal Distribution
- Sample
- Distribution
- Probability & Quantile
### Sampling Distribution
- Population & Sample
- Sample
- Sample Estimators
- Confidence Interval
- Summary
### Hypothesis Testing: One-Sample Z-Test
- Population
- Sample
- Test Statistic
- Summary
### Hypothesis Testing: One-Sample Student's t-Test
- Population
- Sample
- Test Statistic
- Summary
### Hypothesis Testing: Two-Sample Independent (Unpaired) t-Test
- Population
- Sample
- Test Statistic
- Summary
### Hypothesis Testing: One-way Analysis of Variance
- Population
- Sample
- SS & MS (Sum of Squares & Mean Squares)
- Test Statistic
- Summary
<br>
<br>
---
title: "ABACUS Activities"
subtitle: "ABACUS: Apps Based Activities for Communicating and Understanding Statistics"
author: "Mintu Nath"
date: "`r Sys.Date()`"
vignette: >
%\VignetteIndexEntry{ABACUS Activities}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
output:
knitr:::html_vignette:
toc: yes
toc_depth: 2
---
----------------------------------
# ABACUS
----------------------------------
The basic premise of ABACUS is to explore, understand and assess the concepts and theories of simple statistical techniques and tools using simulation and graphics when the TRUTH is known. It creates an environment for communicating and understanding statistical concepts to a wider audience without complex mathematical derivation or elaborate programming. ABACUS uses two techniques to convey these concepts: first, it harnesses the power of statistical simulation to generate data under wide-ranging sampling scenarios; second, it uses graphical interfaces to visualise the statistical concepts.
In the following sections, a list of Activities is suggested. Teachers can implement and integrate these activities with their current lectures and practical classes. Please send your suggestions and ideas for further improvement of these activities.
<br>
<br>
----------------------------------
# Normal Distribution
----------------------------------
<br>
## Intended Learning Outcomes
- Explain the probability density function and cumulative distribution function of the Normal distribution and the Standard Normal distribution.
- Understand and create a histogram using different bin sizes and identify the shape of the Normal distribution.
- Demonstrate the concept and effect of centring and scaling of a variable.
- Describe the properties of the Normal distribution and the Standard Normal distribution.
- Describe the concepts of cumulative probability, probability tail and quantile and the relationship between these terms.
- Recognise the concept of statistical simulation and the importance of seed value in a computer simulation.
- Explain and generalise the statistical concepts and implement them in your area of research.
<br>
## Activity
For each of the following activities:
- Describe the problem
- Identify the assumptions (if any)
- Outline the successive steps of calculation supported by appropriate formulae
- Conduct the apps-based experiment
- Summarise the outputs
- Interpret the results
- Draw valid conclusions
- Generalise the problem in your research area
<br>
### Activity 1
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Sample" and note the shape of the sample data.
- Increase the sample size to >1000; describe the change of shape of the distribution.
- Increase the sample size further and describe the shape.
- Change the number of bins to >300. Explain the shape.
- Overlay the data with Normal density function by selecting the appropriate option.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Distribution".
- Increase or decrease the value of $\mu$; this is analogous to centring effect.
- Increase or decrease the value of $\sigma$; this is analogous to scale effect.
- Explain the changes in the probability density function due to the centring and scaling of the data.
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Change the point on the slider of 'Cumulative probability'.
- Select different options for the probability tails.
- Describe the concepts of cumulative probability, probability tail and quantile and the relationship between these terms.
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Using the plots for the probability density function and the cumulative distribution function, as well as the input values, prove that for a Normal distribution: Mean = Median = Mode.
- Explain that Normal distribution is unimodal and symmetric around the point $x = \mu$.
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Given that the variable follows a Normal distribution with the population parameters as $\mu = 20$ and $\sigma = 4$, find out the following:
- The minimum value of top 5%, 10%, 20%, 50% of the population
- The maximum value of bottom 5%, 10%, 20%, 50% of the population
- The minimum value of top 97.5% of the population
- The maximum value of the bottom 2.5% of the population
Note: To fine-tune the point on the slider, move the slider nearer the value and then use left or right keys to fine-tune it.
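If you want to cross-check your answers outside the app, R's `qnorm()` gives the same quantiles; the calls below assume the $\mu = 20$ and $\sigma = 4$ values stated above:

```r
qnorm(0.95, mean = 20, sd = 4)    # minimum value of the top 5% (about 26.58)
qnorm(0.05, mean = 20, sd = 4)    # maximum value of the bottom 5% (about 13.42)
qnorm(0.025, mean = 20, sd = 4)   # maximum value of the bottom 2.5% (about 12.16)
```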
<br>
### Activity 6
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Given that the variable follows a Normal distribution with the population parameters as $\mu = 20$ and $\sigma = 4$, find out the following:
- What proportion of the population is greater than 26.58?
- What proportion of the population is less than 26.58?
- What proportion of the population is greater than 13.42?
- What proportion of the population is less than 13.42?
- What proportion of the population is between 12.16 and 27.84?
- Explain the area under the curve.
- What is the total probability under the Normal distribution curve and over the x-axis?
Note: To fine-tune the point on the slider, move the slider nearer the value and then use left or right keys to fine-tune it.
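These proportions can likewise be cross-checked with `pnorm()`, again assuming $\mu = 20$ and $\sigma = 4$:

```r
1 - pnorm(26.58, mean = 20, sd = 4)   # proportion greater than 26.58 (about 0.05)
pnorm(26.58, mean = 20, sd = 4)       # proportion less than 26.58 (about 0.95)
# proportion between 12.16 and 27.84 (about 0.95)
pnorm(27.84, mean = 20, sd = 4) - pnorm(12.16, mean = 20, sd = 4)
```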
<br>
### Activity 7
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Probability and Quantile".
- Create the probability density function and cumulative distribution function of the Standard Normal distribution.
- Given that the variable ($z$) follows a Standard Normal distribution, find out the following:
- The value of $z$ with the lower-tail cumulative probability of 0.05
- The value of $z$ with the upper-tail cumulative probability of 0.05
- The value of $z$ with the lower-tail cumulative probability of 0.95
- The value of $z$ with the upper-tail cumulative probability of 0.95
- The value of $z$ with the upper-tail cumulative probability of 0.50
- The value of $z$ with the two-tailed cumulative probability of 0.05
- The value of $z$ with the two-tailed cumulative probability of 0.01
Note: To fine-tune the point on the slider, move the slider nearer the value and then use left or right keys to fine-tune it.
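For the Standard Normal distribution, `qnorm()` with its default arguments gives these quantiles; for a two-tailed probability, split it equally between the two tails:

```r
qnorm(0.05)                      # lower-tail probability 0.05 (about -1.645)
qnorm(0.05, lower.tail = FALSE)  # upper-tail probability 0.05 (about 1.645)
qnorm(1 - 0.05 / 2)              # two-tailed probability 0.05 (about +/- 1.96)
qnorm(1 - 0.01 / 2)              # two-tailed probability 0.01 (about +/- 2.576)
```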
<br>
### Activity 8
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area that follows Normal distribution.
- Enter the true population mean and true population standard deviation for the variable (based on literature or prior available data).
- Explore sample characteristics and distribution functions; explain the outcomes and associated plots.
- Go to the 'Sample' tab.
- Press the 'Update' button multiple times and explore the random sampling scenarios.
- Explain the concept of statistical simulation and the importance of seed value.
<br>
<br>
----------------------------------
# Sampling Distributions
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the concept of random sampling and sampling units.
- Recognise the concept of parameters and estimates.
- Demonstrate and generalise the distribution of sample means.
- Summarise the expected value of the sample mean and sample standard deviation.
- Interpret standard error of a sample mean and its importance in the context of random sampling.
- Construct the confidence interval and appraise the concept in the context of sampling.
- Describe the concepts of cumulative probability, probability tail and quantile and the relationship between them.
- Explain and generalise the statistical theory underlying the sampling distribution and implement it in your own area of research.
<br>
## Activity
For each of the following activities:
- Describe the problem
- Identify the assumptions (if any)
- Outline the successive steps of calculation supported by appropriate formulae
- Conduct the apps-based experiment
- Summarise the outputs
- Interpret the results
- Draw valid conclusions
- Generalise the problem in your research area
<br>
### Activity 1
- Load the app
- Select the checkbox against "Check the box to update instantly".
- Select the tab "Sample". Note the spread of the sampled data as well as the mean and SD of each sample.
- Increase the sample size to >200. Describe how the spread of the sampled data is changing.
- Select the tab "Sample Estimators". Note the mean of the sample means and the mean of the sample standard deviations (SD).
- Explain the meaning of the expected value of sample means and sample standard deviations (SD).
- Note the standard deviation of sample means. What does it signify?
- What is the standard error of the sample mean?
- What is the distribution of the sample mean?
- Select the tab "Confidence Interval"; explain the concept of a 95% confidence interval.
- Explain the concept of sampling distribution under the Frequentist inferential framework.
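As a rough cross-check of what the app displays, the sketch below simulates the sampling distribution of the mean directly in R; the 1000 replicates, the sample size of 30 and the N($\mu = 20$, $\sigma = 4$) population are illustrative assumptions:

```r
set.seed(12345)
sample_means <- replicate(1000, mean(rnorm(30, mean = 20, sd = 4)))
mean(sample_means)   # close to the population mean, 20
sd(sample_means)     # close to the standard error, 4 / sqrt(30), about 0.73
hist(sample_means)   # approximately Normal, as the central limit theorem predicts
```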
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select the checkbox against "Check the box to update instantly".
- Investigate the effect of altering the following input values on the standard error of mean and 95% confidence interval of mean:
- Increase and decrease $\mu$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Explore the 'central limit theorem' and explain how the theorem generalises to the theory of sampling distribution.
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area that follows the Normal distribution.
- Enter the true population mean and true population standard deviation for the variable (based on literature or prior available data).
- Explore sample characteristics and distribution functions; explain the outcomes and associated plots in the context of the sampling distribution.
- Illustrate your results and identify how different sample characteristics may affect your experiment.
- Explain the concept of statistical simulation and the importance of seed value.
<br>
<br>
----------------------------------
# Hypothesis Testing: One-Sample Z-Test
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the steps of null hypothesis significance testing of the mean for one sample when the population variance is known.
- Summarise the data and identify the inputs for conducting the hypothesis testing.
- Conduct hypothesis testing, evaluate the outcomes, interpret the results and draw valid conclusions.
- Explain and generalise the statistical concepts and implement them in your own area of research.
<br>
## Activity
Consider the following statements along with specific instructions given for each activity:
- Describe the steps of null hypothesis significance testing and the associated assumptions.
- State the null and alternative hypotheses.
- Record the statistical significance level (Type 1 error) and probability tails.
- Identify the appropriate test statistic.
- Present the descriptive statistics of the observed sample data.
- Explain the test statistic, p-value, quantile under type 1 error and 95% confidence interval (a worked sketch follows this list).
- Present the summary of the data, interpret the results and draw appropriate conclusions.
- Assess that the conclusion conforms with the test statistic, p-value, quantile under type 1 error and estimated 95% confidence interval.
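As a minimal sketch of the calculation behind the app, the one-sample z-statistic, its two-tailed p-value and the 95% confidence interval can be computed directly in R; the sample summary values below are hypothetical:

```r
n <- 30; xbar <- 21.2   # hypothetical sample size and sample mean
mu0 <- 20; sigma <- 4   # hypothesised mean and known population SD
z <- (xbar - mu0) / (sigma / sqrt(n))                     # z-statistic
p <- 2 * pnorm(-abs(z))                                   # two-tailed p-value
ci <- xbar + c(-1, 1) * qnorm(0.975) * sigma / sqrt(n)    # 95% CI for the mean
c(z = z, p = p); ci
```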
<br>
### Activity 1
- Accept all default values; click the 'Update' button ONCE.
- Note the z-statistic, p-value, difference, 95% CI of difference.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values AND select 'lower-tail probability'
- Click the 'Update' button ONCE.
- Note the z-statistic, p-value, difference, 95% CI of difference.
- Which values got changed compared to Activity 1 and why?
- What are the interpretations? Do you expect these results?
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values; click the 'Update' button FIVE times.
- Note that the first update should produce output identical to Activity 1.
- Note the z-statistic and p-value on each simulation (each click of the Update button).
- Do you have different interpretations at each simulation stage? Can you explain the results?
- Do you have any instance when you cannot reject the null hypothesis? What could be the reasons?
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Select "Check the box to update instantly".
- Investigate the effect of the following:
- Increase and decrease $\mu_0$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Increase and decrease type 1 error
- Change the probability tail to lower/upper or both
- Explain and interpret the outcomes for each scenario.
- Do you have any instance when the outcomes are different from what you expected given the TRUTH is known?
- What could be the reasons?
- While explaining, keep in mind that ABACUS is simulating the data at each instance aligning with the Frequentist inferential framework. When in doubt, deselect 'check the box to update instantly' and click the 'Update' button multiple times and explore each outcome.
- Also, note that the statistical power of a test depends on other essential components. Can you explain these components based on your observations with different scenarios above?
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area.
- Enter the true population mean and true population standard deviation for the variable (based on literature or prior available data).
- Explore the sampling scenarios and explain the outcomes due to changes of $\mu_0$, $\sigma$, sample size, type 1 error and probability tail.
<br>
<br>
----------------------------------
# Hypothesis Testing: One-Sample Student's t-Test
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the steps of null hypothesis significance testing.
- Test the null hypothesis that the sample data are from a population with a hypothesised mean when the population variance is unknown.
- Summarise the data and identify the inputs for conducting the hypothesis testing and associated assumptions.
- Conduct hypothesis testing, evaluate the outcomes, interpret the results and draw valid conclusions.
- Explain and generalise the statistical concepts and implement them in your own area of research.
<br>
## Activity
Consider the following statements along with the specific instructions given for each activity:
- Describe the steps of null hypothesis significance testing.
- State the null and alternative hypotheses.
- Record the statistical significance level (Type 1 error) and probability tails.
- Identify the appropriate test statistic.
- Present the descriptive statistics of the observed sample data.
- Explain the test statistic, p-value, quantile under type 1 error and 95% confidence interval (a worked sketch follows this list).
- Present the summary of the data, interpret the results and draw appropriate conclusions.
- Assess that the conclusion conforms with the test statistic, p-value, quantile under type 1 error and 95% confidence interval.
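The same quantities the app reports can be reproduced with R's built-in `t.test()`; the simulated sample below is illustrative:

```r
set.seed(12345)
x <- rnorm(30, mean = 20, sd = 4)   # simulated sample from the TRUE population
t.test(x, mu = 20)                  # one-sample t-test against the hypothesised mean 20
```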
<br>
### Activity 1
- Accept all default values; click the 'Update' button ONCE.
- Note the t-statistic, p-value, difference and 95% CI of difference.
- Do you expect these results given the TRUTH is known? Explain.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values and ENTER: $\sigma = 3$, sample size = 50
- Click the 'Update' button ONCE
- Note the t-statistic, p-value, difference and 95% CI of difference
- What are the interpretations? Do you expect these results?
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values; click the 'Update' button FIVE times.
- Note that the first update should produce output identical to Activity 1.
- Note the t-statistic and p-value on each simulation (each click of the Update button).
- Do you have different interpretations at each simulation stage? Can you explain the results?
- Do you have any instance when you cannot reject the null hypothesis? What could be the reasons?
- How do these outcomes compare with the similar activity you conducted for 'Hypothesis Testing: One-Sample Z-Test'?
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Check the box to update instantly.
- Investigate the following scenarios and explain each instance:
- Increase and decrease $\mu_0$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Increase and decrease type 1 error
- Change the probability tail to lower/upper or both
- Explain and interpret the outcomes for each scenario.
- Do you have any instance when the outcomes are different from what you expected given the TRUTH is known?
- What could be the reasons?
- While explaining, keep in mind that ABACUS is simulating the data at each instance aligning with the Frequentist inferential framework. When in doubt, deselect 'check the box to update instantly' and click the 'Update' button multiple times and explore each outcome.
- Also, note that the statistical power of a test depends on other essential components. Can you explain these components based on your observations with different scenarios above?
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area.
- Enter the true population mean and true population standard deviation for the variable (based on literature or prior available data).
- Explore the sampling scenarios and explain the outcomes due to changes of $\mu_0$, $\sigma$, sample size, type 1 error and probability tail.
<br>
<br>
----------------------------------
# Hypothesis Testing: Two-Sample Independent (Unpaired) t-Test
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the steps of null hypothesis significance testing.
- Test the null hypothesis that two independent groups of observations are sampled from the same population.
- Summarise the data and identify the inputs for conducting the hypothesis testing and associated assumptions.
- Conduct hypothesis testing, evaluate the outcomes, interpret the results and draw valid conclusions.
- Explain and generalise the statistical concepts and implement them in your own area of research.
<br>
## Activity
Consider the following statements along with the specific instructions given for each activity:
- Describe the steps of null hypothesis significance testing.
- State the null and alternative hypotheses.
- Record the statistical significance level (Type 1 error) and probability tails.
- Identify the appropriate test statistic.
- Present the descriptive statistics of the observed sample data.
- Explain the test statistic, p-value, quantile under type 1 error and 95% confidence interval (a worked sketch follows this list).
- Present the summary of the data, interpret the results and draw appropriate conclusions.
- Evaluate the assumptions of the test.
- Assess that the conclusion conforms with the test statistic, p-value, quantile under type 1 error and 95% confidence interval.
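A corresponding sketch for the two-sample case is shown below; the group sizes and means are illustrative, and `var.equal = TRUE` gives the classical pooled-variance t-test (whether the app pools the variances is an assumption here, although both groups share a common $\sigma$):

```r
set.seed(12345)
g1 <- rnorm(20, mean = 20, sd = 4)   # sample from population 1
g2 <- rnorm(20, mean = 22, sd = 4)   # sample from population 2
t.test(g1, g2, var.equal = TRUE)     # unpaired two-sample t-test
```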
<br>
### Activity 1
- Accept all default values; click the 'Update' button ONCE.
- Note the t-statistic, p-value, mean difference, 95% CI of the mean difference.
- Do you expect these results given the TRUTH is known? Explain.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values AND enter sample size for Group 2 = 15.
- Click the 'Update' button ONCE.
- Note the t-statistic, p-value, difference and 95% CI of difference.
- What are the interpretations? Do you expect these results?
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values; click the 'Update' button FIVE times.
- Note that the first update should produce output identical to Activity 1.
- Note the t-statistic and p-value on each simulation (each click of the Update button).
- Do you have different interpretations at each simulation stage? Can you explain the results?
- Do you have any instance when you cannot reject the null hypothesis? What could be the reasons?
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Check the box to update instantly.
- Investigate the following scenarios and explain each instance:
- Increase and decrease $\mu_1$ or $\mu_2$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Increase and decrease type 1 error
- Change the probability tail to lower/upper or both
- Explain and interpret the outcomes for each scenario.
- Do you have any instance when the outcomes are different from what you expect given the TRUTH is known?
- What could be the reasons?
- While explaining, keep in mind that ABACUS is simulating the data at each instance aligning with the Frequentist inferential framework. When in doubt, deselect 'check the box to update instantly' and click the 'Update' button multiple times and explore each outcome.
- Also, note that the statistical power of a test depends on other essential components. Can you explain these components based on your observations with different scenarios above?
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area.
- Enter the true population means (equal as well as unequal) and true population standard deviation for the variable (based on literature or prior available data).
- Explore the sampling scenarios and explain the outcomes due to changes of $\mu_1$, $\mu_2$, $\sigma$, sample size, type 1 error and probability tail.
<br>
<br>
----------------------------------
# Hypothesis Testing: One-way Analysis of Variance
----------------------------------
<br>
## Intended Learning Outcomes
- Describe the steps of null hypothesis significance testing.
- Test the null hypothesis that three independent groups of observations are sampled from the same population.
- Summarise the data and identify the inputs for conducting the hypothesis testing and associated assumptions.
- Conduct hypothesis testing, evaluate the outcomes, interpret the results and draw valid conclusions.
- Explain and generalise the statistical concepts and implement them in your own area of research.
<br>
## Activity
Consider the following statements along with the specific instructions given for each activity:
- Describe the steps of null hypothesis significance testing.
- State the null and alternative hypotheses.
- Record the statistical significance level (Type 1 error) and probability tails.
- Identify the appropriate test statistic.
- Present the descriptive statistics of the observed sample data.
- Explain the test statistic, p-value, quantile under type 1 error and 95% confidence interval (a worked sketch follows this list).
- Present the summary of the data, interpret the results and draw appropriate conclusions.
- Evaluate the assumptions of the test.
- Explain the changes in the between and within sum of squares and mean squares due to changes in inputs.
- Assess that the conclusion conforms with the test statistic, p-value, quantile under type 1 error and 95% confidence interval.
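A minimal one-way ANOVA in R, with three illustrative groups drawn from Normal populations that share a common $\sigma$:

```r
set.seed(12345)
y <- c(rnorm(10, 20, 4), rnorm(10, 22, 4), rnorm(10, 24, 4))
grp <- factor(rep(c("Group1", "Group2", "Group3"), each = 10))
summary(aov(y ~ grp))   # between/within SS and MS, F-statistic and p-value
```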
<br>
### Activity 1
- Accept all default values; click the 'Update' button ONCE.
- Note the sum of squares, mean squares, F-statistic, p-value, quantile, mean difference and 95% CI of difference.
- Do you expect these results given the TRUTH is known? Explain.
<br>
### Activity 2
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values AND enter sample size for all groups = 10.
- Click the 'Update' button ONCE.
- Note the sum of squares, mean squares, F-statistic, p-value, quantile, mean difference and 95% CI of difference.
- What are the interpretations? Do you expect these results?
<br>
### Activity 3
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Accept all default values; click the 'Update' button FIVE times.
- Note that the first update should produce output identical to Activity 1.
- Note the sum of squares, mean squares, F-statistic, p-value, quantile, mean difference and 95% CI of difference for each click of the 'Update' button.
- Do you have different interpretations at each simulation stage? Can you explain the results?
- Do you have any instance when you cannot reject the null hypothesis? What could be the reasons?
<br>
### Activity 4
- Reload the app (click 'Refresh' or 'Reload' button in the browser)
- Check the box to update instantly
- Investigate the following scenarios and explain each instance:
- Increase and decrease $\mu_1$ / $\mu_2$ / $\mu_3$
- Increase and decrease $\sigma$
- Increase and decrease sample size
- Increase and decrease type 1 error
- Change the probability tail to lower/upper or both
- Explain and interpret the outcomes for each scenario
- Do you have any instance when the outcomes are different from what you expect given the TRUTH is known?
- What could be the reasons?
- While explaining, keep in mind that ABACUS is simulating the data at each instance aligning with the Frequentist inferential framework. When in doubt, deselect 'check the box to update instantly' and click the 'Update' button multiple times and explore each outcome.
- Also, note that the statistical power of a test depends on other essential components. Can you explain these components based on your observations with different scenarios above?
<br>
### Activity 5
- Reload the app (click 'Refresh' or 'Reload' button in the browser).
- Identify a variable from your subject area.
- Enter the true population means (equal as well as unequal) and true population standard deviation for the variable (based on literature or prior available data).
- Explore the sampling scenarios and explain the outcomes due to changes of $\mu_1$, $\mu_2$, $\mu_3$, $\sigma$, sample size, type 1 error and probability tail.
<br>
<br>
#' Identifying genes for which multiple CpG sites show significant methylation difference
#'
#' @description
#' This function calculates the number of significantly different CpG sites between cases and controls for each gene and produces a frequency table with genes that have more than one CpG site.
#'
#' @param x Results from the overlap_data function
#' @examples
#' \donttest{
#' data(test_data)
#' data(nonspecific_probes)
#' data(annotation_file)
#' test_data_filtered <- filter_data(test_data)
#' test_data_ttest <- ttest_data(test_data_filtered, 1, 2, 3, 4, 1e-3)
#' test_data_delta_beta <- delta_beta_data(test_data_filtered, 1, 2, 3, 4, 0.5, -0.5, 0.94, 0.06)
#' test_overlapped_data <- overlap_data(test_data_ttest, test_data_delta_beta)
#' test_CpG_hits <- CpG_hits(test_overlapped_data)
#' }
#' @export
CpG_hits <- function(x) {
# Frequency table: number of significant CpG sites per gene
ttest_delta_table <- data.frame(table(x$Gene))
# Keep only genes represented by more than one significant CpG site
multiple_hits <- ttest_delta_table[ttest_delta_table$Freq > 1,]
return(multiple_hits)
}
utils::globalVariables(c('annotation_file', 'test_data', 'nonspecific_probes'))
#' Annotating the filtered probes
#'
#' @description
#' This function annotates each filtered probe with gene name, chromosome number, probe location, distance from transcription start site (TSS), and relation to CpG islands. The annotation file is based on "UCSC platform" annotation format and was obtained from Illumina GPL13534_HumanMethylation450_15017482_v1.1 file (BS0010894-AQP_content.bpm).
#' @import utils
#' @param x the filtered probes from filter_data
#' @examples
#' data(test_data)
#' data(nonspecific_probes)
#' data(annotation_file)
#' test_data_filtered <- filter_data(test_data)
#' test_data_annotated <- annotate_data(test_data_filtered)
#'
#' @export
annotate_data <- function(x) {
annotation_file_rnames <- annotation_file[,1]
annotation_f <- data.frame(annotation_file[,2:ncol(annotation_file)])
row.names(annotation_f) <- annotation_file_rnames
x_merged <- merge.data.frame(annotation_f, x, by = "row.names")
x_rnames <- x_merged[,1]
x_annotated <- data.frame(x_merged[,2:ncol(x_merged)])
row.names(x_annotated) <- x_rnames
return(x_annotated)
}
#' Applying delta beta analysis to calculate the difference between cases and controls
#'
#' @description
#' This function calculates the delta beta value for the filtered probes. It calculates the difference in mean DNA methylation between cases and controls for each probe. Also, it selects probes with DNA methylation differences that are higher in cases than controls by a user specified meth_cutoff value and differences that are lower in cases than controls by the unmeth_cutoff value. In addition, the function provides the option to specify probes where the average beta value of the cases or controls is greater than a high_meth cutoff value or less than a low_meth cutoff value.
#'
#' @param x the filtered 450k probes from filter_data function
#' @param cases_column_1 The first column (column number) for cases in the filtered dataset
#' @param cases_column_n The last column (column number) for cases in the filtered dataset
#' @param controls_column_1 The first column (column number) for controls in the filtered dataset
#' @param controls_column_n The last column (column number) for controls in the filtered dataset
#' @param meth_cutoff The cutoff level for the methylation difference between cases and controls (cases minus controls)
#' @param unmeth_cutoff The cutoff level for the methylation difference between cases and controls (cases minus controls) for probes where controls are higher. Consequently, it requires a negative value.
#' @param high_meth The upper margin for the highly methylated probes
#' @param low_meth The lower margin for the low methylation
#'
#' @examples
#' data(test_data)
#' data(nonspecific_probes)
#' test_data_filtered <- filter_data(test_data)
#' test_data_delta_beta <- delta_beta_data(test_data_filtered, 1, 2, 3, 4, 0.5, -0.5, 0.94, 0.06)
#'
#' @export
delta_beta_data <- function(x, cases_column_1, cases_column_n, controls_column_1, controls_column_n, meth_cutoff, unmeth_cutoff, high_meth, low_meth) {
# Mean difference in methylation per probe: cases minus controls
delta_beta_x <- rowMeans(as.matrix(x[,cases_column_1:cases_column_n])) - rowMeans(as.matrix(x[,controls_column_1:controls_column_n]))
delta_beta_dtfm <- as.data.frame(delta_beta_x)
# Keep probes whose difference is at least meth_cutoff or at most unmeth_cutoff
delta_beta_meth_unmeth <- subset(delta_beta_dtfm, delta_beta_x >= meth_cutoff | delta_beta_x <= unmeth_cutoff)
# Probes where the mean beta value for cases is extreme (>= high_meth or <= low_meth)
cases_mean <- rowMeans(as.matrix(x[,cases_column_1:cases_column_n]))
cases_mean_dtfm <- as.data.frame(cases_mean)
cases_meth_unmeth <- subset(cases_mean_dtfm, cases_mean >= high_meth | cases_mean <= low_meth)
# Probes where the mean beta value for controls is extreme
controls_mean <- rowMeans(as.matrix(x[,controls_column_1:controls_column_n]))
controls_mean_dtfm <- as.data.frame(controls_mean)
controls_meth_unmeth <- subset(controls_mean_dtfm, controls_mean >= high_meth | controls_mean <= low_meth)
# Union of the extreme probes from either group
cases_controls <- merge(cases_meth_unmeth, controls_meth_unmeth, by = "row.names", all = TRUE)
cases_control_rnames <- cases_controls[,1]
cases_controls_m <- data.matrix(cases_controls[,2:ncol(cases_controls)])
row.names(cases_controls_m) <- cases_control_rnames
# Intersect the large-difference probes with the extreme-methylation probes
delta_cases_controls <- merge(delta_beta_meth_unmeth, cases_controls_m, by = "row.names")
delta_cases_controls_rnames <- delta_cases_controls[,1]
delta_cases_controls_m <- data.matrix(delta_cases_controls[,2])
row.names(delta_cases_controls_m) <- delta_cases_controls_rnames
return(delta_cases_controls_m)
}
#' Filtering DNA methylation 450k non_specific probes
#'
#' @description
#' This function filters the reported nonspecific probes, and also filters probes that interrogate SNPs of minor allele frequency (MAF) > 0.1. A list of nonspecific probes was obtained from Chen et al (2013) supplementary files.
#' @param x The normalised beta values in a data matrix format, where conditions are arranged in columns and cg probes are arranged in rows.
#'
#' @references Chen YA, Lemire M, Choufani S, et al. Discovery of cross-reactive probes and polymorphic CpGs in the Illumina Infinium HumanMethylation450 microarray. Epigenetics 2013;8:203-9.
#' @examples
#' data(test_data)
#' data(nonspecific_probes)
#' test_data_filtered <- filter_data(test_data)
#'
#' @export
filter_data <- function(x) {
x_filtered <- x[!(x[,1] %in% nonspecific_probes[,1]),]
x_rnames <- x_filtered[,1]
x_m <- data.matrix(x_filtered[,2:ncol(x_filtered)])
row.names(x_m) <- x_rnames
return(x_m)
}
#' Overlapping Student's t-test and delta beta results
#'
#' @description
#' This function overlaps the results from both Student’s t-test and delta beta analyses to identify probes (CpG sites) that are highly and significantly different between cases and controls.
#'
#' @param x Results from t-test or delta beta analyses
#' @param y Results from t-test or delta beta analyses
#' @examples
#' \donttest{
#' data(test_data)
#' data(nonspecific_probes)
#' data(annotation_file)
#' test_data_filtered <- filter_data(test_data)
#' test_data_ttest <- ttest_data(test_data_filtered, 1, 2, 3, 4, 1e-3)
#' test_data_delta_beta <- delta_beta_data(test_data_filtered, 1, 2, 3, 4, 0.5, -0.5, 0.94, 0.06)
#' test_overlapped_data <- overlap_data(test_data_ttest, test_data_delta_beta)
#' }
#' @export
overlap_data <- function(x, y) {
ttest_delta_candidate <- merge(x, y, by = "row.names")
ttest_delta_rnames <- ttest_delta_candidate[,1]
ttest_delta_m <- data.matrix(ttest_delta_candidate[,2:ncol(ttest_delta_candidate)])
row.names(ttest_delta_m) <- ttest_delta_rnames
annotation_file_rnames <- annotation_file[,1]
annotation_f <- data.frame(annotation_file[,2:ncol(annotation_file)])
row.names(annotation_f) <- annotation_file_rnames
ttest_delta_annotated <- merge.data.frame(annotation_f, ttest_delta_m, by = "row.names")
ttest_delta_arranged <- ttest_delta_annotated[order(ttest_delta_annotated[,4]),]
return(ttest_delta_arranged)
}
#' Plotting highly different and significant probes annotated by their corresponding gene names
#'
#' @description
#' This function plots the potential candidate genes for which multiple CpG sites show significant difference.
#' @param x Results from the overlap_data function
#' @import graphics
#' @examples
#' \donttest{
#' data(test_data)
#' data(nonspecific_probes)
#' data(annotation_file)
#' test_data_filtered <- filter_data(test_data)
#' test_data_ttest <- ttest_data(test_data_filtered, 1, 2, 3, 4, 1e-3)
#' test_data_delta_beta <- delta_beta_data(test_data_filtered, 1, 2, 3, 4, 0.5, -0.5, 0.94, 0.06)
#' test_overlapped_data <- overlap_data(test_data_ttest, test_data_delta_beta)
#' plot_candidate_genes(test_overlapped_data)
#' }
#' @export
plot_candidate_genes <- function(x) {
ttest_delta_table <- data.frame(table(x$Gene))
ttest_delta_multiple_hits <- x[x$Gene %in% ttest_delta_table$Var1[ttest_delta_table$Freq>1],]
ttest_delta_multiple_hits_ordered <- ttest_delta_multiple_hits[order(ttest_delta_multiple_hits[,2]),]
par(mfrow = c(1,1))
cols <- c("blue", "red")[(ttest_delta_multiple_hits_ordered$V1 > 0) + 1]
barplot(ttest_delta_multiple_hits_ordered$V1, names.arg = ttest_delta_multiple_hits_ordered$Gene, col = cols, main = "Genes with multiple CpG sites", ylab = "delta beta DNA methylation values")
}
#' Overview description of the DNA methylation pattern for cases and controls
#'
#' @description
#' This function produces four distribution plots that summarise the DNA methylation patterns for cases (top left) and controls (top right). The top two histograms show the pattern of mean DNA methylation levels for cases and controls. The bottom two plots show the difference in DNA methylation between cases and controls (a boxplot comparing methylation profile for cases and controls, and a delta beta plot describing the methylation difference between cases and controls). The function also provides summary statistics for the delta beta analysis that can be used to select cutoff values for the delta_beta_data function.
#'
#' @param x The filtered 450k probes from filter_data() function
#' @param cases_column_1 The first column (column number) for cases in the filtered dataset
#' @param cases_column_n The last column (column number) for cases in the filtered dataset
#' @param controls_column_1 The first column (column number) for controls in the filtered dataset
#' @param controls_column_n The last column (column number) for controls in the filtered dataset
#'
#' @examples
#' data(test_data)
#' data(nonspecific_probes)
#' test_data_filtered <- filter_data(test_data)
#' plot_data(test_data_filtered, 1, 2, 3, 4)
#'
#' @export
plot_data <- function(x, cases_column_1, cases_column_n, controls_column_1, controls_column_n) {
cases <- x[,cases_column_1:cases_column_n]
controls <- x[,controls_column_1:controls_column_n]
cases_mean <- rowMeans(as.matrix(x[,cases_column_1:cases_column_n]))
controls_mean <- rowMeans(as.matrix(x[,controls_column_1:controls_column_n]))
delta_beta <- rowMeans(as.matrix(x[,cases_column_1:cases_column_n])) - rowMeans(as.matrix(x[,controls_column_1:controls_column_n]))
means <- cbind(cases_mean, controls_mean)
Hist_cal <- hist(delta_beta, plot = FALSE)
hist_categ <- cut(Hist_cal$breaks, c(-Inf, -0.00001, 0.0, Inf))
par(mfrow=c(2,2))
hist(cases, col = "red", xlab = "DNA methylation")
hist(controls, col = "blue", xlab = "DNA methylation")
boxplot(means, col = c("red","blue"), ylim = c(0,1), main = "DNA methylation of cases and controls")
plot(Hist_cal, col = c("blue", "red", "red")[hist_categ], main = "Difference of DNA methylation between cases and controls", xlab = "delta beta values of cases minus controls")
print(summary(delta_beta))
}
#' Plotting and exporting methylation profile for candidate genes
#'
#' @description
#' This function explores the DNA methylation profile for any gene. The function generates four plots: the top plots show the difference in DNA methylation between cases and controls (a bar chart of the delta beta values for all probes arranged from 5’ to 3’ positions and a plot showing the difference in mean DNA methylation between cases and controls). The bottom plots show the distribution of DNA methylation for each probe that interrogates a CpG site in the investigated gene, for cases (left) and controls (right), respectively. Also, an annotation table for the arranged probes is generated with the following columns: probe names, gene name, distance from TSS, mean methylation for cases, mean methylation for controls, delta beta values (cases minus controls), and t-test p.values.
#'
#' @param x The filtered and annotated 450k probes
#' @param b Gene name between quotation marks
#' @param cases_column_1 The first column (column number) for cases in the filtered dataset
#' @param cases_column_n The last column (column number) for cases in the filtered dataset
#' @param controls_column_1 The first column (column number) for controls in the filtered dataset
#' @param controls_column_n The last column (column number) for controls in the filtered dataset
#'
#' @examples
#' data(test_data)
#' data(nonspecific_probes)
#' data(annotation_file)
#' test_data_filtered <- filter_data(test_data)
#' test_data_annotated <- annotate_data(test_data_filtered)
#' KLHL34 <- plot_gene(test_data_annotated, 'KLHL34', 1, 2, 3, 4)
#'
#' @export
plot_gene <- function(x, b, cases_column_1, cases_column_n, controls_column_1, controls_column_n) {
b_gene <- x[grep(b, x$Gene),]
b_ordered <- b_gene[order(b_gene[,3]),]
b_beta <- b_ordered[,6:ncol(b_ordered)]
delta_beta <- rowMeans(as.matrix(b_beta[,cases_column_1:cases_column_n])) - rowMeans(as.matrix(b_beta[,controls_column_1:controls_column_n]))
cases_betas <- b_beta[,cases_column_1:cases_column_n]
controls_betas <- b_beta[,controls_column_1:controls_column_n]
cases_mean <- rowMeans(as.matrix(b_beta[,cases_column_1:cases_column_n]))
controls_mean <- rowMeans(as.matrix(b_beta[,controls_column_1:controls_column_n]))
ttest_p.value <- apply(b_beta, 1, function(x) {t.test(x[cases_column_1:cases_column_n], x[controls_column_1:controls_column_n], "two.sided", var.equal = FALSE)$p.value})
b_exported <- cbind(b_ordered[,c(1,2,4,5)], cases_mean, controls_mean, delta_beta, ttest_p.value)
barplot_cols <- c("blue", "red")[(delta_beta > 0) + 1]
par(mfrow=c(2,2))
barplot(delta_beta, names.arg = rownames(b_beta), col = barplot_cols, main = paste("difference in DNA methylation of cases minus controls for", toString(b)), xlab = "450k probes arranged from 5' -> 3'", ylab = "delta beta values")
plot(cases_mean, col = "red", ylim = c(0, 1), main = paste("mean DNA methylation for",toString(b)), sub = "red circles = cases, blue triangles = controls", xlab = "450k probes arranged from 5' -> 3'", ylab = "beta values")
points(controls_mean, pch = 24, col = "blue", bg = "blue")
boxplot(t(cases_betas), col = "red", ylim = c(0, 1), main = paste(toString(b), ":DNA methylation for cases"), xlab = "450k probes arranged from 5' -> 3'", ylab = "beta values")
boxplot(t(controls_betas), col = "blue", ylim = c(0,1), main = paste(toString(b), ":DNA methylation for controls"), xlab = "450k probes arranged from 5' -> 3'", ylab = "beta values")
print(b_exported)
}
#' An automated analysis applying all ABC.RAP functions in one script
#'
#' @description
#' This function processes the ABC.RAP workflow automatically
#'
#' @param x The normalised beta values in a data matrix format, where conditions are arranged in columns and cg probes are arranged in rows.
#' @param cases_column_1 The first column (column number) for cases in the filtered dataset
#' @param cases_column_n The last column (column number) for cases in the filtered dataset
#' @param controls_column_1 The first column (column number) for controls in the filtered dataset
#' @param controls_column_n The last column (column number) for controls in the filtered dataset
#' @param ttest_cutoff The cutoff level to filter insignificant p-values
#' @param meth_cutoff The cutoff level for the methylation difference between cases and controls (cases minus controls)
#' @param unmeth_cutoff The cutoff level for the methylation difference between cases and controls (cases minus controls) for probes where controls are higher. Consequently, it requires a negative value.
#' @param high_meth The upper margin for the highly methylated probes
#' @param low_meth The lower margin for the low methylation
#'
#' @import grDevices
#' @examples
#' \donttest{
#' data(test_data)
#' data(nonspecific_probes)
#' data(annotation_file)
#' process.ABC.RAP(test_data, 1, 2, 3, 4, 1e-3, 0.5, -0.5, 0.94, 0.06)
#' }
#' @export
process.ABC.RAP <- function(x, cases_column_1, cases_column_n, controls_column_1, controls_column_n, ttest_cutoff, meth_cutoff, unmeth_cutoff, high_meth, low_meth) {
x_filtered <- filter_data(x)
x_annotated <- annotate_data(x_filtered)
x_ttest <- ttest_data(x_filtered, cases_column_1, cases_column_n, controls_column_1, controls_column_n, ttest_cutoff)
x_delta <- delta_beta_data(x_filtered, cases_column_1, cases_column_n, controls_column_1, controls_column_n, meth_cutoff, unmeth_cutoff, high_meth, low_meth)
x_overlap <- overlap_data(x_ttest, x_delta)
x_CpG_hits <- CpG_hits(x_overlap)
# Write all candidate-gene plots to a PDF and the annotation tables to a text file
pdf(file = "process.ABC.RAP.plots.pdf", width = 11)
sink("process.ABC.RAP.tables.txt")
# Plot and tabulate every gene with multiple significant CpG sites
for(i in x_CpG_hits$Var1) {
plot_gene(x_annotated, i, cases_column_1, cases_column_n, controls_column_1, controls_column_n)
}
dev.off()
sink()
}
#' applying t-test analysis
#'
#' @description
#' This function applies a "two.sided", unequal-variance Student's t-test for each probe, comparing cases and controls. A p-value cutoff can be entered to filter insignificant p-values and reduce the impact of multiple testing.
#'
#' @param x The filtered 450k probes from filter_data() function
#' @param cases_column_1 The first column (column number) for cases in the filtered dataset
#' @param cases_column_n The last column (column number) for cases in the filtered dataset
#' @param controls_column_1 The first column (column number) for controls in the filtered dataset
#' @param controls_column_n The last column (column number) for controls in the filtered dataset
#' @param ttest_cutoff The cutoff level to filter insignificant p-values
#'
#' @import stats
#' @examples
#' data(test_data)
#' data(nonspecific_probes)
#' test_data_filtered <- filter_data(test_data)
#' test_data_ttest <- ttest_data(test_data_filtered, 1, 2, 3, 4, 1e-3)
#'
#' @export
ttest_data <- function(x, cases_column_1, cases_column_n, controls_column_1, controls_column_n, ttest_cutoff) {
x_ttest <- apply(x, 1, function(x) {t.test(x[cases_column_1:cases_column_n], x[controls_column_1:controls_column_n], "two.sided", var.equal = FALSE)$p.value})
my_ttest_sorted <- sort(x_ttest, decreasing = FALSE)
my_ttest_sorted_dtfm <- as.data.frame(my_ttest_sorted)
my_ttest_candidate <- subset(my_ttest_sorted_dtfm, my_ttest_sorted_dtfm$my_ttest_sorted <= ttest_cutoff)
return(my_ttest_candidate)
}
## ----read.csv, echo=TRUE-------------------------------------------------
library(ABC.RAP)
data("test_data")
data("nonspecific_probes")
data("annotation_file")
## ----filter, echo=TRUE---------------------------------------------------
test_data_filtered <- filter_data(test_data)
## ----annotation, echo=TRUE-----------------------------------------------
test_data_annotated <- annotate_data(test_data_filtered)
## ----plot_data, echo=TRUE, fig.height= 7, fig.width= 10------------------
plot_data(test_data_filtered, 1, 2, 3, 4)
## ----t.test, echo=TRUE---------------------------------------------------
test_data_ttest <- ttest_data(test_data_filtered, 1, 2, 3, 4, 1e-3)
## ----nrow2, echo=TRUE----------------------------------------------------
nrow(test_data_ttest)
## ----delta_beta, echo=TRUE-----------------------------------------------
test_delta_beta <- delta_beta_data(test_data_filtered, 1, 2, 3, 4, 0.5, -0.5, 0.94, 0.06)
## ----nrow3, echo=TRUE----------------------------------------------------
nrow(test_delta_beta)
## ----overlap, echo=TRUE--------------------------------------------------
test_overlapped_data <- overlap_data(test_data_ttest, test_delta_beta)
## ----nrow, echo=TRUE-----------------------------------------------------
nrow(test_overlapped_data)
## ----CpG_hits, echo=TRUE-------------------------------------------------
test_CpG_hits <- CpG_hits(test_overlapped_data)
## ----CpG_hits2, echo=TRUE------------------------------------------------
test_CpG_hits
## ----plot, echo=TRUE, fig.height=5, fig.width=7--------------------------
plot_candidate_genes(test_overlapped_data)
## ----investigate, echo=TRUE, fig.height=7, fig.width=10------------------
KLHL34 <- plot_gene(test_data_annotated, "KLHL34", 1, 2, 3, 4)
## ----process, echo=TRUE--------------------------------------------------
process.ABC.RAP(test_data, 1, 2, 3, 4, 1e-3, 0.5, -0.5, 0.94, 0.06)
---
title: "Array Based CpG Region Analysis Package (ABC.RAP)"
author: "Abdulmonem A. Alsaleh, Robert J. Weeks, Ian M. Morison. Department of Pathology, Dunedin School of Medicine, University of Otago, Dunedin, New Zealand"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Array Based CpG Region Analysis Package (ABC.RAP)}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
The ABC.RAP package was developed to analyse human 450k DNA methylation array data and to identify candidate genes that have significant differences in DNA methylation between cases and controls. The following example analysis is based on a small sample dataset "test_data" (included) containing 10,000 probes for 2 B-ALL cases and 2 controls from Busche *et al* (2013).
Busche S, Ge B, Vidal R, et al. Integration of high-resolution methylome and transcriptome analyses to dissect epigenomic changes in childhood acute lymphoblastic leukaemia. Cancer Research 2013; 73(14): 4323-4336.
## Loading Files
```{r read.csv, echo=TRUE}
library(ABC.RAP)
data("test_data")
data("nonspecific_probes")
data("annotation_file")
```
## Summary of the workflow
The package offers a choice of two workflows:
1. Step by step as follows
2. Using a single script (see "using one script" section)
Below is the package workflow using nine functions, and each step is dependent on the previous function.
Filtering the nonspecific probes:
```{r filter, echo=TRUE}
test_data_filtered <- filter_data(test_data)
```
Annotation based on "UCSC platform":
```{r annotation, echo=TRUE}
test_data_annotated <- annotate_data(test_data_filtered)
```
## Browsing the data
This function provides a general overview of the DNA methylation differences between cases and controls. It produces four plots: the upper two plots show the distribution of DNA methylation for cases (left) and controls (right). The bottom left plot compares the DNA methylation between cases and controls, and the bottom right plot represents the difference in DNA methylation between cases and controls (cases minus controls). Also, summary statistics for the difference in mean DNA methylation between cases and controls are produced.
Function arguments:
x = the filtered 450k probes from filter_data() function. In this example, it is "test_data_filtered".
cases_column_1 = the first column (column number) for cases in the filtered dataset. In this example, it is column 1.
cases_column_n = the last column (column number) for cases in the filtered dataset. In this example, it is column 2.
controls_column_1 = the first column (column number) for controls in the filtered dataset. In this example, it is column 3.
controls_column_n = the last column (column number) for controls in the filtered dataset. In this example, it is column 4.
```{r plot_data, echo=TRUE, fig.height= 7, fig.width= 10}
plot_data(test_data_filtered, 1, 2, 3, 4)
```
## Applying t-test
This function applies a "two.sided", unequal variance t-test analysis, then selects p-values that are less than or equal to the cutoff value entered. For this example, a cutoff value of 1e-3 is used:
```{r t.test, echo=TRUE}
test_data_ttest <- ttest_data(test_data_filtered, 1, 2, 3, 4, 1e-3)
```
Checking number of rows from t-test output:
```{r nrow2, echo=TRUE}
nrow(test_data_ttest)
```
## Delta beta analysis
This function calculates the difference between the beta values of cases and controls. It requires the minimum desired difference in the proportion of DNA methylation for cases minus controls (meth_cutoff) and the corresponding negative cutoff for probes where controls exceed cases (unmeth_cutoff). In this example, meth_cutoff is 0.5 and unmeth_cutoff is -0.5, which are based on the summary statistics from the plot_data() function. It also provides the option to specify probes where the average beta value of the cases or controls is greater than a cutoff value (e.g. 0.94) or less than a cutoff value (e.g. 0.06).
```{r delta_beta, echo=TRUE}
test_delta_beta <- delta_beta_data(test_data_filtered, 1, 2, 3, 4, 0.5, -0.5, 0.94, 0.06)
```
Checking the number of rows from delta beta analysis:
```{r nrow3, echo=TRUE}
nrow(test_delta_beta)
```
## Overlapping t-test and delta beta outputs
The following function overlaps the results of the previous 2 analyses:
```{r overlap, echo=TRUE}
test_overlapped_data <- overlap_data(test_data_ttest, test_delta_beta)
```
Checking the number of rows (CpG sites) that are overlapping between the two analyses:
```{r nrow, echo=TRUE}
nrow(test_overlapped_data)
```
## Identifying genes for which multiple CpG sites show significant methylation differences:
```{r CpG_hits, echo=TRUE}
test_CpG_hits <- CpG_hits(test_overlapped_data)
```
Gene names and their number of significantly different CpG sites:
```{r CpG_hits2, echo=TRUE}
test_CpG_hits
```
Plotting the candidate genes:
```{r plot, echo=TRUE, fig.height=5, fig.width=7}
plot_candidate_genes(test_overlapped_data)
```
## Investigating candidate genes:
"plot_gene" function generates four plots for any investigated gene: plot 1 (top left) shows the difference in beta values between cases and controls for each probe; plot 2 (top right) shows the mean methylation level for cases (red circles) and controls (blue triangles); and plots 3 and 4 (bottom plots) show the distribution of DNA methylation for each probe, for cases and controls, respectively. Also, an annotation table for all probes arranged from 5' to 3' is generated with the following columns: probe names, gene name, distance from transcription start site (TSS), mean methylation for cases, mean methylation for controls, delta beta (cases minus controls), and t-test p.value. KLHL34 is used as an example:
Function arguments:
x = the filtered and annotated 450k probes. In this example, it is "test_data_annotated"
b = gene name between quotation marks. In this example, "KLHL34" is used.
```{r investigate, echo=TRUE, fig.height=7, fig.width=10}
KLHL34 <- plot_gene(test_data_annotated, "KLHL34", 1, 2, 3, 4)
```
## Using one script:
Here is one script that applies all the previous scripts and produce plots for candidate genes automatically. The function exports two files onto the current working directory: 1. "process.ABC.RAP.plots.pdf" containing plots for all the candidate genes, and 2. "process.ABC.RAP.tables.txt" containing the annotation tables for the candidate genes.
Function arguments on the following order:
x = The normalised beta values in a data matrix format, where conditions are arranged in columns and cg probes are arranged in rows. In this example, it is "test_data".
cases_column_1 = the first column (column number) for cases in the filtered dataset. In this example, it is column 1.
cases_column_n = the last column (column number) for cases in the filtered dataset. In this example, it is column 2.
controls_column_1 = the first column (column number) for controls in the filtered dataset. In this example, it is column 3.
controls_column_n = the last column (column number) for controls in the filtered dataset. In this example, it is column 4.
ttest_cutoff = the cutoff level to filter insignificant p-values. In this example, a cutoff value of 1e-3 is used.
meth_cutoff = the cutoff level for the methylation difference between cases and controls (cases minus controls). In this example, a cutoff value of 0.5 is used.
unmeth_cutoff = the cutoff level for the methylation difference between controls and cases (cases minus controls), consequently it is a negative value. In this example, a cutoff value of -0.5 is used.
high_meth = the upper margin for the desired highly methylated probes. In this example, a value of 0.94 is used.
low_meth = the lower margin for the desired highly unmethylated probes. In this example, a value of 0.06 is used.
```{r process, echo=TRUE}
process.ABC.RAP(test_data, 1, 2, 3, 4, 1e-3, 0.5, -0.5, 0.94, 0.06)
```
| /scratch/gouwar.j/cran-all/cranData/ABC.RAP/inst/doc/ABC.RAP.Rmd |
---
title: "Array Based CpG Region Analysis Package (ABC.RAP)"
author: "Abdulmonem A. Alsaleh, Robert J. Weeks, Ian M. Morison. Department of Pathology, Dunedin School of Medicine, University of Otago, Dunedin, New Zealand"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Array Based CpG Region Analysis Package (ABC.RAP)}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
The ABC.RAP package was developed to analyse human 450k DNA methylation array data and to identify candidate genes showing significant differences in DNA methylation between cases and controls. The following example analysis is based on a small sample dataset, "test_data" (included), containing 10,000 probes for 2 B-ALL cases and 2 controls from Busche *et al.* (2013).
Busche S, Ge B, Vidal R, et al. Integration of high-resolution methylome and transcriptome analyses to dissect epigenomic changes in childhood acute lymphoblastic leukaemia. Cancer Research 2013; 73(14): 4323-4336.
## Loading Files
```{r read.csv, echo=TRUE}
library(ABC.RAP)
data("test_data")
data("nonspecific_probes")
data("annotation_file")
```
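As a quick sanity check before filtering, the dimensions and first rows of the matrix can be inspected (a minimal sketch using base R only; the expected values follow from the dataset description above):
```{r inspect, echo=TRUE, eval=FALSE}
dim(test_data)   # expected: 10000 probes (rows) by 4 samples (columns)
head(test_data)
```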
## Summary of the workflow
The package offers a choice of two workflows:

1. Step by step, as follows
2. Using a single script (see the "using one script" section)

Below is the step-by-step workflow using nine functions, where each step depends on the output of the previous one.
Filtering the nonspecific probes:
```{r filter, echo=TRUE}
test_data_filtered <- filter_data(test_data)
```
Annotation based on "UCSC platform":
```{r annotation, echo=TRUE}
test_data_annotated <- annotate_data(test_data_filtered)
```
## Browsing the data
This function provides a general overview of DNA methylation differences between cases and controls. It produces four plots: the upper two plots show the distribution of DNA methylation for cases (left) and controls (right). The bottom left plot compares DNA methylation between cases and controls, and the bottom right plot shows the difference in DNA methylation between cases and controls (cases minus controls). Summary statistics for the difference in mean DNA methylation between cases and controls are also produced.
Function arguments:
x = the filtered 450k probes from filter_data() function. In this example, it is "test_data_filtered".
cases_column_1 = the first column (column number) for cases in the filtered dataset. In this example, it is column 1.
cases_column_n = the last column (column number) for cases in the filtered dataset. In this example, it is column 2.
controls_column_1 = the first column (column number) for controls in the filtered dataset. In this example, it is column 3.
controls_column_n = the last column (column number) for controls in the filtered dataset. In this example, it is column 4.
```{r plot_data, echo=TRUE, fig.height= 7, fig.width= 10}
plot_data(test_data_filtered, 1, 2, 3, 4)
```
## Applying t-test
This function applies a two-sided, unequal-variance t-test, then retains probes whose p-values are less than or equal to the supplied cutoff. For this example, a cutoff value of 1e-3 is used:
```{r t.test, echo=TRUE}
test_data_ttest <- ttest_data(test_data_filtered, 1, 2, 3, 4, 1e-3)
```
Checking the number of rows in the t-test output:
```{r nrow2, echo=TRUE}
nrow(test_data_ttest)
```
## Delta beta analysis
This function calculates the difference between the beta values of cases and controls. It requires the minimum desired difference in the proportion of DNA methylation for cases minus controls (delta_meth) and for controls minus cases (delta_unmeth). In this example, delta_meth is 0.5 and delta_unmeth is -0.5, based on the summary statistics from the plot_data() function. It also provides the option to select probes where the average beta value of the cases or controls is greater than a cutoff value (e.g. 0.94) or less than a cutoff value (e.g. 0.06).
```{r delta_beta, echo=TRUE}
test_delta_beta <- delta_beta_data(test_data_filtered, 1, 2, 3, 4, 0.5, -0.5, 0.94, 0.06)
```
Checking the number of rows from delta beta analysis:
```{r nrow3, echo=TRUE}
nrow(test_delta_beta)
```
## Overlapping t-test and delta beta outputs
The following function intersects the results of the previous two analyses:
```{r overlap, echo=TRUE}
test_overlapped_data <- overlap_data(test_data_ttest, test_delta_beta)
```
Checking the number of rows (CpG sites) that overlap between the two analyses:
```{r nrow, echo=TRUE}
nrow(test_overlapped_data)
```
## Identifying genes for which multiple CpG sites show significant methylation differences:
```{r CpG_hits, echo=TRUE}
test_CpG_hits <- CpG_hits(test_overlapped_data)
```
Gene names and their number of significantly different CpG sites:
```{r CpG_hits2, echo=TRUE}
test_CpG_hits
```
Plotting the candidate genes:
```{r plot, echo=TRUE, fig.height=5, fig.width=7}
plot_candidate_genes(test_overlapped_data)
```
## Investigating candidate genes:
"plot_gene" function generates four plots for any investigated gene: plot 1 (top left) shows the difference in beta values between cases and controls for each probe; plot 2 (top right) shows the mean methylation level for cases (red circles) and controls (blue triangles); and plots 3 and 4 (bottom plots) show the distribution of DNA methylation for each probe, for cases and controls, respectively. Also, an annotation table for all probes arranged from 5' to 3' is generated with the following columns: probe names, gene name, distance from transcription start site (TSS), mean methylation for cases, mean methylation for controls, delta beta (cases minus controls), and t-test p.value. KLHL34 is used as an example:
Function arguments:
x = the filtered and annotated 450k probes. In this example, it is "test_data_annotated".
b = the gene name in quotation marks. In this example, "KLHL34" is used. The case and control column numbers follow, as in plot_data().
```{r investigate, echo=TRUE, fig.height=7, fig.width=10}
KLHL34 <- plot_gene(test_data_annotated, "KLHL34", 1, 2, 3, 4)
```
## Using one script:
Here is a single function that applies all the previous steps and produces plots for the candidate genes automatically. The function exports two files into the current working directory: 1. "process.ABC.RAP.plots.pdf", containing plots for all the candidate genes, and 2. "process.ABC.RAP.tables.txt", containing the annotation tables for the candidate genes.
Function arguments, in the following order:
x = The normalised beta values in a data matrix format, where conditions are arranged in columns and cg probes are arranged in rows. In this example, it is "test_data".
cases_column_1 = the first column (column number) for cases in the filtered dataset. In this example, it is column 1.
cases_column_n = the last column (column number) for cases in the filtered dataset. In this example, it is column 2.
controls_column_1 = the first column (column number) for controls in the filtered dataset. In this example, it is column 3.
controls_column_n = the last column (column number) for controls in the filtered dataset. In this example, it is column 4.
ttest_cutoff = the cutoff level to filter insignificant p-values. In this example, a cutoff value of 1e-3 is used.
meth_cutoff = the cutoff level for the methylation difference between cases and controls (cases minus controls). In this example, a cutoff value of 0.5 is used.
unmeth_cutoff = the cutoff level for the methylation difference between controls and cases (cases minus controls), consequently it is a negative value. In this example, a cutoff value of -0.5 is used.
high_meth = the upper margin for the desired highly methylated probes. In this example, a value of 0.94 is used.
low_meth = the lower margin for the desired highly unmethylated probes. In this example, a value of 0.06 is used.
```{r process, echo=TRUE}
process.ABC.RAP(test_data, 1, 2, 3, 4, 1e-3, 0.5, -0.5, 0.94, 0.06)
```
| /scratch/gouwar.j/cran-all/cranData/ABC.RAP/vignettes/ABC.RAP.Rmd |
ABCRemoveSmallYields=function(Data,CumSumSmallestPercentage=0.5){
# res = ABCRemoveSmallYields(Data,CumSumSmallestPercentage)
# Data cleaning for ABC analysis:
# the smallest data values, up to a cumulated sum of less than CumSumSmallestPercentage percent
# of the total sum (yield), are removed
# negative data values and NaN are treated as zeros
#
# INPUT
# Data(1:n) the data set, may contain NaN, negative values and very small values
#
# OPTIONAL
# CumSumSmallestPercentage (default = 0.5), the smallest data values up to a cumulated sum of less than CumSumSmallestPercentage percent are removed
#
# OUTPUT
# SubstantialData(1:n1) column vector containing Data>=0 and zeros for all NaN and negative values in Data(1:n)
# Data2SubstantialInd index such that SubstantialData = nantozero(Data(Data2SubstantialInd))
# RemovedInd Data(RemovedInd) is the data that has been removed
# author: MT 08/2015
CleanDatares=ABCcleanData(Data)
CleanData=CleanDatares$CleanedData
# identify the smallest data values that together account for less than CumSumSmallestPercentage percent of the total
SortedData=sort(na.last=T,CleanData,decreasing=FALSE)
TotalYield = sum(SortedData)
CumSumPercentage=round(cumsum(SortedData/TotalYield*100),0)
SmallInd=which(CumSumPercentage<CumSumSmallestPercentage) # in percent
# if any are found
if(length( SmallInd) >0){
# print('Removing the smallest data up to a cumulated sum of less than 0.5% of the total sum (yield):')
SchwellenIndex=tail(SmallInd,1)+1 # index just past the last too-small data value
Schwelle = SortedData[SchwellenIndex] # the smallest value that may still remain
Data2CleanInd = which(CleanData>=Schwelle) # these data values remain
RemovedInd = which(CleanData<Schwelle) # these data values are removed
CleanData = CleanData[Data2CleanInd] # clean up
# print(paste0(length(RemovedInd),' items removed.'))
}else{
Data2CleanInd=CleanDatares$Data2CleanInd
RemovedInd=setdiff(CleanDatares$Data2CleanInd,1:length(Data))
}
return(list(SubstantialData=CleanData,Data2SubstantialInd=Data2CleanInd,RemovedInd=RemovedInd))
}
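
# Example (illustrative sketch):
# res <- ABCRemoveSmallYields(c(0.001, 0.002, 5, 10, 20), CumSumSmallestPercentage = 0.5)
# res$SubstantialData  # the tiny values contributing <0.5% of the total yield are dropped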
| /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/ABCRemoveSmallYields.R |
ABCanalysis=function(Data,ABCcurvedata,PlotIt=FALSE){
# abcres = ABCanalysis(Data=ABCcleanData(Data)$CleanedData)
# divide the Data into 3 classes A, B, C
# A==Data(Aind) : much yield with little effort!
# B==Data(Bind) : effort and yield are in balance
# C==Data(Cind) : much effort, little yield
#
# AB boundary: minimal distance to the ideal point [0,1]
# BC boundary:
#
# INPUT
# Data(1:n) unequally distributed data, so that an ABC analysis is meaningful
#
# OPTIONAL
# PlotIt if set, a plot is made; e.g. PlotIt=1, PlotIt=TRUE, PlotIt='On', etc.
# ABCcurvedata list V from ABCcurve()
#
# OUTPUT
# Aind,Bind,Cind such that:
# A==Data(Aind) : much yield with little effort!
# B==Data(Bind) : effort and yield are in balance
# C==Data(Cind) : much effort, little yield
# smallestAData: AB boundary: minimal distance to the ideal point [0,1]
# smallestBData: BC boundary: slope of the ABC curve == 1
#
# AlimitIndInInterpolation,BlimitIndInInterpolation indices of the ABC limits in [p,ABC]
# [p,ABC] the interpolation curve of the ABC plot.
#
# A=c(Ax,Ay) Pareto point, minimum distance to (0,1) = minimal unrealized potential == min in Effort and min in (1-
# B=c(Bx,By) break-even point: dABC(Bx) == 1
# C=c(Cx,Cy) submarginal point: minimum distance to (Bx,1)
# ABexchanged bool, TRUE if point A is the break-even point and point B is the Pareto point, FALSE otherwise
# author: MT 11/2014
#
requireNamespace("plotrix")
#Uses:
# ABCcurve; in the else case: ABCanalysisPlot
if(missing(Data)){
if(missing(ABCcurvedata)){stop('argument "Data" and ABCcurvedata are missing')}else{
Data=NULL # then ABCcurvedata must be present
}
}
if(!PlotIt){ # if no plot is requested, the data must be computed here
if(missing(ABCcurvedata)){
# CleanData=1; # means the small yield values that together make up <0.5% of the total yield
ABCcurvedata = ABCcurve(Data)
}
Effort=ABCcurvedata$Curve[,'Effort']
Yield=ABCcurvedata$Curve[,'Yield']
# Indizies=ABCcurvedata$DataInd
# distance to the (0,1) point, computed with the Euclidean metric
curve=cbind(Effort,Yield)
distPareto=c()
point=t(as.matrix(c(0,1)))
for(i in 1:length(Effort)){
distPareto[i]=sum(abs(point-curve[i,])^2)
}
ParetoPointInd=which.min(distPareto) # first minimum
# determine the AB point on the spline curve
ParetoPoint=curve[ParetoPointInd,]
# for the BC point, determine the derivative of the curve
# n=length(Effort)
# Curvengleichung=splinefun(Effort,Yield)
# ableitung=Curvengleichung(1:n/n,1)
ableitung=ABCcurvedata$Slope[,'dABC']
# find the minimum of the difference between the derivative and the predefined slope
BreakEvenInds=which.min(abs(ableitung - 1)) # constrains B, derivative==1
# determine the spline curve values at the break-even point
BreakEvenInd=max(BreakEvenInds) # if there is more than one, take the rightmost
BreakEvenPoint=curve[BreakEvenInd,] #Last
if(Effort[BreakEvenInd]<Effort[ParetoPointInd]){
ABexchanged=TRUE
JurenInd=BreakEvenInd
Bx=Effort[ParetoPointInd]
# By=Yield[BreakEvenInd]
A=BreakEvenPoint
B=ParetoPoint
}else{
JurenInd=ParetoPointInd
Bx=Effort[BreakEvenInd]
#By=Yield[ParetoPointInd]
ABexchanged=FALSE
A=ParetoPoint
B=BreakEvenPoint
}
distBx=c()
Juren=t(as.matrix(c(Bx,1)))
for(i in 1:length(Effort)){
distBx[i]=sum(abs(Juren-curve[i,])^2)
}
bgrenze=which.min(distBx) # first minimum
#print(curve[bgrenze[1],])
C=curve[bgrenze[1],]
## split the data vector into 3 groups
if(!is.null(Data)){
# compute the limit values at the corresponding boundary positions
ABLimit = sort(Data, decreasing = T)[round(A[1]*length(Data))]
BCLimit = sort(Data, decreasing = T)[round(C[1]*length(Data))]
Aind = which(Data > ABLimit)
Bind = which((Data <= ABLimit) & (Data >= BCLimit))
Cind = which(Data < BCLimit)
# browser()
#Instead of searching in y-values we search in x-values, since these are unique;
#y-values, in contrast, can be duplicated in special cases
# Indizies=order(Data,decreasing=TRUE)
#rows=length(Data)
#x=1:rows/rows
# compare the empirical curve with the measure generated from the theoretical spline curve
#Aindvor=which(x<A[1],arr.ind=TRUE)# find all indices up to the AB limit
#ABind=which(x<C[1],arr.ind=TRUE) # find all indices up to the BC limit
#set the indices in the unsorted data vector
#Bind=Indizies[setdiff(ABind,Aindvor)] #B indices are the set difference of the two lines above
#Aind=Indizies[Aindvor]
#Cind=Indizies[which(x>C[1],arr.ind=TRUE)]
# #First: generate empirical curves directly from the data
# sorted=sort(na.last=T,Data,decreasing=TRUE)
# Indizies=order(Data,decreasing=TRUE)
# N=sum(Data)
# Anteil=sorted/N
# y=cumsum(Anteil)
# # compare the empirical curve with the measure generated from the theoretical spline curve
# Aindvor=which(y<A[2],arr.ind=TRUE)# find all indices up to the AB limit
# ABind=which(y<C[2],arr.ind=TRUE) # find all indices up to the BC limit
# #set the indices in the unsorted data vector
# Bind=Indizies[setdiff(ABind,Aindvor)] #B indices are the set difference of the two lines above
# Aind=Indizies[Aindvor]
# Cind=Indizies[which(y>C[2],arr.ind=TRUE)]
}else{ # no empirical data set, only a theoretical curve was supplied
Bind=NULL
Cind=NULL
Aind=NULL
ABLimit=NULL # defined here so that the return list below does not fail when no Data is given
BCLimit=NULL
warning('No Data given: Calculating curve and points by given ABCcurvedata')
}
return(list(Aind=Aind,Bind=Bind,Cind=Cind,ABexchanged=ABexchanged,A=A,B=B,C=C,smallestAData=Yield[JurenInd],
smallestBData=Yield[bgrenze],AlimitIndInInterpolation=JurenInd,BlimitIndInInterpolation=bgrenze,p=Effort,ABC=Yield,
ABLimit = ABLimit, BCLimit = BCLimit))
# if a plot is desired
}else{ # otherwise the data are computed via ABCanalysisPlot and ABCplot is used
if(missing(Data)|is.null(Data)){
abc=ABCanalysisPlot(ABCcurvedata=ABCcurvedata)$ABCanalysis
}else{
abc=ABCanalysisPlot(Data)$ABCanalysis
}
}
}
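
# Example (illustrative sketch):
# set.seed(42)
# x <- c(rexp(90, rate = 10), rexp(10, rate = 0.1))  # skewed data, suited for an ABC analysis
# res <- ABCanalysis(x)
# length(res$Aind); length(res$Bind); length(res$Cind)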
| /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/ABCanalysis.R |
ABCanalysis4curve <- function(p, ABC, PlotIt){
# V = ABCanalysis4curve(p,ABC,PlotIt)
# calculate the points A, B, C for a given ABC curve
#
# INPUT
# [p,ABC] the values of the ABC curve
#
# OPTIONAL
# PlotIt if TRUE, plot the ABC curve with the computed points
#
# OUTPUT
#
# [ABx,ABy] coordinates of limiting point between set A and set B (Pareto point)
# [BCx,BCy] coordinates of limiting point between set B and set C (submarginal point)
# [Bx,By] B point: dABC(Bx) == 1 (break-even point)
# compute the A, B, C points; the ABC curve is given (must be sorted in descending order)
EmpiricDeviation <- function(p, value){
# calculates the finite differences dp/dvalue at the positions p. A function graph p vs value is expected.
# deviations = EmpiricDeviation(p, value)
# INPUT
# p positions at which values are given, and deviations will be calculated
# value values at the positions in p
# OUTPUT
# deviations calculated deviations for the positions at p
# Author: FL
dp = c()
dvalue = c()
for(i in 1:(length(value)-1)){
dp[i] = p[i+1] - p[i]
dvalue[i] = value[i+1] - value[i]
}
return(dp/dvalue)
}
Effort = p
Yield = ABC
# Pareto point: smallest distance to (0,1)
ParetoPunktIndex = which.min(p^2 + ((1-ABC)^2))
# break-even point: derivative = 1
dABC = EmpiricDeviation(Effort,Yield)
minValue = min(abs(dABC-1));
BreakEvenPunktIndex = tail(which(abs(dABC-1) ==minValue),1)
# point AB
AB = min(ParetoPunktIndex, BreakEvenPunktIndex)
# point between AB and BC
B = max(ParetoPunktIndex, BreakEvenPunktIndex)
# submarginal point: minimal distance to the upper edge above AB
SubmarginalPunktIndex = which.min((Effort-Effort[AB])^2 + (1- Yield)^2)
# point BC
BC = SubmarginalPunktIndex
if(PlotIt){
ylab='fraction of sum of largest data'
xlab='fraction of data'
title='ABC Analysis'
farb.col=c('blue',colors()[452],'green',colors()[175])
farb.labels <- c(expression(italic("data")),expression(italic("identity")),expression(italic("uniform")),'')
# the actual plot
plot(Effort,Yield, xlim=c(0,1),ylim=c(0,1),xaxs='i',yaxs='i',xlab=xlab,ylab=ylab,type='l',
col=farb.col[1],main=title, lwd=2)
# reference distributions
pNorm = seq(from=0,by=0.01,to=1)
A=0
MaxX=1
B = MaxX-A
normdistr = (-0.5*B*pNorm^2+MaxX*pNorm)/(A+0.5*B)
identdistr = pNorm
points(pNorm,normdistr, type="l", col=farb.col[3]) # ABC curve of the uniform distribution
points(pNorm,identdistr, type="l", col=farb.col[2]) # identity line
# diagonal
points(c(0,1),c(1,0),type='l',lty=2,lwd=1,col=farb.col[4],asp=1)
# draw the points
points(Effort[AB], Yield[AB],pch=8,lwd=1.5,col='green',cex=1.5,asp=1)
points(Effort[BC],Yield[BC],pch=8,lwd=1.5,col='blue',cex=1.5,asp=1)
lines(c(0, Effort[AB], Effort[AB]), c(Yield[AB], Yield[AB], 0), col="red")
lines(c(0, Effort[BC], Effort[BC]), c(Yield[BC], Yield[BC], 0), col="red")
points(Effort,Yield,xlim=c(0,1),ylim=c(0,1),lwd=1,col=farb.col[1],main=title,type='l')
}
return(list(BreakEvenPunktIndex = BreakEvenPunktIndex,
ParetoPunktIndex = ParetoPunktIndex,
SubmarginalPunktIndex = SubmarginalPunktIndex,
ABx = Effort[AB],
ABy = Yield[AB],
BCx = Effort[BC],
BCy = Yield[BC],
Bx = Effort[B],
By = Yield[B]))
}
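
# Example (illustrative sketch):
# cv <- ABCcurve(c(rexp(90, rate = 10), rexp(10, rate = 0.1)))
# ABCanalysis4curve(cv$Curve[, "Effort"], cv$Curve[, "Yield"], PlotIt = FALSE)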
| /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/ABCanalysis4curve.R |
ABCanalysisPlot=function(Data,LineType=0,LineWidth=3,ShowUniform=TRUE,title='ABC analysis',limits=TRUE,MarkPoints=TRUE,ABCcurvedata,ResetPlotDefaults=TRUE){
# res= ABCanalysisPlot(Data=ABCcleanData(Data)$CleanedData,style='2')
# display ABC curve: cumulative percentage of largest Data (Effort) vs cumulative percentage of sum of largest Data (Yield)
#
# INPUT
# Data(1:n) or [frequency(1:n),Data(1:n)] or
# Data = [ABCx,ABCy] iff ABCx(1) ==0
#
# OPTIONAL
# LineType for plot, default: LineType=0 for a line; for other numbers see the documentation of pch
# LineWidth width of the ABC curve
# ShowUniform ==1 (default) means the ABC curve of the uniform distribution Uniform[0,any] is drawn as well
# title string, label for the title of the plot
# limits =TRUE partition lines are drawn; default = TRUE
# MarkPoints MarkPoints=TRUE => MarkPointsOfInterest, default = TRUE
# ABCcurvedata list V from ABCcurve()
#
# OUTPUT
# ABC output of ABCplot
# A=c(Ax,Ay) A point: minimum distance to (0,1) = minimal unrealized potential == min in Effort and min in (1-
# ABCanalysis list V of all results from ABCanalysis()
#
# author: MT 11/2014
# 1.Editor: MT 01/2015
# Note: this is a convenience function => warnings are suppressed as well
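#
# Example (illustrative sketch):
# ABCanalysisPlot(c(rexp(90, rate = 10), rexp(10, rate = 0.1)))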
#suppressWarnings(require(plotrix))
style=TRUE
#farb.col=c('black','red','blue','green',colors()[452],colors()[57])
farb.col=c('black','red','blue','green',colors()[452],'red')
# farb.col=c('paleturquoise3','magenta','blue','palegreen4','palegreen3','plum2)
#farb.labels <- c(expression(italic("Equilibrium")),expression(italic("set limits")),expression(italic("data")),expression(italic("uniform")),expression(italic("identity")))
farb.labels <- c('',expression(italic("set limits")),expression(italic("data")),expression(italic("uniform")),expression(italic("identity")))
requireNamespace("plotrix")
if(missing(style)){style=FALSE}
if(missing(Data)){
Data=NULL # convenience path: the curve may also be passed as a list
curve=ABCcurvedata
}else{
curve = suppressWarnings(ABCcurve(Data))
}
def.par <- par(no.readonly = TRUE) # save default, for resetting...
if(style==FALSE){
abc=suppressWarnings(ABCplot(Data,LineType=LineType,LineWidth=LineWidth,ShowUniform=ShowUniform,title=title,defaultAxes=FALSE,ABCcurvedata=ABCcurvedata))
}else{#Equilibrium, set limits, data, uniform, identity, ABC-Guppen Buchstaben
#if(missing(title)) title='ABC plot for data grouping'
abc=suppressWarnings(ABCplot(Data,LineType=LineType,LineWidth=LineWidth,ShowUniform=ShowUniform,title=title,defaultAxes=FALSE,ABCcurvedata=ABCcurvedata))
}
axis(1,xlim=c(0,1),col="black",las=1, at=seq(from=0,to=1,by=0.1)) # x-axis
axis(2,ylim=c(0,1),col="black",las=1, at=seq(from=0,to=1,by=0.1)) # y-axis
abcres = ABCanalysis(Data,ABCcurvedata=curve) # note: PlotIt must not be passed here!
if(MarkPoints){
if(style==FALSE){
points(abcres$A[1],abcres$A[2],pch=8,lwd=1.5,col='red',cex=1.5,asp=1)
points(abcres$B[1],abcres$B[2],pch=8,lwd=1.5,col='green',cex=1.5,asp=1)
#text(abcres$A[1],abcres$A[2]+0.1,labels=paste0('Ax=',round(abcres$A[1],2)),asp=1)
points(abcres$C[1],abcres$C[2],pch=8,lwd=1.5,col='blue',cex=1.5)
#text(abcres$C[1],abcres$C[2]+0.1,labels=paste0('Cx=',round(abcres$C[1],2)),asp=1)
}
}
nA=length(abcres$Aind)
nB=length(abcres$Bind)
nC=length(abcres$Cind)
if(style==FALSE){
if(limits){
if(!MarkPoints){abcres = ABCanalysis(Data,ABCcurvedata=curve)} # note: PlotIt must not be passed here
#points(c(abcres$A[1],abcres$A[1]),c(0,abcres$C[2]),type='l',col='red')
# points(c(0,abcres$C[1]),c(abcres$A[2],abcres$A[2]),type='l',col='red')
#points(c(abcres$C[1],abcres$C[1]),c(0,1),type='l',col='red')
# points(c(0,1),c(abcres$C[2],abcres$C[2]),type='l',col='red')
linientyp=1 # solid line
# lines to the y-axis for the set limits
points(c(0,abcres$C[1]),c(abcres$C[2],abcres$C[2]),type='l',col=farb.col[2],lty=linientyp)
points(c(0,abcres$A[1]),c(abcres$A[2],abcres$A[2]),type='l',col=farb.col[2],lty=linientyp)
# lines to the x-axis for the set limits
points(c(abcres$A[1],abcres$A[1]),c(0,abcres$A[2]),col=farb.col[2],type='l',lty=linientyp)
points(c(abcres$C[1],abcres$C[1]),c(0,abcres$C[2]),col=farb.col[2],type='l',lty=linientyp)
if(!is.null(Data)){
if(abs(abcres$A[1]-abcres$C[1])>0.1){
plotrix::thigmophobe.labels(x=abcres$A[1]/2,y=abcres$A[2],paste0('A:n=',nA),col='black', cex=1) # size
plotrix::thigmophobe.labels(x=(abcres$C[1]-abcres$A[1])/2+abcres$A[1],y=abcres$C[2],paste0('B:n=',nB),col='black',cex=1)
}else{
plotrix::thigmophobe.labels(x=abcres$A[1]-0.05,y=abcres$A[2],paste0('A:n=',nA),col='black', cex=1) # size
plotrix::thigmophobe.labels(x=abcres$C[1]+0.025,y=abcres$C[2]+0.025,paste0('B:n=',nB),col='black',cex=1)
}
plotrix::thigmophobe.labels(x=(1-abcres$C[1])/2+abcres$C[1],y=abcres$C[2],paste0('C:n=',nC),col='black',cex=1)
}
}
}else{
box(col='grey')
# boundary points
points(abcres$A[1],abcres$A[2],pch=8,lwd=1.5,col=farb.col[2],cex=1.5,asp=1)
points(abcres$B[1],abcres$B[2],pch=8,lwd=1.5,col=farb.col[4],cex=1.5,asp=1)
points(abcres$C[1],abcres$C[2],pch=8,lwd=1.5,col=farb.col[3],cex=1.5)
# boundary point labels
if(abs(abcres$A[1]-abcres$C[1])>0.1){
plotrix::thigmophobe.labels(x=abcres$A[1],y=abcres$A[2],'A|B',col=farb.col[2], cex=1) # size
plotrix::thigmophobe.labels(x=abcres$C[1],y=abcres$C[2],'B|C',col=farb.col[2],cex=1)
}else{
plotrix::thigmophobe.labels(x=abcres$A[1]-0.05,y=abcres$A[2],'A|B',col=farb.col[2], cex=1) # size
plotrix::thigmophobe.labels(x=abcres$C[1]+0.025,y=abcres$C[2]+0.025,'B|C',col=farb.col[2],cex=1)
}
# ABC group letters
plotrix::thigmophobe.labels(x=abcres$A[1]/2,y=abcres$A[2]/4,'A',col=farb.col[6], cex=2.6) # size
plotrix::thigmophobe.labels(x=(abcres$A[1]+abcres$C[1])/2,y=abcres$A[2]/4,'B',col=farb.col[6],cex=2.1)
plotrix::thigmophobe.labels(x=(abcres$A[1]+abcres$C[1])/2+max(abs(abcres$A[1]-abcres$C[1]),0.1),y=abcres$A[2]/4,'C',col=farb.col[6],cex=1.8)
if(!is.null(Data)){
plotrix::thigmophobe.labels(x=abcres$A[1]/2,y=abcres$A[2]/4-0.05,paste0('n=',nA),col='black', cex=0.8) # size
plotrix::thigmophobe.labels(x=(abcres$A[1]+abcres$C[1])/2,y=abcres$A[2]/4-0.05,paste0('n=',nB),col='black',cex=0.8)
plotrix::thigmophobe.labels(x=(abcres$A[1]+abcres$C[1])/2+max(abs(abcres$A[1]-abcres$C[1]),0.1)+0.02,y=abcres$A[2]/4-0.05,paste0('n=',nC),col='black',cex=0.8)
}
# detach(package:plotrix)
# tick marks on the y-axis
points(c(0,0.01),c(abcres$C[2],abcres$C[2]),type='l',col=farb.col[2],lwd=2)
points(c(0,0.01),c(abcres$A[2],abcres$A[2]),type='l',col=farb.col[2],lwd=2)
#linientyp=5 # dashed
linientyp=1 # solid line
# lines to the y-axis for the set limits
points(c(0,abcres$C[1]),c(abcres$C[2],abcres$C[2]),type='l',col=farb.col[2],lty=linientyp)
points(c(0,abcres$A[1]),c(abcres$A[2],abcres$A[2]),type='l',col=farb.col[2],lty=linientyp)
# tick marks on the x-axis
points(c(abcres$C[1],abcres$C[1]),c(0,0.01),type='l',col=farb.col[2],lwd=2)
points(c(abcres$A[1],abcres$A[1]),c(0,0.01),type='l',col=farb.col[2],lwd=2)
# lines to the x-axis for the set limits
points(c(abcres$A[1],abcres$A[1]),c(0,abcres$A[2]),col=farb.col[2],type='l',lty=linientyp)
points(c(abcres$C[1],abcres$C[1]),c(0,abcres$C[2]),col=farb.col[2],type='l',lty=linientyp)
# diagonal: skewness / equilibrium point
# pUnif = seq(from=0,by=0.001,to=1)
# A = min(Data,na.rm=TRUE)
# if(!is.null(Data)){
# A = min(Data,na.rm=TRUE)
# MaxX = max(Data,na.rm=TRUE)
# }else{
# A=0
# MaxX=1
# }
# Bmax = MaxX-A
# ABCuniform = (-0.5*Bmax*pUnif^2+MaxX*pUnif)/(A+0.5*Bmax)
# ind=which.min(abs(pUnif-(1-ABCuniform)))
# # points(c(0,1),c(1,0),type='c',ljoin='mitre',col=farb.col[1])
}
# so that the letter C and the legend do not overlap
if((abcres$A[1]+abcres$C[1])/2+max(abs(abcres$A[1]-abcres$C[1]),0.1)+0.02<0.8){
legend('bottomright',legend=farb.labels,text.col=farb.col,bty = "n",y.intersp=0.8)
}else{
legend('right',legend=farb.labels,text.col=farb.col,bty = "n",y.intersp=0.8)
}
if(ResetPlotDefaults)
par(def.par)
invisible(list(ABC=abc,ABCanalysis=abcres))
} | /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/ABCanalysisPlot.R |
ABCcleanData=function(Data){
# V = ABCcleanData(Data)
# Data cleaning for ABC analysis
# only the first column of Data is used
# Data < 0 are set to zero, NA in Data are set to zero
# if RemoveSmallYields == TRUE => the smallest data up to a cumulated sum of less than
# 0.5% of the total sum (yield) is removed
#
# INPUT
# Data(1:n) the data set, may contain NaN, negative values and very small values
#
# OUTPUT List V with
# V$CleanedData(1:n1) column vector containing Data>=0 and zeros for all NaN and negative values in Data(1:n)
# V$Data2CleanInd index such that CleanedData = nantozero(Data(Data2CleanInd))
#
# author MT 01/2015, reimplemented from ALU's Matlab version
# 1.Editor: MT 08/2015 RemoveSmallYields moved into its own function
if(!is.vector(Data)){
n=nrow(Data)
d=ncol(Data)
warning('Only vectors should be used!')
if(d>1){ #Data is Matrix or data.frame
warning('Using only first column of data')
UncleanData=as.vector(Data[,1]) # use only first column
}else{
UncleanData=Data
}
}else{
UncleanData=Data
}
UncleanData=as.numeric(unname(UncleanData)) # automatic conversion of chars/strings to NA
rowsbefore=length(UncleanData)
# replace NAs with zeros
nabools=is.finite(UncleanData)
Data2CleanInd=which(nabools==FALSE)
CleanData=UncleanData
if(length(Data2CleanInd)) CleanData[Data2CleanInd]=0
# set negative values to zero
DataNeg=CleanData[CleanData<0]
cols=1
bools=CleanData %in% DataNeg
CleanData[bools]<-0
rows=rowsbefore-sum(bools)-sum(!nabools)
if(rowsbefore>rows){
warning(paste0(rows,' of ',rowsbefore,' items are positive and being used for further calculations.'))
# warning('Please use Data[Data>0], before using Data[Aind] etc.')
}
return(list(CleanedData=CleanData,Data2CleanInd=Data2CleanInd))
}
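
# Example (illustrative sketch):
# ABCcleanData(c(1, 2, NA, -3, 5))$CleanedData  # -> 1 2 0 0 5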
| /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/ABCcleanData.R |
ABCcurve=function(Data,p){
# res = ABCcurve(Data,p)
# ABC curve: cumulative fraction of largest Data in population vs fraction of population
#
# INPUT
# Data(1:n) data vector, only positive data will be used
#
# OPTIONAL
#
# p x-values for the spline interpolation: if given, these are used
#
# OUTPUT List V with
# Curve a list of Effort and Yield
# Effort: fraction of population in [0,1]
# Yield: cumulative fraction of largest Data in [0,1]
#CleanedData vector [1:m], column vector containing Data>=0 and zeros for all NA, NaN and negative values in Data(1:n)
#Slope a list of p and dABC
# p: x-values for the spline interpolation, default: p = seq(0, 1, by = 0.01)
# dABC: first derivative of the ABC curve Yield(Effort)
#
#
#author: MT 11/2014
# 1.Editor MT 01/2015
# 2.Editor: FL
# 3.Editor: MT 11/2017: documentation rewritten
cleanData=ABCcleanData(Data)$CleanedData
rows=length(cleanData)
if(missing(p)){
if(rows<101){ p=seq(from=0,to=1,by=0.01)
}else{ p=seq(from=0,to=1,by=0.001)}
}
sorted=sort(na.last=T,cleanData,decreasing=TRUE)
#N=sum(cleanData)
#Anteil=sorted/N
Anteil=sorted
y=cumsum(Anteil)
y=y/tail(y,1)
x=(1:rows)/rows
## the curve must pass through the 2 points 0 and 1
if(head(y,1)>0){
x=c(0,x)
y=c(0,y)
}
if(tail(x,1)<1){ # following the Matlab implementation; possibly superfluous
x=c(x,1)
y=c(y,1)
}
## Spline Interpolation
V=spline(x,y,xout=p)
Effort=V$x
Yield=V$y
# guard against interpolation overshoot
inds=which(Yield>=1)
ind1=min(inds)
if(ind1<length(Yield))
Yield[c(ind1:length(Yield))]=1
n=length(Effort)
Curvengleichung=splinefun(Effort,Yield)
ableitung=Curvengleichung(1:n/n,1)
return(list(Curve=cbind(Effort=Effort,Yield=Yield),CleanedData=cleanData,Slope=cbind(p=p,dABC=ableitung)))
}
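
# Example (illustrative sketch):
# cv <- ABCcurve(c(rexp(90, rate = 10), rexp(10, rate = 0.1)))
# head(cv$Curve)  # interpolated Effort/Yield pairs of the ABC curve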
| /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/ABCcurve.R |
ABCplot=function(Data,LineType=0,LineWidth=3,ShowUniform=TRUE,title,ABCcurvedata,defaultAxes=TRUE){
# res= ABCplot(Data)
# display ABC curve: cumulative percentage of largest Data (Effort) vs cumulative percentage of sum of largest Data (Yield)
#
# INPUT
# Data(1:n) or [frequency(1:n),Data(1:n)] or
# Data = [ABCx,ABCy] iff ABCx(1) ==0
#
# OPTIONAL
# LineType for plot, default: LineType=0 for a line; for other numbers see the documentation of pch
# LineWidth width of the ABC curve
#
# ShowUniform ==1 (default) means the ABC curve of the uniform distribution Uniform[0,any] is drawn as well
# title string, label for the title of the plot
# style type fancy if you would like to plot in a different style
# ABCcurvedata input from ABCcurve
# defaultAxes FALSE
#
# OUTPUT
# ABCx cumulative fraction of the population in percent
# ABCy cumulative fraction of the sum of the largest Data in percent
# author: MT 11/2014
# 1.Editor MT 01/2015
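#
# Example (illustrative sketch):
# ABCplot(c(rexp(90, rate = 10), rexp(10, rate = 0.1)))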
style=TRUE
# require(Hmisc) # to be solved differently at some point
check=F
if(missing(Data)){
curve = ABCcurvedata
Data=NULL
check=T
}
if(is.null(Data)&!check){
curve = ABCcurvedata
}
if(missing(title)){title='ABC plot'}
if(missing(ABCcurvedata)){
curve = ABCcurve(Data)}
Effort=curve$Curve[,'Effort']
Yield=curve$Curve[,'Yield']
par(pty="s")# Plot immer quadratisch
if(missing(style)){
ylab='fraction of sum of largest data'
xlab='fraction of data'
farb.col=c('blue',colors()[452],'green',colors()[175])
farb.labels <- c(expression(italic("data")),expression(italic("identity")),expression(italic("uniform")),'')
}else{
#ylab='yield Y'
#xlab='effort E'
#farb.col=c('blue','palegreen4','palegreen3')
ylab='fraction of sum of largest data'
xlab='fraction of data'
farb.col=c('blue',colors()[452],'green',colors()[175])
#farb.labels <- c(expression(italic("data")),expression(italic("identity")),expression(italic("uniform")),expression(italic("equilibrium")))
farb.labels <- c(expression(italic("data")),expression(italic("identity")),expression(italic("uniform")),expression(italic("equilibrium")))
}
if(!ShowUniform){ # then this plot is in the foreground
#farb.labels=farb.labels[c(1,3)]
if(LineType==0){
plot(Effort,Yield,xlim=c(0,1),ylim=c(0,1),xaxs='i',yaxs='i',xlab=xlab,ylab=ylab,type='l',lwd=LineWidth,col=farb.col[1],main=title,axes=defaultAxes)
}else{
plot(Effort,Yield,xlim=c(0,1),ylim=c(0,1),asp=1,xaxs='i',yaxs='i',xlab=xlab,ylab=ylab,pch=LineType,lwd=LineWidth,col=farb.col[1],main=title,axes=defaultAxes)
}
}else{
# uniform distribution
pUnif = seq(from=0,by=0.01,to=1)
if(!is.null(curve$CleanedData)){
A = min(curve$CleanedData,na.rm=TRUE)
MaxX = max(curve$CleanedData,na.rm=TRUE)
if(A==MaxX){
A=0
MaxX=1
}
}else{
A=0
MaxX=1
}
B = MaxX-A
ABCuniform = (-0.5*B*pUnif^2+MaxX*pUnif)/(A+0.5*B)
if(missing(style)){
plot(pUnif,ABCuniform,type='l',col=farb.col[3],asp=1,xaxs='i',yaxs='i',xlab=xlab,ylab=ylab,axes=defaultAxes,main=title)
points(c(0,1),c(1,0),type='l',lty=2,lwd=1,col=farb.col[4],asp=1) # diagonal
}else{
plot(pUnif,ABCuniform,type='l',col=farb.col[3],asp=1,lwd=1,xaxs='i',yaxs='i',xlab=xlab,ylab=ylab,axes=defaultAxes,main=title)
points(c(0,1),c(1,0),type='l',lty=2,lwd=1,col=farb.col[4],asp=1) # diagonal
#points(c(0,1),c(1,0),type='l',col=colors()[234],asp=1) # diagonal
}
if(LineType==0){
points(Effort,Yield,xlim=c(0,1),ylim=c(0,1),lwd=LineWidth,col=farb.col[1],main=title,type='l')
}else{
points(Effort,Yield,xlim=c(0,1),ylim=c(0,1),pch=LineType,lwd=LineWidth,col=farb.col[1],main=title,type='l')
}
}
ableitung=curve$Slope[,'dABC']
# find the minimum of the difference between the derivative and the predefined slope
BreakEvenInds=which.min(abs(ableitung - 1)) # constrains B, derivative==1
# determine the spline curve values at the break-even point
BreakEvenInd=max(BreakEvenInds) # if there is more than one, take the rightmost
Kurve=cbind(Effort,Yield)
BreakEvenPoint=Kurve[BreakEvenInd,] #Last
points(BreakEvenPoint[1],BreakEvenPoint[2],pch=8,lwd=1.5,col='green',cex=1.5,asp=1)
if(!is.null(Data)){
if(length(curve$CleanedData)<20){
sorted=sort(na.last=T,curve$CleanedData,decreasing=TRUE)
Anteil=sorted
y=cumsum(Anteil)
y=y/tail(y,1)
x=(1:length(curve$CleanedData))/length(curve$CleanedData)
points(x,y,pch=1,lwd=1.5,col='blue',cex=1.5,asp=1)
}
}
if(missing(style)){
points(Effort,Effort,type='l',lwd=1,col=farb.col[2],asp=1) # identity
}else{
points(Effort,Effort,type='l',lwd=0.1,col=farb.col[2],asp=1) # identity
}
if(defaultAxes){
axis(1, at=seq(from=0,to=1,by=0.1))
axis(2, at=seq(from=0,to=1,by=0.1))
if(defaultAxes){
legend("bottomright",bty = "n",legend=farb.labels,text.col=farb.col)
}
}
# if(!missing(style)){
# #requireRpackage('Hmisc')
# minor.tick(ny=20, nx=20)
# box()
# }else{
# minor.tick(ny=20, nx=20)
# box(col='grey')
# }
invisible(list(ABCx=Effort,ABCy=Yield))
} | /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/ABCplot.R |
Gini4ABC <- function(p, ABC){
# Gini = Gini4ABC(p,ABC)
# Gini index for an ABC curve
#
# INPUT
# p,ABC x/y coordinates of ABC curve ABC(p), p(end) == 1 or 100;
#
# OUTPUT
# Gini gini index i.e. the integral over ABC(p) / 0.5 *100
# given in percent i.e in [0..100]
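#
# Example (illustrative sketch):
# cv <- ABCcurve(c(rexp(90, rate = 10), rexp(10, rate = 0.1)))
# Gini4ABC(cv$Curve[, "Effort"], cv$Curve[, "Yield"])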
trapz <- function(x,y){
idx = 2:length(x)
return (as.double( (x[idx] - x[idx-1]) %*% (y[idx] + y[idx-1])) / 2)
}
# normalize to fractions
p = p / tail(p,1)
ABC = ABC / tail(ABC,1)
# area under the ABC curve
Area = trapz(p, ABC)
Gini = Area*200 - 100
return(Gini)
} | /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/Gini4ABC.R |
GiniIndex= function(Data,p){
#[Gini,p,ABC,CleanedData] = GiniIndex(Data,p)
# Gini = GiniIndex(Data) # calculation of the Gini-Index
# calculation of the Gini-Index from Data
# uses ABCcurve and Gini4ABC
#
# INPUT
# Data(1:n) data set, it is cleaned using CleanedData = ABCcleanData(Data)
# before results are calculated
#
# OPTIONAL
# p x-values for Spline Interpolation of ABC curve
#
# OUTPUT
# Gini gini index i.e. the integral over Area *200 -100
# given in percent i.e in [0..100]
# [p,ABC] ABC curve spline interpolated for x values in p
# CleanedData = ABCcleanData(Data)
# author: MT, reimplemented from ALU's Matlab version
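#
# Example (illustrative sketch):
# GiniIndex(c(rexp(90, rate = 10), rexp(10, rate = 0.1)))$Gini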
V= ABCcurve(Data,p)
#ABC=V$Slope$dABC
ABCx=V$Curve[,1]
ABCy=V$Curve[,2]
Gini = Gini4ABC(ABCx,ABCy )
if(missing(p)) p=ABCx
return(list(Gini=Gini,p=p,ABC=ABCy,CleanedData=V$CleanedData))
} | /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/GiniIndex.R |
calculatedABCanalysis=function(Data){
# Vlist= calculatedABCanalysis(Data)
# Vlist$Aind
# Vlist$Bind
# Vlist$Cind
# Vlist$ABlimit
# Vlist$BClimit
# computes the ABC analysis without plots and other extras
# computed via ABCanalysis
#
# INPUT
# Data[1:n] data set, it is cleaned using CleanedData = ABCcleanData(Data,RemoveSmallYields)
# before results are calculated
#
# OUTPUT
# Aind,Bind,Cind indices such that:
# Data[Aind] === set A, the "critical few"
# Data[Bind] === set B
# Data[Cind] === set C, the "trivial many"
# ABlimit the limit between sets A and B: [SetA,Aind] = find(Data <=ABlimit );
# BClimit the limit between sets B and C: [SetC,Cind] = find(Data > BClimit );
# author: MT 07/2015
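#
# Example (illustrative sketch):
# res <- calculatedABCanalysis(c(rexp(90, rate = 10), rexp(10, rate = 0.1)))
# str(res)  # indices for groups A, B, C plus the AB and BC limits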
abcres = ABCanalysis(Data=ABCcleanData(Data)$CleanedData)
return(list(Aind=abcres$Aind,Bind=abcres$Bind,Cind=abcres$Cind,ABlimit=abcres$smallestAData,BClimit=abcres$smallestBData))
} | /scratch/gouwar.j/cran-all/cranData/ABCanalysis/R/calculatedABCanalysis.R |
#' An implementation of the Artificial Bee Colony (ABC) Algorithm
#'
#' This is an implementation of Karaboga (2005) ABC optimization algorithm. It
#' was developed upon the basic version programmed in \code{C} and distributed
#' at the algorithm's official website (see the references).
#'
#' Please consider that this version is in an alpha state of development, thus
#' any evident (precision) error should be blamed on the package author (not on
#' the algorithm itself).
#'
#' Please visit the project home for more information:
#' \url{https://github.com/gvegayon/ABCoptim}.
#'
#' @name ABCoptim-package
#' @aliases ABCoptim-package ABCoptim abc
#' @docType package
#' @author George Vega Yon \email{g.vegayon@@gmail.com} [aut],
#'
#' Enyelbert Muñoz \email{enyeldoc2011@@gmail.com} [ctb]
#' @references D. Karaboga, \emph{An Idea based on Honey Bee Swarm for
#' Numerical Optimization}, tech. report TR06,Erciyes University, Engineering
#' Faculty, Computer Engineering Department, 2005
#' \url{http://mf.erciyes.edu.tr/abc/pub/tr06_2005.pdf}
#'
#'
#' Artificial Bee Colony (ABC) Algorithm (website)
#' \url{http://mf.erciyes.edu.tr/abc/index.htm}
#'
#' Basic version of the algorithm implemented in \code{C} (ABC's official
#' website) \url{http://mf.erciyes.edu.tr/abc/form.aspx}
#' @keywords package
#' @examples
#'
#' \dontrun{
#' demo(ABCoptim) # Some functions...
#' }
#'
NULL
#' @useDynLib ABCoptim, .registration = TRUE
#' @importFrom Rcpp sourceCpp
#' @importFrom stats runif
#' @importFrom utils str
#' @importFrom graphics plot
NULL
| /scratch/gouwar.j/cran-all/cranData/ABCoptim/R/ABCoptim-package.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
abc_cpp_ <- function(par, fn, lb, ub, FoodNumber = 20L, limit = 100L, maxCycle = 1000L, criter = 50L) {
.Call(`_ABCoptim_abc_cpp_`, par, fn, lb, ub, FoodNumber, limit, maxCycle, criter)
}
| /scratch/gouwar.j/cran-all/cranData/ABCoptim/R/RcppExports.R |
#' Artificial Bee Colony Optimization
#'
#' Implements Karaboga (2005) Artificial Bee Colony (ABC) Optimization algorithm.
#'
#' @param par Initial values for the parameters to be optimized over
#' @param fn A function to be minimized, with first argument of the vector of
#' parameters over which minimization is to take place. It should return a
#' scalar result.
#' @param ... In the case of \code{abc_*}, further arguments to be passed to 'fn',
#' otherwise, further arguments passed to the method.
#' @param FoodNumber Number of food sources to exploit. Notice that the param
#' \code{NP} has been deprecated.
#' @param lb Lower bound of the parameters to be optimized.
#' @param ub Upper bound of the parameters to be optimized.
#' @param limit Limit of a food source.
#' @param maxCycle Maximum number of iterations.
#' @param optiinteger Whether to optimize binary parameters or not.
#' @param criter Stop criterion (number of unchanged results) until stopping.
#' @param parscale Numeric vector of length \code{length(par)}. Scale applied
#' to the parameters (see \code{\link[stats:optim]{optim}}).
#' @param fnscale Numeric scalar. Scale applied function. If \code{fnscale < 0},
#' then the problem becomes a maximization problem (see \code{\link[stats:optim]{optim}}).
#'
#' @details
#'
#' This implementation of the ABC algorithm was developed based on the basic
#' version written in \code{C} and published at the algorithm's official
#' website (see references).
#'
#' \code{abc_optim} and \code{abc_cpp} are two different implementations of the
#' algorithm, the former using pure \code{R} code, and the later using \code{C++},
#' via the \pkg{Rcpp} package. Besides the output, another important
#' difference between the two implementations is speed, with \code{abc_cpp}
#' showing between 50\% and 100\% faster performance.
#'
#' Upper and Lower bounds (\code{ub}, \code{lb}) equal to infinite will be replaced
#' by either \code{.Machine$double.xmax} or \code{-.Machine$double.xmax}.
#'
#' If \code{D} (the number of parameters to be optimized) is greater than one,
#' then \code{lb} and \code{ub} can be either scalars (assuming that all the
#' parameters share the same boundaries) or vectors (each parameter has its
#' own boundaries).
#'
#' @return A list of class \code{abc_answer}, holding the following elements:
#' \item{Foods}{Numeric matrix. Last position of the bees.}
#' \item{f}{Numeric vector. Value of the function evaluated at each set of \code{Foods}.}
#' \item{fitness}{Numeric vector. Fitness of each \code{Foods}.}
#' \item{trial}{Integer vector. Number of trials at each \code{Foods}.}
#' \item{value}{Numeric scalar. Value of the function evaluated at the optimum.}
#' \item{par}{Numeric vector. Optimum found.}
#' \item{counts}{Integer scalar. Number of cycles.}
#' \item{hist}{Numeric matrix. Trace of the global optimums.}
#'
#' @author George Vega Yon \email{g.vegayon@@gmail.com}
#' @references D. Karaboga, \emph{An Idea based on Honey Bee Swarm for
#' Numerical Optimization}, tech. report TR06,Erciyes University, Engineering
#' Faculty, Computer Engineering Department, 2005
#' \url{http://mf.erciyes.edu.tr/abc/pub/tr06_2005.pdf}
#'
#' Artificial Bee Colony (ABC) Algorithm (website)
#' \url{http://mf.erciyes.edu.tr/abc/index.htm}
#'
#' Basic version of the algorithm implemented in \code{C} (ABC's official
#' website) \url{http://mf.erciyes.edu.tr/abc/form.aspx}
#' @keywords optimization
#' @examples
#'
#' # EXAMPLE 1: The minimum is at (pi,pi) --------------------------------------
#'
#' fun <- function(x) {
#' -cos(x[1])*cos(x[2])*exp(-((x[1] - pi)^2 + (x[2] - pi)^2))
#' }
#'
#' abc_optim(rep(0,2), fun, lb=-10, ub=10, criter=50)
#'
#' # This should be equivalent
#' abc_cpp(rep(0,2), fun, lb=-10, ub=10, criter=50)
#'
#' # We can also turn this into a maximization problem, and get the same
#' # results
#' fun <- function(x) {
#' # We've removed the '-' from the equation
#' cos(x[1])*cos(x[2])*exp(-((x[1] - pi)^2 + (x[2] - pi)^2))
#' }
#'
#' abc_cpp(rep(0,2), fun, lb=-10, ub=10, criter=50, fnscale = -1)
#'
#' # EXAMPLE 2: global minimum at about (-15.81515) ----------------------------
#'
#' fw <- function (x)
#' 10*sin(0.3*x)*sin(1.3*x^2) + 0.00001*x^4 + 0.2*x+80
#'
#' ans <- abc_optim(50, fw, lb=-100, ub=100, criter=100)
#' ans[c("par", "counts", "value")]
#'
#'
#' # EXAMPLE 3: 5D sphere, global minimum at about (0,0,0,0,0) -----------------
#' fs <- function(x) sum(x^2)
#'
#' ans <- abc_optim(rep(10,5), fs, lb=-100, ub=100, criter=200)
#' ans[c("par", "counts", "value")]
#'
#'
#' # EXAMPLE 4: An Ordinary Linear Regression ----------------------------------
#'
#' set.seed(1231)
#' k <- 4
#' n <- 5e2
#'
#' # Data generating process
#' w <- matrix(rnorm(k), ncol=1) # This are the model parameters
#' X <- matrix(rnorm(k*n), ncol = k) # This are the controls
#' y <- X %*% w # This is the observed data
#'
#' # Objective function
#' fun <- function(x) {
#' sum((y - X%*%x)^2)
#' }
#'
#' # Running the regression
#' ans <- abc_optim(rep(0,k), fun, lb = -10000, ub=10000)
#'
#' # Here are the outcomes: Both columns should be the same
#' cbind(ans$par, w)
#' # [,1] [,2]
#' # [1,] -0.08051177 -0.08051177
#' # [2,] 0.69528553 0.69528553
#' # [3,] -1.75956316 -1.75956316
#' # [4,] 0.36156427 0.36156427
#'
#'
#' # This is just like OLS, with no constant
#' coef(lm(y~0+X))
#' # X1 X2 X3 X4
#' #-0.08051177 0.69528553 -1.75956316 0.36156427
#'
#' @export abc_optim
#' @aliases abc_answer
abc_optim <- function(
par, # vector of parameters to optimize
fn, # objective function
..., # further arguments passed to fn (M, x0, X, etc.)
FoodNumber = 20, # number of food sources
lb = rep(-Inf, length(par)), # lower bound of the search range
ub = rep(+Inf, length(par)), # upper bound of the search range
limit = 100, # limit after which a food source is considered exhausted
maxCycle = 1000, # maximum number of iterations
optiinteger = FALSE, # TRUE to optimize over {0,1} (binary parameters)
criter = 50,
parscale = rep(1, length(par)),
fnscale = 1
)
{
D <- length(par)
# Checking limits
if (length(lb) == 1 && length(par) > 1) lb <- rep(lb, D)
if (length(ub) == 1 && length(par) > 1) ub <- rep(ub, D)
lb[is.infinite(lb)] <- -.Machine$double.xmax*1e-10
ub[is.infinite(ub)] <- .Machine$double.xmax*1e-10
# Initial params
Foods <- matrix(double(FoodNumber*D), nrow=FoodNumber)
f <- double(FoodNumber)
fitness <- double(FoodNumber)
trial <- double(FoodNumber)
prob <- double(FoodNumber)
solution <- double(D)
ObjValSol <- double(1)
FitnessSol <- double(1)
neighbour <- integer(1)
param2change<- integer(1)
GlobalMin <- fn(par, ...) # double(1)
GlobalParams<- par #double(D)
#GlobalMins <- double(runtime)
r <- integer(1)
# Fun
fun <- function(par) fn(par/parscale, ...)/fnscale
# Fitness function
CalculateFitness <- function(fun)
{
if (fun >= 0) return(1/(fun + 1))
else return(1 + abs(fun))
}
# CalculateFitness(f[1])
# The best food source is memorized
MemorizeBestSource <- function()
{
oldGlobalMin <- GlobalMin
for(i in seq(1,FoodNumber)) {
if (f[i] < GlobalMin) {
GlobalMin <<- f[i]
# Replacing new group of parameters
GlobalParams <<- Foods[i,]
}
}
# Increasing persistance
if (oldGlobalMin == GlobalMin) persistance <<- persistance + 1
else persistance <<- 0
}
# Variables are initialized in the range [lb,ub]. If each parameter has
# different range, use arrays lb[j], ub[j] instead of lb and ub
# Counters of food sources are also initialized in this function
init <- function(index, ...) {
if (optiinteger) Foods[index,] <<- runif(D) > .5
else {
Foods[index,] <<- sapply(1:D, function(k) runif(1,lb[k],ub[k]) )
}
solution <<- Foods[index,]
f[index] <<- fun(solution)
fitness[index] <<- CalculateFitness(f[index])
trial[index] <<- 0
}
# init(2)
# All food sources are initialized
initial <- function() {
# For the first initialization we set the bees at
# specific places equaly distributed through the
# bounds.
Foods <<-
sapply(1:D, function(k) {
seq(lb[k],ub[k],length.out=FoodNumber)
}
)
for (i in 1:FoodNumber) {
solution <<- Foods[i,]
f[i] <<- fun(solution)
fitness[i] <<- CalculateFitness(f[i])
trial[i] <<- 0
}
}
# initial()
SendEmployedBees <- function() {
for (i in 1:FoodNumber) {
# The parameter to be changed is determined randomly
param2change <- sample(1:D, 1) # floor(runif(1)*D) + 1
# A randomly chosen solution is used in producing a mutant solution of the solution i
# Randomly selected solution must be different from the solution i
neighbour <- i
while(neighbour==i)
neighbour <- sample(1:FoodNumber, 1) # floor(runif(1)*FoodNumber) + 1
solution <<- Foods[i,]
# v_{ij}=x_{ij}+\phi_{ij}*(x_{kj}-x_{ij})
if (optiinteger) solution[param2change] <<- runif(1) > 0.5
else {
solution[param2change] <<-
Foods[i,param2change]+
(Foods[i,param2change]-Foods[neighbour,param2change])*(runif(1)-0.5)*2
# if generated parameter value is out of boundaries, it is shifted onto the boundaries
if (solution[param2change]<lb[param2change])
solution[param2change]<<-lb[param2change]
if (solution[param2change]>ub[param2change])
solution[param2change]<<-ub[param2change]
}
ObjValSol <<- fun(solution)
FitnessSol <<- CalculateFitness(ObjValSol)
# a greedy selection is applied between the current solution i and its mutant*/
if (FitnessSol>fitness[i]) {
# If the mutant solution is better than the current solution i, replace the solution with the mutant and reset the trial counter of solution i*/
trial[i] <<- 0;
#for(j in 1:D) Foods[i,j] <<- solution[j]
Foods[i,] <<- solution
f[i]<<- ObjValSol
fitness[i]<<-FitnessSol
}
else {
# the solution i can not be improved, increase its trial counter*/
trial[i] <<- trial[i]+1
}
}
}
# A food source is chosen with the probability which is proportioal to its quality*/
# Different schemes can be used to calculate the probability values*/
# For example prob(i)=fitness(i)/sum(fitness)*/
# or in a way used in the metot below prob(i)=a*fitness(i)/max(fitness)+b*/
# probability values are calculated by using fitness values and normalized by dividing maximum fitness value*/
CalculateProbabilities <- function() {
maxfit <- fitness[1]
for (i in 1:FoodNumber)
if (fitness[i] > maxfit) maxfit <- fitness[i]
prob <<- .9*(fitness/(maxfit+1e-20)) + .1
# prob[is.nan(prob)] <<- .1
}
SendOnlookerBees <- function()
{
# Onlooker Bee phase
i <- 1
t <- 0
while (t < FoodNumber)
{
# choose a food source depending on its probability to be chosen
if (runif(1) < prob[i]) {
t <- t + 1
# The parameter to be changed is determined randomly
param2change <- sample(1:D, 1) # floor(runif(1)*D) + 1
# A randomly chosen solution is used in producing a mutant solution of the solution i
#Randomly selected solution must be different from the solution i*/
neighbour <- i
while(neighbour==i)
neighbour <- sample(1:FoodNumber, 1) # floor(runif(1)*FoodNumber) + 1
solution <<- Foods[i,]
# v_{ij}=x_{ij}+\phi_{ij}*(x_{kj}-x_{ij}) */
if (optiinteger) solution[param2change] <<- runif(1) > .5
else
{
solution[param2change] <<-
Foods[i,param2change]+
(Foods[i,param2change]-Foods[neighbour,param2change])*(runif(1)-0.5)*2
# if generated parameter value is out of boundaries, it is shifted onto the boundaries*/
if (solution[param2change]<lb[param2change])
solution[param2change] <<- lb[param2change]
if (solution[param2change]>ub[param2change])
solution[param2change] <<- ub[param2change]
}
ObjValSol <<- fun(solution)
FitnessSol <<- CalculateFitness(ObjValSol)
# a greedy selection is applied between the current solution i and its mutant*/
if (FitnessSol>fitness[i])
{
# If the mutant solution is better than the current solution i, replace the solution with the mutant and reset the trial counter of solution i*/
trial[i] <<- 0
Foods[i,] <<- solution
f[i]<<-ObjValSol
fitness[i]<<-FitnessSol
} #if the solution i can not be improved, increase its trial counter*/
else trial[i] <<- trial[i]+1
}
i <- i + 1
if (i==FoodNumber) i <- 1
# end of onlooker bee phase
}
}
# determine the food sources whose trial counter exceeds the "limit" value.
# In Basic ABC, only one scout is allowed to occur in each cycle*/
SendScoutBees <- function() {
maxtrialindex <- 1
for (i in 1:FoodNumber) {
if (trial[i] > trial[maxtrialindex]) maxtrialindex <- i
}
if (trial[maxtrialindex] >= limit) init(maxtrialindex)
}
persistance <- 0
# initialize the food sources
initial()
# memorize the first best solution
MemorizeBestSource()
ans <- matrix(0, ncol = D, nrow=maxCycle)
iter <- 0
# start iterating
while ((iter <- iter + 1) < maxCycle)
{
SendEmployedBees()
CalculateProbabilities()
SendOnlookerBees()
MemorizeBestSource()
# Storing parameter and breaking out
ans[iter,] <- GlobalParams
if (persistance > criter) break
SendScoutBees()
}
return(
structure(list(
Foods = Foods,
f = f,
fn = fn,
fitness = fitness,
trial = trial,
value = fun(GlobalParams),
par = GlobalParams,
counts = c("function"=iter),
hist = ans[1:iter,,drop=FALSE]
), class="abc_answer"
))
}
#' @export
#' @param x An object of class \code{abc_answer}.
#' @rdname abc_optim
print.abc_answer <- function(x, ...) {
cat("\n")
cat(" An object of class -abc_answer- (Artificial Bee Colony Optim.):\n")
cat(" par:\n",
paste0(
sprintf(
" %6s: % f",
sprintf("x[%i]", 1:length(x$par)),
x$par),
collapse="\n"
),
"\n", sep=""
)
cat("\n value:\n", sprintf("%9s % f", "", x$value), "\n", sep="")
cat("\n counts:\n", sprintf("%9s % i", "", x$counts), "\n", sep="")
invisible(x)
}
# ################################################################################
# # Examples
# ################################################################################
#
# X <- c(3,2,3,1)
#
# # Matching function
# fun <- function(lambda, x0, X, M)
# {
# norm((x0 - X)*lambda, type="2") + exp(abs(sum(lambda > 0) - M))
# }
#
# # Best neighbour for
# # x0 = 2
# # X = c(3,2,3,1)
# # M = 1
# # The best result should be [0,1,0,0]
# x1 <- abc_optim(rep(0,4), fun, x0=2, X=X, M=1, lb=0, ub=1, optiinteger=T)
# x1
#
# # Best two neighbours for
# # x0 = 3
# # X = c(3,2,3,1)
# # M = 2
# # The best result should be [1,0,1,0]
# x2 <- abc_optim(rep(0,4), fun, x0=3, X=X, M=2, lb=0, ub=1, optiinteger=T)
# x2
#
# ################################################################################
# # Definition of the function
# fun <- function(x) {
# -cos(x[1])*cos(x[2])*exp(-((x[1] - pi)^2 + (x[2] - pi)^2))
# }
#
# abc_optim(rep(0,2), fun, lb=-5, ub=5, criter=50)
#
# optim(rep(0,2), fn=fun) #lower=-5,upper=5)
#
# ################################################################################
# # Definition of the function
#
# fun <- function(x) {
# -4+(x[1]^2 + x[2]^2)
# }
#
# abc_optim(c(1,1), fn=fun, lb=-100000, ub=100000,criter=100)
#
# ################################################################################
# # Definition of the function
#
# fun <- function(x) {
# -(x^4 - 2*x^2 - 8)
# }
#
# abc_optim(0, fn=fun, lb=-2, ub=2,criter=100)
# #
# library(microbenchmark)
# const <- 2
# fun <- function(x) {
# -cos(x[1])*cos(x[2])*exp(-((x[1] - pi)^const + (x[2] - pi)^const))
# }
#
# microbenchmark(
# ABC_R = abc_optim(rep(0,2), fun, lb=-20, ub=20, criter=20, maxCycle = 20),
# ABC_CPP = abc_cpp(rep(0,2), fun, lb=-20, ub=20, criter=20, maxCycle = 20),
# times=100
# )
#' @export
#' @rdname abc_optim
abc_cpp <- function(
par,
fn,
...,
FoodNumber = 20, # Number of food sources
lb = rep(-Inf, length(par)), # Lower bound of the search space
ub = rep(+Inf, length(par)), # Upper bound of the search space
limit = 100, # Limit at which a food source is considered exhausted
maxCycle = 1000, # Maximum number of iterations
criter = 50,
parscale = rep(1, length(par)),
fnscale = 1
) {
# Checking limits
if (length(lb)>0) lb <- rep(lb, length.out = length(par))
if (length(ub)>0) ub <- rep(ub, length.out = length(par))
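# Infinite bounds are replaced below by a large finite value so that
# candidate parameters can be sampled within a bounded range.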
lb[is.infinite(lb)] <- -(.Machine$double.xmax*1e-10)
ub[is.infinite(ub)] <- +(.Machine$double.xmax*1e-10)
fun <- function(par) fn(par/parscale, ...)/fnscale
ans <- abc_cpp_(par, fun, lb, ub, FoodNumber, limit, maxCycle, criter)
ans[["fn"]] <- fn
structure(
ans[c("Foods", "f", "fn", "fitness", "trial", "value", "par", "counts",
"hist")],
class="abc_answer"
)
}
#' @export
#' @details The \code{plot} method shows the trace of the objective function
#' as the algorithm unfolds. The line is merely the result of the objective
#' function evaluated at each point (row) of the \code{hist} matrix returned by
#' \code{abc_optim}/\code{abc_cpp}.
#'
#' For now, the function will return with error if \code{...} was passed to
#' \code{abc_optim}/\code{abc_cpp}, since those arguments are not stored with the
#' result.
#'
#' @rdname abc_optim
#' @param y Ignored
#' @param main Passed to \code{\link[graphics:plot.default]{plot}}.
#' @param xlab Passed to \code{\link[graphics:plot.default]{plot}}.
#' @param ylab Passed to \code{\link[graphics:plot.default]{plot}}.
#' @param type Passed to \code{\link[graphics:plot.default]{plot}}.
plot.abc_answer <- function(
x,
y = NULL,
main = "Trace of the Objective Function",
xlab = "Number of iteration",
ylab = "Value of the objective Function",
type = "l",
...) {
invisible(
graphics::plot(
with(x, apply(hist, 1, fn)),
type=type,
main = main,
ylab = ylab,
xlab = xlab,
...
)
)
}
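# Usage sketch (commented out, as with the examples above):
# fs <- function(x) sum(x^2)
# res <- abc_optim(rep(1, 2), fs, lb = -5, ub = 5, criter = 50)
# plot(res) # dispatches to plot.abc_answer, evaluating fn over res$hist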
| /scratch/gouwar.j/cran-all/cranData/ABCoptim/R/abc_optim.R |
################################################################################
# Some examples of ABC algorithm
# Author: George G. Vega
################################################################################
pause <- function() {
invisible(readline("\nPress <return> to continue: "))
}
pause()
## 2D Rosenbrock Banana function, global minimum at about (1,1)
fr <- function(x) 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2
pause()
abc_optim(c(1,1), fr, lb=-2.028, ub=2.028, criter=200)
## 5D sphere, global minimum at about (0,0,0,0,0)
fs <- function(x) sum(x^2)
pause()
abc_optim(rep(10,5), fs, lb=-100, ub=100, criter=200)
## "wild" function , global minimum at about -15.81515
pause()
fw <- function (x)
10*sin(0.3*x)*sin(1.3*x^2) + 0.00001*x^4 + 0.2*x+80
abc_optim(50, fw, lb=-100, ub=100, criter=100)
## Griewank function, global minimum at 0
pause()
fg <- function(x)
sum(x*x)/4000-prod(cos(x/sqrt(1:2)))+1
abc_optim(50, fg, lb=-100, ub=100, criter=100)
# Rastrigin function, global minimum at (0,0)
pause()
fra <- function(x)
20 + x[1]^2 + x[2]^2 - 10*(cos(2*pi*x[1]) + cos(2*pi*x[2]))
abc_optim(rep(50,2), fra, lb=-100, ub=100, criter=100)
# 10D Rastrigin function, global minimum at 0
pause()
fra10 <- function(x) sum(x^2 - 10*cos(2*pi*x) + 10)
abc_optim(rep(50,10), fra10, lb=-600, ub=600, criter=500) | /scratch/gouwar.j/cran-all/cranData/ABCoptim/demo/ABCoptim.R |
ABC_P2_gamma <-
function(n,ObsMean, S_Lo, S_Hi, R_Lo, R_Hi, delta,iter){
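# Rejection-sampling ABC sketch: gamma parameters and P2 are drawn from
# uniform priors, binomial offspring counts are simulated for each brood,
# and a P2 draw is accepted when the simulated mean is within delta of
# ObsMean.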
posterior<-c()
discard<-c()
gamma<-c()
Shape<-c()
Rate<-c()
i<-1
j<-1
k<-1
l<-1
m<-1
while(i <= iter){
dispersion<-runif(1,R_Lo,R_Hi)
mean<-runif(1,S_Lo,S_Hi)
gamma<-round(rgamma(n, shape=mean, rate=dispersion))
P2<-runif(1,0,1)
sire2<-rbinom(n,gamma,P2)
meanP2<-mean(sire2)
if(abs(meanP2 - ObsMean)>delta){
discard[k]<-P2
k<-k+1
}else
if(abs(meanP2 - ObsMean)<=delta){
posterior[i]<-P2
Shape[l]<-mean
Rate[m]<-dispersion
i<-i+1
l<-l+1
m<-m+1
}
}
list(posterior = posterior, Shape = Shape, Rate = Rate)
}
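# Usage sketch (hypothetical argument values):
# out <- ABC_P2_gamma(n = 20, ObsMean = 4, S_Lo = 1, S_Hi = 10,
#                     R_Lo = 0.1, R_Hi = 2, delta = 0.5, iter = 1000)
# hist(out$posterior) # accepted P2 draws approximate the posterior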
| /scratch/gouwar.j/cran-all/cranData/ABCp2/R/ABC_P2_gamma.R |
ABC_P2_norm <-
function(n,ObsMean,M_Lo,M_Hi,SD_Lo,SD_Hi,delta,iter){
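# Same rejection-sampling scheme as ABC_P2_gamma, except that brood sizes
# are drawn from a normal distribution truncated to positive values.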
posterior<-c()
discard<-c()
Norm<-c()
Avg<-c()
Std<-c()
i<-1
j<-1
k<-1
l<-1
m<-1
while(i <= iter){
avg<-runif(1,M_Lo,M_Hi)
std<-runif(1,SD_Lo,SD_Hi)
j<-1 # reset so that brood sizes are redrawn for each new avg and std
while(j<=n){
norm<-round(rnorm(1, mean=avg, sd=std))
if(norm>0){
Norm[j]<-norm
j<-j+1}
}
P2<-runif(1,0,1)
sire2<-rbinom(n,Norm,P2)
meanP2<-mean(sire2)
if(abs(meanP2 - ObsMean)>delta){
discard[k]<-P2
k<-k+1
}else
if(abs(meanP2 - ObsMean)<=delta){
posterior[i]<-P2
Avg[l]<-avg
Std[m]<-std
i<-i+1
l<-l+1
m<-m+1
}
}
list(posterior = posterior, Avg = Avg, Std = Std)
}
| /scratch/gouwar.j/cran-all/cranData/ABCp2/R/ABC_P2_norm.R |
ABC_P2_pois <-
function(n,ObsMean, L_Lo, L_Hi,delta,iter){
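# Same rejection-sampling scheme as ABC_P2_gamma, with brood sizes drawn
# from a Poisson distribution with mean lambda.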
posterior<-c()
discard<-c()
pois<-c()
Lambda<-c()
i<-1
j<-1
k<-1
l<-1
while(i <= iter){
lambda<-runif(1,L_Lo,L_Hi)
pois<-rpois(n, lambda)
P2<-runif(1,0,1)
sire2<-rbinom(n,pois,P2)
meanP2<-mean(sire2)
if(abs(meanP2 - ObsMean)>delta){
discard[k]<-P2
k<-k+1
}else
if(abs(meanP2 - ObsMean)<=delta){
posterior[i]<-P2
Lambda[l]<-lambda
i<-i+1
l<-l+1
}
}
list(posterior = posterior, Lambda = Lambda)
}
| /scratch/gouwar.j/cran-all/cranData/ABCp2/R/ABC_P2_pois.R |
library(MASS)
fit_dist_gamma <-
function(dist){
l<-length(dist)
j<-1
data_gamma<-c()
fit_gamma<-fitdistr(dist, "gamma")
while (j<=l){
gamma<-round(rgamma(1, fit_gamma$estimate[1], rate=fit_gamma$estimate[2]))
if(gamma>0){
data_gamma[j]<-gamma
j<-j+1
}
}
chi_gamma<-chisq.test(dist, data_gamma)
list(data_gamma = data_gamma, fit_gamma = fit_gamma, chi_gamma = chi_gamma)
}
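# Usage sketch: for an integer vector of brood sizes,
# fit <- fit_dist_gamma(brood_sizes)
# fit$chi_gamma then compares the observed counts with data simulated
# from the fitted gamma distribution.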
| /scratch/gouwar.j/cran-all/cranData/ABCp2/R/fit_dist_gamma.R |
library(MASS)
fit_dist_norm <-
function(dist){
l<-length(dist)
j<-1
data_norm<-c()
fit_norm<-fitdistr(dist, "normal")
while (j<=l){
norm<-round(rnorm(1, mean=fit_norm$estimate[1], sd=fit_norm$estimate[2]))
if(norm>0){
data_norm[j]<-norm
j<-j+1
}
}
chi_norm<-chisq.test(dist,data_norm)
list(data_norm = data_norm, fit_norm = fit_norm, chi_norm = chi_norm)
}
| /scratch/gouwar.j/cran-all/cranData/ABCp2/R/fit_dist_norm.R |
library(MASS)
fit_dist_pois <-
function(dist){
l<-length(dist)
fit_pois<-fitdistr(dist, "poisson")
data_pois<-rpois(l, fit_pois$estimate)
chi_pois<-chisq.test(dist,data_pois)
list(data_pois = data_pois, fit_pois = fit_pois, chi_pois = chi_pois)
}
| /scratch/gouwar.j/cran-all/cranData/ABCp2/R/fit_dist_pois.R |
#' ABHgenotypeR: A package for easy visualization and manipulating of ABH genotypes.
#'
#' The ABHgenotypeR package is meant as a companion package between the TASSEL
#' GBS pipeline and R/qtl. It allows easy visualization of ABH-encoded genotypes
#' in a .csv format as output by the TASSEL ABHGenosPlugin, which is also the
#' format required for R/qtl input.
#'
#'
#' @docType package
#' @name ABHgenotypeR
#' @import ggplot2
#' @import utils
NULL
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/ABHgenotypeR.R |
#' Correct short miscalled stretches based on flanking alleles.
#'
#' @param inputGenos A genotypes list object.
#' @param maxHapLength The maximum length of stretches flanked
#' by non-heterzygous sites that are changed. If set to 1
#' (default) only AXA or BXB will be corrected. If set to 2, both AXA and AXYA
#' (or BXB and BXYB) will be corrected.
#'
#' @return A genotype object in which short miscalled stretches are
#' corrected if both flanking alleles match.
#'
#' @examples \dontrun{corrStretchGenos <- correctStretches(genotypes, maxHapLength = 3)}
#' @export
correctStretches <- function(inputGenos = "genotypes",
maxHapLength = 1) {
geno_raw <- inputGenos$ABHmatrix
#setup a matrix for correcting errors
geno_correctedErr <- matrix(0,
nrow = nrow(geno_raw),
ncol = ncol(geno_raw))
#make reg expressions for A
patExprA <- NULL # a character vector which holds regexp
for(i in 1:maxHapLength) {
patExprA[i] <- paste("(A)([BHN]{",i,"})(?=A)", sep = "")
}
replExprA <- NULL # a character vector which holds regexp
for(i in 1:maxHapLength) {
replExprA[i] <- paste("\\1",
paste(rep("\\1",i), sep = "", collapse = ""),
sep = "", collapse = "")
}
#make reg expressions for B
patExprB <- NULL # a character vector which holds regexp
for(i in 1:maxHapLength) {
patExprB[i] <- paste("(B)([AHN]{",i,"})(?=B)", sep = "")
}
replExprB <- NULL # a character vector which holds regexp
for(i in 1:maxHapLength) {
replExprB[i] <- paste("\\1",
paste(rep("\\1",i), sep = "", collapse = ""),
sep = "", collapse = "")
}
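# For example, with maxHapLength = 2 the patterns built above are
# "(A)([BHN]{1})(?=A)" and "(A)([BHN]{2})(?=A)" (plus the B analogues):
# a run of one or two non-A calls flanked by A on both sides, which the
# replacement expressions then overwrite with A (or B).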
#in geno_raw replace errors
for(chrom_count in unique(inputGenos$chrom)) {
geno_temp <- geno_raw[,inputGenos$chrom == chrom_count]
for (row_count in 1:nrow(geno_correctedErr)) {
for(HapLen in 1:length(patExprA)) { #replace with A
if(HapLen == 1) { #first replace from geno_temp, then from geno_correctedErr
geno_correctedErr[row_count, inputGenos$chrom == chrom_count] <- substring(gsub(x = paste(geno_temp[row_count,],
collapse = ""),
pattern = patExprA[HapLen],
replacement = replExprA[HapLen],
perl = TRUE),
1:ncol(geno_temp),
1:ncol(geno_temp))
} else {
geno_correctedErr[row_count, inputGenos$chrom == chrom_count] <- substring(gsub(x = paste(geno_correctedErr[row_count, inputGenos$chrom == chrom_count],
collapse = ""),
pattern = patExprA[HapLen],
replacement = replExprA[HapLen],
perl = TRUE),
1:ncol(geno_temp),
1:ncol(geno_temp))
}
}
for(HapLen in 1:length(patExprB)) { #replace with B, only in geno_correctedErr
geno_correctedErr[row_count, inputGenos$chrom == chrom_count] <- substring(gsub(x = paste(geno_correctedErr[row_count, inputGenos$chrom == chrom_count],
collapse = ""),
pattern = patExprB[HapLen],
replacement = replExprB[HapLen],
perl = TRUE),
1:ncol(geno_temp),
1:ncol(geno_temp))
}
}
}
outputGenos <-inputGenos
outputGenos$ABHmatrix <- geno_correctedErr
dimnames(outputGenos$ABHmatrix) <- list("individual_names" = inputGenos$individual_names,
"marker_names" = inputGenos$marker_names)
reportGenos(inputGenos)
cat(paste("\n"))
reportGenos(outputGenos)
outputGenos
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/correctStretches.R |
#' Correct undercalled heterozygous sites based on flanking alleles.
#'
#' @param inputGenos A genotypes list object.
#' @param maxHapLength The maximum length of non-heterozygous stretches flanked
#' by heterozygous sites that are changed to heterozygous. If set to 1
#' (default) only HAH or HBH will be corrected. If set to 2, both HAH and HAAH
#' (or HBH and HBBH) will be corrected.
#'
#' @return A genotype object in which undercalled heterozygous sites are
#' corrected if both flanking alleles match.
#'
#' @examples \dontrun{corrUndHetsGenos <- correctUndercalledHets(genotypes, maxHapLength = 3)}
#' @export
correctUndercalledHets <- function(inputGenos = "genotypes",
maxHapLength = 1) {
geno_raw <- inputGenos$ABHmatrix
#setup a matrix for correcting undercalled hets
geno_correctedHets <- matrix(0,
nrow = nrow(geno_raw),
ncol = ncol(geno_raw))
#make reg expressions for H
patExprH <- NULL # a character vector which holds regexp
for(i in 1:maxHapLength) {
patExprH[i] <- paste("(H)([ABN]{",i,"})(?=H)", sep = "")
}
replExprH <- NULL # a character vector which holds regexp
for(i in 1:maxHapLength) {
replExprH[i] <- paste("\\1",
paste(rep("\\1",i), sep = "", collapse = ""),
sep = "", collapse = "")
}
#in geno_raw replace undercalled hets
for(chrom_count in unique(inputGenos$chrom)) {
geno_temp <- geno_raw[,inputGenos$chrom == chrom_count]
for (row_count in 1:nrow(geno_correctedHets)) { #first replace in genotemp, then in geno_corrected
for(HapLen in 1:length(patExprH)) {
if(HapLen == 1) {
geno_correctedHets[row_count, inputGenos$chrom == chrom_count] <- substring(gsub(x = paste(geno_temp[row_count,],
collapse = ""),
pattern = patExprH[HapLen],
replacement = replExprH[HapLen],
perl = TRUE),
1:ncol(geno_temp),
1:ncol(geno_temp))
} else {
geno_correctedHets[row_count, inputGenos$chrom == chrom_count] <- substring(gsub(x = paste(geno_correctedHets[row_count, inputGenos$chrom == chrom_count],
collapse = ""),
pattern = patExprH[HapLen],
replacement = replExprH[HapLen],
perl = TRUE),
1:ncol(geno_temp),
1:ncol(geno_temp))
}
}
}
}
# report genotype counts before and after the correction
outputGenos <-inputGenos
outputGenos$ABHmatrix <- geno_correctedHets
dimnames(outputGenos$ABHmatrix) <- list("individual_names" = inputGenos$individual_names,
"marker_names" = inputGenos$marker_names)
reportGenos(inputGenos)
cat(paste("\n"))
reportGenos(outputGenos)
outputGenos
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/correctUndercalledHets.R |
#' Impute missing genotypes based on flanking alleles
#'
#' @param inputGenos A genotypes list object.
#'
#' @return A genotype object in which missing data is imputed based on flanking
#' alleles. Any number of Ns is replaced by either A, B or H if the alleles which flank the Ns match.
#'
#' @examples \dontrun{imputedGenos <- imputeByFlanks(genotypes)}
#' @export
imputeByFlanks <- function (inputGenos = "genotypes") {
geno_raw <- inputGenos$ABHmatrix
#setup empty matrix for imputed genotypes
geno_imp <- matrix(0,
nrow = nrow(geno_raw),
ncol = ncol(geno_raw))
#loop through chromosomes
for(chrom_count in unique(inputGenos$chrom)) {
geno_temp <- geno_raw[,inputGenos$chrom == chrom_count]
#loop through rows, replace Ns flanked by the same parent/het
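# The pattern "(?:A|(?<!^)\\G)\\KN(?=N*A)" matches each N in a run that
# starts right after an A (\G chains successive matches along the run) and
# is followed by zero or more Ns and then an A; \K keeps the anchor out of
# the match, so only the N itself is replaced. The B and H patterns below
# work the same way.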
for (row_count in 1:nrow(geno_temp)){
geno_imp[row_count, inputGenos$chrom == chrom_count] <- substring(gsub(x = paste(geno_temp[row_count,],
collapse = ""),
pattern = "(?:A|(?<!^)\\G)\\KN(?=N*A)",
replacement = "A",
perl = TRUE),
1:ncol(geno_temp),
1:ncol(geno_temp))
geno_imp[row_count, inputGenos$chrom == chrom_count] <- substring(gsub(x = paste(geno_imp[row_count, inputGenos$chrom == chrom_count],
collapse = ""),
pattern = "(?:B|(?<!^)\\G)\\KN(?=N*B)",
replacement = "B",
perl = TRUE),
1:ncol(geno_temp),
1:ncol(geno_temp))
geno_imp[row_count, inputGenos$chrom == chrom_count] <- substring(gsub(x = paste(geno_imp[row_count, inputGenos$chrom == chrom_count],
collapse = ""),
pattern = "(?:H|(?<!^)\\G)\\KN(?=N*H)",
replacement = "H",
perl = TRUE),
1:ncol(geno_temp),
1:ncol(geno_temp))
}
}
outputGenos <-inputGenos
outputGenos$ABHmatrix <- geno_imp
dimnames(outputGenos$ABHmatrix) <- list("individual_names" = inputGenos$individual_names,
"marker_names" = inputGenos$marker_names)
reportGenos(inputGenos)
cat(paste("\n"))
reportGenos(outputGenos)
outputGenos
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/imputeByFlanks.R |
#' Plot the parental allele frequencies along the chromosomes.
#'
#' @param genos The output of readABHgenotypes
#'
#' @return A plot of parental allele frequencies along the chromosomes. If the
#' output is assigned a name a ggplot2 object is returned for further
#' manipulation.
#'
#' @examples \dontrun{plotAlleleFreq(genotypes)}
#' \dontrun{p <- plotAlleleFreq(genotypes)}
#' @export
plotAlleleFreq <- function(genos = "genotypes"){
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")
####get the number of A,B,H for each marker####
countAs <- rep(0,length(genos$marker_names))
for(markerloop in 1:length(genos$marker_names)){
countAs[markerloop] <- sum(genos$ABHmatrix[,markerloop] == "A", na.rm = TRUE)
}
countBs <- rep(0,length(genos$marker_names))
for(markerloop in 1:length(genos$marker_names)){
countBs[markerloop] <- sum(genos$ABHmatrix[,markerloop] == "B", na.rm = TRUE)
}
countHs <- rep(0,length(genos$marker_names))
for(markerloop in 1:length(genos$marker_names)){
countHs[markerloop] <- sum(genos$ABHmatrix[,markerloop] == "H", na.rm = TRUE)
}
countNs <- rep(0,length(genos$marker_names))
for(markerloop in 1:length(genos$marker_names)){
countNs[markerloop] <- sum(genos$ABHmatrix[,markerloop] == "N", na.rm = TRUE)
}
#####
#####build the dataframe for plotting#####
genotypeAbs <- cbind("chrom" = genos$chrom,
"pos" = genos$pos,
"countA" = countAs,
"countB" = countBs,
"countH" = countHs,
"countN" = countNs)
row.names(genotypeAbs) <- genos$marker_names
genotypeAbs <- reshape2::melt(as.data.frame(genotypeAbs),id.vars = c("chrom","pos"))
genotypeRatio <- cbind("chrom" = genos$chrom,
"pos" = genos$pos,
"ratioA" = 100 / (countAs+countBs+countHs) * countAs,
"ratioB" = 100 / (countAs+countBs+countHs) * countBs,
"hetero" = 100 / (countAs+countBs+countHs) * countHs)
row.names(genotypeRatio) <- genos$marker_names
genotypeRatio <- reshape2::melt(as.data.frame(genotypeRatio),id.vars = c("chrom","pos"),
variable.name = "allele_state")
#####
#plot the data
pos <- value <- allele_state <- NULL #appease R cmd check
ggplot(data = genotypeRatio, aes(x = pos/1000000, y = value, color = allele_state))+
geom_line(size = 0.75)+
facet_grid(chrom~.)+
xlab("physical position (Mb)")+ylab("parental allele frequency (%)")+
scale_y_continuous(limits = c(0, 100), breaks = c(0, 25, 50, 75))+
scale_color_manual(values = cbbPalette, name = "parental allele",
labels = c(genos$nameA,genos$nameB,"hetero"))+
theme(axis.title = element_text(size = 12, face = "bold"),
axis.title.y=element_text(vjust = 1.1),
axis.text = element_text(color = "black", size = 10),
panel.border = element_rect(fill= NA, colour="grey30"),
panel.margin = unit(0.2, "lines"),
axis.ticks = element_line(colour = "black"),
panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(),
strip.text.y = element_text(size = 12, angle = 0),
legend.position = "bottom",
legend.title = element_text(size = 12, face = "bold"),
legend.text = element_text(size = 12))
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/plotAlleleFreq.R |
#' Compare two genotype matrices
#'
#' @param genos_1 Output of readABHgenotypes
#' @param genos_2 Output of readABHgenotypes. Note that both genos objects need to
#' have identical numbers of markers and individuals.
#' @param markerToPlot A character vector of marker names which appear in the
#' plot. Defaults to all.
#' @param individualsToPlot A character vector of individual names which appear
#' in the plot. Defaults to all.
#' @param chromToPlot A character vector of chromosome names which appear in the
#' plot. Defaults to all.
#' @param CompColors A character vector of length 2 giving the color names or
#' values to use for differnt and identical markers.
#' Defaults to black and orange.
#' @param textSize The size of all text elements in the plot. Useful for making a
#' nice plot. Defaults to 12.
#' @param showMarkerNames Show the marker names along the x axis. This and
#' showIndividualNames are useful when you display only a few markers and
#' want them labeled. Defaults to FALSE.
#' @param showIndividualNames Show individual names along the y axis.
#'
#' @return A graphical comparison of genotypes.
#'
#' @examples \dontrun{plotCompareGenos(preImpGenotypes,postImpGenotypes)}
#' \dontrun{#for more examples see plotGenos()}
#' @export
plotCompareGenos <- function(genos_1 = "genotypes_1",
genos_2 = "genotypes_2",
markerToPlot = "all",
individualsToPlot = "all",
chromToPlot = "all",
CompColors = c("#000000", "#E69F00"),
textSize = 12,
showMarkerNames = FALSE,
showIndividualNames = FALSE) {
if(showMarkerNames == TRUE) {textX <- element_text(colour = "black", angle = 90)}
else{textX <- element_blank()}
if(showIndividualNames == TRUE) {textY <- element_text(colour = "black")}
else{textY <- element_blank()}
comp <- genos_1$ABHmatrix == genos_2$ABHmatrix
comp_df <- as.data.frame(t(comp*1)) #logical -> numerical -> df
comp_df$chrom <- genos_1$chrom
comp_df$marker_names <- factor(genos_1$marker_names, levels = unique(genos_1$marker_names))
comp_df <- reshape2::melt(comp_df,
id.vars = c("marker_names","chrom"),
variable.name = "individual_names",
value.name = "comp")
if(markerToPlot[1] != "all") comp_df <- comp_df[comp_df$marker_names %in% markerToPlot,]
if(individualsToPlot[1] != "all") comp_df <- comp_df[comp_df$individual_names %in% individualsToPlot,]
if(chromToPlot[1] != "all") comp_df <- comp_df[comp_df$chrom %in% chromToPlot,]
comp_df$individual_names <- factor(comp_df$individual_names,
levels = rev(levels(comp_df$individual_names)))
marker_names <- individual_names <- comp <- NULL #appease R cmd check
ggplot(comp_df, aes(x = marker_names, y = individual_names, fill = factor(comp)))+
geom_tile()+
scale_fill_manual(name = "genotype comparison",
values = c("0" = CompColors[1], "1" = CompColors[2]),
labels = c("different","identical"))+
facet_grid(.~chrom, scales = "free", space = "free_x")+
ylab("individuals")+
xlab("markers")+
theme(text = element_text(size = textSize),
axis.text.x = textX,
axis.text.y = textY,
axis.ticks = element_blank(),
panel.background = element_blank(),
legend.position = "bottom")
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/plotCompareGenos.R |
#' Plot graphical genotypes.
#'
#' @param genos The output of readABHgenotypes
#' @param markerToPlot A character vector of marker names which appear in the
#' plot. Defaults to all.
#' @param individualsToPlot A character vector of individual names which appear
#' in the plot. Defaults to all.
#' @param chromToPlot A character vector of chromosome names which appear in the
#' plot. Defaults to all.
#' @param alleleColors A character vector of length 4 giving the color names or
#' values to use for the A,B,H and n.d genotypes. Defaults to orange, blue,
#' green and black.
#' @param textSize The size of all text elements in the plot. Useful for making a
#' nice plot. Defaults to 12.
#' @param showMarkerNames Show the marker names along the x axis. This and
#' showIndividualNames are useful when you display only a few markers and
#' want them labeled. Defaults to FALSE.
#' @param showIndividualNames Show individual names along the y axis.
#'
#' @return Graphical genotypes.
#'
#' @examples \dontrun{plotGenos(genotypes)}
#' markerNames <- c("marker1", "marker2", "marker3")
#' individualNames <- c("F2_100", "F2_101", "F2_102", "F2_103")
#' someColors <- c("black", "red", "gold", "white")
#' \dontrun{plotGenos(genotypes, markerNames, individualNames, 1:3, someColors)}
#'
#' \dontrun{p <- plotGenos(genotypes)}
#' @export
plotGenos <- function(genos = "genotypes",
markerToPlot = "all",
individualsToPlot = "all",
chromToPlot = "all",
alleleColors = c("#56B4E9","#E69F00",
"#009E73", "#000000"),
textSize = 12,
showMarkerNames = FALSE,
showIndividualNames = FALSE) {
if(showMarkerNames == TRUE) {textX <- element_text(colour = "black", angle = 90)}
else{textX <- element_blank()}
if(showIndividualNames == TRUE) {textY <- element_text(colour = "black")}
else{textY <- element_blank()}
ggt <- data.frame(t(genos$ABHmatrix), stringsAsFactors = FALSE, check.names = FALSE)
ggt$chrom <- genos$chrom
ggt$index <- 1:length(genos$chrom)
ggt$marker_names <- factor(genos$marker_names, levels = unique(genos$marker_names))
ggt <- reshape2::melt(ggt,
id.vars = c("chrom", "index","marker_names"),
variable.name = "individual_names", value.name = "allele")
if(markerToPlot[1] != "all") ggt <- ggt[ggt$marker_names %in% markerToPlot,]
if(individualsToPlot[1] != "all") ggt <- ggt[ggt$individual_names %in% individualsToPlot,]
if(chromToPlot[1] != "all") ggt <- ggt[ggt$chrom %in% chromToPlot,]
marker_names <- individual_names <- allele <- NULL #appease R cmd check
ggt$individual_names <- factor(ggt$individual_names,
levels = rev(levels(ggt$individual_names)))
ggplot(ggt, aes(x = marker_names, y = individual_names,
fill = allele))+
geom_tile()+
scale_fill_manual(name = "genotypes",
values = c("A" = alleleColors[1], "B" = alleleColors[2],
"H" = alleleColors[3], "N" = alleleColors[4]),
labels = c(genos$nameA, genos$nameB, "hetero", "n.d."))+
facet_grid(.~chrom, scales = "free", space = "free_x")+
xlab("marker")+
ylab("individuals")+
theme(text = element_text(size = textSize),
axis.text.x = textX,
axis.text.y = textY,
axis.ticks = element_blank(),
panel.grid = element_blank(),
panel.background = element_blank(),
strip.background = element_blank(),
legend.position = "bottom",
panel.margin = unit(0, "lines"))
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/plotGenos.R |
#' Plot the marker density along the chromosomes.
#'
#' @param genos The output of readABHgenotypes
#'
#' @return A plot of marker densities along the chromosomes. If the output is
#' assigned a name a ggplot2 object is returned for further manipulation.
#'
#' @examples \dontrun{plotMarkerDensity(genotypes)}
#' \dontrun{p <- plotMarkerDensity(genotypes)}
#' @export
plotMarkerDensity <- function(genos = "genotypes"){
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")
SNPdistr <- cbind.data.frame("chrom" = genos$chrom,
"pos" = genos$pos)
pos <- NULL #appease R cmd check
ggplot(data = SNPdistr, aes(x = pos/1000000))+
stat_bin(binwidth = 1, drop = TRUE, geom = "line", size = 0.75)+
labs(x = expression(bold(physical~position~(Mb))),
y = expression(bold(site~density~(no.~of~sites~Mb^{-1}))))+
facet_grid(chrom~.)+
scale_colour_manual(values=cbbPalette, name = "max missing")+
theme(axis.title = element_text(size = 12), # bold is wrapped in expression()
axis.text = element_text(color = "black", size = 10),
panel.border = element_rect(fill= NA, colour="grey30"),
axis.ticks = element_line(colour = "black"),
panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(),
strip.text.y = element_text(size = 12, angle = 0),
legend.position = "bottom",
legend.title = element_text(size = 12, face = "bold"),
legend.text = element_text(size = 12))
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/plotMarkerDensity.R |
#' Read in the output of the genosToABH plugin.
#'
#' @param pathToABH The path and filename of the input file.
#' @param nameA Name of the parent represented by "A" in the input file.
#' @param nameB Name of the parent represented by "B" in the input file.
#' @param readPos Should the function attempt to read the physical positions of
#' markers from the input?
#'
#' @return A genotype list object which holds the information from the input file.
#' This list is the fundamental datastructure used by the other functions in this
#' package. See the vignette for what each item in the list is.
#'
#' @details The input file should be a .csv file holding genotypes as specified by
#' the qtl package and its "csvs" format.
#' All characters in the genotype matrix which are not either A,B or H
#' will be set to N.
#' If readPos = TRUE (default) marker names must conform to S1_123456 meaning 123456 bp
#' on chromosome 1. If FALSE, pos is set to NULL and needs to be manually constructed
#' as shown in the examples. Note that this might throw off some plotting functions.
#'
#' @examples \dontrun{genotypes <- readABHgenotypes("./genotypes.csv", "NB", "OL")}
#'
#' \dontrun{otherGenotypes <- readABHgenotypes("./otherGenotypes.csv", readPos = FALSE)}
#' #arbitrary position to keep marker order intact
#' \dontrun{otherGenotypes$pos <- 1:length(otherGenotypes$marker_names)}
#' @export
readABHgenotypes <- function(pathToABH, nameA = "A", nameB = "B", readPos = TRUE){
HapMap <- read.delim(file = pathToABH,
stringsAsFactors=FALSE,
sep = ",")
if(readPos == TRUE) {
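# e.g. the marker name "S1_123456" yields pos = 123456; the chromosome
# numbers are read separately from the second line of the input file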
pos_temp <- as.integer(sub(pattern = ".+_",
x = as.character(colnames(HapMap[,-1])),
replacement = "", perl = TRUE)) #replace everything before and including the _ with ""}
} else {pos_temp <- NULL}
#get names and factors for each site
genotypes <- list(
"ABHmatrix" = as.matrix(HapMap[-1,-1]),
"chrom" = as.integer(HapMap[1,-1]),
"individual_names" = as.character(HapMap[-1,1]),
"marker_names" = as.character(colnames(HapMap[,-1])),
"pos" = pos_temp,
"nameA" = nameA,
"nameB" = nameB
)
dimnames(genotypes$ABHmatrix) <- list("individual_names" = HapMap[-1,1],
"marker_names" = colnames(HapMap[,-1]))
genotypes$ABHmatrix[!(genotypes$ABHmatrix %in% c("A", "B", "H"))] <- "N"
genotypes
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/readABHgenotypes.R |
#' Report the total and relative number of each allele in a genotype object.
#'
#' @param genos1 A genotypes list object.
#'
#' @return Console output of the total and relative number of each allele.
#'
#' @examples \dontrun{reportGenos(preImputation)}
#' @export
reportGenos <- function(genos1) {
cat(paste("The absolute number of genotypes in", deparse(substitute(genos1)),"is:"))
t1 <- table(genos1$ABHmatrix)
print(t1)
cat(paste("\n"))
cat(paste("or in percentage\n"))
cat(paste("\n"))
df1 <- data.frame("A" = round(100 / sum(t1) * t1[1], 3),
"B" = round(100 / sum(t1) * t1[2], 3),
"H" = round(100 / sum(t1) * t1[3], 3),
"N" = round(100 / sum(t1) * t1[4], 3))
print(df1)
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/reportGenos.R |
#' Export a genotype list to a .csv file.
#'
#' @param genos The output of readABHgenotypes or one of the impuation/error
#' correction functions.
#' @param outfile The path and filename of the output file.
#'
#' @return A file which can be used in R/qtl or elsewhere.
#'
#' @examples \dontrun{writeABHgenotypes(genotypes, outfile = "./outfile_name.csv")}
#' @export
writeABHgenotypes <- function(genos = "genotypes",
outfile = "./outfile.csv") {
outGenos <- genos$ABHmatrix
outGenos <- cbind("id" = row.names(outGenos),
outGenos)
outGenos <- rbind(c("", genos$chrom), # chromosome row with a blank "id" cell
outGenos)
write.csv(outGenos, file = outfile, row.names = FALSE)
}
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/R/writeABHgenotypes.R |
## ---- echo = FALSE-------------------------------------------------------
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
library(ggplot2)
library(reshape2)
## ------------------------------------------------------------------------
library(ABHgenotypeR)
## ----readInChunk---------------------------------------------------------
# Start with reading in genotype data:
genotypes <- readABHgenotypes(system.file("extdata",
"preprefall025TestData.csv",
package = "ABHgenotypeR"),
nameA = "NB", nameB = "OL")
## ---- fig.show = 'hold', fig.width = 7, fig.cap = "Fig. 1: raw genotype data"----
# Genotypes can be plotted by:
plotGenos(genotypes)
## ------------------------------------------------------------------------
# Assign the output
plottedGenos <- plotGenos(genotypes)
# bold axis labels and no legend
plottedGenos <- plottedGenos + theme(axis.text = element_text(face = "bold"),
legend.position = "none")
## ------------------------------------------------------------------------
postImpGenotypes <- imputeByFlanks(genotypes)
## ---- eval = FALSE-------------------------------------------------------
# reportGenos(postImpGenotypes)
## ---- fig.show = 'hold', fig.width = 7, fig.cap = "Fig. 2: Raw (top) and imputed (bottom) genotypes from chromosome 1."----
# Genotypes can be plotted by:
plotGenos(genotypes, chromToPlot = 1)
plotGenos(postImpGenotypes,chromToPlot = 1)
## ------------------------------------------------------------------------
#remove undercalled heterozygous alleles
ErrCorr1Genotypes <- correctUndercalledHets(postImpGenotypes, maxHapLength = 3)
#remove other errors
ErrCorr2Genotypes <- correctStretches(ErrCorr1Genotypes, maxHapLength = 3)
## ---- fig.show = 'hold', fig.width = 7, fig.cap = "Fig. 3: Genotypes with corrected undercalled heterozygous (top) and other errors (bottom) from chromosome 1."----
plotGenos(ErrCorr1Genotypes, chromToPlot = 1)
plotGenos(ErrCorr2Genotypes,chromToPlot = 1)
## ---- fig.show = 'hold', fig.width = 7, fig.height = 4, fig.cap = "Fig. 4: Comparison of two genotype matrices"----
plotCompareGenos(genotypes, ErrCorr2Genotypes, chromToPlot = 1:3)
## ---- eval = FALSE-------------------------------------------------------
# writeABHgenotypes(ErrCorr2Genotypes, outfile = "path/to/dir")
## ---- fig.show = 'hold', fig.width = 7, fig.height = 7, fig.cap = "Fig. 5: Marker Density"----
plotMarkerDensity(genos = ErrCorr2Genotypes)
## ---- fig.show = 'hold', fig.width = 7, fig.height = 7, fig.cap = "Fig. 6:Parental allele frequencies"----
plotAlleleFreq(genos = ErrCorr2Genotypes)
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/inst/doc/ABHgenotypeR-vignette.R |
---
title: "ABHGenotypeR"
author: "Tomoyuki Furuta and Stefan Reuscher"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
fig_caption: yes
vignette: >
%\VignetteIndexEntry{Using ABHGenotypeR}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, echo = FALSE}
knitr::opts_chunk$set(collapse = TRUE, comment = "#>")
library(ggplot2)
library(reshape2)
```
## Introduction
The `ABHgenotypeR` package provides simple imputation, error-correction and plotting capacities for genotype data. The functions in this package were initially developed for the GBS/QTL analysis pipeline described in:
*Furuta, Reuscher et al., 2016 Adaption genotyping by sequencing for rice F2
populations. BMC Genomics XYZ*
The `ABHgenotypeR` package is supposed to serve as an intermediate but independent analysis tool between the TASSEL GBS pipeline and the `qtl` package. `ABHgenotypeR` provides functionalities not found in either TASSEL or `qtl` in addition to visualization of genotypes as "graphical genotypes".
## An example workflow
Load `ABHgenotypeR`.
```{r}
library(ABHgenotypeR)
```
`ABHgenotypeR` requires genotypes encoded as ABHN, where A and B denote homozygous genotypes, H denotes heterozygous genotypes and N denotes missing data. Typical data sources can be the TASSEL GenotypesToABHPlugin or data from other genotyping systems as long as the input format conforms to the "csvs" format for genotypes defined in the `qtl` package. If your data comes from the TASSEL GenotypesToABHPlugin it should be in the correct format. Otherwise please refer to the included test dataset for correct formatting.
### Reading in genotype files
```{r readInChunk}
# Start with reading in genotype data:
genotypes <- readABHgenotypes(system.file("extdata",
"preprefall025TestData.csv",
package = "ABHgenotypeR"),
nameA = "NB", nameB = "OL")
```
This will load the example dataset included in the package. The dataset contains genotypes from 50 F2 individuals from a cross of the elite rice cultivar Oryza sativa Nipponbare (NB) and the African wild rice Oryza longistaminata (OL). The file is directly taken from the output of the GenosToABHPlugin and, like most real-world datasets, contains genotyping errors and missing data.
The above command will create a genotype list object which stores all the information from the input file.
### The genotype list object
Genotypes and associated information are stored in an R list object, hereafter called genotype list. There are seven items in the list:
* ABHmatrix The actual genotypes matrix including dimension names
* chrom Chromosome number from the second line of the input file. Must be an integer, which means no X-chromosome for now. Sorry animal people.
* marker_names Taken from the first line of the input file.
* individual_names Taken from the first column of the input file.
* pos Physical position in bp of each marker. By default extracted from the marker name by removing all characters before and including an underscore and expecting the remaining part to be an integer referring to the position in bp on the respective chromosome, e.g. S1_123456 -> 123456 bp on chromosome 1.
* nameA and nameB Name of parent A and parent B. Can be set by the user
### Creating graphical genotypes
Being able to visualize all genotypes across a whole population can give hints about population structure or possible errors.
```{r, fig.show = 'hold', fig.width = 7, fig.cap = "Fig. 1: raw genotype data"}
# Genotypes can be plotted by:
plotGenos(genotypes)
```
The plotGenos() function provides options to plot only selected markers, individuals or chromosomes and further allows the user to choose the colors assigned to each of the four possible states (ABHN) and whether axis labels are displayed. Advanced users can take full advantage of the flexibility and power of `ggplot2` by assigning the output of plotGenos(), which will create a ggplot object for further manipulation.
```{r}
# Assign the output
plottedGenos <- plotGenos(genotypes)
# bold axis labels and no legend
plottedGenos <- plottedGenos + theme(axis.text = element_text(face = "bold"),
legend.position = "none")
```
As you can see from the output there are some markers which have a high percentage of missing data and/or tend to be obviously wrong, e.g. a single A in a stretch of H. To correct this, `ABHgenotypeR` contains a set of functions that change genotypes based on their direct neighbours.
### Imputation of missing genotypes
As a first step to improve GBS genotypes the user might want to impute missing data. While both TASSEL and `qtl` provide very sophisticated imputation algorithms, `ABHgenotypeR` uses a simpler approach. Imputation of missing data is performed for each individual based on flanking alleles. Basically, if the genotypes left and right of a stretch of missing data are identical the genotypes are filled in. Imputation is performed by:
```{r}
postImpGenotypes <- imputeByFlanks(genotypes)
```
This will create a new genotype list object with imputed data. The imputeByFlanks() function (and the other genotype changing functions) will also print a report to the console which tells you how absolute and relative genotype numbers changed.
This report can also be obtained by running:
```{r, eval = FALSE}
reportGenos(postImpGenotypes)
```
Another way to compare the outcome of imputation is to produce graphical genotypes of both datasets using the plotGenos() function. Here only the first chromosome is shown. Direct comparisons of genotypes can be made with the plotCompareGenos() function explained later.
```{r, fig.show = 'hold', fig.width = 7, fig.cap = "Fig. 2: Raw (top) and imputed (bottom) genotypes from chromosome 1."}
# Genotypes can be plotted by:
plotGenos(genotypes, chromToPlot = 1)
plotGenos(postImpGenotypes,chromToPlot = 1)
```
### Error corrections
In a similar fashion obvious genotype errors may be corrected. The only difference is that the user can supply a maximum haplotype length. This sets the maximum stretch of uniform alleles that could be attributed to error. High values here might correct series of wrongly called alleles but will remove recombination events which resulted in short haplotypes. Small values here will potentially retain smaller recombination events, but might leave errors.
To choose a value it is suggested to examine your data, e.g. with the `plotGenos()` function, and think about population structure and marker density. In our case we decided that a maximum haplotype length of 3 yields acceptable results.
Error correction is performed using two functions, `correctUnderCalledHets()` and `correctStretches()`. The fact that there are two functions is partially due to the developmental history of this package but allows for greater flexibility to correct different types of errors.
`correctUnderCalledHets()` addresses the particular fact that genotyping methods that rely on read alignments (like GBS) tend to miss out on heterozygous sites, since they require reads from both alleles. `correctUnderCalledHets()` changes alleles from A or B to H if they are flanked by H. Running `correctUnderCalledHets()` with maxHapLength = 3 will change HAAAH, but not HAAAAH, implying that 4 consecutive A's might be a realistic genotype.
`correctStretches()` addresses all other genotype errors, so it will change H to A or B and A to B and B to A when appropriate. It will also change N to A, B or H, making it partially redundant with `imputeByFlanks()`. The main difference is that `correctStretches()` allows the user to specify `maxHapLength` whereas `imputeByFlanks()` recognizes N stretches of arbitrary size.
Both functions will report the number of alleles before and after running them.
```{r}
#remove undercalled heterozygous alleles
ErrCorr1Genotypes <- correctUndercalledHets(postImpGenotypes, maxHapLength = 3)
#remove other errors
ErrCorr2Genotypes <- correctStretches(ErrCorr1Genotypes, maxHapLength = 3)
```
After removing errors both genotype list objects can be compared using `plotGenos()`.
```{r, fig.show = 'hold', fig.width = 7, fig.cap = "Fig. 3: Genotypes with corrected undercalled heterozygous (top) and other errors (bottom) from chromosome 1."}
plotGenos(ErrCorr1Genotypes, chromToPlot = 1)
plotGenos(ErrCorr2Genotypes,chromToPlot = 1)
```
### Comparing two genotype matrices
To quickly compare the results of the different functions in this package that manipulate genotypes, but also to compare the output of other imputation methods (e.g. from TASSEL), the user can graphically compare two genotype matrices. This allows a quick glance at which genotypes differ in two otherwise identical matrices.
```{r, fig.show = 'hold', fig.width = 7, fig.height = 4, fig.cap = "Fig. 4: Comparison of two genotype matrices"}
plotCompareGenos(genotypes, ErrCorr2Genotypes, chromToPlot = 1:3)
```
The plotCompareGenos function also takes the same arguments as plotGenos() to look in more detail at
certain regions or individuals.
### Exporting results
As evident from the graphical genotypes, almost all putatively wrong genotypes have been changed into more sensible ones. Once you are confident that your genotype data is of sufficient quality to allow QTL analysis or GWAS you can export the genotypes back to a .csv file for further analyses, for example using the `qtl` package.
```{r, eval = FALSE}
writeABHgenotypes(ErrCorr2Genotypes, outfile = "path/to/dir")
```
## Other functions
The `ABHgenotypeR` package offers two additional visualization options that we found useful and that are currently lacking in both TASSEL and `qtl`.
The `plotMarkerDensity()` function allows plotting the density of markers along the physical positions of the chromosomes. This might be useful to assess marker coverage in GBS experiments.
```{r, fig.show = 'hold', fig.width = 7, fig.height = 7, fig.cap = "Fig. 5: Marker Density"}
plotMarkerDensity(genos = ErrCorr2Genotypes)
```
The `plotAlleleFreq()` function allows plotting of parental allele frequencies along the physical positions of the chromosomes. This is useful to identify potential preferential transmission in the population.
```{r, fig.show = 'hold', fig.width = 7, fig.height = 7, fig.cap = "Fig. 6: Parental allele frequencies"}
plotAlleleFreq(genos = ErrCorr2Genotypes)
```
| /scratch/gouwar.j/cran-all/cranData/ABHgenotypeR/inst/doc/ABHgenotypeR-vignette.Rmd |
#' Agent Based Model Simulation Framework
#'
#' This package provides a framework to simulate agent based models that are
#' based on states and events.
#'
#' ## Agent
#' The core concept of this framework is the agent, which is an object of the [Agent]
#' class. An agent maintains its own state, which is a named R list storing any
#' R values in it (see [State]). The main task of an agent is to manage events
#' (see [Event]), and handle them in chronological order.
#'
#' ## Population
#' An object of the [Population] class manages agents and their contacts. The
#' contacts of agents are managed by Contact objects. The main functionality for
#' a contact object is to provide contacts of a given individuals at a given
#' time. For example, [newRandomMixing()] returns such an object that finds a
#' random agent in the population as a contact. the effect of contacts on the
#' states of agents are defined using a state transition rule. Please see
#' ```addTransition``` method of [Simulation] for more details.
#'
#' ## Simulation
#' The [Simulation] class inherits the [Population] class. So a simulation
#' manages agents and their contacts. Thus, the class also inherits the [Agent]
#' class. So a simulation can have its own state, and events attached
#' (scheduled) to it. In addition, it also manages all the transitions, using
#' its ```addTransition``` method. At last, it maintains loggers, which record
#' (or count) the state changes, and report their values at specified times.
#'
#' During a simulation the earliest event in the simulation is picked out,
#' unscheduled (detached), and handled, which potentially causes the state
#' change of the agent (or another agent in the simulation). The state change is
#' then logged by loggers (see [newCounter()] and
#' [newStateLogger()] for more details) that recognize the state
#' change.
#'
#' ## Usage
#' To use this framework, we start by creating a simulation
#' object, populate the simulation with agents (either using the argument in
#' the constructor, or use its ```addAgent``` method), and
#' initialize the agents with their initial states using its ```setState``` method.
#'
#' We then attach ([schedule()]) events to agents (possibly to the population or
#' the simulation object too), so that these events change the agents' state.
#' For models in which the agents' states are discrete, such as the
#' SIR epidemic model, the events are managed by the framework through state
#' transitions, using rules defined by the ```addTransition``` method of
#' the [Simulation] class.
#'
#' Finally, we add loggers to the simulation using
#' the [Simulation] class' ```addLogger``` method and either [newCounter()] or
#' [newStateLogger()], and then run the simulation using
#' its ```run``` method, which returns the observations of the loggers
#' at the requested time points as a data.frame object.
#'
#' For more information and examples, please see the [Wiki](https://github.com/junlingm/ABM/wiki/) pages on Github.
#'
#' @examples
#' # simulate an SIR model using the Gillespie method
#' # the population size
#' N = 10000
#' # the initial number of infectious agents
#' I0 = 10
#' # the transmission rate
#' beta = 0.4
#' # the recovery rate
#' gamma = 0.2
#' # a waiting time generator that handles a 0 rate properly
#' wait.exp = function(rate) {
#' if (rate == 0) Inf else rexp(1, rate)
#' }
#' # this is a function that reschedules all the events. When the
#' # state changes, the old events are invalid because they are
#' # calculated from the old state. This is possible because the
#' # waiting times are exponentially distributed
#' reschedule = function(time, agent, state) {
#' clearEvents(agent)
#' t.inf = time + wait.exp(beta*state$I*state$S/N)
#' schedule(agent, newEvent(t.inf, handler.infect))
#' t.rec = time + wait.exp(gamma*state$I)
#' schedule(agent, newEvent(t.rec, handler.recover))
#' }
#' # The infection event handler
#' # an event handler takes 3 arguments
#' # time is the current simulation time
#' # sim is an external pointer to the Simulation object.
#' # agent is the agent that the event is scheduled to
#' handler.infect = function(time, sim, agent) {
#' x = getState(agent)
#' x$S = x$S - 1
#' x$I = x$I + 1
#' setState(agent, x)
#' reschedule(time, agent, x)
#' }
#' # The recovery event handler
#' handler.recover = function(time, sim, agent) {
#' x = getState(agent)
#' x$R = x$R + 1
#' x$I = x$I - 1
#' setState(agent, x)
#' reschedule(time, agent, x)
#' }
#' # create a new simulation with no agent in it.
#' # note that the simulation object itself is an agent
#' sim = Simulation$new()
#' # the initial state
#' x = list(S=N-I0, I=I0, R=0)
#' sim$state = x
#' # schedule an infection event and a recovery event
#' reschedule(0, sim$get, sim$state)
#' # add state loggers that save the S, I, and R states
#' sim$addLogger(newStateLogger("S", NULL, "S"))
#' sim$addLogger(newStateLogger("I", NULL, "I"))
#' sim$addLogger(newStateLogger("R", sim$get, "R"))
#' # now the simulation is setup, and is ready to run
#' result = sim$run(0:100)
#' # the result is a data.frame object
#' print(result)
#'
#' # simulate an agent based SEIR model
#' # specify an exponential waiting time for recovery
#' gamma = newExpWaitingTime(0.2)
#' # specify a transmission rate
#' beta = 0.4
#' # specify an exponentially distributed latent period
#' sigma = newExpWaitingTime(0.5)
#' # the population size
#' N = 10000
#' # create a simulation with N agents, initialize the first 5 with a state "I"
#' # and the remaining with "S".
#' sim = Simulation$new(N, function(i) if (i <= 5) "I" else "S")
#' # add event loggers that count the individuals in each state.
#' # the first argument is the name of the counter, the second is
#' # the state to count. States should be lists. However, for
#' # simplicity, if the state has a single value, then we
#' # can specify that value instead, e.g., "S", which is
#' # equivalent to list("S")
#' sim$addLogger(newCounter("S", "S"))
#' sim$addLogger(newCounter("E", "E"))
#' sim$addLogger(newCounter("I", "I"))
#' sim$addLogger(newCounter("R", "R"))
#' # create a random mixing contact pattern and attach it to sim
#' m = newRandomMixing()
#' sim$addContact(m)
#' # the transition for leaving the latent state and becoming infectious
#' sim$addTransition("E"->"I", sigma)
#' # the transition for recovery
#' sim$addTransition("I"->"R", gamma)
#' # the transition for transmission, which is caused by the contact m
#' # also note that the waiting time can be a number, which is the same
#' # as newExpWaitingTime(beta)
#' sim$addTransition("I" + "S" -> "I" + "E" ~ m, beta)
#' # run the simulation, and get a data.frame object
#' result = sim$run(0:100)
#' print(result)
#'
#' @useDynLib ABM, .registration=TRUE
#' @importFrom R6 R6Class
#' @importFrom Rcpp evalCpp
"_PACKAGE"
#' The state of an agent
#'
#' In this framework, a state is a list, in which each named component is
#' called a domain. The value of a domain can be any R value. The list can
#' contain at most one unnamed component, which corresponds to a domain with
#' no name. This is useful if there is only one domain.
#'
#' A state can be matched to an R list (called a rule in this case).
#' The state matches the rule if and only if each domain (name of the
#' list) in the rule has the same value as in the state. The domains of the
#' state not listed in the rule are not matched. In addition, to match a rule,
#' the matched domain values must be either numbers or characters. This is
#' useful for identifying state changes. See [newCounter()] and
#' the [Simulation] class' ```addTransition``` method for more details.
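#'
#' For example (the domain names below are illustrative):
#'
#' @examples
#' state = list(health = "I", age = 30)
#' # the rule matches: the health domain agrees, and age is not in the rule
#' stateMatch(state, list(health = "I"))
#' # the rule does not match: the age domain differs
#' stateMatch(state, list(health = "I", age = 40))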
#'
#' @name State
NULL
#' R6 class that represents an agent
#'
#' The key task of an agent is to maintain events and handle them in
#' chronological order. An agent also maintains its state, which is a list of
#' values. The events, when handled, operate on the state of the agent (or of
#' other agents).
#'
#' During the simulation, the agent with the earliest event in the simulation
#' is picked out, and its earliest event is unscheduled and handled, which
#' potentially causes a state change of the agent (or of another agent in the
#' simulation). The state change is then logged by the loggers that recognize
#' the state change.
#'
#' An agent by itself cannot handle events. It has to be added to a
#' simulation (or to a population that is itself added to a simulation).
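#'
#' A small sketch of typical use is given below; the state domain and the
#' no-op event handler are illustrative, and [newEvent()] is provided by
#' this package:
#'
#' @examples
#' # create an agent in a hypothetical state "S"
#' a = Agent$new(list(health = "S"))
#' # schedule an event at time 1 whose handler does nothing
#' e = newEvent(1, function(time, sim, agent) NULL)
#' a$schedule(e)
#' # the event can be detached again
#' a$unschedule(e)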
#'
#' @export
Agent <- R6::R6Class(
"R6Agent",
public = list(
    #' Create an agent
#'
#' @param agent can be either an external pointer to an agent such as one
#' returned by newAgent, or a list representing the initial state for creating
#' a new agent, or NULL (an empty state)
#' @param death.time the time of death for the agent, a numeric value
#'
    #' @details Note that specifying death.time is equivalent to calling the
    #' ```$setDeathTime``` method.
initialize = function(agent=NULL, death.time=NA) {
if (typeof(agent) == "externalptr") {
private$agent = agent
} else {
if (!is.null(agent) && !is.list(agent))
agent = list(agent)
private$agent = newAgent(agent, death.time)
}
},
#' Check if the state of the agent matches a given state
#'
#' @param rule the state to match, a list
#'
#' @return a logical value
match = function(rule) {
matchState(private$agent, rule)
},
#' Schedule an event
#'
#' @param event an object of the R6 class Event, or an external pointer
#' returned by newEvent
#'
#' @return the agent itself (invisible)
schedule = function(event) {
if (inherits(event, "R6Event"))
event = event$get
schedule(private$agent, event)
invisible(self)
},
#' Unschedule an event
#'
#' @param event an object of the R6 class Event, or an external pointer
#' returned by newEvent
#'
#' @return the agent itself (invisible)
unschedule = function(event) {
if (inherits(event, "R6Event"))
event = event$get
unschedule(private$agent, event)
invisible(self)
},
#' leave the population that the agent is in
#'
    #' @return the agent itself (invisible)
leave = function() {
leave(private$agent)
invisible(self)
},
#' set the time of death for the agent
#'
#' @param time the time of death, a numeric value
#'
#' @return the agent itself (invisible)
#'
    #' @details At the time of death, the agent is removed from the simulation.
    #' Calling this method multiple times causes the agent to die at the earliest
    #' of the specified times.
setDeathTime = function(time) {
setDeathTime(private$agent, time)
invisible(self)
}
),
private = list(
agent = NULL
),
active = list(
#' @field state
#'
#' Get/set the state of the agent
state = function(new.state = NULL) {
if (is.null(new.state)) {
getState(private$agent)
} else {
setState(private$agent, new.state)
}
},
#' @field id
#'
#' Get the agent ID
id = function() { getID(private$agent) },
#' @field get
#'
#' Get the external pointer for the agent
get = function() { private$agent }
  )
)
#' Create an agent with a given state
#'
#' @name newAgent
#'
#' @param state a list giving the initial state of the agent, or NULL (an empty
#' list)
#'
#' @param death_time the death time for the agent, an optional numeric value.
#'
#' @return an external pointer pointing to the agent
#'
#' @details Setting death_time is equivalent to calling the [setDeathTime()]
#' function.
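#'
#' For example:
#'
#' @examples
#' # an agent with an empty state
#' a = newAgent(NULL)
#' # an agent in state "S" that dies at time 10
#' b = newAgent(list("S"), 10)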
#'
#' @export
NULL
#' Get the ID of the agent.
#'
#' @name getID
#'
#' @param agent an external pointer returned by newAgent
#'
#' @return an integer value
#'
#' @details Before an agent is added to a population, its id is 0.
#' After it is added, its id is the index in the population
#' (starting from 1).
#'
#' If agent is an R6 object, then we should either use ```agent$id```,
#' or use ```getID(agent$get)```
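#'
#' For example:
#'
#' @examples
#' a = newAgent(list("S"))
#' # returns 0, because the agent has not been added to a population
#' getID(a)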
#'
#' @export
NULL
#' Get the state of the agent
#'
#' @name getState
#'
#' @param agent an external pointer returned by newAgent
#'
#' @return a list holding the state
#'
#' @details If agent is an R6 object, then we should either use ```agent$state```,
#' or use ```getState(agent$get)```
#'
#' @export
NULL
#' Set the state of the agent
#'
#' @name setState
#'
#' @param agent an external pointer returned by newAgent
#'
#' @param state an R list giving the components of the state to be
#' updated.
#'
#' @details In this framework, a state is a list, in which each named
#' component is called a domain. This function only updates the
#' values of the domains given in the ```state``` argument, while leaving
#' the domains not listed in ```state``` unchanged.
#'
#' If agent is an R6 object, then we should either use ```agent$state = state```,
#' or use ```setState(agent$get, state)```
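#'
#' A short sketch of the update semantics (the domain names are illustrative):
#'
#' @examples
#' a = newAgent(list(health = "S", age = 30))
#' # update the health domain only; the age domain is unchanged
#' setState(a, list(health = "I"))
#' getState(a)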
#'
#' @export
NULL
#' Check if two states match
#'
#' @name stateMatch
#'
#' @param state a list holding a state to check
#'
#' @param rule a list holding the state to match against
#'
#' @return a logical value
#'
#' @details The state matches the rule if and only if each domain (name of the
#' list) in the rule has the same value as in the state. The domains of the
#' state not listed in the rule are not matched
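#'
#' For example (the domain names below are illustrative):
#'
#' @examples
#' state = list(health = "S", age = 30)
#' # matches: the health domain agrees, and age is not in the rule
#' stateMatch(state, list(health = "S"))
#' # does not match: the health domain differs
#' stateMatch(state, list(health = "I"))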
#'
#' @export
NULL
#' Check if the state of an agent matches a given state
#'
#' @param agent an external pointer returned by newAgent
#'
#' @param rule a list holding the state to match against
#'
#' @return a logical value
#'
#' @details This function is equivalent to
#' stateMatch(getState(agent), rule)
#'
#' The state matches the rule if and only if each domain (name of the
#' list) in the rule has the same value as in the state. The domains of the
#' state not listed in the rule are not matched
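#'
#' For example (the domain names below are illustrative):
#'
#' @examples
#' a = newAgent(list(health = "S", age = 30))
#' matchState(a, list(health = "S")) # TRUE
#' matchState(a, list(health = "I")) # FALSE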
#'
#' @export
matchState = function(agent, rule) {
stateMatch(getState(agent), rule)
}
#' Schedule (attach) an event to an agent
#'
#' @name schedule
#'
#' @param agent an external pointer returned by newAgent
#'
#' @param event an external pointer returned by newEvent
#'
#' @details If agent is an R6 object, then we should use either
#' agent$schedule(event) or schedule(agent$get, event)
#'
#' Similarly, if event is an R6 object, then we should use
#' schedule(agent, event$get)
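#'
#' A short sketch (the no-op event handler below is illustrative):
#'
#' @examples
#' a = newAgent(list("S"))
#' e = newEvent(1, function(time, sim, agent) NULL)
#' schedule(a, e)
#' # the event can be detached again with unschedule(a, e)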
#'
#' @export
NULL
#' Unschedule (detach) an event from an agent
#'
#' @name unschedule
#'
#' @param agent an external pointer returned by newAgent
#'
#' @param event an external pointer returned by newEvent
#'
#' @details If agent is an R6 object, then we should use either
#' agent$unschedule(event) or unschedule(agent$get, event)
#'
#' Similarly, if event is an R6 object, then we should use
#' unschedule(agent, event$get)
#'
#' @export
NULL
#' Unschedule all event from an agent
#'
#' @name clearEvents
#'
#' @param agent an external pointer returned by newAgent
#'
#' @details If agent is an R6 object, then we should use either
#' agent$clearEvents() or clearEvents(agent$get)
#'
#' @export
NULL
#' leave the population that the agent is in
#'
#' @name leave
#'
#' @param agent an external pointer returned by newAgent
#'
#' @details If agent is an R6 object, then we should use either
#' agent$leave() or leave(agent$get)
#'
#' @export
NULL
#' set the time of death for an agent
#'
#' @name setDeathTime
#'
#' @param agent an external pointer returned by [newAgent()] or [getAgent()]
#'
#' @param time the time of death, a numeric value
#'
#' @details If agent is an R6 object, then we should use either
#' agent$setDeathTime(time) or setDeathTime(agent$get, time)
#'
#' At the time of death, the agent is removed from the simulation. Calling this
#' function multiple times causes the agent to die at the earliest of the
#' specified times.
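#'
#' For example:
#'
#' @examples
#' a = newAgent(list("S"))
#' setDeathTime(a, 10)
#' # the agent now dies at time 5, the earliest death time set so far
#' setDeathTime(a, 5)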
#'
#' @export
NULL
#' An R6 class that implements a contact pattern in R
#'
#' The main task of the class is to return the contacts of a given agent. Each
#' object of this class is associated to a population. A population may have
#' multiple contacts attached, e.g., a random mixing contact pattern and a
#' network contact pattern.
#'
#' @details This class must be subclassed in order to implement specific functionality.
#' To subclass, we must implement three methods, namely contact, addAgent, and
#' build. See more details in the documentation of each method.
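#'
#' A minimal skeleton of a subclass is sketched below (the class name is
#' illustrative, and the method bodies must be filled in with an actual
#' implementation):
#'
#' @examples
#' MyContact = R6::R6Class(
#'   "MyContact",
#'   inherit = Contact,
#'   public = list(
#'     contact = function(time, agent) {
#'       # return a list of external pointers to the contacts of agent
#'       list()
#'     },
#'     addAgent = function(agent) {
#'       # record the new agent, e.g., attach it to a contact network
#'     },
#'     build = function() {
#'       # the population is fully initialized at this point; finish
#'       # the construction, e.g., generate the contact network
#'     }
#'   )
#' )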
#'
#' @export
Contact = R6::R6Class(
"R6Contact",
public = list(
#' @description the constructor
initialize = function() {
private$pointer = newContact(self)
},
#' @description attach to a population
#'
#' @param population the population to attach to. An external pointer
#'
    #' @details This method is called from the C++ side. Users should not
    #' call it directly.
attach = function(population) {
if (!is.null(private$population))
stop("Already attached to a population")
private$population = population
self$build()
},
#' @description Returns the contacts of the given agent
#'
#' @param time the current time in the simulation, a number
#'
#' @param agent the agent whose contacts are requested. An external pointer
#'
#' @return a list of external pointers pointing to the contacting agents
contact = function(time, agent) { list() },
#' @description Add an agent to the contact pattern
#'
#' @param agent the agent to be added. An external pointer
#'
#' @details When an agent is added to a population, it is added to each of the
#' contact patterns. When a contact pattern is added to a population, all
    #' agents in the population are added to the contact pattern, one by one.
#'
#' Note that, immediately before the simulation is run, while reporting
#' the states to the simulation object, the population will call the
#' build method for each Contact object. Thus a contact object may choose
#' to ignore adding agents before build is called, and handle all agents
    #' within the build method. However, the contact object must handle
#' adding an agent after build is called.
addAgent = function(agent) { },
#' @description Remove an agent from the contact pattern
#'
#' @param agent the agent to be removed. An external pointer
#'
#' @details When an agent leaves a population, it is removed from each of the
#' contact patterns.
#'
    #' This method may also be called in event handlers to remove an agent.
remove = function(agent) { },
#' @description Build the contact pattern
#'
#' @details This method is called immediately before the simulation is run,
#' when the attached population reports the states to the simulation object.
#'
#' Thus this method can be considered as a callback function to notify the
#' contact object the population state, such as its agents, states, events,
#' and contact patterns are all initialized, so the contact pattern should
#' finish initialization, for example, building the contact network.
#'
    #' This is needed because some contact patterns, such as a configuration-
    #' model contact network, cannot be built while adding agents one by one;
    #' they must be generated when all agents are present. This is unlike the
    #' Barabasi-Albert network, which can be built while the agents are added.
build = function() { }
),
private = list(
pointer = NULL,
population = NULL
),
active = list(
#' @field get
#'
    #' The external pointer pointing to the C++ RContact object.
get = function() { private$pointer },
#' @field attached
#'
#' a logical value indicating whether the object has been attached
#' to a population
attached = function() { !is.null(private$population) }
)
)
#' Creates a RandomMixing object
#'
#' @return an external pointer.
#'
#' @name newRandomMixing
#'
#' @examples
#' # creates a simulation with 100 agents
#' sim = Simulation$new(100)
#' # add a random mixing contact pattern for these agents.
#' sim$addContact(newRandomMixing())
#'
#' @export
NULL
#' Creates a random network using the configuration model
#'
#' @name newConfigurationModel
#'
#' @param rng a function that generates random degrees
#'
#' @return an external pointer.
#'
#' @details The population must be an external pointer, not an R6 object.
#' To use an R6 object, we should use the pointer representation obtained
#' from its $get method.
#'
#' The function rng should take exactly one argument n for the number of degrees
#' to generate, and should return an integer vector of length n.
#'
#' @examples
#' # creates a simulation with 100 agents
#' sim = Simulation$new(100)
#' # add a Poisson network with a mean degree 5
#' sim$addContact(newConfigurationModel(function(n) rpois(n, 5)))
#'
#' @export
NULL
#' Create a logger of the Counter class
#'
#' When a state change occurs, it is passed to each logger, which then
#' updates its value. At the specified time points in a run, the
#' values of the loggers are reported and recorded in a data.frame object,
#' where the columns represent variables, and the rows represent the
#' observations at each time point given to the run. Each logger has a
#' name, which becomes the column name in the data.frame.
#'
#' @name newCounter
#'
#' @param name the name of the counter, must be a length-1 character vector
#'
#' @param from a list specifying the state of the agent, or a character or
#' numeric value that is equivalent to list(from). Please see the details
#' section
#'
#' @param to a list (can be NULL) specifying the state of the agent after the
#' state change, or a character or numeric value that is equivalent to
#' list(to). Please see the details section
#'
#' @param initial the initial value of the counter. Default to 0.
#'
#' @return an external pointer that can be passed to the [Simulation] class'
#' ```$addLogger```.
#'
#' @details If the argument "to" is not NULL, then the counter counts the
#' transitions from "from" to "to". Otherwise, it counts the number of agents
#' in a state that matches the "from" argument. Specifically, if an agent
#' jumps to a state matching "from", then the count increases by 1. If the
#' agent jumps away from such a state, then the count decreases by 1.
#'
#' @export
NULL
#' Create a logger of the StateLogger class
#'
#' When a state change occurs, it is passed to each logger, which then
#' updates its value. At the specified time points in a run, the
#' values of the loggers are reported and recorded in a data.frame object,
#' where the columns represent variables, and the rows represent the
#' observations at each time point given to the run. Each logger has a
#' name, which becomes the column name in the data.frame.
#'
#' @name newStateLogger
#'
#' @param name the name of the logger. A length-1 character vector
#'
#' @param agent the agent whose state will be logged. An external pointer
#'
#' @param state.name the state name of the state of the agent to be logged.
#' A character vector of length 1.
#'
#' @details If a state change happened to any agent, the specified state
#' of the agent given by the "agent" argument will be logged. If
#' ```state.name==NULL``` then the state of the agent that just changed is
#' logged.
#'
#' The agent must be an external pointer. To use an R6 object, we need
#' to use its $get method to get the external pointer.
#'
#' The state to be logged must have a numeric value.
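#'
#' A short sketch, following the Gillespie SIR example in the package
#' documentation:
#'
#' @examples
#' sim = Simulation$new()
#' sim$state = list(S = 100)
#' # log the "S" state of the simulation object at each report time
#' sim$addLogger(newStateLogger("S", sim$get, "S"))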
#'
#' @export
NULL