#' A3 Results for Arbitrary Model #' #' This function calculates the A3 results for an arbitrary model construction algorithm (e.g. Linear Regressions, Support Vector Machines or Random Forests). For linear regression models, you may use the \code{\link{a3.lm}} convenience function. #' #' @param formula the regression formula. #' @param data a data frame containing the data to be used in the model fit. #' @param model.fn the function to be used to build the model. #' @param model.args a list of arguments passed to \code{model.fn}. #' @param ... additional arguments passed to \code{\link{a3.base}}. #' @return S3 \code{A3} object; see \code{\link{a3.base}} for details #' @references Scott Fortmann-Roe (2015). Consistent and Clear Reporting of Results from Diverse Modeling Techniques: The A3 Method. Journal of Statistical Software, 66(7), 1-23. <http://www.jstatsoft.org/v66/i07/> #' @examples #' \donttest{ #' ## Standard linear regression results: #' #' summary(lm(rating ~ ., attitude)) #' #' ## A3 Results for a Linear Regression model: #' #' # In practice, p.acc should be <= 0.01 in order #' # to obtain finer grained p values. #' #' a3(rating ~ ., attitude, lm, p.acc = 0.1) #' #' #' ## A3 Results for a Random Forest model: #' #' # It is important to include the "+0" in the formula #' # to eliminate the constant term. #' #' require(randomForest) #' a3(rating ~ .+0, attitude, randomForest, p.acc = 0.1) #' #' # Set the ntrees argument of the randomForest function to 100 #' #' a3(rating ~ .+0, attitude, randomForest, p.acc = 0.1, model.args = list(ntree = 100)) #' #' # Speed up the calculation by doing 5-fold cross-validation. #' # This is faster and more conservative (i.e. it should over-estimate error) #' #' a3(rating ~ .+0, attitude, randomForest, n.folds = 5, p.acc = 0.1) #' #' # Use Leave One Out Cross Validation. The least biased approach, #' # but, for large data sets, potentially very slow. #' #' a3(rating ~ .+0, attitude, randomForest, n.folds = 0, p.acc = 0.1) #' #' ## Use a Support Vector Machine algorithm. #' #' # Just calculate the slopes and R^2 values, do not calculate p values. #' #' require(e1071) #' a3(rating ~ .+0, attitude, svm, p.acc = NULL) #' } a3 <- function(formula, data, model.fn, model.args = list(), ...){ model.fn.w.args <- function(y, x){ dat <- data.frame(cbind(y, x)) names(dat) <- c("y", paste("x", 1:ncol(x), sep="")) new.model.args = list(formula = y ~ . + 0, data = dat) for(n in names(model.args)){ new.model.args[[n]] = model.args[[n]] } return(do.call(model.fn, new.model.args)) } simulate.fn <- function(y, x, new.x, ...){ reg <- model.fn.w.args(y, x) new.data <- data.frame(new.x) if(ncol(new.data) != ncol(x)){ new.data <- data.frame(t(new.data)) } names(new.data) <- paste("x", 1:ncol(x), sep="") return(predict(reg, new.data)) } a3.base(formula, data, model.fn.w.args, simulate.fn, ...) } #' A3 for Linear Regressions #' #' This convenience function calculates the A3 results specifically for linear regressions. It uses R's \code{\link{glm}} function and so supports logistic regressions and other link functions using the \code{family} argument. For other forms of models you may use the more general \code{\link{a3}} function. #' #' @param formula the regression formula. #' @param data a data frame containing the data to be used in the model fit. #' @param family the regression family. Typically 'gaussian' for linear regressions. #' @param ... additional arguments passed to \code{\link{a3.base}}. 
#' @return S3 \code{A3} object; see \code{\link{a3.base}} for details #' @examples #' \donttest{ #' ## Standard linear regression results: #' #' summary(lm(rating ~ ., attitude)) #' #' ## A3 linear regression results: #' #' # In practice, p.acc should be <= 0.01 in order #' # to obtain fine grained p values. #' #' a3.lm(rating ~ ., attitude, p.acc = 0.1) #' #' # This is equivalent both to: #' #' a3(rating ~ ., attitude, glm, model.args = list(family = gaussian), p.acc = 0.1) #' #' # and also to: #' #' a3(rating ~ ., attitude, lm, p.acc = 0.1) #' } a3.lm <- function(formula, data, family = gaussian, ...){ a3(formula, data, model.fn = glm, model.args = list(family = family), ...) } #' Base A3 Results Calculation #' #' This function calculates the A3 results. Generally this function is not called directly. It is simpler to use \code{\link{a3}} (for arbitrary models) or \code{\link{a3.lm}} (specifically for linear regressions). #' #' @param formula the regression formula. #' @param data a data frame containing the data to be used in the model fit. #' @param model.fn function used to generate a model. #' @param simulate.fn function used to create the model and generate predictions. #' @param n.folds the number of folds used for cross-validation. Set to 0 to use Leave One Out Cross Validation. #' @param data.generating.fn the function used to generate stochastic noise for calculation of exact p values. #' @param p.acc the desired accuracy for the calculation of exact p values. The entire calculation process will be repeated \eqn{1/p.acc} times so this can have a dramatic affect on time required. Set to \code{NULL} to disable the calculation of p values. #' @param features whether to calculate the average slopes, added \eqn{R^2} and p values for each of the features in addition to the overall model. #' @param slope.sample if not NULL the sample size for use to calculate the average slopes (useful for very large data sets). #' @param slope.displacement the amount of displacement to take in calculating the slopes. May be a single number in which case the same slope is applied to all features. May also be a named vector where there is a name for each feature. #' @return S3 \code{A3} object containing: #' \item{model.R2}{The cross validated \eqn{R^2} for the entire model.} #' \item{feature.R2}{The cross validated \eqn{R^2}'s for the features (if calculated).} #' \item{model.p}{The p value for the entire model (if calculated).} #' \item{feature.p}{The p value for the features (if calculated).} #' \item{all.R2}{The \eqn{R^2}'s for the model features, and any stochastic simulations for calculating exact p values.} #' \item{observed}{The observed response for each observation.} #' \item{predicted}{The predicted response for each observation.} #' \item{slopes}{Average slopes for each of the features (if calculated).} #' \item{all.slopes}{Slopes for each of the observations for each of the features (if calculated).} #' \item{table}{The A3 results table.} #' a3.base <- function(formula, data, model.fn, simulate.fn, n.folds = 10, data.generating.fn = replicate(ncol(x), a3.gen.default), p.acc = 0.01, features = TRUE, slope.sample = NULL, slope.displacement = 1){ if(! is.null(p.acc)){ if(p.acc <= 0 || p.acc >=1){ stop("p.acc must be between 0 and 1. Set p.acc to NULL to disable the calculation of p values.") } } if(n.folds < 2 && n.folds != 0){ stop("n.folds must be >= 2. Set n.folds to 0 to use Leave One Out Cross Validation.") } n.reps <- 0 if(! 
is.null(p.acc)){ n.reps <- ceiling(1/p.acc) } res <- list() mf <- model.frame(formula, data, drop.unused.levels = TRUE) x <- model.matrix(formula, mf) y <- model.response(mf) if(length(data.generating.fn) != ncol(x)){ stop("data.generating.fn must be a list of functions one for each column in the model matrix") } if(n.folds == 0){ n.folds <- length(y) } my.apply <- lapply if( ! is.null(p.acc) ){ # if( library(pbapply, quietly = TRUE, logical.return = TRUE) == TRUE ){ # not needed due to depends my.apply <- pblapply # Show a progress bar if available # } } # Calculate the groups for cross validation cv.folds <- split(sample(1:length(y)), rep(1:n.folds, length = length(y))) # Generate random data series for p values new.data <- lapply(1:ncol(x), function(c){ data.generating.fn[[c]](x[,c], n.reps) }) r2.formatter <- function(x){ signs <- sign(x) x <- abs(x)*100 res <- paste(format(round(x, 1), digits = 3), "%") signs <- sapply(signs, function(x){ if(x == -1){ return("- ") }else{ return("+ ") } }) if(signs[1] == "+ "){ signs[1] <- " " # removed the plus sign for the overall model accuracy } res <- paste(signs, res, sep="") return(res) } p.formatter <- function(x){ if(length(x) == 0){ return(c()) } res <- format(x, digits = 4) for(i in 1:length(x)){ if(x[i] == 0){ res[i] <- paste("<", p.acc) } } return(res) } slope.formatter <- function(x){ if(length(x) == 0){ return(c()) } format(x, digits = 3) } # Setup iterations # "default" is initial simulation without any randomized data # Each rep after that has some form of randomized data iterations <- "default" if(! is.null(p.acc)){ iterations <- c(iterations, 1:n.reps) } top <- 0 if(features){ top <- ncol(x) } # Iterate through each rep and the default # outputs[[1]] will have the set of default cases in it # outputs[[>1]] will have the randomized data cases outputs <- my.apply(iterations, function(rep){ # Calculate R2's for the rep # We calculate for the model (0) and then for each column of data by numerical index out <- lapply(0:top, function(c){ new.x <- x if(rep != "default"){ # If we aren't on the default case, we add some form of randomization if(c==0){ # We are doing the overall model # So randomize all the data for(j in 1:ncol(x)){ new.x[,j] <- new.data[[j]][[as.numeric(rep)]] } }else{ # We're looking at a specific column, so just randomize that data new.x[,c] <- new.data[[c]][[as.numeric(rep)]] } } # Remove a column of data if we are at the un-randomized case if((c != 0) && (rep == "default")){ if(top == 1){ return(list(R2=0)); # if there is only one feature column added R^2 should be full value } new.x <- as.data.frame(new.x[,-c]) } res <- a3.r2(y, new.x, simulate.fn, cv.folds) return(res) } ) r2 <- sapply(out, function(x){x$R2}) return(list( R2 = r2, predicted = out[[1]]$predicted, observed = out[[1]]$observed )) }) predicted <- outputs[[1]]$predicted observed <- outputs[[1]]$observed outputs <- lapply(outputs, function(x){x$R2}) if(features){ get.names <- function(formula, data){ #t <- terms(formula, data=data) #l <- attr(t, "term.labels") #if(attr(t, "intercept")==1){ # l <- c("(Intercept)",l) #} return(attr(x, "dimnames")[[2]]) } entry.names <- c("-Full Model-", get.names(formula, data = data)) getSlopes <- function (reg, data){ slopes <- list() for(col in 2:ncol(data)){ slopes[[as.character(col)]] <- c() span <- range(data[,col]) span <- span[2] - span[1] for(row in 1:nrow(data)){ point <- data[row,] at.point <- predict(reg, point) if(length(slope.displacement) == 1){ dist <- slope.displacement }else{ dist <- 
slope.displacement[entry.names[col]] } slope <- 0 #while(TRUE){ above.point <- point above.point[col] <- point[col] + dist at.above <- predict(reg, above.point)[[1]] below.point <- point below.point[col] <- point[col] - dist at.below <- predict(reg, below.point)[[1]] new.slope <- (at.above - at.below)/(dist*2) # # if(new.slope == 0){ # dist <- dist * 2 # if(slope != 0){ # break # } # if(dist > span){ # break # } # }else{ # if(abs( (slope-new.slope) / new.slope) < epsilon){ # break # } # dist <- dist / 2 # } slope <- new.slope #} slopes[[as.character(col)]] <- c(slopes[[as.character(col)]], slope) } } slopes } # print(model.fn(y, x)) slope.data <- data.frame(cbind(y, x)) names(slope.data) <- c("y", paste("x", 1:ncol(x), sep="")) if(! is.null(slope.sample)){ slope.data <- slope.data[sample(1:nrow(slope.data), slope.sample),] } res[["all.slopes"]] <- getSlopes(model.fn(y, x), slope.data) res[["all.slopes"]] <- lapply(res[["all.slopes"]], function(x){ round(x, digits = 8)}) res[["slopes"]] = sapply(res[["all.slopes"]], function(x){ return(median(x)) # r <- unique(round(range(x), digits=8)) # if(length(r) == 1){ # return(r) # }else{ # return(paste(r, collapse = " - ")) # } } ) names(res[["all.slopes"]]) <- entry.names[-1] names(res[["slopes"]]) <- entry.names[-1] }else{ entry.names <- c("-Full Model-") } # Now take the data and calculate R2 and p values res[["predicted"]] <- predicted res[["observed"]] <- observed if(! is.null(p.acc)){ # we did a set of repitions so we should calculate p value r2 <- c() p.values <- c() res$all.R2 <- list() # item 1 is the overall model # item > 1 is a specific column for(i in 1:(top+1)){ # Get the R2 for the specific item items <- sapply(outputs, function(x){x[i]}) # check if we are on a column item if(i > 1){ # if so replace the first element of the items list with the value of the overall model R2 items[1] <- r2[1] } names(items) <- c("Base",paste("Rep", 1:n.reps)) res$all.R2[[paste(entry.names[i])]] <- items # rank the items by R2 dist <- rank(items) # find the position of the first item (overall model R2) within the list of randomly derived p values # this is the emprical R2 p.values <- c(p.values, 1 - (dist[1]-1)/(length(dist)-1)) # the R2 of the model as generated with random data new.null <- mean(items[-1]) if(i==1){ # the R2 of the model as generated with random data new.null <- mean(items[-1]) # if the R2 with stochasticity is better than 0, we will use as a baseline to scale our R2 #if(new.null > 0){ # Adjust R^2 based on what was observed in stochastic series XXX reenable? # r2 <- (items[1]-new.null) / (1-new.null) #}else{ r2 <- items[1] #} }else{ # get data series again (we overwrote the item[1] position earlier) items <- sapply(outputs, function(x){x[i]}) # see how the results improve compared to the baseline r2 <- c(r2, r2[1] - items[1])#max(new.null, items[1])) # Adjust R^2 based on what was observed in stochastic series XXX reenable? 
} } res$table <- data.frame(`Average Slope` = c("", slope.formatter(res$slopes)), `CV R^2` = r2.formatter(r2), `p value` = p.formatter(p.values), check.names=F) res$model.R2 <- r2[1] res$feature.R2 <- r2[-1] res$model.p <- p.values[1] names(res$model.p) <- entry.names[1] res$feature.p <- p.values[-1] names(res$feature.p) <- entry.names[-1] }else{ # we didn't do repetitions so no p values r2 <- outputs[[1]] r2[-1] <- r2[1] - r2[-1] # get delta to overall model R2 res$table <- data.frame(`Average Slope` = c("", slope.formatter(res$slopes)), `CV R^2` = r2.formatter(r2), check.names=F) res$all.R2 <- r2 res$model.R2 <- r2[1] res$feature.R2 <- r2[-1] } names(res$model.R2) <- entry.names[1] names(res$feature.R2) <- entry.names[-1] row.names(res$table) <- entry.names class(res) <- "A3" return(res) } #' Plot A3 Results #' #' Plots an 'A3' object results. Displays predicted versus observed values for each observation along with the distribution of slopes measured for each feature. #' #' @param x an A3 object. #' @param ... additional options provided to \code{\link{plotPredictions}}, \code{\link{plotSlopes}} and \code{\link{plot}} functions. #' @method plot A3 #' @examples #' \donttest{ #' data(housing) #' res <- a3.lm(MED.VALUE ~ NOX + ROOMS + AGE + HIGHWAY + PUPIL.TEACHER, housing, p.acc = NULL) #' plot(res) #' } plot.A3 <- function(x, ...){ if(class(x) != "A3"){ stop("'x' must be of class 'A3'.") } plotPredictions(x, ...) old.par <- par(ask=T) plotSlopes(x, ...) par(old.par) } #' Plot Predicted versus Observed #' #' Plots an 'A3' object's values showing the predicted versus observed values for each observation. #' #' @param x an A3 object, #' @param show.equality if true plot a line at 45-degrees. #' @param xlab the x-axis label. #' @param ylab the y-axis label. #' @param main the plot title. #' @param ... additional options provided to the \code{\link{plot}} function. #' #' @examples #' data(multifunctionality) #' x <- a3.lm(MUL ~ ., multifunctionality, p.acc = NULL, features = FALSE) #' plotPredictions(x) plotPredictions <- function(x, show.equality = TRUE, xlab = "Observed Value", ylab = "Predicted Value", main = "Predicted vs Observed", ...){ if(class(x) != "A3"){ stop("'x' must be of class 'A3'.") } plot(x$observed, x$predict, xlab=xlab, ylab=ylab, main=main, ...) abline(h=0, col="Gray"); abline(v=0, col="Gray"); if(show.equality){ abline(coef = c(0, 1), col = "Blue", lty = 2) } } #' Plot Distribution of Slopes #' #' Plots an 'A3' object's distribution of slopes for each feature and observation. Uses Kernel Density Estimation to create an estimate of the distribution of slopes for a feature. #' #' @param x an A3 object. #' @param ... additional options provided to the \code{\link{plot}} and \code{\link{density}} functions. #' #' @examples #' \donttest{ #' require(randomForest) #' data(housing) #' #' x <- a3(MED.VALUE ~ NOX + PUPIL.TEACHER + ROOMS + AGE + HIGHWAY + 0, #' housing, randomForest, p.acc = NULL, n.folds = 2) #' #' plotSlopes(x) #' } plotSlopes <- function(x, ...){ if(class(x) != "A3"){ stop("'x' must be of class 'A3'.") } size <- length(x$slopes) if(size == 0 ){ stop("no slopes to plot") } width <- ceiling(sqrt(size)) height <- floor(sqrt(size)) if(width*height < size){ width <- width+1 } old.par <- par(mfrow = c(height, width), mar = .55*c(5, 4, 4, 2) + 0.2) for(s in names(x$slopes)){ if(length(unique(x$all.slopes[[s]]))==1){ plot(x$slopes[[s]], 0, main = s) }else{ plot(density(x$all.slopes[[s]],...), xlab = "", ylab="", main = s, ...) 
rug(x$all.slopes[[s]], col="Blue") } abline(h=0, col="Gray") abline(v=0, col="Gray") } par(old.par) } #' Print Fit Results #' #' Prints an 'A3' object results table. #' #' @param x an A3 object. #' @param ... additional arguments passed to the \code{\link{print}} function. #' @method print A3 #' @examples #' x <- a3.lm(rating ~ ., attitude, p.acc = NULL) #' print(x) print.A3 <- function(x, ...){ if(class(x) != "A3"){ stop("'x' must be of class 'A3'.") } print(x$table, ...) } #' Nicely Formatted Fit Results #' #' Creates a LaTeX table of results. Depends on the \pkg{xtable} package. #' #' @param x an A3 object. #' @param ... additional arguments passed to the \code{\link{print.xtable}} function. #' @method xtable A3 #' @examples #' x <- a3.lm(rating ~ ., attitude, p.acc = NULL) #' xtable(x) xtable.A3 <- function(x, ...){ # require(xtable) # not needed due to depends if(class(x) != "A3"){ stop("'x' must be of class 'A3'.") } data <- x$table names(data) <- gsub("p value","Pr(>R^2)", names(data), fixed=TRUE) names(data) <- gsub("R^2","$R^2$", names(data), fixed=TRUE) print(xtable(data, align=c("l","|", rep("r", ncol(data))), ...), sanitize.colnames.function = function(x){x}, sanitize.text.function = function(x){ return(sapply(x, function(x){ trimmed = gsub("(^ +)|( +$)", "", x) if((! is.na(suppressWarnings(as.numeric(x)))) && suppressWarnings(as.numeric(x))==trimmed){ return(paste0("$", x, "$")) }else if(substr(trimmed, nchar(trimmed), nchar(trimmed))=="%"){ return(paste0("$", gsub("%", "\\%", trimmed, fixed=T), "$")); }else if(substr(trimmed, 1, 1)=="<"){ return(paste0("$", trimmed, "$")); }else{ return (gsub("_", "\\_", x, fixed=T)); } })) }) } #' Cross-Validated \eqn{R^2} #' #' Applies cross validation to obtain the cross-validated \eqn{R^2} for a model: the fraction of the squared error explained by the model compared to the null model (which is defined as the average response). A pseudo \eqn{R^2} is implemented for classification. #' #' @param y a vector or responses. #' @param x a matrix of features. #' @param simulate.fn a function object that creates a model and predicts y. #' @param cv.folds the cross-validation folds. 
#' #' @return A list comprising the following elements: #' \item{R2}{the cross-validated \eqn{R^2}} #' \item{predicted}{the predicted responses} #' \item{observed}{the observed responses} a3.r2 <- function(y, x, simulate.fn, cv.folds){ errors <- lapply(cv.folds, function(fold){ test.y <- y[fold] test.x <- x[fold,] train.y <- y[-fold] train.x <- as.data.frame(x[-fold,]) new.y <- simulate.fn(train.y, train.x, test.x) if(is.factor(new.y)){ # classification return(list( type="classification", correct = (new.y == test.y), predicted = new.y, observed = test.y )) }else{ # regression y.null <- mean(train.y) return(list( type="regression", ss.null = sum((test.y-y.null)^2), ss.model = sum((test.y-new.y)^2), predicted = new.y, observed = test.y )) } } ) if(errors[[1]]$type == "regression"){ # regression ss.model <- sum(unlist(sapply(errors, function(x){x$ss.model}))) ss.null <- sum(unlist(sapply(errors, function(x){x$ss.null}))) return( list(R2 = 1 - ss.model/ss.null, predicted = unlist(sapply(errors, function(x){x$predicted})), observed = unlist(sapply(errors, function(x){x$observed}))) ) }else{ # classification corrects <- unlist(sapply(errors, function(x){x$correct})) null.count <- max(table(as.factor(y))) # return number of observations in the largest class return( list( R2 = (sum(corrects)-null.count) / (length(corrects)-null.count), predicted = unlist(sapply(errors, function(x){x$predicted})), observed = unlist(sapply(errors, function(x){x$observed}))) ) } } #' Stochastic Data Generators #' #' The stochastic data generators generate stochastic noise with (if specified correctly) the same properties as the observed data. By replicating the stochastic properties of the original data, we are able to obtain the exact calculation of p values. #' #' Generally these will not be called directly but will instead be passed to the \code{data.generating.fn} argument of \code{\link{a3.base}}. #' #' @name a3.gen.default #' @aliases a3.gen.default a3.gen.bootstrap a3.gen.resample a3.gen.normal a3.gen.autocor #' #' @param x the original (observed) data series. #' @param n.reps the number of stochastic repetitions to generate. #' #' @return A list of length \code{n.reps} of vectors of stochastic noise. There are a number of different methods of generating noise: #' \item{a3.gen.default}{The default data generator. Uses \code{a3.gen.bootstrap}.} #' \item{a3.gen.resample}{Reorders the original data series.} #' \item{a3.gen.bootstrap}{Resamples the original data series with replacement.} #' \item{a3.gen.normal}{Calculates the mean and standard deviation of the original series and generates a new series with that distribution.} #' \item{a3.gen.autocor}{Assumes a first-order autocorrelation of the original series and generates a new series with the same properties.} #' #' @examples #' \donttest{ #' # Calculate the A3 results assuming an auto-correlated set of observations. #' # In usage p.acc should be <=0.01 in order to obtain more accurate p values.
#' #' a3.lm(rating ~ ., attitude, p.acc = 0.1, #' data.generating.fn = replicate(ncol(attitude), a3.gen.autocor)) #' } #' #' ## A general illustration: #' #' # Take x as a sample set of observations for a feature #' x <- c(0.349, 1.845, 2.287, 1.921, 0.803, 0.855, 2.368, 3.023, 2.102, 4.648) #' #' # Generate three stochastic data series with the same autocorrelation properties as x #' rand.x <- a3.gen.autocor(x, 3) #' #' plot(x, type="l") #' for(i in 1:3) lines(rand.x[[i]], lwd = 0.2) # Default generator, use bootstrap a3.gen.default <- function(x, n.reps){ if(length(unique(x))==1){ #it's a constant, such as an intercept return(a3.gen.normal(x, n.reps)) } a3.gen.bootstrap(x, n.reps) } # Generates a bootstrap random data series a3.gen.bootstrap <- function(x, n.reps){ res <- lapply(1:n.reps, function(r) {sample(x, length(x), replace=TRUE)}) res$default <- x res } # Generates a resampled random data series a3.gen.resample <- function(x, n.reps){ res <- lapply(1:n.reps, function(r) {sample(x, length(x), replace=FALSE)}) res$default <- x res } # Generates a normally distributed random data series a3.gen.normal <- function(x, n.reps){ mu <- mean(x) sd <- sd(x) if(sd == 0){ sd <- 1 } res <- lapply(1:n.reps, function(r) {rnorm(length(x), mu, sd)}) res$default <- x res } # Generates a first order autocorrelated random data series a3.gen.autocor <- function(x, n.reps){ mu <- mean(x) sd <- sd(x) if(sd == 0){ r <- 1 }else{ r <- cor(x[-1], x[-length(x)]) } res <- lapply(1:n.reps, function(rep) { dat <- rnorm(length(x), mu, sd) for(i in 2:length(x)){ dat[i] <- dat[i-1]*r + dat[i]*(1-r) } return(dat) } ) res$default <- x res }
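# Illustrative sketch (not part of the A3 package): a user-supplied generator for the
# data.generating.fn argument of a3.base() must follow the same interface as the
# a3.gen.* functions above -- take the observed series x and a repetition count n.reps,
# and return a list of n.reps noise vectors with the original series stored in $default.
# The name a3.gen.uniform and the uniform-sampling scheme below are hypothetical.
a3.gen.uniform <- function(x, n.reps){
  lo <- min(x); hi <- max(x)
  if(lo == hi){ # constant column (e.g. an intercept); fall back to the normal generator
    return(a3.gen.normal(x, n.reps))
  }
  res <- lapply(1:n.reps, function(r){ runif(length(x), lo, hi) })
  res$default <- x
  res
}
# It would be supplied one copy per model-matrix column, mirroring the package examples:
# a3(rating ~ ., attitude, lm, p.acc = 0.1,
#    data.generating.fn = replicate(ncol(attitude), a3.gen.uniform))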
/scratch/gouwar.j/cran-all/cranData/A3/R/A3.R
#' Boston Housing Prices #' #' A dataset containing the prices of houses in the Boston region and a number of features. #' The dataset and the following description is based on that provided by UCI Machine Learning Repository (\url{http://archive.ics.uci.edu/ml/datasets/Housing}). #' #' \itemize{ #' \item CRIME: Per capita crime rate by town #' \item ZN: Proportion of residential land zoned for lots over 25,000 sq.ft. #' \item INDUS: Proportion of non-retail business acres per town #' \item CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise) #' \item NOX: Nitrogen oxides pollutant concentration (parts per 10 million) #' \item ROOMS: Average number of rooms per dwelling #' \item AGE: Proportion of owner-occupied units built prior to 1940 #' \item DISTANCE: Weighted distances to five Boston employment centres #' \item HIGHWAY: Index of accessibility to radial highways #' \item TAX: Full-value property-tax rate per ten thousand dollar #' \item PUPIL.TEACHER: Pupil-teacher ratio by town #' \item MINORITY: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town #' \item LSTAT: Percent lower status of the population #' \item MED.VALUE: Median value of owner-occupied homes in thousands of dollars #' } #' #' @docType data #' @keywords datasets #' @name housing #' @usage data(housing) #' @references #' Frank, A. & Asuncion, A. (2010). UCI Machine Learning Repository [http://archive.ics.uci.edu/ml]. Irvine, CA: University of California, School of Information and Computer Science. #' #' Harrison, D. and Rubinfeld, D.L. Hedonic prices and the demand for clean air, J. Environ. Economics & Management, vol.5, 81-102, 1978. NULL #' Ecosystem Multifunctionality #' #' This dataset relates multifunctionality to a number of different biotic and abiotic features in a global survey of drylands. The dataset was obtained from (\url{http://www.sciencemag.org/content/335/6065/214/suppl/DC1}). The dataset contains the features listed below. #' #' \itemize{ #' \item ELE: Elevation of the site #' \item LAT & LONG: Location of the site #' \item SLO: Site slope #' \item SAC: Soil sand content #' \item PCA_C1, PCA_C2, PCA_C3, PCA_C4: Principal components of a set of 21 climatic features #' \item SR: Species richness #' \item MUL: Multifunctionality #' } #' #' @docType data #' @keywords datasets #' @name multifunctionality #' @usage data(multifunctionality) #' @references #' Maestre, F. T., Quero, J. L., Gotelli, N. J., Escudero, A., Ochoa, V., Delgado-Baquerizo, M., et al. (2012). Plant Species Richness and Ecosystem Multifunctionality in Global Drylands. Science, 335(6065), 214-218. doi:10.1126/science.1215442 NULL
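# Illustrative usage sketch for the two datasets documented above; the particular
# predictor choices here are arbitrary examples, not recommendations from the package.
data(housing)
print(a3.lm(MED.VALUE ~ ROOMS + NOX + PUPIL.TEACHER, housing, p.acc = NULL))
data(multifunctionality)
plotPredictions(a3.lm(MUL ~ SR + SAC + ELE, multifunctionality, p.acc = NULL, features = FALSE))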
/scratch/gouwar.j/cran-all/cranData/A3/R/A3.data.R
############################# # bootstrapped bias score computation #' @title Compute bootstrapped approach-bias scores #' @description Compute bootstrapped approach-bias scores with confidence intervals. #' @param ds a long-format data.frame #' @param subjvar Quoted name of the participant identifier column #' @param pullvar Quoted name of the column indicating pull trials. #' Pull trials should either be represented by 1, or by the second level of a factor. #' @param targetvar Name of the column indicating trials featuring the target stimulus. #' Target stimuli should either be represented by 1, or by the second level of a factor. #' @param rtvar Name of the reaction time column. #' @param iters Total number of desired iterations. At least 200 are required to get confidence intervals that make sense. #' @param algorithm Function (without brackets or quotes) to be used to compute AAT scores. See \link{Algorithms} for a list of usable algorithms. #' @param trialdropfunc Function (without brackets or quotes) to be used to exclude outlying trials in each half. #' The way you handle outliers for the reliability computation should mimic the way you do it in your regular analyses. #' It is recommended to exclude outlying trials when computing AAT scores using the mean double-difference scores and regression scoring approaches, #' but not when using d-scores or median double-difference scores. #' \itemize{ #' \item \code{prune_nothing} excludes no trials (default) #' \item \code{trial_prune_grubbs} applies a Grubbs' test to the data, removing one outlier at a time until the test is no longer significant. #' \item \code{trial_prune_3SD} excludes trials deviating more than 3SD from the mean per participant. #' \item \code{trial_prune_SD_dropcases} removes trials deviating more than a specific number of standard deviations from the participant's mean, #' and removes participants with an excessive percentage of outliers. #' Required arguments: #' \itemize{ #' \item \code{trialsd} - trials deviating more than \code{trialsd} standard deviations from the participant's mean are excluded (optional; default is 3) #' \item \code{maxoutliers} - participants with a higher percentage of outliers are removed from the data. (optional; default is .15) #' } #' \item \code{trial_recode_SD} recodes outlying reaction times to the nearest non-outlying value, #' with outliers defined as reaction times deviating more than a certain number of standard deviations from the participant's mean. Required argument: #' \itemize{ #' \item \code{trialsd} - trials deviating more than this many standard deviations from the mean are classified as outliers. #' } #' \item \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample} remove trials below and/or above certain percentiles, #' on a subject-by-subject basis or sample-wide, respectively. The following arguments are available: #' \itemize{ #' \item \code{lowerpercent} and \code{uppperpercent} (optional; defaults are .01 and .99). #' } #' } #' @param errortrialfunc Function (without brackets or quotes) to apply to an error trial. #' #' \itemize{ #' \item \code{prune_nothing} removes no errors (default). #' \item \code{error_replace_blockmeanplus} replaces error trial reaction times with the block mean, plus an arbitrary extra quantity.
#' If used, the following additional arguments are required: #' \itemize{ #' \item \code{blockvar} - Quoted name of the block variable (mandatory) #' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory) #' \item \code{errorbonus} - Amount to add to the reaction time of error trials. Default is 0.6 (recommended by \code{Greenwald, Nosek, & Banaji, 2003}) #' } #' \item \code{error_prune_dropcases} removes errors and drops participants if they have more errors than a given percentage. The following arguments are available: #' \itemize{ #' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory) #' \item \code{maxerrors} - participants with a higher percentage of errors are excluded from the dataset. Default is .15. #' } #' } #' @param plot Plot the bias scores and their confidence intervals after computation is complete. This gives a good overview of the data. #' @param include.raw logical indicating whether raw split-half data should be included in the output object. #' @param parallel If TRUE (default), will use parallel computing to compute results faster. #' If a doParallel backend has not been registered beforehand, #' this function will register a cluster and stop it after finishing, which takes some extra time. #' @param ... Other arguments, to be passed on to the algorithm or outlier rejection functions (see arguments above) #' #' #' @return A list, containing bootstrapped bias scores, their variance, bootstrapped 95 percent confidence intervals, #' the number of iterations, and a matrix of bias scores for each iteration. #' #' @author Sercan Kahveci #' @examples #' # Compute 10 bootstrapped AAT scores. #' boot<-aat_bootstrap(ds=erotica[erotica$is_irrelevant==0,], subjvar="subject", #' pullvar="is_pull", targetvar="is_target",rtvar="RT", #' iters=10,algorithm="aat_doublemediandiff", #' trialdropfunc="trial_prune_3SD", #' plot=FALSE, parallel=FALSE) #' plot(boot) #' print(boot) #' #' @export aat_bootstrap<-function(ds,subjvar,pullvar,targetvar=NULL,rtvar,iters, algorithm=c("aat_doublemeandiff","aat_doublemediandiff", "aat_dscore","aat_dscore_multiblock", "aat_regression","aat_standardregression", "aat_singlemeandiff","aat_singlemediandiff"), trialdropfunc=c("prune_nothing","trial_prune_3SD","trial_prune_3MAD", "trial_prune_SD_dropcases","trial_recode_SD", "trial_prune_percent_subject","trial_prune_percent_sample", "trial_prune_grubbs"), errortrialfunc=c("prune_nothing","error_replace_blockmeanplus","error_prune_dropcases"), plot=TRUE,include.raw=FALSE,parallel=TRUE,...){ packs<-c("magrittr","dplyr","AATtools") #Handle arguments args<-list(...) 
algorithm<-ifelse(is.function(algorithm),deparse(substitute(algorithm)),match.arg(algorithm)) if(!(algorithm %in% c("aat_singlemeandiff","aat_singlemediandiff","aat_regression","aat_standardregression")) & is.null(targetvar)){ stop("Argument targetvar missing but required for algorithm!") } trialdropfunc<-ifelse(is.function(trialdropfunc),deparse(substitute(trialdropfunc)),match.arg(trialdropfunc)) errortrialfunc<-ifelse(is.function(errortrialfunc),deparse(substitute(errortrialfunc)),match.arg(errortrialfunc)) errorpenalizefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus",errortrialfunc,"prune_nothing") errorremovefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus","prune_nothing",errortrialfunc) if(errortrialfunc=="error_replace_blockmeanplus"){ stopifnot(!is.null(args$blockvar),!is.null(args$errorvar)) if(is.null(args$errorbonus)){ args$errorbonus<- 0.6 } if(is.null(args$blockvar)){ args$blockvar<- 0 } if(is.null(args$errorvar)){ args$errorvar<- 0 } } stopifnot(!(algorithm=="aat_dscore_multiblock" & is.null(args$blockvar))) if(algorithm %in% c("aat_regression","aat_standardregression")){ if(!("formula" %in% names(args))){ args$formula<-as.formula(paste0(rtvar,"~",pullvar,"*",targetvar)) warning("No formula provided. Defaulting to formula ",form2char(args$formula)) }else if(is.character(args$formula)){ args$formula<-as.formula(args$formula) } if(!("aatterm" %in% names(args))){ args$aatterm<-paste0(pullvar,":",targetvar) warning("No AAT-term provided. Defaulting to AAT-term ",args$aatterm) } } ds<-do.call(aat_preparedata,c(list(ds=ds,subjvar=subjvar,pullvar=pullvar,targetvar=targetvar,rtvar=rtvar),args)) %>% mutate(key=1) #Prepare the cluster if(parallel){ `%dofunc%` <- `%dopar%` hasCluster<-getDoParRegistered() if(!hasCluster){ cluster<-makeCluster(getOption("AATtools.workers")) registerDoParallel(cluster) on.exit(unregisterDoParallel(cluster)) } }else{ `%dofunc%` <- `%do%` } #bootstrap loop results<- foreach(iter = seq_len(iters), .packages=packs, .combine=cbind) %dofunc% { #Split data # iterds<-ds %>% group_by(!!sym(subjvar), !!sym(pullvar), !!sym(targetvar)) %>% # sample_n(size=n(),replace=TRUE) %>% ungroup() iterds<-ds[unlist(lapply(split(x=seq_len(nrow(ds)),f=ds[c(subjvar,pullvar,targetvar)]), FUN=function(x){ x[sample.int(length(x),replace=T)] })),] #Handle error removal iterds<-do.call(errorremovefunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar))) #Handle outlying trials iterds<-do.call(trialdropfunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar))) #Handle error penalization iterds<-do.call(errorpenalizefunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar))) abds<-do.call(algorithm,c(list(ds=iterds,subjvar=subjvar,pullvar=pullvar, targetvar=targetvar,rtvar=rtvar),args)) #colnames(abds)<-c(subjvar,paste0("iter", formatC(iter, width = nchar(iters), format = "d", flag = "0"))) outvar<-abds$ab names(outvar)<-abds[[subjvar]] outvar } #results<-results[!is.na(rownames(results)),] statset<-data.frame(ppidx=rownames(results), bias=rowMeans(results,na.rm=TRUE), var=apply(results,MARGIN = 1,FUN=var,na.rm=TRUE), lowerci=apply(results,MARGIN=1,FUN=function(x){quantile(x,0.025,na.rm=TRUE)}), upperci=apply(results,MARGIN=1,FUN=function(x){quantile(x,0.975,na.rm=TRUE)}), stringsAsFactors=F) statset$ci<-statset$upperci-statset$lowerci #q-reliability bv<-var(statset$bias,na.rm=TRUE) wv<-mean(statset$var,na.rm=TRUE) q<-1-wv/bv output<-list(bias=statset, reliability=q, parameters=c(list(ds=ds, subjvar=subjvar, pullvar=pullvar, targetvar=targetvar, rtvar=rtvar, iters=iters, 
algorithm=algorithm, trialdropfunc=trialdropfunc, errortrialfunc=errortrialfunc),args)) %>% structure(class = "aat_bootstrap") if(include.raw){ output$iterdata<-results } if(plot){ plot(output) } return(output) } #' @export #' @rdname aat_bootstrap #' @param x An \code{aat_bootstrap} object. print.aat_bootstrap<-function(x,...){ cat("Bootstrapped bias scores and confidence intervals", "\nMean bias score: ", mean(x$bias$bias,na.rm=TRUE), "\nMean confidence interval: ",mean(x$bias$ci,na.rm=TRUE), "\nreliability: q = ",x$reliability, "\nNumber of iterations: ",x$parameters$iters,sep="") } #' @export #' @rdname aat_bootstrap #' @param x An \code{aat_bootstrap} object. plot.aat_bootstrap <- function(x,...){ statset<-x$bias statset<-statset[!is.na(statset$bias) & !is.na(statset$upperci) & !is.na(statset$lowerci),] rank<-rank(statset$bias) wideness<-max(statset$upperci) - min(statset$lowerci) plot(x=statset$bias,y=rank,xlim=c(min(statset$lowerci)-0.01*wideness,max(statset$upperci)+0.01*wideness), xlab="Bias score",main=paste0("Individual bias scores with 95%CI", "\nEstimated reliability: q = ",x$reliability)) segments(x0=statset$lowerci,x1=statset$bias-0.005*wideness,y0=rank,y1=rank) segments(x0=statset$bias+0.005*wideness,x1=statset$upperci,y0=rank,y1=rank) abline(v=0) #text(x=statset$bias,y=statset$rownr,labels=statset$ppidx,cex=0.5) }
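# Illustrative follow-up sketch, assuming the erotica dataset shipped with AATtools
# (as used in the documentation examples above). The returned object stores
# per-participant summaries in $bias and the q reliability estimate in $reliability.
boot <- aat_bootstrap(ds = erotica[erotica$is_irrelevant == 0, ], subjvar = "subject",
                      pullvar = "is_pull", targetvar = "is_target", rtvar = "RT",
                      iters = 200, algorithm = "aat_doublemeandiff",
                      trialdropfunc = "trial_prune_3SD",
                      plot = FALSE, parallel = FALSE)
# Participants whose bootstrapped 95% CI excludes zero
sig <- boot$bias[which(boot$bias$lowerci > 0 | boot$bias$upperci < 0), ]
head(sig)
boot$reliability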
/scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_bootstrap.R
#' @title Compute simple AAT scores #' @description Compute simple AAT scores, with optional outlier exclusion and error trial recoding. #' @param ds a long-format data.frame #' @param subjvar column name of subject variable #' @param pullvar column name of pull/push indicator variable, must be numeric or logical (where pull is 1 or TRUE) #' @param targetvar column name of target stimulus indicator, must be numeric or logical (where target is 1 or TRUE) #' @param rtvar column name of reaction time variable #' @param algorithm Function (without brackets or quotes) to be used to compute AAT scores. See \link{Algorithms} for a list of usable algorithms. #' @param trialdropfunc Function (without brackets or quotes) to be used to exclude outlying trials in each half. #' The way you handle outliers for the reliability computation should mimic the way you do it in your regular analyses. #' It is recommended to exclude outlying trials when computing AAT scores using the mean double-difference scores and regression scoring approaches, #' but not when using d-scores or median double-difference scores. #' \itemize{ #' \item \code{prune_nothing} excludes no trials (default) #' \item \code{trial_prune_3SD} excludes trials deviating more than 3SD from the mean per participant. #' \item \code{trial_prune_grubbs} applies a Grubbs' test to the data, removing one outlier at a time until the test is no longer significant. #' \item \code{trial_prune_SD_dropcases} removes trials deviating more than a specific number of standard deviations from the participant's mean, #' and removes participants with an excessive percentage of outliers. #' Required arguments: #' \itemize{ #' \item \code{trialsd} - trials deviating more than \code{trialsd} standard deviations from the participant's mean are excluded (optional; default is 3) #' \item \code{maxoutliers} - participants with a higher percentage of outliers are removed from the data. (optional; default is .15) #' } #' \item \code{trial_recode_SD} recodes outlying reaction times to the nearest non-outlying value, #' with outliers defined as reaction times deviating more than a certain number of standard deviations from the participant's mean. Required argument: #' \itemize{ #' \item \code{trialsd} - trials deviating more than this many standard deviations from the mean are classified as outliers. #' } #' \item \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample} remove trials below and/or above certain percentiles, #' on a subject-by-subject basis or sample-wide, respectively. The following arguments are available: #' \itemize{ #' \item \code{lowerpercent} and \code{uppperpercent} (optional; defaults are .01 and .99). #' } #' } #' @param errortrialfunc Function (without brackets or quotes) to apply to an error trial. #' #' \itemize{ #' \item \code{prune_nothing} removes no errors (default). #' \item \code{error_replace_blockmeanplus} replaces error trial reaction times with the block mean, plus an arbitrary extra quantity. #' If used, the following additional arguments are required: #' \itemize{ #' \item \code{blockvar} - Quoted name of the block variable (mandatory) #' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory) #' \item \code{errorbonus} - Amount to add to the reaction time of error trials.
Default is 0.6 (recommended by \code{Greenwald, Nosek, & Banaji, 2003}) #' } #' \item \code{error_prune_dropcases} removes errors and drops participants if they have more errors than a given percentage. The following arguments are available: #' \itemize{ #' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory) #' \item \code{maxerrors} - participants with a higher percentage of errors are excluded from the dataset. Default is .15. #' } #' } #' @param ... Other arguments, to be passed on to the algorithm or outlier rejection functions (see arguments above) #' #' @export #' #' @examples #' #Compute the correlation between relevant-feature and irrelevant-feature AAT scores #' ds<-erotica[erotica$correct==1,] #' relevant <- aat_compute(ds=ds[ds$is_irrelevant==0,], #' pullvar="is_pull",targetvar="is_target", #' rtvar="RT",subjvar="subject", #' trialdropfunc="trial_prune_3SD", #' algorithm="aat_doublemediandiff") #' #' irrelevant <- aat_compute(ds=ds[ds$is_irrelevant==1,], #' pullvar="is_pull",targetvar="is_target", #' rtvar="RT",subjvar="subject", #' trialdropfunc="trial_prune_3SD", #' algorithm="aat_doublemediandiff") #' #' comparison.df <- merge(relevant, irrelevant, by = "subject") #' cor(comparison.df$ab.x, comparison.df$ab.y) #' # 0.1145726 aat_compute<-function(ds,subjvar,pullvar,targetvar=NULL,rtvar, algorithm=c("aat_doublemeandiff","aat_doublemediandiff", "aat_dscore","aat_dscore_multiblock", "aat_regression","aat_standardregression", "aat_doublemeanquotient","aat_doublemedianquotient", "aat_singlemeandiff","aat_singlemediandiff"), trialdropfunc=c("prune_nothing","trial_prune_3SD","trial_prune_3MAD", "trial_prune_SD_dropcases","trial_recode_SD", "trial_prune_percent_subject","trial_prune_percent_sample", "trial_prune_grubbs"), errortrialfunc=c("prune_nothing","error_replace_blockmeanplus","error_prune_dropcases"), ...){ #Handle arguments args<-list(...) algorithm<-ifelse(is.function(algorithm),deparse(substitute(algorithm)),match.arg(algorithm)) if(!(algorithm %in% c("aat_singlemeandiff","aat_singlemediandiff","aat_regression","aat_standardregression")) & is.null(targetvar)){ stop("Argument targetvar missing but required for algorithm!") } trialdropfunc<-ifelse(is.function(trialdropfunc),deparse(substitute(trialdropfunc)),match.arg(trialdropfunc)) errortrialfunc<-ifelse(is.function(errortrialfunc),deparse(substitute(errortrialfunc)),match.arg(errortrialfunc)) errorpenalizefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus",errortrialfunc,"prune_nothing") errorremovefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus","prune_nothing",errortrialfunc) if(errortrialfunc=="error_replace_blockmeanplus"){ stopifnot(!is.null(args$blockvar),!is.null(args$errorvar)) if(is.null(args$errorbonus)){ args$errorbonus<- 0.6 } if(is.null(args$blockvar)){ args$blockvar<- 0 } if(is.null(args$errorvar)){ args$errorvar<- 0 } } stopifnot(!(algorithm=="aat_dscore_multiblock" & is.null(args$blockvar))) if(algorithm %in% c("aat_regression","aat_standardregression")){ if(!("formula" %in% names(args))){ args$formula<-as.formula(paste0(rtvar,"~",pullvar,"*",targetvar)) warning("No formula provided. Defaulting to formula ",form2char(args$formula)) }else if(is.character(args$formula)){ args$formula<-as.formula(args$formula) } if(!("aatterm" %in% names(args))){ args$aatterm<-paste0(pullvar,":",targetvar) warning("No AAT-term provided. 
Defaulting to AAT-term ",args$aatterm) } } ds<-do.call(aat_preparedata,c(list(ds=ds,subjvar=subjvar,pullvar=pullvar,targetvar=targetvar,rtvar=rtvar),args)) %>% mutate(key=1) #Handle error removal ds<-do.call(errorremovefunc,c(args,list(ds=ds,subjvar=subjvar,rtvar=rtvar))) #Handle outlying trials ds<-do.call(trialdropfunc,c(args,list(ds=ds,subjvar=subjvar,rtvar=rtvar))) #Handle error penalization ds<-do.call(errorpenalizefunc,c(args,list(ds=ds,subjvar=subjvar,rtvar=rtvar))) abds<-do.call(algorithm,c(list(ds=ds,subjvar=subjvar,pullvar=pullvar, targetvar=targetvar,rtvar=rtvar),args)) abds <- merge(x=abds,by=subjvar,all=TRUE,y=ds %>% group_by(!!sym(subjvar)) %>% summarise(trials=n())) return(abds) }
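# Hypothetical illustration of the error-handling arguments documented above, again
# using the erotica example data. The derived error column is for illustration only:
# errors must be coded as 1/TRUE, and erotica codes correct trials as 1.
ds <- erotica[erotica$is_irrelevant == 0, ]
ds$error <- 1 - ds$correct
scores <- aat_compute(ds = ds, subjvar = "subject",
                      pullvar = "is_pull", targetvar = "is_target", rtvar = "RT",
                      algorithm = "aat_doublemeandiff",
                      trialdropfunc = "trial_prune_3SD",
                      errortrialfunc = "error_prune_dropcases",
                      errorvar = "error", maxerrors = .15)
head(scores) # one bias score (ab) plus a trial count per participant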
/scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_compute.R
#' Compute stimulus-specific bias scores #' Computes mean single-difference scores (push - pull) for each stimulus. #' #' @param ds the \code{data.frame} to use #' @param subjvar Name of the subject-identifying variable #' @param stimvar Name of the stimulus-identifying variable #' @param pullvar Name of the movement-direction identifying variable #' @param targetvar Optional. Name of the stimulus-category identifying variable #' @param rtvar Name of the reaction-time identifying variable #' @param aggfunc The function with which to aggregate the RTs before computing difference scores. Defaults to mean but can be changed to median. #' @param iters If there are missing values (which is almost inevitable) then #' multiple imputation will be used to complete the covariance matrix - this argument sets #' the number of multiple imputations to be used. #' #' @return Exports a \code{list} containing #' a \code{data.frame} with stimulus-specific bias scores, indicated in the column names, #' a covariance matrix of that same data, and #' a \code{data.frame} indicating to which stimulus category each stimulus belongs. #' @export #' #' @examples #' ds<-aat_simulate(biasfx_jitter=40,nstims=16) #' ds$stim<-paste0(ds$stim,"-",ds$is_target) #' aat_stimulusscores(ds,"subj","stim","is_pull","is_target","rt") aat_stimulusscores<-function(ds,subjvar,stimvar,pullvar,targetvar=NULL,rtvar,aggfunc=c("mean","median"),iters=5){ ds<-aat_preparedata(ds,subjvar=subjvar,pullvar=pullvar,stimvar=stimvar,targetvar=targetvar,rtvar=rtvar) pps<-unique(ds[[subjvar]]) stims<-unique(ds[[stimvar]]) if(!is.null(targetvar)){ stimcats<-distinct(ds[c(stimvar,targetvar)]) %>% setNames(c("stim","cat")) }else{ stimcats<-data.frame(stim=stims,cat=0,stringsAsFactors=F) } aggfunc<-match.arg(aggfunc) if(aggfunc=="median"){ scorefunc<-aat_singlemediandiff }else{ scorefunc<-aat_singlemeandiff } biases<-list() for(u in seq_along(pps)){ biases[[u]]<- do.call(scorefunc,list(ds=ds[ds[[subjvar]]==pps[u],], subjvar=stimvar,pullvar=pullvar,rtvar=rtvar)) %>% setNames(c(stimvar,paste0("",pps[u]))) #subject- } biasset<-Reduce(function(x,y){merge(x,y,by=stimvar,all=T)},x=biases[-1],init=biases[1]) biasmat<-t(as.matrix(biasset[,-1])) colnames(biasmat)<-biasset[[1]] unmissing<-covEM(biasmat,iters) covmat<-unmissing$sigma rownames(covmat)<-colnames(covmat) dataset<-unmissing$data out<-list(data=dataset,covmat=covmat,stimcats=stimcats) return(out) } #' Compute a dataset's reliability from its covariance matrix #' #' This function computes mean single-difference scores (push minus pull) for individual stimuli, #' and computes the reliability from that information. #' Missing values are dealt with using multiple imputation. #' #' When only one stimulus category is indicated, one of the commonly known reliability algorithms #' provided with the \code{algorithm} argument is used. #' When two stimulus categories are indicated, this function uses Lord's (1963) algorithm to #' compute the reliability of a double mean difference score, using the algorithms in \code{algorithm} #' to estimate the reliability of individual stimulus categories. #' #' When one wants to compute the reliability of a double median difference score or D-score, #' \code{aat_splithalf()} is recommended instead. #' #' @param ds the \code{data.frame} to use #' @param subjvar Name of the subject-identifying variable #' @param stimvar Name of the stimulus-identifying variable #' @param pullvar Name of the movement-direction identifying variable #' @param targetvar Optional.
Name of the stimulus-category identifying variable #' @param rtvar Name of the reaction-time identifying variable #' @param aggfunc The function with which to aggregate the RTs before computing difference scores. Defaults to mean but can be changed to median. #' @param algorithm The reliability formula to use. Defaults to Cronbach's alpha, but Guttman's Lambda-2 is recommended instead. #' @param iters If there are missing values (which is almost inevitable) then #' multiple imputation will be used to complete the covariance matrix - this option sets #' the number of multiple imputations to be used. #' #' @return Returns an \code{aat_covreliability} object containing the reliability value #' as well as the dataset and covariance matrix with replaced missing values. When #' the argument \code{targetvar} is provided, the output also contains the reliability of the #' individual stimulus categories and their intercorrelation. #' #' @export #' #' @references #' Lord, F.Y. (1963), "Elementary Models for Measuring Change", #' in Problems in Measuring Change, C.W. Harris, ed.. Madison. Wisconsin: #' University of Wisconsin. #' #' @examples #' #We generate a dataset with 16 stimuli in each category #' ds<-aat_simulate(biasfx_jitter=40,nstims=16) #' ds$stim<-paste0(ds$stim,"-",ds$is_target) #' #' # If Lord's formula and #' # bootstrapped splithalf measure something similar, #' # then the outcomes should be close to each other. #' aat_covreliability(ds=ds,subjvar="subj",stimvar="stim",pullvar="is_pull", #' targetvar="is_target",rtvar="rt") #' aat_splithalf(ds=ds,subjvar="subj",pullvar="is_pull",targetvar="is_target",rtvar="rt", #' algorithm="aat_doublemeandiff",iters=100,plot=FALSE) #' #' #Testing reliability for single-difference scores #' ds<-ds[ds$is_target==1,] #' aat_covreliability(ds=ds,subjvar="subj",stimvar="stim",pullvar="is_pull",rtvar="rt") aat_covreliability<-function(ds,subjvar,stimvar,pullvar,targetvar=NULL,rtvar,aggfunc=c("mean","median"), algorithm=c("calpha","lambda2","lambda4"),iters=5){ algorithm<-match.arg(algorithm) aggfunc<-match.arg(aggfunc) sc<-aat_stimulusscores(ds,subjvar=subjvar,stimvar=stimvar,pullvar=pullvar,targetvar=targetvar, rtvar=rtvar,aggfunc=aggfunc,iters=iters) if(!is.null(targetvar)){ dia<-diag(sc$covmat) firstcat <-which(names(dia) %in% sc$stimcats$stim[sc$stimcats$cat==0]) secondcat<-which(names(dia) %in% sc$stimcats$stim[sc$stimcats$cat==1]) n1<-length(firstcat ) n2<-length(secondcat) r11<-do.call(algorithm,list(covmat=sc$covmat[firstcat, firstcat ])) r22<-do.call(algorithm,list(covmat=sc$covmat[secondcat,secondcat])) # r12<-cor(x=rowSums(sc$dataset[,firstcat]), # y=rowSums(sc$dataset[,secondcat])) r12<-sum(sc$covmat[firstcat,secondcat])/sqrt(sum(sc$covmat[firstcat,firstcat])*sum(sc$covmat[secondcat,secondcat])) s1<-sqrt(sum(sc$covmat[firstcat,firstcat]))/n1 s2<-sqrt(sum(sc$covmat[secondcat,secondcat]))/n2 rel<-(s1^2*r11+s2^2*r22-2*s1*s2*r12)/ (s1^2+s2^2-2*s1*s2*r12) }else{ rel<-do.call(algorithm,list(covmat=sc$covmat)) } out<-structure(list(rel=rel,data=sc$data,covmat=sc$covmat,algorithm=algorithm), class="aat_covreliability") if(!is.null(targetvar)){ out$components<-list(r11=r11,r22=r22,r12=r12,n1=n1,n2=n2,s1=s1,s2=s2) } return(out) } #' @export #' @describeIn aat_covreliability Print an \code{aat_covreliability} object print.aat_covreliability<-function(x,...){ cat(sep="","r = ",mf(x$rel), "\nBased on ",ncol(x$data)," valid stimuli, ", nrow(x$data)," valid participants, and the ", x$algorithm," algorithm.\n") if(any("components"==names(x))){ cat(sep="", 
"Reliability of stimulus category 1: r = ",mf(x$components$r11),", n = ",x$components$n1,", sd = ",mf(x$components$s1),"\n", "Reliability of stimulus category 2: r = ",mf(x$components$r22),", n = ",x$components$n2,", sd = ",mf(x$components$s2),"\n", "Category intercorrelation: r = ",mf(x$components$r12),"\n") } } #' @rdname aat_covreliability #' @param holdout What should be removed from the data for computation of jackknife statistics? #' "both" computes reliability when stimuli and participants are separately removed, #' while "cross" computes reliability when stimuli and participants are simultaneously removed. #' @description This function computes the reliability when stimuli and participants are removed, #' allowing for the diagnosis of potential sources of unreliability within the data. #' @export #' @return \code{aat_covreliability_jackknife()} returns an \code{aat_covreliability_jackknife} object, #' containing jackknife reliability statistics. If argument \code{holdout} was set to "cross", #' then these statistics are provided in a matrix where rows represent participants and columns represent stimuli. #' Otherwise, they are provided in \code{data.frame}s where the stimulus or participant is represented in a column #' alongside the associated reliability value. #' @examples #' hh<-aat_simulate() #' test<-aat_covreliability_jackknife(ds=hh,subjvar="subj",stimvar="stim",pullvar="is_pull", #' targetvar="is_target",rtvar="rt",holdout="cross") #' print(test) #' plot(test) aat_covreliability_jackknife<-function(ds,subjvar,stimvar,pullvar,targetvar=NULL,rtvar, algorithm=c("calpha","lambda2","lambda4"),iters=5, holdout=c("both","participant","stimulus","cross")){ algorithm<-match.arg(algorithm) sc<-aat_stimulusscores(ds,subjvar=subjvar,stimvar=stimvar,pullvar=pullvar,targetvar=targetvar, rtvar=rtvar,iters=iters) cat1<-sc$stimcats$stim[sc$stimcats$cat==0] cat2<-sc$stimcats$stim[sc$stimcats$cat==1] #declare reliability computation functions if(!is.null(targetvar)){ relfinder<-function(psc){ dia<-diag(psc) firstcat <-which(names(dia) %in% cat1) secondcat<-which(names(dia) %in% cat2) n1<-length(firstcat) n2<-length(secondcat) r11<-do.call(algorithm,list(covmat=psc[firstcat, firstcat ])) r22<-do.call(algorithm,list(covmat=psc[secondcat,secondcat])) r12<-sum(psc[firstcat,secondcat])/sqrt(sum(psc[firstcat,firstcat])*sum(psc[secondcat,secondcat])) s1<-sqrt(sum(psc[firstcat,firstcat]))/n1 s2<-sqrt(sum(psc[secondcat,secondcat]))/n2 rel<-(s1^2*r11+s2^2*r22-2*s1*s2*r12)/ (s1^2+s2^2-2*s1*s2*r12) return(rel) } }else{ relfinder<-function(psc){ rel<-do.call(algorithm,list(covmat=psc)) return(rel) } } output<-list(rel=relfinder(sc$covmat)) pps<-sort(unique(ds[[subjvar]])) stims<-sort(unique(ds[[stimvar]])) if(any(c("both","participant")==holdout)){ # Run jackknife over participants ppset<-data.frame(pp=pps,rel=NA) for(i in seq_along(ppset$pp)){ ppset$rel[i]<-relfinder(cov(sc$data[rownames(sc$data)!=ppset$pp[i],])) } output$pps<-ppset } if(any(c("both","stimulus")==holdout)){ #Run jackknife over stimuli stimset<-data.frame(stim=stims,rel=NA) for(i in seq_along(stimset$stim)){ stimset$rel[i]<-relfinder(sc$covmat[rownames(sc$covmat) != stimset$stim[i], colnames(sc$covmat) != stimset$stim[i]]) } output$stims<-stimset } if("cross"==holdout){ #run jackknife over stimuli and participants simultaneously relmat<-matrix(NA,nrow=length(pps),ncol=length(stims)) rownames(relmat)<-pps colnames(relmat)<-stims for(i in seq_len(nrow(relmat))){ itercov<-cov(sc$data[rownames(sc$data) != rownames(relmat)[i],]) for(j in 
seq_len(ncol(relmat))){ relmat[i,j]<-relfinder(itercov[rownames(itercov) != colnames(relmat)[j],colnames(itercov) != colnames(relmat)[j]]) } } output$cross<-relmat } output<-structure(c(output,list(data=sc$data,covmat=sc$covmat,algorithm=algorithm,holdout=holdout)), class="aat_covreliability_jackknife") return(output) } #' @export #' @describeIn aat_covreliability Print an \code{aat_covreliability_jackknife} object #' @param x Object to be printed #' @param ... Ignored print.aat_covreliability_jackknife<-function(x, ...){ cat("Reliability: r = ",mf(x$rel),"\n",sep="") if(any("pps"==names(x))){ cmax<-which.max(x$pps$rel) cat("Maximum achieveable reliability is with removal of participant ",as.character(x$pps$pp[cmax]), ": r = ",mf(x$pps$rel[cmax]),"\n",sep="") } if(any("stims"==names(x))){ cmax<-which.max(x$stims$rel) cat("Maximum achieveable reliability is with removal of stimulus ",as.character(x$stims$stim[cmax]), ": r = ",mf(x$stims$rel[cmax]),"\n",sep="") } if(any("cross"==names(x))){ cmax<-which(x$cross==max(x$cross),arr.ind=T) cat("Maximum achieveable reliability is with removal of stimulus ",colnames(x$cross)[cmax[2]], " and participant ",rownames(x$cross)[cmax[1]], ": r = ",mf(x$cross[cmax[1],cmax[2]]),"\n",sep="") } } #' @export #' @describeIn aat_covreliability Plot an \code{aat_covreliability_jackknife} object plot.aat_covreliability_jackknife<-function(x, ...){ prev.mfrow<-par("mfrow") ncols<-sum(c("pps", "stims","cross") %in% names(x)) par(mfrow=c(1,ncols)) if(any("pps"==names(x))){ ord<-order(x$pps$rel) plot(range(x$pps$rel), range(ord), bty = 'n', type = 'n',main="Participants", xlab="Jackknife reliability",ylab="Rank") abline(v=x$rel,col="#00000055") text(x=x$pps$rel[ord],y=seq_along(x$pps$rel),label=as.character(x$pps$pp[ord]),cex=.7) } if(any("stims"==names(x))){ ord<-order(x$stims$rel) plot(range(x$stims$rel), range(ord), bty = 'n', type = 'n',main="Stimuli", xlab="Jackknife reliability",ylab="Rank") abline(v=x$rel,col="#00000055") text(x=x$stims$rel[ord],y=seq_along(x$stims$rel),label=as.character(x$stims$stim[ord]),cex=.7) } if(any("cross"==names(x))){ image(t(x$cross), xlab="Stimuli",ylab="Participants",axes=FALSE,main="Jackknife reliability") axis(2,at=(seq_len(nrow(x$cross))-1)/(nrow(x$cross)-1),labels=rownames(x$cross)) axis(1,at=(seq_len(ncol(x$cross))-1)/(ncol(x$cross)-1),labels=colnames(x$cross)) } par(mfrow=prev.mfrow) }
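# Illustrative sketch: use the jackknife output documented above to check how much
# reliability would improve if the single worst-performing stimulus were dropped.
# Dataset and variable names follow the aat_simulate() examples above.
hh <- aat_simulate()
jk <- aat_covreliability_jackknife(ds = hh, subjvar = "subj", stimvar = "stim",
                                   pullvar = "is_pull", targetvar = "is_target",
                                   rtvar = "rt", holdout = "stimulus")
jk$rel                                          # reliability with all stimuli included
worst <- jk$stims$stim[which.max(jk$stims$rel)] # stimulus whose removal helps most
max(jk$stims$rel)                               # reliability after removing that stimulus
# Refit without the flagged stimulus to confirm
aat_covreliability(ds = hh[hh$stim != worst, ], subjvar = "subj", stimvar = "stim",
                   pullvar = "is_pull", targetvar = "is_target", rtvar = "rt")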
/scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_covreliability.R
#' Simulate AAT datasets and predict parameters #' #' \code{aat_simulate()} generates approach-avoidance task datasets. #' #' @param npps Number of participants #' @param nstims Number of stimuli #' @param stimreps Number of repetitions of each stimulus within each group #' (i.e. within approach target, avoid target, approach control, avoid control) #' @param meanrt Mean sample reaction time #' @param meanrt_jitter Extent by which participants' mean RTs #' deviate from mean sample RT. #' @param sdrt Standard deviation of samplewide RTs, #' ignoring effects of movement, stimulus, and approach bias. #' In essence, this represents the amount of pure noise present in the data. #' @param sdrt_jitter Extent by which standard deviations of individual participants' RTs #' are larger or smaller than the samplewide SD. #' @param pullfx size of the effect of approach-versus-avoidance, in milliseconds #' @param pullfx_jitter Individual variation in the effect of approach-versus-avoidance #' @param stimfx size of the effect of stimulus category, in milliseconds #' @param stimfx_jitter Individual variation in the effect of stimulus category #' @param biasfx Size of the approach bias effect, in milliseconds #' @param biasfx_jitter Individual variation in the approach bias effect #' @param empirical If TRUE, then effect sizes and standard deviations will be exact #' @param ... Ignored. #' #' @return \code{aat_simulate()} returns a \code{data.frame} with the following columns: #' subj (participant ID), stim (stimulus number), rep (stimulus repetition number), #' is_pull (0 = avoid, 1 = approach), is_target (0 = control stimulus, 1 = target stimulus), #' meanrt (participant's mean RT), sdrt (participant's residual standard deviation), #' pullfx (participant approach-avoidance effect size in ms), #' stimfx (participant stimulus category effect size in ms), #' biasfx (participant approach bias effect size in ms), #' and rt (trial reaction time). #' Additionally, the data.frame has the attribute \code{population_reliability} which represents #' the expected reliability of the data given the provided parameters. #' @details Defaults of \code{aat_simulate()} are based on #' Kahveci, Van Alebeek, Berking, & Blechert (2021). 
#' @export #' #' @examples #' ts<- aat_simulate(pullfx = 50, stimfx = 10, biasfx = 100) #' mod<-lm(rt~is_pull*is_target,data=ts) #' coef(mod) #these should be somewhat close to the provided coefficients #' #' # Here's how one might derive the parameters used in this function from a real dataset #' \dontrun{ #' mod<-lmer(decisiontime ~ is_pull * is_food + (is_pull * is_food | subjectid),data=dsa) #' fixef(mod) # from here, all the fx and mean RTs are derived #' ranef(mod)$subjectid %>% apply(2,sd) #from here, all the fx jitters are derived #' dsa %>% group_by(subjectid) %>% summarise(sd=sd(resid)) %>% #' summarise(m=mean(sd),s=sd(sd)) # from here, sdrt_jitter is derived #' } aat_simulate<-function(npps=36,nstims=16,stimreps=4, meanrt=632,meanrt_jitter=90.1, sdrt=158,sdrt_jitter=49.9, pullfx=-39.2,pullfx_jitter=40.5, stimfx=-30.9,stimfx_jitter=32.5, biasfx= 39.0,biasfx_jitter=60.1, empirical=FALSE, ...){ cond.scale<-function(x,emp){ if(emp){vec.scale(x)}else{x} } #set properties subjprops<-data.frame(subj=1:npps, meanrt=meanrt+meanrt_jitter*cond.scale(rnorm(npps),empirical), sdrt=sdrt+sdrt_jitter*cond.scale(rgamma2(n=npps,shape=3),empirical), pullfx=pullfx+pullfx_jitter*cond.scale(rnorm(npps),empirical), stimfx=stimfx+stimfx_jitter*cond.scale(rnorm(npps),empirical), biasfx=biasfx+biasfx_jitter*cond.scale(rnorm(npps),empirical)) #initialize dataset ds<-expand.grid(subj=1:npps,stim=1:nstims,rep=1:stimreps,is_pull=0:1,is_target=0:1) ds<-merge(ds,subjprops,by="subj",all.x=T) #fix stimulus names ds$stim<-paste0(ds$is_target,"-",ds$stim) #Generate RTs gshape<-3 gscale<-1 ds$rt<-rgamma2(n=nrow(ds),shape=gshape) if(empirical){ ds$rt<-ave(ds$rt,ds[c("subj","is_pull","is_target")],FUN=vec.scale) } ds$rt<-ds$rt * ds$sdrt + ds$meanrt + (ds$is_pull-.5)*ds$pullfx + (ds$is_target-.5) * ds$stimfx + ((ds$is_pull==ds$is_target)-.5)*-.5 * ds$biasfx #compute true "population" reliability (Kahveci's Q) # alt_q <- (biasfx_jitter^2)/(biasfx_jitter^2 + sdrt^2 /(nstims*stimreps) *4) # attr(ds,"population_reliability")<-alt_q #output return(ds) } rgamma2<-function(n,shape,m=0,s=1){ m + (rgamma(n=n,shape=shape,scale=1) - shape*1) *s/(sqrt(shape)*1) } aat_simulate_old<-function(npps=40,nstims=32,stimreps=2, meanrt=743,meanrt_jitter=66, sdrt=133,sdrt_jitter=38, pullfx=25,pullfx_jitter=40, stimfx=10,stimfx_jitter=35, biasfx=35,biasfx_jitter=75){ #set properties subjprops<-data.frame(subj=1:npps, meanrt=meanrt+meanrt_jitter*rnorm(npps), sdrt=sdrt+sdrt_jitter*rgamma2(n=npps,shape=3), pullfx=pullfx+pullfx_jitter*rnorm(npps), stimfx=stimfx+stimfx_jitter*rnorm(npps), biasfx=biasfx+biasfx_jitter*rnorm(npps)) #initialize dataset ds<-expand.grid(subj=1:npps,stim=1:nstims,rep=1:stimreps,is_pull=0:1,is_target=0:1) ds<-merge(ds,subjprops,by="subj",all.x=T) #Generate RTs gshape<-3 gscale<-1 ds$rt<-(rgamma(n=nrow(ds),shape=gshape,scale=gscale)-gshape*gscale) * ds$sdrt/(sqrt(gshape)*gscale) + ds$meanrt + (ds$is_pull-.5)*ds$pullfx + (ds$is_target-.5)*ds$stimfx + (ds$is_pull*ds$is_target-.25)*ds$biasfx #compute true "population" reliability (Kahveci's Q) alt_q <- (biasfx_jitter^2)/(biasfx_jitter^2 + sdrt^2 /(nstims*stimreps) *4) attr(ds,"population_reliability")<-alt_q #output return(ds) } #' #' \code{aat_simulate2} offers defaults taken from different studies and allows inserting outliers. #' #' @param ... Any parameters of \code{aat_simulate} provided here will override the defaults #' from the defaults parameter. #' @param defaults Which set of default values should be used? 
#' @param slowols Number of slow outliers to insert per participant #' @param fastols Number of fast outliers to insert per participant #' @param olsd Number of standard deviations by which (slow) outliers deviate #' #' @details "Lender2018" parameters are taken from the relevant-feature AAT of #' Lender, Meule, Rinck, Brockmeyer, & Blechert (2018). "Kahveci2021" parameters #' are taken from Kahveci, Van Alebeek, Berking, & Blechert (in review). #' #' Lender, A., Meule, A., Rinck, M., Brockmeyer, T., & Blechert, J. (2018). #' Measurement of food-related approach–avoidance biases: #' Larger biases when food stimuli are task relevant. Appetite, 125, 42-47. #' #' Kahveci, S., Van Alebeek, H., Berking, M., & Blechert, J. (in review). #' Touchscreen based assessment of food approach biases: investigation of #' reliability and stimulus-specific effects. #' @export #' #' @examples #' hist(aat_simulate2(defaults="Lender2018_relevant_raw",slowols=10,fastols=10)$rt) #' @rdname aat_simulate aat_simulate2<-function(..., defaults="none", slowols=0,fastols=0,olsd=3){ override.args<-list(...) if(defaults=="none"){ args<-override.args }else{ chosenset<-match.arg(defaults,choices=dataprops$setname) args<-as.list(dataprops[chosenset==dataprops$setname,]) } args[names(override.args)]<-override.args ds<-do.call(aat_simulate,args) gshape<-3 gscale<-1 #slow OLs ds<-ds%>%group_by(.data$subj)%>%mutate(rownum=1:n())%>% mutate(rt = ifelse(!(.data$rownum %in% sample(.data$rownum,slowols)),.data$rt, .data$rt+.data$sdrt*olsd)) #fast OLs if(fastols>0){ ds<-ds%>%group_by(.data$subj)%>%mutate(rownum=1:n(),eligible=.data$rt>.data$sdrt*olsd)%>% mutate(rt = ifelse(!(.data$rownum %in% sample(which(.data$eligible),fastols)), .data$rt,.data$rt-.data$sdrt*olsd)) } return(ds) } #experimental.
Can currently only be used in datasets with approximately equal trials in all cells aat_properties<-function(ds,subjvar,pullvar,targetvar,rtvar){ ds<-aat_preparedata(ds=ds,subjvar=subjvar,pullvar=pullvar,targetvar=targetvar,rtvar=rtvar) ds%<>%group_by(!!sym(subjvar))%>% mutate(pulldiff=mean(subset(!!sym(rtvar),!!sym(pullvar)==1)) - mean(subset(!!sym(rtvar),!!sym(pullvar)==0)), targetdiff=mean(subset(!!sym(rtvar),!!sym(targetvar)==1)) - mean(subset(!!sym(rtvar),!!sym(targetvar)==0)), doublediff=2*mean(subset(!!sym(rtvar),!!sym(pullvar)!=!!sym(targetvar))) - 2*mean(subset(!!sym(rtvar),!!sym(pullvar)==!!sym(targetvar)))) ds%<>%group_by(!!sym(subjvar)) %>% mutate(.residrt=!!sym(rtvar)+ -(!!sym(pullvar)-mean(!!sym(pullvar)))*.data$pulldiff+ -(!!sym(targetvar)-mean(!!sym(targetvar)))*.data$targetdiff+ +.5*((!!sym(pullvar)==!!sym(targetvar))-mean(!!sym(pullvar)==!!sym(targetvar)))*.data$doublediff) ppstats<-ds%>%group_by(!!sym(subjvar))%>% summarise(.pullfx=first(.data$pulldiff), .targetfx=first(.data$targetdiff), .biasfx=first(.data$doublediff), .meanrt=mean(!!sym(rtvar)), .sdrt.full=sd(!!sym(rtvar)), .sdrt.resid=sd(.data$.residrt), ntrial=n(), .groups="drop") output<-ppstats %>% ungroup() %>% summarise(pullfx=mean(.data$.pullfx),pullfx_jitter=sd(.data$.pullfx), stimfx=mean(.data$.targetfx),stimfx_jitter=sd(.data$.targetfx), biasfx=mean(.data$.biasfx),biasfx_jitter=sd(.data$.biasfx), meanrt=mean(.data$.meanrt),meanrt_jitter=sd(.data$.meanrt), sdrt.full=mean(.data$.sdrt.full),sdrt.full_jitter=sd(.data$.sdrt.full), sdrt.resid=mean(.data$.sdrt.resid),sdrt.resid_jitter=sd(.data$.sdrt.resid), ntrial=mean(.data$ntrial), .groups="drop") return(list(dataprops=as.list(output),subjectprops=ppstats,ds=ds)) } #' @rdname aat_simulate #' @description \code{aat_getstudydata()} retrieves the properties of datasets from a number of pre-existing studies #' @export aat_getstudydata<-function(){ dataprops }
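# Illustrative sketch (not run): feeding the output of the experimental aat_properties()
# helper back into aat_simulate(). The mapping of sdrt.resid to sdrt is an assumption on my
# part; the dataset and column names (dsa, subjectid, is_food, decisiontime) are placeholders
# borrowed from the roxygen example further above.
# props <- aat_properties(ds=dsa, subjvar="subjectid", pullvar="is_pull",
#                         targetvar="is_food", rtvar="decisiontime")$dataprops
# sim <- aat_simulate(meanrt=props$meanrt,   meanrt_jitter=props$meanrt_jitter,
#                     sdrt=props$sdrt.resid, sdrt_jitter=props$sdrt.resid_jitter,
#                     pullfx=props$pullfx,   pullfx_jitter=props$pullfx_jitter,
#                     stimfx=props$stimfx,   stimfx_jitter=props$stimfx_jitter,
#                     biasfx=props$biasfx,   biasfx_jitter=props$biasfx_jitter)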
/scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_simulate.R
# splithalf engine #### #multicore splithalf #' @title Compute the bootstrapped split-half reliability for approach-avoidance task data #' @description Compute bootstrapped split-half reliability for approach-avoidance task data. #' @param ds a longformat data.frame #' @param subjvar Quoted name of the participant identifier column #' @param pullvar Quoted name of the column indicating pull trials. #' Pull trials should either be represented by 1, or by the second level of a factor. #' @param targetvar Name of the column indicating trials featuring the target stimulus. #' Target stimuli should either be represented by 1, or by the second level of a factor. #' @param rtvar Name of the reaction time column. #' @param stratvars Names of additional variables to stratify splits by. #' @param iters Total number of desired iterations. At least 6000 are recommended for reasonable estimates. #' @param algorithm Function (without brackets or quotes) to be used to compute AAT scores. See \link{Algorithms} for a list of usable algorithms. #' @param trialdropfunc Function (without brackets or quotes) to be used to exclude outlying trials in each half. #' The way you handle outliers for the reliability computation should mimic the way you do it in your regular analyses. #' It is recommended to exclude outlying trials when computing AAT scores using the mean double-difference scores and regression scoring approaches, #' but not when using d-scores or median double-difference scores. #' \itemize{ #' \item \code{prune_nothing} excludes no trials (default) #' \item \code{trial_prune_grubbs} applies a Grubbs' test to the data, removing one outlier at a time until the test is no longer significant. #' \item \code{trial_prune_3SD} excludes trials deviating more than 3SD from the mean per participant. #' \item \code{trial_prune_SD_dropcases} removes trials deviating more than a specific number of standard deviations from the participant's mean, #' and removes participants with an excessive percentage of outliers. #' Required arguments: #' \itemize{ #' \item \code{trialsd} - trials deviating more than \code{trialsd} standard deviations from the participant's mean are excluded (optional; default is 3) #' \item \code{maxoutliers} - participants with a higher percentage of outliers are removed from the data. (optional; default is .15) #' } #' \item \code{trial_recode_SD} recodes outlying reaction times to the nearest non-outlying value, #' with outliers defined as reaction times deviating more than a certain number of standard deviations from the participant's mean. Required argument: #' \itemize{ #' \item \code{trialsd} - trials deviating more than this many standard deviations from the mean are classified as outliers. #' } #' \item \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample} remove trials below and/or above certain percentiles, #' on a subject-by-subject basis or sample-wide, respectively. The following arguments are available: #' \itemize{ #' \item \code{lowerpercent} and \code{upperpercent} (optional; defaults are .01 and .99). #' } #' } #' @param errortrialfunc Function (without brackets or quotes) to apply to an error trial. #' #' \itemize{ #' \item \code{prune_nothing} removes no errors (default). #' \item \code{error_replace_blockmeanplus} replaces error trial reaction times with the block mean, plus an arbitrary extra quantity.
#' If used, the following additional arguments are required: #' \itemize{ #' \item \code{blockvar} - Quoted name of the block variable (mandatory) #' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory) #' \item \code{errorbonus} - Amount to add to the reaction time of error trials. Default is 0.6 (recommended by \code{Greenwald, Nosek, & Banaji, 2003}) #' } #' \item \code{error_prune_dropcases} removes errors and drops participants if they have more errors than a given percentage. The following arguments are available: #' \itemize{ #' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory) #' \item \code{maxerrors} - participants with a higher percentage of errors are excluded from the dataset. Default is .15. #' } #' } #' @param casedropfunc Function (without brackets or quotes) to be used to exclude outlying participant scores in each half. #' The way you handle outliers here should mimic the way you do it in your regular analyses. #' \itemize{ #' \item \code{prune_nothing} excludes no participants (default) #' \item \code{case_prune_3SD} excludes participants deviating more than 3SD from the sample mean. #' } #' @param plot Create a scatterplot of the AAT scores computed from each half of the data from the last iteration. #' This is highly recommended, as it helps to identify outliers that can inflate or diminish the reliability. #' @param include.raw logical indicating whether raw split-half data should be included in the output object. #' @param parallel If TRUE (default), will use parallel computing to compute results faster. #' If a doParallel backend has not been registered beforehand, #' this function will register a cluster and stop it after finishing, which takes some extra time. #' @param ... Other arguments, to be passed on to the algorithm or outlier rejection functions (see arguments above) #' #' @return A list, containing the mean bootstrapped split-half reliability, bootstrapped 95% confidence intervals, #' a list of data.frames used over each iteration, and a vector containing the split-half reliability of each iteration. 
#' #' @author Sercan Kahveci #' @seealso \link{q_reliability} #' @examples #' split <- aat_splithalf(ds=erotica[erotica$is_irrelevant==0,], #' subjvar="subject", pullvar="is_pull", targetvar="is_target", #' rtvar="RT", stratvars="stimuluscode", iters=10, #' trialdropfunc="trial_prune_3SD", #' casedropfunc="case_prune_3SD", algorithm="aat_dscore", #' plot=FALSE, parallel=FALSE) #' #' print(split) #' #Mean reliability: 0.521959 #' #Spearman-Brown-corrected r: 0.6859041 #' #95%CI: [0.4167018, 0.6172474] #' #' plot(split) #' #' \donttest{ #' #Regression Splithalf #' aat_splithalf(ds=erotica[erotica$is_irrelevant==0,], #' subjvar="subject", pullvar="is_pull", targetvar="is_target", #' rtvar="RT", iters=10, trialdropfunc="trial_prune_3SD", #' casedropfunc="case_prune_3SD", algorithm="aat_regression", #' formula = RT ~ is_pull * is_target, aatterm = "is_pull:is_target", #' plot=FALSE, parallel=FALSE) #' #Mean reliability: 0.5313939 #' #Spearman-Brown-corrected r: 0.6940003 #' #95%CI: [0.2687186, 0.6749176] #' } #' @export aat_splithalf<-function(ds,subjvar,pullvar,targetvar=NULL,rtvar,stratvars=NULL,iters, algorithm=c("aat_doublemeandiff","aat_doublemediandiff", "aat_dscore","aat_dscore_multiblock", "aat_regression","aat_standardregression", "aat_singlemeandiff","aat_singlemediandiff"), trialdropfunc=c("prune_nothing","trial_prune_3SD","trial_prune_3MAD", "trial_prune_SD_dropcases", "trial_recode_SD","trial_prune_percent_subject", "trial_prune_percent_sample","trial_prune_grubbs"), errortrialfunc=c("prune_nothing","error_replace_blockmeanplus", "error_prune_dropcases"), casedropfunc=c("prune_nothing","case_prune_3SD"), plot=TRUE,include.raw=FALSE,parallel=TRUE,...){ packs<-c("magrittr","dplyr","AATtools") #Handle arguments args<-list(...) algorithm<-match.arg(algorithm) if(!(algorithm %in% c("aat_singlemeandiff","aat_singlemediandiff", "aat_regression","aat_standardregression")) & is.null(targetvar)){ stop("Argument targetvar missing but required for algorithm!") } trialdropfunc<-match.arg(trialdropfunc) casedropfunc<-match.arg(casedropfunc) errortrialfunc<-match.arg(errortrialfunc) errorpenalizefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus",errortrialfunc,"prune_nothing") errorremovefunc<-ifelse(errortrialfunc=="error_replace_blockmeanplus","prune_nothing",errortrialfunc) if(errortrialfunc=="error_replace_blockmeanplus"){ stopifnot(!is.null(args$blockvar),!is.null(args$errorvar)) if(is.null(args$errorbonus)){ args$errorbonus<- 0.6 } if(is.null(args$blockvar)){ args$blockvar<- 0 } if(is.null(args$errorvar)){ args$errorvar<- 0 } } stopifnot(!(algorithm=="aat_dscore_multiblock" & is.null(args$blockvar))) if(algorithm %in% c("aat_regression","aat_standardregression")){ if(!("formula" %in% names(args))){ args$formula<-as.formula(paste0(rtvar,"~",pullvar,"*",targetvar)) warning("No formula provided. Defaulting to formula ",form2char(args$formula)) }else if(is.character(args$formula)){ args$formula<-as.formula(args$formula) } if(!("aatterm" %in% names(args))){ args$aatterm<-paste0(pullvar,":",targetvar) warning("No AAT-term provided. 
Defaulting to AAT-term ",args$aatterm) } } ds<-do.call(aat_preparedata,c(list(ds=ds,subjvar=subjvar,pullvar=pullvar,targetvar=targetvar, rtvar=rtvar,stratvars=stratvars),args)) #Prepare the cluster if(parallel){ `%dofunc%` <- `%dopar%` hasCluster<-getDoParRegistered() if(!hasCluster){ cluster<-makeCluster(getOption("AATtools.workers")) registerDoParallel(cluster) on.exit(unregisterDoParallel(cluster)) } }else{ `%dofunc%` <- `%do%` } #splithalf loop results<- foreach(iter = seq_len(iters), .packages=packs) %dofunc% { #Split data # if(is.null(targetvar)){ # iterds<-ds%>%group_by(!! sym(subjvar), !! sym(pullvar))%>% # mutate(key=sample(n())%%2)%>%ungroup() # }else{ # # iterds<-ds%>%group_by(!! sym(subjvar), !! sym(pullvar), !! sym(targetvar))%>% # # mutate(key=sample(n())%%2)%>%ungroup() # # h<-tapply(seq_len(nrow(ds)),ds[c(subjvar,pullvar,targetvar)], # function(x){sample(x,size=round(length(x)/2))})%>%unlist() # iterds<-ds # iterds$key<-0 # iterds$key[h]<-1 # } #Split data iterds<-ds iterds$key<-datasplitter(iterds[,c(subjvar,pullvar,targetvar,stratvars)]) #Handle error removal iterds<-do.call(errorremovefunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar))) #Handle outlying trials iterds<-do.call(trialdropfunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar))) #Handle error penalization iterds<-do.call(errorpenalizefunc,c(args,list(ds=iterds,subjvar=subjvar,rtvar=rtvar))) #intermediate prune of empty cases iterds<-drop_empty_cases(iterds,subjvar) # abds<-do.call(algorithm,c(list(iterds=iterds,subjvar=subjvar,pullvar=pullvar, # targetvar=targetvar,rtvar=rtvar),args)) #Compute AB half0set<-iterds[which(iterds$key==0),] half1set<-iterds[which(iterds$key==1),] abds<-merge( do.call(algorithm,c(list(ds=half0set,subjvar=subjvar,pullvar=pullvar, targetvar=targetvar,rtvar=rtvar),args)), do.call(algorithm,c(list(ds=half1set,subjvar=subjvar,pullvar=pullvar, targetvar=targetvar,rtvar=rtvar),args)), by=subjvar,suffixes=c("half0","half1")) #Remove outlying participants abds<-do.call(casedropfunc,list(ds=abds)) #Compute reliability currcorr<-cor(abds$abhalf0,abds$abhalf1,use="complete.obs") frcorr<-FlanaganRulonStandard(abds$abhalf0,abds$abhalf1) rjcorr<-RajuStandard(abds$abhalf0,abds$abhalf1,mean(iterds$key)) #produce output out<-list(corr=currcorr,frcorr=frcorr,rjcorr=rjcorr,abds=abds) if(include.raw){out$rawdata<-iterds} out } #extract coefs from output rjcors<-sapply(results,FUN=function(x){x$rjcorr}) %>% lim(-.9999,.9999) cors<-sapply(results,FUN=function(x){x$corr}) sbcors<-SpearmanBrown(lim(cors,-.9999,.9999),fix.negative="none") %>% lim(-.9999,.9999) frcorrs<-sapply(results,FUN=function(x){x$frcorr}) %>% lim(-.9999,.9999) #get sample sizes (for averaging and significance testing) counts<-sapply(results,function(x){ sum(!is.na(x$abds$abhalf0) & !is.na(x$abds$abhalf1)) }) avg_n<-mean(counts) #sort the cors ordering<-order(rjcors) rjcors<-rjcors[ordering] cors<-cors[ordering] sbcors<-sbcors[ordering] frcorrs<-frcorrs[ordering] counts<-counts[ordering] #assemble output output<-list(uncorrected=list(r=cormean(cors,counts), lowerci=quantile(cors,probs=.025), upperci=quantile(cors,probs=.975), pval=r2p(cormean(cors,counts),avg_n), itercors=cors), spearmanbrown=list(r=cormean(sbcors,counts), lowerci=quantile(sbcors,probs=.025), upperci=quantile(sbcors,probs=.975), pval=r2p(cormean(sbcors,counts),avg_n), itercors=sbcors), flanaganrulon=list(r=cormean(frcorrs,counts), lowerci=quantile(x=frcorrs,probs=.025), upperci=quantile(x=frcorrs,probs=.975), pval=r2p(cormean(frcorrs,counts),avg_n), 
itercors=frcorrs), raju=list(r=cormean(rjcors,counts), lowerci=quantile(x=rjcors,probs=.025), upperci=quantile(x=rjcors,probs=.975), pval=r2p(cormean(rjcors,counts),avg_n), itercors=rjcors), avg_n=avg_n, ordering=ordering, parameters=c(list(ds=ds, subjvar=subjvar, pullvar=pullvar, targetvar=targetvar, rtvar=rtvar, iters=iters, algorithm=algorithm, trialdropfunc=trialdropfunc, errortrialfunc=errortrialfunc, casedropfunc=casedropfunc), args), iterdata=lapply(results,function(x){ x$abds })[ordering]) %>% structure(class = "aat_splithalf") #include raw data if asked to (disabled by default, takes a lot of space) if(include.raw){ output$rawiterdata<-lapply(results,function(x){ x$rawdata })[ordering] } #plot if asked to (default) if(plot){ plot(output) } #return output return(output) } #' @param coef Optional character argument, #' indicating which reliability coefficient should be printed. #' Defaults to Raju's beta. #' @details The calculated split-half coefficients are described in Warrens (2016). #' @references Warrens, M. J. (2016). A comparison of reliability coefficients for #' psychometric tests that consist of two parts. #' Advances in Data Analysis and Classification, 10(1), 71-84. #' @export #' @rdname aat_splithalf print.aat_splithalf<-function(x,coef=c("SpearmanBrown","Raju","FlanaganRulon"),...){ coef<-match.arg(coef) if(coef=="Raju"){ coefstr<-paste0("\nFull-length reliability (Raju's beta):\n", "beta (",format(x$avg_n),") = ",mf(x$raju$r), ", 95%CI [", mf(x$raju$lowerci), ", ", mf(x$raju$upperci),"]", ", p = ",mf(x$raju$pval,digits=3),"\n") }else if(coef=="FlanaganRulon"){ coefstr<-paste0("\nFull-length reliability (Flanagan-Rulon coefficient):\n", "FR (",format(x$avg_n),") = ",mf(x$flanaganrulon$r), ", 95%CI [", mf(x$flanaganrulon$lowerci), ", ", mf(x$flanaganrulon$upperci),"]", ", p = ",mf(x$flanaganrulon$pval,digits=3),"\n") }else if(coef=="SpearmanBrown"){ coefstr<-paste0("\nFull-length reliability (Spearman-Brown coefficient):\n", "SB (",format(x$avg_n),") = ",mf(x$spearmanbrown$r), ", 95%CI [", mf(x$spearmanbrown$lowerci), ", ", mf(x$spearmanbrown$upperci),"]", ", p = ",mf(x$spearmanbrown$pval,digits=3),"\n") } cat(coefstr, "\nUncorrected, average split-half correlation:\n", "r (",format(x$avg_n),") = ",mf(x$uncorrected$r), ", 95%CI [", mf(x$uncorrected$lowerci), ", ", mf(x$uncorrected$upperci),"]", ", p = ",mf(x$uncorrected$pval,digits=3),"\n", sep="") } #' @title Plot split-half scatterplots #' #' @param x an \code{aat_splithalf} object #' @param type Character argument indicating which iteration should be chosen. Must be an abbreviation of #' \code{"median"} (default), \code{"minimum"}, \code{"maximum"}, or \code{"random"}. 
#' #' @export #' @rdname aat_splithalf plot.aat_splithalf<-function(x,type=c("median","minimum","maximum","random"),...){ type<-match.arg(type) if(type=="median"){ title<-"Split-half Scatterplot for Iteration with Median Reliability" idx<-ceiling(x$parameters$iters/2) }else if(type=="minimum"){ title<-"Split-half Scatterplot for Iteration with the Lowest Reliability" idx<-1 }else if(type=="maximum"){ title<-"Split-half Scatterplot for Iteration with the Highest Reliability" idx<-x$parameters$iters }else if(type=="random"){ title<-"Split-half Scatterplot for Random Iteration" idx<-sample(1:x$parameters$iters,1) } abds<-x$iterdata[[idx]] plot(abds$abhalf0,abds$abhalf1,pch=20,main= paste0(title,"\n(Uncorrected r = ", round(x$uncorrected$itercors[idx],digits=2),")"), xlab="Half 1 computed bias",ylab="Half 2 computed bias") text(abds$abhalf0,abds$abhalf1,abds[,1],cex= 0.7, pos=3, offset=0.3) }
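# Illustrative sketch (not run): accessing the coefficients stored in an aat_splithalf object
# directly instead of through print(). The field names follow the output list constructed
# above (uncorrected, spearmanbrown, flanaganrulon, raju); "split" is a placeholder name, and
# in practice far more than 10 iterations should be used.
# split <- aat_splithalf(ds=erotica[erotica$is_irrelevant==0,], subjvar="subject",
#                        pullvar="is_pull", targetvar="is_target", rtvar="RT",
#                        iters=10, algorithm="aat_dscore", plot=FALSE, parallel=FALSE)
# split$spearmanbrown$r                       # full-length (Spearman-Brown) estimate
# c(split$raju$lowerci, split$raju$upperci)   # bootstrapped 95% CI of Raju's beta
# hist(split$uncorrected$itercors)            # distribution of per-iteration correlations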
/scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_splithalf.R
subtraction.matrix<-function(avec,bvec){ na<-length(avec) nb<-length(bvec) out<-matrix(NA,nrow=na,ncol=nb) for(i in seq_len(na)){ out[i,]<-avec[i]-bvec } return(out) } meanpercentile<-function(sample,population){ sample %>% sapply(function(x) mean(x<population,na.rm=T)) %>% mean(na.rm=T) } #' Compute stimulus-rest correlations of double-difference scores #' This function provides a statistic that can give an indication of how deviant #' the responses to specific stimuli are, in comparison to the rest of the stimulus set. #' The algorithm computes stimulus-rest correlations of stimulus-specific double-difference scores. #' It takes single-difference approach-avoidance scores for each stimulus, and computes #' every possible subtraction between individual stimuli from both stimulus categories. #' It then computes correlations between every such subtraction of stimuli on one hand, and #' the mean double difference score of all other stimuli. Stimulus-rest correlations are then #' computed by averaging every such subtraction-rest correlation involving a specific stimulus. #' #' @param ds a \code{data.frame} #' @param subjvar the label of the participant identifier variable #' @param stimvar the label of the stimulus identifier variable #' @param pullvar the label of the movement direction identifier variable #' @param targetvar the label of the stimulus category identifier variable #' @param rtvar the label of the reaction time variable #' @param method Optional, the correlation method to be used (pearson, spearman, kendall) #' #' @return Returns a \code{aat_stimulus_rest} object containing statistics for each stimulus. #' Stats include the average stimulus-rest correlation (mcor); the standard deviation of #' dyad-rest correlations for this stimulus (sdcor); #' the number of valid correlations involved in these statistic (n); #' the average percentile of dyad-rest correlations involving the stimulus within #' the distribution of all other dyad-rest correlations (restpercentile); #' as well as z-scores (zpercentile) and p-values for this percentile (pval). #' #' @export #' #' @examples #' #' ds<-aat_simulate() #' stimrest<-aat_stimulus_rest(ds,subjvar="subj",stimvar="stim",pullvar="is_pull", #' targetvar="is_target",rtvar="rt") #' plot(stimrest) #' print(stimrest) aat_stimulus_rest<-function(ds,subjvar,stimvar,pullvar,targetvar,rtvar,method=c("pearson","spearman","kendall")){ method<-match.arg(method) # check data ds<-aat_preparedata(ds,subjvar,pullvar,targetvar,rtvar,stimvar=stimvar) #compute single-difference scores biasset<-ds%>%group_by(!!sym(subjvar),!!sym(stimvar),!!sym(targetvar))%>% summarise(bias=mean(subset(!!sym(rtvar),!!sym(pullvar)==0),na.rm=T)- mean(subset(!!sym(rtvar),!!sym(pullvar)==1),na.rm=T),.groups="drop") stimset<-biasset%>%select(!!sym(stimvar),!!sym(targetvar))%>%distinct() stimset$mcor<-NA for(i in seq_len(nrow(stimset))){ iterset<-biasset%>%group_by(!!sym(subjvar))%>% summarise(stimbias=.data$bias[which(!!sym(stimvar)==stimset[[stimvar]][i])], restbias=mean(.data$bias[!!sym(stimvar) != stimset[[stimvar]][i] & !!sym(targetvar) == stimset[[targetvar]][i] ]), counterbias=mean(.data$bias[!!sym(targetvar) != stimset[[targetvar]][i] ]), .groups="drop") stimset$mcor[i]<-cor(iterset$stimbias-iterset$counterbias,iterset$restbias-iterset$counterbias, use="complete.obs",method=method) } return(structure(stimset,class=c("aat_stimulus_rest","data.frame"))) } #' @rdname aat_stimulus_rest #' @param x an \code{aat_stimulus_rest} object #' @param ... Ignored. 
#' @export plot.aat_stimulus_rest<-function(x,...){ x<-x[!is.na(x$mcor),] ranks<-rank(x$mcor) wideness<-max(x$mcor)-min(x$mcor) plot(x=x$mcor,y=ranks, xlim=c(min(x$mcor)-.5*wideness*strwidth(s=x$mcor[min(ranks)],cex=.5,font=2,units="figure"), max(x$mcor)+.5*wideness*strwidth(s=x$mcor[max(ranks)],cex=.5,font=2,units="figure")), xlab="Stimulus-rest correlation",main=paste0("Stimulus-rest correlations"), yaxt="n") segments(x0=mean(x$mcor),x1=x$mcor,y0=ranks,y1=ranks) text(x=x$mcor,y=ranks,labels=x$stim, pos=3+sign(x$mcor-mean(x$mcor)),offset=0.5,cex=.5,font=2) abline(v=mean(x$mcor)) axis(2, labels=x$stim,at=ranks,las=1,cex.axis=.5) }
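# Illustrative sketch (not run): flagging stimuli whose stimulus-rest correlation falls well
# below the mean of the stimulus set. The 1-SD cutoff is arbitrary and purely for
# illustration; "ds", "stimrest" and "weak" are placeholder names.
# ds <- aat_simulate()
# stimrest <- aat_stimulus_rest(ds, subjvar="subj", stimvar="stim", pullvar="is_pull",
#                               targetvar="is_target", rtvar="rt")
# weak <- stimrest$stim[stimrest$mcor < mean(stimrest$mcor) - sd(stimrest$mcor)]
# weak  # candidate stimuli to inspect or exclude in a sensitivity analysis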
/scratch/gouwar.j/cran-all/cranData/AATtools/R/aat_stimulus_rest.R
# Score computation algorithms #### #' @title AAT score computation algorithms #' @name Algorithms #' @description AAT score computation algorithms #' @param ds A long-format data.frame #' @param subjvar Column name of the participant identifier variable #' @param pullvar Column name of the movement variable (0: avoid; 1: approach) #' @param targetvar Column name of the stimulus category variable (0: control stimulus; 1: target stimulus) #' @param rtvar Column name of the reaction time variable #' @param ... Other arguments passed on by functions (ignored) #' #' @return A data.frame containing participant number and computed AAT score. NULL #' @describeIn Algorithms computes a mean-based double-difference score: #' \code{(mean(push_target) - mean(pull_target)) - (mean(push_control) - mean(pull_control))} #' #' @export aat_doublemeandiff<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[targetvar]],ds[[pullvar]]), mean.default,na.rm=TRUE) b<-apply(a,1,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) }) setNames(data.frame(id=names(b),ab=b,stringsAsFactors=F), c(subjvar,"ab")) } aat_doublemeandiff_old<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ idx1<-which(ds[[pullvar]]==0 & ds[[targetvar]]==1) idx2<-which(ds[[pullvar]]==1 & ds[[targetvar]]==1) idx3<-which(ds[[pullvar]]==0 & ds[[targetvar]]==0) idx4<-which(ds[[pullvar]]==1 & ds[[targetvar]]==0) ab<-(tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],mean.default,na.rm=TRUE) - tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],mean.default,na.rm=TRUE))- (tapply(ds[[rtvar]][idx3],ds[[subjvar]][idx3],mean.default,na.rm=TRUE) - tapply(ds[[rtvar]][idx4],ds[[subjvar]][idx4],mean.default,na.rm=TRUE)) setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab")) } aat_doublemeandiff_older<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ group_by(ds,!!sym(subjvar)) %>% summarise(ab=(mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) - mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) - (mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) - mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE))) } #' @export #' @describeIn Algorithms computes a median-based double-difference score: #' \code{(median(push_target) - median(pull_target)) - (median(push_control) - median(pull_control))} aat_doublemediandiff<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[targetvar]],ds[[pullvar]]), median.default,na.rm=TRUE) b<-apply(a,1,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) }) setNames(data.frame(id=names(b),ab=b,stringsAsFactors=F), c(subjvar,"ab")) } aat_doublemediandiff_old<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ idx1<-which(ds[[pullvar]]==0 & ds[[targetvar]]==1) idx2<-which(ds[[pullvar]]==1 & ds[[targetvar]]==1) idx3<-which(ds[[pullvar]]==0 & ds[[targetvar]]==0) idx4<-which(ds[[pullvar]]==1 & ds[[targetvar]]==0) ab<-(tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],median.default,na.rm=TRUE) - tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],median.default,na.rm=TRUE))- (tapply(ds[[rtvar]][idx3],ds[[subjvar]][idx3],median.default,na.rm=TRUE) - tapply(ds[[rtvar]][idx4],ds[[subjvar]][idx4],median.default,na.rm=TRUE)) setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab")) } aat_doublemediandiff_older<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ group_by(ds,!!sym(subjvar)) %>% 
summarise(ab=(median(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) - median(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) - (median(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) - median(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE))) } #' @export #' @describeIn Algorithms computes D-scores for a 2-block design (see Greenwald, Nosek, and Banaji, 2003): #' \code{((mean(push_target) - mean(pull_target)) - (mean(push_control) - mean(pull_control))) / sd(participant_reaction_times)} aat_dscore<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[targetvar]],ds[[pullvar]]), mean.default,na.rm=TRUE) b<-apply(a,1,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) }) sds<-tapply(ds[[rtvar]],ds[[subjvar]],vec.sd,na.rm=TRUE) c<-b/sds setNames(data.frame(id=names(c),ab=c,stringsAsFactors=F), c(subjvar,"ab")) } aat_dscore_old<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ idx1<-which(ds[[pullvar]]==0 & ds[[targetvar]]==1) idx2<-which(ds[[pullvar]]==1 & ds[[targetvar]]==1) idx3<-which(ds[[pullvar]]==0 & ds[[targetvar]]==0) idx4<-which(ds[[pullvar]]==1 & ds[[targetvar]]==0) ab<-((tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],mean.default,na.rm=TRUE) - tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],mean.default,na.rm=TRUE))- (tapply(ds[[rtvar]][idx3],ds[[subjvar]][idx3],mean.default,na.rm=TRUE) - tapply(ds[[rtvar]][idx4],ds[[subjvar]][idx4],mean.default,na.rm=TRUE)))/ tapply(ds[[rtvar]],ds[[subjvar]],sd,na.rm=TRUE) setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab")) } aat_dscore_older<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ group_by(ds,!!sym(subjvar)) %>% summarise(ab=((mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) - mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) - (mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) - mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE))) / sd(!!sym(rtvar),na.rm=TRUE)) } #' @export #' @describeIn Algorithms computes a double-difference score using medians, #' and divides it by the median absolute deviation of the participant's overall reaction times: #' \code{((median(push_target) - median(pull_target)) - (median(push_control) - median(pull_control))) / mad(participant_reaction_times)} aat_mediandscore<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[targetvar]],ds[[pullvar]]), median.default,na.rm=TRUE) b<-apply(a,1,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) }) sds<-tapply(ds[[rtvar]],ds[[subjvar]],mad,na.rm=TRUE) c<-b/sds setNames(data.frame(id=names(c),ab=c,stringsAsFactors=F), c(subjvar,"ab")) } aat_mediandscore_old<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ idx1<-which(ds[[pullvar]]==0 & ds[[targetvar]]==1) idx2<-which(ds[[pullvar]]==1 & ds[[targetvar]]==1) idx3<-which(ds[[pullvar]]==0 & ds[[targetvar]]==0) idx4<-which(ds[[pullvar]]==1 & ds[[targetvar]]==0) ab<-((tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],median.default,na.rm=TRUE) - tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],median.default,na.rm=TRUE))- (tapply(ds[[rtvar]][idx3],ds[[subjvar]][idx3],median.default,na.rm=TRUE) - tapply(ds[[rtvar]][idx4],ds[[subjvar]][idx4],median.default,na.rm=TRUE)))/ tapply(ds[[rtvar]],ds[[subjvar]],mad,na.rm=TRUE) setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab")) }
aat_mediandscore_older<-function(ds,subjvar,pullvar,targetvar,rtvar,...){ group_by(ds,!!sym(subjvar)) %>% summarise(ab=((median(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) - median(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) - (median(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) - median(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE))) / mad(!!sym(rtvar),na.rm=TRUE)) } #' @param blockvar name of the variable indicating block number #' @export #' @describeIn Algorithms computes D-scores for pairs of sequential blocks #' and averages the resulting score (see Greenwald, Nosek, and Banaji, 2003). #' Requires extra \code{blockvar} argument, indicating the name of the block variable. #note: this matches sequential blocks with one another. aat_dscore_multiblock<-function(ds,subjvar,pullvar,targetvar,rtvar,blockvar,...){ ds$.blockset<-floor((ds[[blockvar]]-min(ds[[blockvar]]))/2) a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds$.blockset,ds[[targetvar]],ds[[pullvar]]), mean.default,na.rm=TRUE) b<-apply(a,1:2,function(x){x[2,1]-x[2,2]-(x[1,1]-x[1,2]) }) sds<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds$.blockset),vec.sd,na.rm=TRUE) c<-rowMeans(b/sds) setNames(data.frame(id=names(c),ab=c,stringsAsFactors=F), c(subjvar,"ab")) } aat_dscore_multiblock_old<-function(ds,subjvar,pullvar,targetvar,rtvar,blockvar,...){ ds %>% mutate(.blockset = floor((!!sym(blockvar) - min(!!sym(blockvar)))/2) ) %>% group_by(!!sym(subjvar),.data$.blockset) %>% summarise(ab=((mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 1),na.rm=TRUE) - mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 1),na.rm=TRUE)) - (mean(subset(!!sym(rtvar),!!sym(pullvar)==0 & !!sym(targetvar) == 0),na.rm=TRUE) - mean(subset(!!sym(rtvar),!!sym(pullvar)==1 & !!sym(targetvar) == 0),na.rm=TRUE))) / sd(!!sym(rtvar),na.rm=TRUE)) %>% group_by(!!sym(subjvar)) %>% summarise(ab=mean(ab,na.rm=TRUE)) } #' @param formula A regression formula to fit to the data to compute an AAT score #' @param aatterm A character naming the formula term representing the approach bias. #' Usually this is the interaction of the movement-direction and stimulus-category terms. #' @export #' @describeIn Algorithms \code{aat_regression} and \code{aat_standardregression} fit regression models to participants' reaction times and extract a term that serves as AAT score. #' \code{aat_regression} extracts the raw coefficient, equivalent to a mean difference score. #' \code{aat_standardregression} extracts the t-score of the coefficient, standardized on the basis of the variability of the participant's reaction times. #' These algorithms can be used to regress nuisance variables out of the data before computing AAT scores. #' When using these functions, additional arguments must be provided: #' \itemize{ #' \item \code{formula} - a formula to fit to the data #' \item \code{aatterm} - the term within the formula that indicates the approach bias; this is usually the interaction of the pull and target terms. 
#' } aat_regression<-function(ds,subjvar,formula,aatterm,...){ output<-data.frame(pp=unique(ds[[subjvar]]),ab=NA,var=NA) for(i in seq_len(nrow(output))){ mod<-coef(summary(lm(formula,data=ds[ds[[subjvar]]==output[i,"pp"],]))) if(aatterm %in% rownames(mod)){ output[i,"ab"]<- -mod[rownames(mod)==aatterm,1] output[i,"var"]<- mod[rownames(mod)==aatterm,2] } } colnames(output)[colnames(output)=="pp"]<-subjvar return(output) } #' @export #' @describeIn Algorithms See above aat_standardregression<-function(ds,subjvar,formula,aatterm,...){ output<-data.frame(pp=unique(ds[[subjvar]]),ab=NA,var=NA) for(i in seq_len(nrow(output))){ mod<-coef(summary(lm(formula,data=ds[ds[[subjvar]]==output[i,"pp"],]))) if(aatterm %in% rownames(mod)){ output[i,"ab"]<- -mod[rownames(mod)==aatterm,1] output[i,"var"]<- mod[rownames(mod)==aatterm,2] } } colnames(output)[colnames(output)=="pp"]<-subjvar output$ab<-output$ab/output$var return(output) } #' @export #' @describeIn Algorithms subtracts the mean approach reaction time from the mean avoidance reaction time. #' Using this algorithm is only sensible if the supplied data contain a single stimulus category. aat_singlemeandiff<-function(ds,subjvar,pullvar,rtvar,...){ a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[pullvar]]),mean.default,na.rm=T) b<-apply(a,1,function(x){ x[1]-x[2] }) setNames(data.frame(id=names(b),ab=b,stringsAsFactors=F), c(subjvar,"ab")) } aat_singlemeandiff_old<-function(ds,subjvar,pullvar,rtvar,...){ idx1<-which(ds[[pullvar]]==0) idx2<-which(ds[[pullvar]]==1) ab<-(tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],mean.default,na.rm=T) - tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],mean.default,na.rm=T)) setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab")) } aat_singlemeandiff_older<-function(ds,subjvar,pullvar,rtvar,...){ group_by(ds,!!sym(subjvar))%>% summarise(ab=mean(subset(!!sym(rtvar),!!sym(pullvar)==1)) - mean(subset(!!sym(rtvar),!!sym(pullvar)==0))) } #' @export #' @describeIn Algorithms subtracts the median approach reaction time from the median avoidance reaction time. #' Using this algorithm is only sensible if the supplied data contain a single stimulus category. aat_singlemediandiff<-function(ds,subjvar,pullvar,rtvar,...){ a<-tapply(ds[[rtvar]],list(ds[[subjvar]],ds[[pullvar]]),median.default,na.rm=T) b<-apply(a,1,function(x){ x[1]-x[2] }) setNames(data.frame(id=names(b),ab=b,stringsAsFactors=F), c(subjvar,"ab")) } aat_singlemediandiff_old<-function(ds,subjvar,pullvar,rtvar,...){ idx1<-which(ds[[pullvar]]==0) idx2<-which(ds[[pullvar]]==1) ab<-(tapply(ds[[rtvar]][idx1],ds[[subjvar]][idx1],median.default,na.rm=T) - tapply(ds[[rtvar]][idx2],ds[[subjvar]][idx2],median.default,na.rm=T)) setNames(data.frame(id=names(ab),ab=ab,stringsAsFactors=F),c(subjvar,"ab")) } aat_singlemediandiff_older<-function(ds,subjvar,pullvar,rtvar,...){ group_by(ds,!!sym(subjvar))%>% summarise(ab=median(subset(!!sym(rtvar),!!sym(pullvar)==1)) - median(subset(!!sym(rtvar),!!sym(pullvar)==0))) }
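# Illustrative sketch (not run): calling the regression-based scoring functions directly,
# with the formula and aatterm arguments described above. "ds" is a placeholder dataset from
# aat_simulate(); nuisance covariates could be added to the formula as needed.
# ds <- aat_simulate()
# scores <- aat_regression(ds, subjvar="subj",
#                          formula=rt ~ is_pull * is_target,
#                          aatterm="is_pull:is_target")
# head(scores)  # per participant: the sign-flipped interaction coefficient ("ab")
#               # and its standard error (stored in the "var" column)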
/scratch/gouwar.j/cran-all/cranData/AATtools/R/algorithms.R
#' @name correlation-tools #' @title Correlation tools #' @description Helper functions to compute important statistics from correlation coefficients. #' @param r,r1,r2 a correlation value #' @param z a Z-score #' @param n,n1,n2 sample sizes #' @param alpha the significance level to use #' @seealso \link{cormean}, \link{multiple.cor}, \link{partial.cor} #' @examples #' z <- r2z(.5) #' r <- z2r(z) #' t<-r2t(r,30) #' r2p(r,30) #' print(rconfint(r,30)) #' print(compcorr(.5,.7,20,20)) NULL #' @export #' @describeIn correlation-tools converts correlation coefficients to z-scores r2z<-function(r){ z<-.5 * (log(1+r) - log(1-r)) return(z) } #' @export #' @describeIn correlation-tools converts z-scores to correlation coefficients z2r<-function(z){ r<-(exp(2*z)-1)/(exp(2*z)+1) rma<-which(is.nan(r)) r[rma]<-ifelse(z[rma]>0,1,-1) return(r) } #' @export #' @describeIn correlation-tools Converts correlation coefficients to t-scores r2t<-function(r,n){ (r*sqrt(n-2))/sqrt(1-r^2) } t2r<-function(t,n){ t/sqrt(t^2+n-2) } #' @export #' @describeIn correlation-tools Computes the two-sided p-value for a given correlation r2p<-function(r,n){ 2*pt(abs(r2t(r,n)),n-2,lower.tail=FALSE) } #' @export #' @describeIn correlation-tools Computes confidence intervals for a given correlation coefficient rconfint<-function(r,n,alpha=.05){ z<-r2z(r) zint<-qnorm(1-alpha/2) * sqrt(1/(n-3)) confints<-c(z2r(z-zint),z2r(z+zint)) return(confints) } #' @export #' @describeIn correlation-tools computes the significance of the difference between two correlation coefficients compcorr<-function(r1,r2,n1,n2){ zval<-abs(r2z(r1)-r2z(r2)) / sqrt((1/(n1-3)) + (1/(n2-3))) pval<-min(1,pnorm(abs(zval),lower.tail=F)*2) return(structure(list(zscore=zval,pvalue=pval),class="compcorr")) } print.compcorr<-function(x,...){ cat("Two-tailed Z-test for the difference between two correlation coefficients.", "\nZ =",x$zscore,"\np =",x$pvalue,"\n") } #' Compute a minimally biased average of correlation values #' #' This function computes a minimally biased average of correlation values. #' This is needed because simple averaging of correlations is negatively biased, #' and the often used z-transformation method of averaging correlations is positively biased. #' The algorithm was developed by Olkin & Pratt (1958). #' #' @param r a vector containing correlation values #' @param n a single value or vector containing sample sizes #' @param wts Character. How should the correlations be weighted? #' \code{none} leads to no weighting, \code{n} weights by sample size, \code{df} weights by sample size minus one. #' @param type Character. Determines which averaging algorithm to use, with "OP5" being the most accurate. #' @param na.rm Logical. Should missing values be removed? #' #' @return An average correlation. #' @name cormean #' @export #' #' @references #' Olkin, I., & Pratt, J. (1958). Unbiased estimation of certain correlation coefficients. #' The Annals of Mathematical Statistics, 29. https://doi.org/10.1214/aoms/1177706717 #' #' Shieh, G. (2010). Estimation of the simple correlation coefficient. Behavior Research Methods, #' 42(4), 906-917.
https://doi.org/10.3758/BRM.42.4.906 #' #' @examples #' cormean(c(0,.3,.5),c(30,30,60)) cormean<-function(r,n,wts=c("none","n","df"),type=c("OP5","OPK","OP2"),na.rm=F){ type<-match.arg(type) wts<-match.arg(wts) if(na.rm){ missing<-which(is.na(r) | is.na(n)) if(length(missing)>0){ r<-r[-missing] n<-n[-missing] } } weight<-list(rep(1,times=length(n)),n,n-1)[[1+(wts=="n")+2*(wts=="df")]] if(length(r)!=length(n)){ stop("Length of r and n not equal!") } if(type=="OP5"){ sizevec<-unique(n) gammalist<-sapply(sizevec,function(nr) (gamma(.5+1:5)^2 * gamma(nr/2-1))/ (gamma(.5)^2 * gamma(nr/2-1+1:5))) rmean<-weighted.mean(x= sapply(seq_along(r), function(i) r[i]*(1+ sum(gammalist[,match(n[i],sizevec)] * (1-r[i]^2)^(1:5)/factorial(1:5)))), w= weight) }else if(type=="OPK"){ rmean<-weighted.mean(x= r*(1+(1-r^2)/(2*(n-(9*sqrt(2)-7)/2))), w= weight) }else if(type=="OP2"){ rmean<-weighted.mean(x= r*(1+ (1-r^2)/(2*(n-2)) + (9*(1-r^2)^2)/(8*n*(n-2))), w= weight) } return(rmean) } #' Partial correlation #' Compute the correlation between x and y while controlling for z. #' @param x,y,z x and y will be correlated while controlling for z #' @param use optional character indicating how to handle missing values (see \link{cor}) #' @export #' @examples #' partial.cor(mtcars$mpg,mtcars$cyl,mtcars$disp) partial.cor<-function(x,y,z,use=c("complete.obs","everything")){ use<-match.arg(use) if(use=="complete.obs"){ key<- !is.na(x) & !is.na(y) & !is.na(z) x<-x[key] y<-y[key] z<-z[key] } xy<-cor(x,y) xz<-cor(x,z) yz<-cor(y,z) return((xy-xz*yz)/sqrt((1-xz^2)*(1-yz^2))) } #ref: https://www.tse-fr.eu/sites/default/files/medias/stories/SEMIN_09_10/STATISTIQUE/croux.pdf # devlin, 1975 cor.influence<-function(x,y){ x<-x-mean(x) y<-y-mean(y) x*y-(x^2+y^2)/2*cor(x,y) } #' Multiple correlation #' Computes the \href{https://en.wikipedia.org/wiki/Multiple_correlation}{multiple correlation coefficient} #' of variables in \code{ymat} with the variable \code{x} #' @param x Either a matrix of variables whose multiple correlation with each other is to be estimated; or a vector of which the multiple correlation with variables in \code{ymat} is to be estimated #' @param ymat a matrix or data.frame of variables of which the multiple correlation with \code{x} is to be estimated #' @param use optional character indicating how to handle missing values (see \link{cor}) #' #' @return The multiple correlation coefficient #' @export #' @seealso https://www.personality-project.org/r/book/chapter5.pdf #' #' @examples #' multiple.cor(mtcars[,1],mtcars[,2:4]) multiple.cor<-function(x,ymat,use="everything"){ if(missing(ymat)){ cv<-cor(x,use=use) corvec<-numeric(ncol(x)) for(i in seq_along(corvec)){ gfvec<-cv[(1:nrow(cv))[-i],i] dcm<-cv[(1:nrow(cv))[-i],(1:ncol(cv))[-i]] rsq<-t(gfvec) %*% solve(dcm) %*% gfvec corvec[i]<-sqrt(as.vector(rsq)) } names(corvec)<-colnames(cv) return(corvec) }else{ cv<-cor(cbind(x,ymat),use=use) gfvec<-cv[2:nrow(cv),1] dcm<-cv[2:nrow(cv),2:ncol(cv)] rsq<-t(gfvec) %*% solve(dcm) %*% gfvec return(sqrt(as.vector(rsq))) } } #negative reliability in split-half aat occurs when the subtracted components correlate too positively with each other #' Covariance matrix computation with multiple imputation #' #' This function computes a covariance matrix from data with some values missing at random. #' The code was written by Eric from StackExchange. 
https://stats.stackexchange.com/questions/182718/ml-covariance-estimation-from-expectation-maximization-with-missing-data #' @param dat_missing a matrix with missing values #' @param iters the number of iterations to perform to estimate missing values #' @references Beale, E. M. L., & Little, R. J. A.. (1975). Missing Values in Multivariate Analysis. Journal of the Royal Statistical Society. Series B (methodological), 37(1), 129–145. #' @export #' @examples #' # make data with missing values #' missing_mtcars <- mtcars #' for(i in 1:20){ #' missing_mtcars[sample(1:nrow(mtcars),1),sample(1:ncol(mtcars),1)]<-NA #' } #' covmat<-covEM(as.matrix(missing_mtcars))$sigma #' calpha(covmat) covEM<-function(dat_missing,iters=1000){ if(!anyNA(dat_missing)){ return(list(sigma=cov(dat_missing),data=dat_missing)) } n <- nrow(dat_missing) nvar <- ncol(dat_missing) is_na <- apply(dat_missing,2,is.na) # index if NAs dat_impute <- dat_missing # data matrix for imputation # set initial estimates to means from available data for(i in 1:ncol(dat_impute)){ dat_impute[is_na[,i],i] <- colMeans(dat_missing,na.rm = TRUE)[i] } # starting values for EM means <- colMeans(dat_impute) # NOTE: multiplying by (nrow-1)/(nrow) to get ML estimate sigma <- cov(dat_impute)*(nrow(dat_impute)-1)/nrow(dat_impute) # carry out EM over 100 iterations for(j in 1:iters){ bias <- matrix(0,nvar,nvar) for(i in 1:n){ row_dat <- dat_missing[i,] miss <- which(is.na(row_dat)) if(length(miss)>0){ bias[miss,miss] <- bias[miss,miss] + sigma[miss,miss] - sigma[miss,-miss] %*% solve(sigma[-miss,-miss]) %*% sigma[-miss,miss] dat_impute[i,miss] <- means[miss] + (sigma[miss,-miss] %*% solve(sigma[-miss,-miss])) %*% (row_dat[-miss]-means[-miss]) } } # get updated means and covariance matrix means <- colMeans(dat_impute) biased_sigma <- cov(dat_impute)*(n-1)/n # correct for bias in covariance matrix sigma <- biased_sigma + bias/n } return(list(sigma=sigma,data=dat_impute)) }
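# Illustrative sanity check (not run): with a single control variable, partial.cor() is
# equivalent to correlating the residuals of x and y after regressing each on z.
# x <- mtcars$mpg; y <- mtcars$cyl; z <- mtcars$disp
# partial.cor(x, y, z)
# cor(resid(lm(x ~ z)), resid(lm(y ~ z)))  # should match up to floating-point error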
/scratch/gouwar.j/cran-all/cranData/AATtools/R/cortools.R
#' AAT examining approach bias for erotic stimuli #' #' AAT #' #' @docType data #' #' @usage erotica #' #' @format An object of class \code{"data.frame"} #' #' @keywords datasets #' #' @references Kahveci, S., Van Bockstaele, B.D., & Wiers, R.W. (in preparation). #' Pulling for Pleasure? Erotic Approach-Bias Associated With Porn Use, Not Problems. DOI:10.17605/OSF.IO/6H2RJ #' #' @source \href{https://osf.io/6h2rj/}{osf.io repository} #' "erotica" # erotica<-read.csv("./../data/erotica.csv") # erotica$subject%<>%as.factor() # erotica%>%dplyr::group_by(subject)%>%dplyr::summarise(meanrt=mean(RT),sdrt=sd(RT),ct=n()) # erotica%<>%dplyr::filter(!(subject %in% c(13, 42,40,32,55))) # save(erotica,file="./data/erotica.RData")
/scratch/gouwar.j/cran-all/cranData/AATtools/R/data.R
balancedrandombinary<-function(n){ keys<-rep(c(0,1),floor(n/2)) if(n%%2){ keys<-c(keys,NA) } keys[sample.int(length(keys))] } splitsweep<-function(currsplitset){ h<-tapply(seq_len(nrow(currsplitset)),currsplitset,function(x){ cbind(x,balancedrandombinary(length(x))) },simplify=F) h<-do.call(rbind,h) currkey<-numeric(nrow(h)) currkey[h[,1]]<-h[,2] currkey } datasplitter<-function(splitset){ validcols<-ncol(splitset) key<-splitsweep(splitset) while(anyNA(key) & validcols>0){ whichna<-is.na(key) key[whichna]<-splitsweep(as.data.frame(splitset[whichna,1:validcols])) validcols<-validcols-1 } key[is.na(key)]<-sample( (seq_len(sum(is.na(key)))+sample(0:1,1)) %%2) key }
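# Illustrative sketch (not run): what datasplitter() produces. Given a data.frame of
# stratification columns, it returns a 0/1 key that is (near-)balanced within every cell;
# this is how aat_splithalf() assigns trials to halves. "ds" and "key" are placeholder names.
# ds <- aat_simulate()
# key <- datasplitter(ds[, c("subj", "is_pull", "is_target")])
# round(tapply(key, list(ds$is_pull, ds$is_target), mean), 2)  # close to .5 in every cell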
/scratch/gouwar.j/cran-all/cranData/AATtools/R/datasplitter.R
serr<-function(x,na.rm=T){sqrt(var(x,na.rm=na.rm)/sum(!is.na(x)))} FlanaganRulonBilateral<-function(x1,x2){ key<-!is.na(x1) & !is.na(x2) x1<-x1[key] x2<-x2[key] fr<-(1-var(x1-x2)/var(x1+x2)) return(fr/max(1, 1-fr)) } RajuBilateral<-function(x1,x2,prop){ covar<-cov(x1,x2) sumvar<-var(x1)+var(x2)+2*abs(covar) raju<-covar / (prop * (1-prop) * sumvar) return(raju) } FlanaganRulonStandard<-function(x1,x2){ (1-var(x1-x2)/var(x1+x2)) } RajuStandard<-function(x1,x2,prop){ covar<-cov(x1,x2) sumvar<-var(x1)+var(x2)+2*covar covar / (prop * (1-prop) * sumvar) } vec.sd<-function(x,na.rm=F){ if(na.rm){x<-na.omit(x)} sqrt(sum((x-mean.default(x))^2) / (length(x)-1)) } vec.scale<-function(x){ xt<-na.omit(x) m<-mean.default(xt) (x-m)/sqrt((sum((xt-m)^2)/(length(xt)-1))) } vec.madscale<-function(x){ (x-median.default(x,na.rm=T))/mad(x,na.rm=T) } val_between<-function(x,lb,ub){x>lb & x<ub} lim<-function(x,minx,maxx){ x[x<minx]<-minx; x[x>maxx]<-maxx; x } drop_empty_cases<-function(iterds,subjvar){ ids<-vapply(split(iterds$key,iterds[[subjvar]]), FUN=function(x){any(x==1)&any(x==0)}, FUN.VALUE=FALSE) outds<-iterds[which(iterds[[subjvar]] %in% names(ids)[ids]),] outds[[subjvar]]<-droplevels(outds[[subjvar]]) outds } form2char<-function(x){ if(is.character(x)){ return(x) } fs<-as.character(x) fs<-paste(fs[2],fs[1],fs[3]) return(fs) } is.formula <- function(x){ inherits(x,"formula") } mf<-function(x,digits=2){ s<-format(round(x,digits=digits), digits=digits,drop0trailing=T,scientific=F,nsmall=digits) s<-gsub("^0\\.","\\.",s) return(s) } r_check_limit_cores <- function() { Rcheck <- tolower(Sys.getenv("_R_CHECK_LIMIT_CORES_", "")) return((nchar(Rcheck[1]) > 0) & (Rcheck != "false")) } unregisterDoParallel <- function(cluster) { stopCluster(cluster) registerDoSEQ() #env <- foreach:::.foreachGlobals #rm(list=ls(name=env), pos=env) } aat_preparedata<-function(ds,subjvar,pullvar,targetvar=NULL,rtvar,stratvars=NULL,...){ args<-list(...) cols<-c(subjvar,pullvar,targetvar,rtvar,stratvars,args$errorvar,args$blockvar,args$stimvar) if("formula" %in% names(args)){ formterms <- args$formula %>% as.formula() %>% terms() %>% attr("variables") %>% as.character() formterms <- formterms[-1] if(any(!(formterms %in% colnames(ds)))){ stop("Formula term(s) ",paste(formterms[!(formterms %in% colnames(ds))],collapse=", ")," missing from dataset") } cols <- c(cols,formterms) } missingcols<-!(cols %in% colnames(ds)) if(any(missingcols)){ stop("Missing column(s) in dataset: ",paste0(cols[missingcols],collapse=" ")) } ds<-ds[,cols] ds[[subjvar]]%<>%as.factor() if(is.logical(ds[,pullvar])){ warning("Recoded ",pullvar," from logical to numeric. Please make sure that FALSE ", "represents push trials and TRUE represents pull trials") ds[,pullvar]%<>%as.numeric() } if(is.factor(ds[,pullvar])){ warning("Recoded ",pullvar," from factor to numeric. Please make sure that ", levels(ds[,pullvar])[1], " represents push trials and ",levels(ds[,pullvar])[2], " represents pull trials") ds[,pullvar]<-as.numeric(ds[,pullvar])-1 } if(!is.null(targetvar)){ if(is.logical(ds[,targetvar])){ warning("Recoded ",targetvar," from logical to numeric. Please make sure that FALSE ", "represents control/neutral stimuli and TRUE represents target stimuli") ds[,targetvar]%<>%as.numeric() } if(is.factor(ds[,targetvar])){ warning("Recoded ",targetvar," from factor to numeric. 
Please make sure that ", levels(ds[,targetvar])[1], " represents control/neutral stimuli and ", levels(ds[,targetvar])[2], " represents target stimuli") ds[,targetvar]<-as.numeric(ds[,targetvar])-1 } } rmindices <- ds[,cols] %>% lapply(FUN=is.na) %>% as.data.frame %>% apply(MARGIN=1,FUN=any) %>% which if(length(rmindices)>0){ ds<-ds[-rmindices,] warning("Removed ",length(rmindices), " rows due to presence of NA in critical variable(s)") } return(ds) }
/scratch/gouwar.j/cran-all/cranData/AATtools/R/helpers.R
# Outlier removing algorithms #### #' @title Pre-processing rules #' @description These are pre-processing rules that can be used in \link{aat_splithalf}, \link{aat_bootstrap}, and \link{aat_compute}. #' #' \itemize{ #' \item The following rules are to be used for the \code{trialdropfunc} argument. #' The way you handle outliers for the reliability computation and bootstrapping more broadly #' should mimic the way you do it in your regular analyses. #' It is recommended to exclude outlying trials when computing AAT scores using the mean double-difference scores and regression scoring approaches, #' but not when using d-scores or median double-difference scores. #' \itemize{ #' \item \code{prune_nothing} excludes no trials (default) #' \item \code{trial_prune_3SD} excludes trials deviating more than 3SD from the mean per participant. #' \item \code{trial_prune_3MAD} excludes trials deviating more than 3 median absolute deviations from the median per participant. #' \item \code{trial_prune_grubbs} applies a Grubbs' test to the data, removing one outlier at a time until the test is no longer significant. #' \item \code{trial_prune_SD_dropcases} removes trials deviating more than a specific number of standard deviations from the participant's mean, #' and removes participants with an excessive percentage of outliers. #' Required arguments: #' \itemize{ #' \item \code{trialsd} - trials deviating more than \code{trialsd} standard deviations from the participant's mean are excluded (optional; default is 3) #' \item \code{maxoutliers} - participants with a higher percentage of outliers are removed from the data. (optional; default is .15) #' } #' \item \code{trial_recode_SD} recodes outlying reaction times to the nearest non-outlying value, #' with outliers defined as reaction times deviating more than a certain number of standard deviations from the participant's mean. Required argument: #' \itemize{ #' \item \code{trialsd} - trials deviating more than this many standard deviations from the mean are classified as outliers. #' } #' \item \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample} remove trials below and/or above certain percentiles, #' on a subject-by-subject basis or sample-wide, respectively. The following arguments are available: #' \itemize{ #' \item \code{lowerpercent} and \code{upperpercent} (optional; defaults are .01 and .99). #' } #' } #' \item The following pre-processing rules are to be used for the \code{errortrialfunc} argument. #' They determine what is to be done with errors - remove or recode? #' #' \itemize{ #' \item \code{prune_nothing} removes no errors (default). #' \item \code{error_replace_blockmeanplus} replaces error trial reaction times with the block mean, plus an arbitrary extra quantity. #' If used, the following additional arguments are required: #' \itemize{ #' \item \code{blockvar} - Quoted name of the block variable (mandatory) #' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory) #' \item \code{errorbonus} - Amount to add to the reaction time of error trials. Default is 0.6 (recommended by \code{Greenwald, Nosek, & Banaji, 2003}) #' } #' \item \code{error_prune_dropcases} removes errors and drops participants if they have more errors than a given percentage.
The following arguments are available: #' \itemize{ #' \item \code{errorvar} - Quoted name of the error variable, where errors are 1 or TRUE and correct trials are 0 or FALSE (mandatory) #' \item \code{maxerrors} - participants with a higher percentage of errors are excluded from the dataset. Default is .15. #' } #' } #' \item These are pre-processing rules to be used for the \code{casedropfunc} argument. #' The way you handle outliers here should mimic the way you do it in your regular analyses. #' \itemize{ #' \item \code{prune_nothing} excludes no participants (default) #' \item \code{case_prune_3SD} excludes participants deviating more than 3SD from the sample mean. #' } #' } #' @param ds A data.frame. #' @param subjvar The name of the subject variable. #' @param rtvar The name of the reaction time variable. #' @param blockvar The name of the block variable. #' @param errorvar The name of the error variable. #' @param lowerpercent,upperpercent for \code{trial_prune_percent_subject} and \code{trial_prune_percent_sample}, #' the lower and upper proportions beyond which trials are considered outliers and removed (defaults to .01 and .99). #' @param trialsd The amount of deviation from the participant mean (in SD) after which a trial is considered an outlier and excluded (defaults to 3). #' @param maxoutliers for \code{trial_prune_SD_dropcases}, the maximum percentage of outliers, after which a participant is excluded from the data. #' @param errorbonus for \code{error_replace_blockmeanplus}, the amount of seconds to add to the block mean #' and use as a replacement for error trial reaction times (default is 0.6). #' @param maxerrors for \code{error_prune_dropcases}, the maximum percentage of errors, after which a participant is excluded from the data. #' @param ... Other arguments (ignored). 
#' @name Preprocessing NULL #' @export #' @rdname Preprocessing prune_nothing<-function(ds,...){ ds } #' @export #' @rdname Preprocessing trial_prune_percent_subject<-function(ds,subjvar,rtvar,lowerpercent=.01,upperpercent=.99,...){ ds %>% group_by(!!sym(subjvar),key) %>% mutate(percentile=(rank(!!sym(rtvar))-1)/(n()-1)) %>% filter(.data$percentile > lowerpercent & .data$percentile< upperpercent) %>% ungroup() } trial_prune_percent_subject_alt<-function(ds,subjvar,rtvar,lowerpercent=.01,upperpercent=.99,...){ ds$percentile <- ave(ds[[rtvar]],ds[[subjvar]],ds[["key"]],FUN=function(x){ (rank(x)-1)/(length(x)-1) }) ds[ds$percentile > lowerpercent & ds$percentile < upperpercent,] } #' @export #' @rdname Preprocessing trial_prune_percent_sample<-function(ds,rtvar,lowerpercent=.01,upperpercent=.99,...){ ds %>% group_by(key) %>% mutate(percentile=(rank(!!sym(rtvar))-1)/(n()-1)) %>% filter(.data$percentile > lowerpercent & .data$percentile< upperpercent) %>% ungroup() } trial_prune_percent_sample_alt<-function(ds,rtvar,lowerpercent=.01,upperpercent=.99,...){ ds$percentile <- ave(ds[[rtvar]],ds[["key"]],FUN=function(x){ (rank(x)-1)/(length(x)-1) }) ds[(ds$percentile > lowerpercent & ds$percentile < upperpercent),] } #' @export #' @rdname Preprocessing trial_prune_3SD<-function(ds,subjvar,rtvar,...){ ds %>% group_by(!!sym(subjvar),key) %>% filter(abs(vec.scale(!!sym(rtvar))) <3) %>% ungroup() } #' @export #' @rdname Preprocessing trial_prune_3MAD<-function(ds,subjvar,rtvar,...){ ds %>% group_by(!!sym(subjvar),key) %>% filter(abs(vec.madscale(!!sym(rtvar))) <3) %>% ungroup() } trial_prune_3SD_alt<-function(ds,subjvar,rtvar,...){ h<-ave(ds[[rtvar]],ds[[subjvar]],ds[["key"]],FUN=vec.scale) ds[which(abs(h)<3),] } #' @export #' @rdname Preprocessing trial_prune_SD_dropcases<-function(ds,subjvar,rtvar,trialsd=3,maxoutliers=.15,...){ ds %>% group_by(!!sym(subjvar),key) %>% mutate(is.ol=as.numeric(abs(vec.scale(!!sym(rtvar))) >=trialsd), avg.ol=mean.default(.data$is.ol)) %>% ungroup() %>% filter(.data$is.ol==0 & .data$avg.ol<maxoutliers) } #' @export #' @rdname Preprocessing trial_recode_SD<-function(ds,subjvar,rtvar,trialsd=3,...){ dsa<- ds %>% group_by(!!sym(subjvar),key) %>% mutate(ol.z.score=vec.scale(!!sym(rtvar)), ol.type=(.data$ol.z.score >= trialsd) - (.data$ol.z.score <= -trialsd), is.ol=abs(.data$ol.type), ol.max.rt=mean.default(!!sym(rtvar))+vec.sd(!!sym(rtvar))*trialsd, ol.min.rt=mean.default(!!sym(rtvar))-vec.sd(!!sym(rtvar))*trialsd) dsa[which(dsa$is.ol!=0),rtvar]<-ifelse(dsa[which(dsa$is.ol!=0),]$ol.type==1, dsa[which(dsa$is.ol!=0),]$ol.max.rt, dsa[which(dsa$is.ol!=0),]$ol.min.rt) #dsa %>% dplyr::select(-.data$ol.type,-.data$ol.max.rt,-.data$ol.min.rt,-.data$ol.z.score) dsa$ol.type<-dsa$ol.max.rt<-dsa$ol.min.rt<-dsa$ol.z.score<-NULL return(dsa) } trial_recode_SD_alt<-function(ds,subjvar,rtvar,trialsd=3,...){ ds$ol.grmean<-ave(ds[[rtvar]],ds[[subjvar]],ds$key,FUN=mean.default) ds$ol.grsd<-ave(ds[[rtvar]],ds[[subjvar]],ds$key,FUN=vec.sd) ds$ol.z.score<-(ds[[rtvar]]-ds$ol.grmean)/ds$ol.grsd ds$is.ol<-abs(ds$ol.z.score)>=trialsd ds[[rtvar]]<-(!ds$is.ol)*ds[[rtvar]] + ds$is.ol*(ds$ol.grmean+sign(ds$ol.z.score)*ds$ol.grsd*trialsd) ds$ol.grmean<-ds$ol.grsd<-ds$ol.z.score<-ds$ol.type<-NULL ds } #' @export #' @rdname Preprocessing trial_prune_grubbs<-function(ds,subjvar,rtvar,...){ ds %>% group_by(!!sym(subjvar)) %>% filter(!grubbsFilter(!!sym(rtvar))) %>% ungroup() } grubbsFilter<-function(x,alphalevel=.05){ pval<-0 is.ol<-rep(F,length(x)) while(pval<alphalevel & sum(is.ol) < length(x)){ 
scaled<-vec.scale(x[!is.ol]) biggest<-which.max(abs(scaled)) pval<-pgrubbs(scaled[biggest],sum(!is.ol)) if(pval<alphalevel) is.ol[!is.ol][biggest]<-T } is.ol } # Borrowed from the {outliers} package pgrubbs<-function(p,n){ s <- (p^2 * n * (2 - n))/(p^2 * n - (n - 1)^2) t <- sqrt(s) if (is.nan(t)) { res <- 0 } else { res <- n * (1 - pt(t, n - 2)) res[res > 1] <- 1 } return(res) } #' @export #' @rdname Preprocessing case_prune_3SD<-function(ds,...){ ds[which(abs(vec.scale(ds$abhalf0))<3 & abs(vec.scale(ds$abhalf1))<3),] } #Replace error trial latencies with correct block mean RT + 600 #' @export #' @rdname Preprocessing error_replace_blockmeanplus<-function(ds,subjvar,rtvar,blockvar,errorvar,errorbonus, ...){ if(!("is.ol" %in% colnames(ds))){ ds$is.ol<-0 } ds%<>%group_by(!!sym(subjvar),!!sym(blockvar), key)%>% mutate(newrt=mean.default((!!sym(rtvar))[!(!!sym(errorvar)) & .data$is.ol==0])+errorbonus)%>%ungroup() errids<-which(ds[[errorvar]]==1) ds[[rtvar]][errids]<-ds$newrt[errids] ds$newrt<-NULL ds } error_replace_blockmeanplus_alt<-function(ds,subjvar,rtvar,blockvar,errorvar,errorbonus, ...){ if(!("is.ol" %in% colnames(ds))){ ds$is.ol<-0 } ds$.corrmean<-ave(ds$rt+ifelse(!ds[[errorvar]] & !ds$is.ol,0,NA), ds[[rtvar]],ds[[blockvar]],ds[["key"]], FUN=function(x){mean.default(x[!is.na(x)])}) ds[[rtvar]][ds[[errorvar]]==TRUE]<-ds$.corrmean[ds[[errorvar]]==TRUE]+errorbonus ds$.corrmean<-NULL ds } #' @export #' @rdname Preprocessing error_prune_dropcases<-function(ds,subjvar, errorvar, maxerrors = .15, ...){ ds%>%group_by(!!sym(subjvar), key)%>% filter(mean.default(!!sym(errorvar))<maxerrors & !!sym(errorvar) == FALSE) } error_prune_dropcases_alt<-function(ds,subjvar, errorvar, maxerrors = .15, ...){ ds$merr<-ave(ds[[errorvar]],ds[[subjvar]],ds$key,FUN=mean.default) ds[which(ds$merr<maxerrors & !ds[[errorvar]]),] }
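# A self-contained sketch of the per-participant 3SD pruning idea behind
# trial_prune_3SD(). The package versions additionally group by an internal
# `key` column used for split-half computation; this illustration groups by
# participant only and uses made-up data (wrapped in if (FALSE) so it never runs).
if (FALSE) {
  set.seed(1)
  toy <- data.frame(subject = rep(1:4, each = 50),
                    RT = rnorm(200, mean = 600, sd = 100))
  z <- ave(toy$RT, toy$subject, FUN = function(x) (x - mean(x)) / sd(x))
  pruned <- toy[abs(z) < 3, ]
  nrow(toy) - nrow(pruned)  # number of excluded trials
}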
/scratch/gouwar.j/cran-all/cranData/AATtools/R/outlierhandlers.R
#' Compute psychological experiment reliability #' @description This function can be used to compute an exact reliability score for a psychological task whose results involve a difference score. #' The resulting intraclass correlation coefficient is equivalent to the average of all possible split-half reliability scores. #' It ranges from -1 to 1, with -1 implying that all variance in the data is explained by within-subjects variability, #' 1 implying that all variance is explained by between-subjects variability, #' and 0 implying that within-subjects and between-subjects variability contribute equally to the total variance in the sample. #' @param ds a long-format data.frame #' @param subjvar name of the subject variable #' @param formula a formula predicting the participant's reaction time using trial-level variables such as movement direction and stimulus category #' @param aatterm a string denoting the term in the formula that contains the participant's approach bias #' #' @return a qreliability object, containing the reliability coefficient, #' and a data.frame with participants' bias scores and score variance. #' #' Please note that the valence of the bias scores may or may not correspond with #' approach and avoidance. If you plan to use these scores in your analyses, #' always verify that they are in the right direction by correlating them with #' independently calculated bias scores, for example using \code{aat_compute()}. #' #' @export #' @author Sercan Kahveci #' @examples #' # Double-difference score reliability #' q_reliability(ds=erotica,subjvar="subject", #' formula= RT ~ is_pull * is_target, aatterm = "is_pull:is_target") #' #' # Single-difference reliability for target stimuli #' q_reliability(ds=erotica[erotica$is_target ==1,],subjvar="subject", #' formula= RT ~ is_pull, aatterm = "is_pull") #' #' # Reliability of the mean reaction time of approaching target stimuli (no difference score) #' q_reliability(ds=erotica[erotica$is_target ==1 & erotica$is_pull ==1,],subjvar="subject", #' formula= RT ~ 1, aatterm = "1") #' q_reliability<-function(ds,subjvar,formula,aatterm=NA){ # argument checks cols<-c(subjvar,as.character(attr(terms(formula),"variables"))[-1]) stopifnot(all(cols %in% colnames(ds))) ds<-ds[apply(!is.na(ds[,cols]),MARGIN=1,FUN=all),] if(aatterm=="1"){ aatterm<-NA } # functional part coefs<-data.frame(pp=unique(ds[[subjvar]]),ab=NA,var=NA) for(u in 1:nrow(coefs)){ iterset<-ds[ds[[subjvar]]==coefs[u,]$pp,] mod<-lm(formula,data=iterset) coefs[u,]$ab <- -coef(mod)[ifelse(is.na(aatterm),length(coef(mod)),aatterm)] coefs[u,]$var <- (diag(vcov(mod)))[ifelse(is.na(aatterm),length(coef(mod)),aatterm)] # squared standard error } bv<-var(coefs$ab,na.rm=TRUE) wv<-mean(coefs$var,na.rm=TRUE) q<-(bv-wv)/(bv) return(structure(list(q=q,coefs=coefs),class="qreliability")) } #' @rdname q_reliability #' @param splitvars Vector of column names over which to split the data #' to compute difference scores. This can be used to compute the #' reliability of single, double, or even triple difference scores.
#' @param rtvar Column name of the variable containing reaction times #' @param dscore If true, reliability will be computed for a difference score #' that is divided by the subject's standard deviation (as in D-scores) #' @param na.rm If true, remove rows with missing values from the data #' @export #' @examples #' q_reliability2(ds=erotica,subjvar="subject", #' splitvars=c("is_pull", "is_target"),rtvar="RT") q_reliability2<-function(ds,subjvar,splitvars,rtvar,dscore=F,na.rm=F){ #remove missing if(na.rm){ ds<-ds[,c(subjvar,rtvar,splitvars)] ds<-ds[rowSums(is.na(ds))<1,] } #divide RTs by person-specific SD to make it possible to compute D-score by simply # doing a double mean difference if(dscore){ sds<-tapply(ds[[rtvar]],ds[[subjvar]],sd) ds[[rtvar]]<-ds[[rtvar]]/sds[as.character(ds[[subjvar]])] } #scores sc<-tapply(X=ds[[rtvar]], INDEX = ds[,c(splitvars,subjvar)], FUN=mean) if(!all(dim(sc)[-length(dim(sc))]==2)){ stop("Not all split variables consist of only 2 values.") } while(length(dim(sc))>1){ sc<-arrextract(sc,1,1)-arrextract(sc,1,2) } #variances variances<-tapply(X=ds[[rtvar]], INDEX = ds[,c(splitvars,subjvar)], FUN=function(x){var(x)/length(x)}) %>% apply(X=.,MARGIN=length(dim(.)),sum) #remove missing unmissing<-which(!is.na(variances) & !is.na(sc)) sc<-sc[unmissing] variances<-variances[unmissing] #rel bv<-var(sc) wv<-mean(variances) rel<-(bv-wv)/(bv) #output return(structure(list(q=rel,coefs=data.frame(pp=names(sc), bias=sc, var=variances)), class="qreliability")) } #borrowed from stackoverflow arrextract <- function(A, .dim, .value){ idx.list <- lapply(dim(A), seq_len) idx.list[[.dim]] <- .value do.call(`[`, c(list(A), idx.list)) } #' @export #' @rdname q_reliability #' @param x a \code{qreliability} object #' @param ... Other arguments passed to the generic \code{print} and \code{plot} functions. print.qreliability<-function(x,...){ cat("q = ",x$q,"\n",sep="") } #' @export #' @rdname q_reliability #' @param x a \code{qreliability} object #' @param ... Other arguments passed to the generic \code{print} and \code{plot} functions. plot.qreliability<-function(x,...){ bv<-var(x$coefs$ab,na.rm=TRUE) / nrow(x$coefs)*1.96 *2 wv<-mean(x$coefs$var,na.rm=TRUE) / nrow(x$coefs)*1.96 *2 plotset<-data.frame(x=mean(x$coefs$ab) + cos(0:100 / 100 * 2*pi)*bv * 1/2*sqrt(2) - sin(0:100 / 100 * 2*pi)*wv * 1/2*sqrt(2), y=mean(x$coefs$ab) + cos(0:100 / 100 * 2*pi)*bv * 1/2*sqrt(2) + sin(0:100 / 100 * 2*pi)*wv * 1/2*sqrt(2)) plot(plotset$x,plotset$y,type="l",main=paste0("Reliability\n","q = ",round(x$q,digits=2)),xlab="Participants' scores",ylab="Participants' scores") points(x$coefs$ab,x$coefs$ab) dispval<-(bv+wv)/100 plotset<-data.frame(xstart=c(x$coefs$ab+dispval,x$coefs$ab-dispval), ystart=c(x$coefs$ab-dispval,x$coefs$ab+dispval), xend=c(x$coefs$ab+sqrt(x$coefs$var) *1/2*sqrt(2), x$coefs$ab-sqrt(x$coefs$var) *1/2*sqrt(2)), yend=c(x$coefs$ab-sqrt(x$coefs$var) *1/2*sqrt(2), x$coefs$ab+sqrt(x$coefs$var) *1/2*sqrt(2))) segments(plotset$xstart,plotset$ystart,plotset$xend,plotset$yend) }
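# A worked sketch of the coefficient returned above: q = (between-subject variance
# of the bias scores minus the mean within-subject error variance) divided by the
# between-subject variance. The numbers are illustrative only (if (FALSE) guard).
if (FALSE) {
  bias   <- c(20, 35, 10, 50, 25)  # per-participant bias scores
  errvar <- c(40, 55, 60, 45, 50)  # per-participant squared standard errors
  bv <- var(bias)
  wv <- mean(errvar)
  (bv - wv) / bv                   # the q reliability coefficient
}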
/scratch/gouwar.j/cran-all/cranData/AATtools/R/q_reliability.R
# utils #### #' @name splitrel #' @title Split Half-Based Reliability Coefficients #' @seealso \link{covrel} NULL #' @describeIn splitrel Perform a Spearman-Brown correction on the provided correlation score. #' #' @param corr To-be-corrected correlation coefficient #' @param ntests An integer indicating how many times larger the full test is, for which the corrected correlation coefficient is being computed. #' When \code{ntests=2}, the formula will compute what the correlation coefficient would be if the test were twice as long. #' @param fix.negative Determines how to deal with a negative value. "nullify" sets it to zero, #' "bilateral" applies the correction as if it were a positive number, and then sets it to negative. #' "none" gives the raw value. It should be noted that negative values are not supposed to occur, #' and there is no commonly accepted way to deal with them when they do occur. #' @return Spearman-Brown-corrected correlation coefficient. #' @export #' #' @examples #' #' SpearmanBrown(.5) SpearmanBrown<-function(corr,ntests=2,fix.negative=c("none","nullify","bilateral")){ fix.negative<-match.arg(fix.negative) if(fix.negative=="bilateral"){ s<-sign(corr) corr<-abs(corr) sb<-ntests*corr / (1+(ntests-1)*corr) return(s*sb) }else{ sb<-ntests*corr / (1+(ntests-1)*corr) if(fix.negative=="nullify"){ return(ifelse(sb<0,0,sb)) }else{ return(sb) } } } #' @describeIn splitrel Compute the true reliability using the Flanagan-Rulon formula, #' which takes into account unequal variances between split halves. #' @param x1 scores from half 1 #' @param x2 scores from half 2 #' @export #' #' @examples #' FlanaganRulon(a<-rnorm(50),rnorm(50)+a*.5,fix.negative="bilateral") FlanaganRulon<-function(x1,x2,fix.negative=c("none","nullify","bilateral")){ fix.negative<-match.arg(fix.negative) d<-var(x1-x2) k<-var(x1+x2) if(fix.negative=="none"){ return(1-d/k) }else if(fix.negative=="bilateral"){ fr<-(1-d/k) #fr<-ifelse(fr>0,fr,fr / (1-fr)) fr<-fr/max(1, 1-fr) return(fr) }else if(fix.negative=="nullify"){ fr<-1-d/k return(ifelse(fr>0,fr,0)) } } #' @describeIn splitrel Compute split-half reliability using the Raju formula, #' which takes into account unequal split-halves and variances. #' #' @param prop Proportion of the first half to the complete sample #' #' @export #' #' @examples #' a<-rnorm(50) #' b<-rnorm(50)+a*.5 #' RajuCoefficient(a,b,prop=.4,fix.negative="bilateral") RajuCoefficient<-function(x1,x2,prop,fix.negative=c("none","nullify","bilateral")){ fix.negative<-match.arg(fix.negative) covar<-cov(x1,x2) if(fix.negative=="bilateral"){ sumvar<-var(x1)+var(x2)+2*abs(covar) }else{ sumvar<-var(x1)+var(x2)+2*covar } raju<-covar / (prop * (1-prop) * sumvar) return(ifelse(fix.negative=="nullify" & raju<0,0,raju)) } #' @name covrel #' @title Covariance Matrix-Based Reliability Coefficients #' @description These functions allow for the computation of the reliability of a dataset #' from the covariance matrix of its variables. #' @seealso \link{splitrel} #' @examples #' # compute reliability from covariance #' h<-cov(iris[,1:4]) #' calpha(h) #' lambda2(h) #' lambda4(h) #' # Lambda-2 and Lambda-4 are significantly larger because #' # some of the variables in the iris dataset are negatively correlated.
NULL #' @describeIn covrel Cronbach's alpha #' @param covmat a covariance matrix #' @export calpha<-function(covmat){ (nrow(covmat)/(nrow(covmat)-1))*(1 - sum(diag(covmat))/sum(covmat)) } #' @describeIn covrel Guttman's Lambda-2 #' @export lambda2<-function(covmat){ offs<-covmat[upper.tri(covmat)] covs<-2*sum(offs) sqcov<-2*sum(offs^2) sums<-sum(covmat) n<-dim(covmat)[1] covs/sums + sqrt(n/(n-1)*sqcov)/sums } #' @describeIn covrel Guttman's Lambda-4. This algorithm tries to get the highest attainable reliability by flipping the sign of individual variables, one at a time, until no single sign flip further increases the coefficient. #' @export lambda4<-function(covmat){ flip<-rep(1,ncol(covmat)) itermaxid<- -1 itermax<- -1 while(itermaxid != 0){ itermaxid<-0 for(i in seq_along(flip)){ key<-rep(1,ncol(covmat)) key[i]<- -1 itera<-calpha(t(t(covmat*flip*key)*flip*key)) if(itera>itermax){ itermax<-itera itermaxid<-i } } if(itermaxid>0){ flip[itermaxid]<- -flip[itermaxid] } } calpha(t(t(covmat*flip)*flip)) }
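# A quick numeric sketch relating SpearmanBrown() to the underlying formula
# r_full = n * r / (1 + (n - 1) * r), and applying calpha() to a toy covariance
# matrix built from a built-in dataset (if (FALSE) guard, illustration only).
if (FALSE) {
  r <- 0.5
  SpearmanBrown(r)   # 2 * 0.5 / (1 + 0.5) = 0.667
  2 * r / (1 + r)    # same value, written out
  h <- cov(iris[, 1:4])
  calpha(h)          # Cronbach's alpha from the covariance matrix
}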
/scratch/gouwar.j/cran-all/cranData/AATtools/R/relcorrections.R
#' @import dplyr #' @import magrittr #' @import doParallel #' @import foreach #' @importFrom magrittr %>% %<>% %$% #' @importFrom dplyr group_by ungroup mutate summarise sample_n n filter select #' @importFrom parallel detectCores makeCluster stopCluster #' @importFrom foreach getDoParRegistered registerDoSEQ #' @importFrom stats var median mad sd lm vcov terms as.formula coef cor cov setNames quantile #' pt rnorm rgamma pnorm qnorm ave median.default na.omit weighted.mean #' @importFrom graphics abline points segments text plot par axis strwidth image .onLoad<-function(libname, pkgname){ #avoid CRAN errors utils::globalVariables(c("abhalf0","abhalf1","ab","key","."),"AATtools") #register generic functions registerS3method("print",class="aat_splithalf",method=print.aat_splithalf) registerS3method("plot",class="aat_splithalf",method=plot.aat_splithalf) registerS3method("print",class="aat_bootstrap",method=print.aat_bootstrap) registerS3method("plot",class="aat_bootstrap",method=plot.aat_bootstrap) registerS3method("print",class="qreliability",method=print.qreliability) registerS3method("plot",class="qreliability",method=plot.qreliability) registerS3method("print",class="aat_covreliability",method=print.aat_covreliability) registerS3method("print",class="aat_covreliability_jackknife",method=print.aat_covreliability_jackknife) registerS3method("plot",class="aat_covreliability_jackknife",method=plot.aat_covreliability_jackknife) #set max number of cores to use if (r_check_limit_cores()) { num_workers <- 2L } else { num_workers <- max(parallel::detectCores(),1L) } options(AATtools.workers=num_workers) #greet user #packageStartupMessage("Thank you for loading AATtools v0.0.1") }
/scratch/gouwar.j/cran-all/cranData/AATtools/R/zzz.R
#' Shiny App to Demonstrate Analysis of Variance #' #' @name shiny_anova #' @aliases shiny_anova #' @description An interactive Shiny app to demonstrate Analysis of Variance. #' @usage shiny_anova() #' #' @details The interactive Shiny app demonstrates the principles of Analysis of Variance. #' The true parameter values are provided by the user. #' The user changes sample characteristics, distribution function and simulation features #' and explores the influence of these changes on the hypothesis testing using principles of analysis of variance. #' #' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters}, #' \strong{Sample Characteristics}, and \strong{Distribution Function}. #' To use the app at first instance, just click the \code{Update} button. #' To alter the input values, edit the text box or move the point on the slider and #' explore the changes in different tabs (see below). #' #' To obtain identical outcomes in a separate run of the app, #' set a common seed value at the bottom of the left panel and click \code{Update}. #' All subsequent updates will produce identical results provided other inputs are identical. #' The seed value is ignored when the option \code{check the box to update instantly} is selected. #' #' @return The outcomes are presented in several tabs. #' \item{Population}{contains the density plots of three populations and #' rug plots of the sample units randomly drawn from these populations. #' It also shows the population parameter values chosen by the user.} #' \item{Sample}{contains the dot plots and box plots of three samples drawn #' randomly from the three populations and rug plots of the sample units. #' It also includes the estimates of mean and standard deviation of three samples.} #' \item{SS & MS}{contains the bar plots showing the between and within sum of squares (SS) #' and mean squares (MS) as well as the proportion of between and within SS over total SS.} #' \item{Test Statistic}{contains the plots showing the mean difference between groups #' and corresponding 95\% confidence intervals (CI). #' The tab also contains the distribution of the test statistic \code{F}, #' the observed value of the test statistic and probabilities #' under the given value of the Type 1 error.} #' \item{Summary}{includes the summary of the sampled data and outcomes #' from the one-way analysis of variance. Different sections are: #' (1) Hypothesis, highlighting the null and alternative hypothesis; #' (2) Sample, tabulating the full sampled data; #' (3) Summary Statistics, summarising the summary information of three samples; #' (4) Model Outputs, the outputs from fitting the analysis of variance model. #' The section also presents the multiple comparison of means using #' Tukey's Honest Significant Differences test.
#' This section represents standard R outputs based on fitting an \code{\link{lm}} function.} #' #' @note \url{https://shiny.abdn.ac.uk/Stats/apps/} #' #' @author Mintu Nath #' #' @seealso Function in base R for normal distribution, F distribution and fitting linear model including #' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}}, #' \code{\link{df}}, \code{\link{pf}}, \code{\link{qf}}, \code{\link{rf}}, #' \code{\link{lm}}, \code{\link{aov}}, #' \code{\link{anova.lm}}, \code{\link{summary.lm}} #' \code{\link{summary.aov}}, \code{\link{model.tables}} #' #' @examples #' if(interactive()){ #' library(ggplot2) #' library(shiny) #' library(ABACUS) #' # Run shiny app #' shiny_anova() #' } #' #' @import shiny #' @import ggplot2 #' @export # Function shiny_anova <- function() { shiny::runApp(appDir = system.file("app_anova", package = "ABACUS"), launch.browser = TRUE) Sys.setenv("shiny_anova" = "") }
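# A minimal non-interactive sketch of the analysis the app demonstrates: a
# one-way ANOVA followed by Tukey's HSD on made-up data (if (FALSE) guard,
# illustration only; the app itself simulates data from user-specified populations).
if (FALSE) {
  set.seed(123)
  dat <- data.frame(group = factor(rep(c("A", "B", "C"), each = 20)),
                    y = rnorm(60, mean = rep(c(10, 12, 15), each = 20), sd = 2))
  fit <- aov(y ~ group, data = dat)
  summary(fit)   # between/within SS, MS and the F test
  TukeyHSD(fit)  # multiple comparison of means
}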
/scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_anova.R
#' Shiny App to Explore Properties of the Normal Distribution #' #' @name shiny_dnorm #' @aliases shiny_dnorm #' @description An interactive Shiny app to demonstrate properties of the Normal distribution. #' @usage shiny_dnorm() #' #' @details The interactive Shiny app demonstrates the properties of Normal distribution. #' The app considers parameters (mean and standard deviation) of the Normal distribution and captures its #' properties using different graphical outputs. #' The user changes the population parameter values, sample characteristics, distribution function and #' simulation features and explores the influence of these changes on the hypothesis testing. #' #' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters}, #' \strong{Sample Characteristics}, and \strong{Distribution Function}. #' To use the app at first instance, just click the \code{Update} button. #' To alter the input values, edit the text box or move the point on the slider and #' explore the changes in different tabs (see below). #' #' To obtain identical outcomes in a separate run of the app, #' set a common seed value at the bottom of the left panel and click \code{Update}. #' All subsequent updates will produce identical results provided other inputs are identical. #' The seed value is ignored when the option \code{check the box to update instantly} is selected. #' #' @return The outcomes are presented in several tabs. #' \item{Sample}{contains the histogram of sampling units randomly drawn from the given population. #' Increasing the sample size and the number of bins creates the shape of the Normal distribution. #' It also creates the normal density plot based on empirical data and #' theoretical normal distribution given the parameter values} #' \item{Distribution}{contains the plot for the probability density function of the Normal distribution #' with given parameter values. #' The user can also explore centring and scaling effect on the probability density function.} #' \item{Probability & Quantile}{contains the plots for the probability density function and #' cumulative probability density function. The user can explore the relationship between the #' cumulative probability and quantile corresponding to tails of the distribution.} #' #' @seealso Function in base R for normal distribution including #' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}}. #' #' @note \url{https://shiny.abdn.ac.uk/Stats/apps/} #' #' @author Mintu Nath #' #' @seealso Function in base R for normal distribution, including #' \code{\link{dnorm}}, \code{\link{pnorm}}, #' \code{\link{qnorm}}, \code{\link{rnorm}} #' #' @examples #' if(interactive()){ #' library(ggplot2) #' library(shiny) #' library(ABACUS) #' # Run shiny app #' shiny_dnorm() #' } #' #' @import shiny #' @import ggplot2 #' @export # Function shiny_dnorm <- function() { shiny::runApp(appDir = system.file("app_dnorm", package = "ABACUS"), launch.browser = TRUE) Sys.setenv("shiny_dnorm" = "") }
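# A minimal non-interactive sketch of the quantities the app visualises:
# density, cumulative probability, the matching quantile and a random sample
# from a Normal distribution. Parameter values are illustrative (if (FALSE) guard).
if (FALSE) {
  mu <- 100; sigma <- 15
  dnorm(115, mean = mu, sd = sigma)        # density at x = 115
  pnorm(115, mean = mu, sd = sigma)        # P(X <= 115)
  qnorm(0.975, mean = mu, sd = sigma)      # quantile with 97.5% of the mass below it
  x <- rnorm(1000, mean = mu, sd = sigma)  # a random sample, as in the Sample tab
  hist(x, breaks = 30)
}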
/scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_dnorm.R
#' Shiny App to Explore Properties of Normal and Student's t Distributions #' #' @name shiny_dnorm_dt #' @aliases shiny_dnorm_dt #' @description An interactive Shiny app to demonstrate Normal and Student's t distributions. #' @usage shiny_dnorm_dt() #' #' @details The interactive Shiny app demonstrates the properties of Normal and Student's t distributions. #' The app considers parameters (mean and standard deviation) of the standard Normal distribution #' along with Student's t distribution given degrees of freedom. #' #' The left panel includes the user inputs for #' \strong{Parameters} of standard Normal distribution (mean = 0, sd = 1) and #' Student's \code{t} distribution (degrees of freedom), and #' \strong{Probability} with options to change cumulative probability and tails of probability. #' To alter the input values, move the point on the slider for the degrees of freedom #' of \code{t} distribution and explore the changes in different tabs (see below). #' #' #' @return The outcomes are presented in two tabs. #' \item{Probability Density Function}{displays the probability density function of #' the standard Normal distribution (red) and \code{t} distribution (blue).} #' \item{Distribution & Probability}{contains the plot for the probability density function of #' the standard Normal distribution and Student's \code{t} distribution with given degrees of freedom.} #' #' #' @note \url{https://shiny.abdn.ac.uk/Stats/apps/} #' #' @author Mintu Nath #' #' @seealso Function in base R for normal distribution, including #' \code{\link{dnorm}}, \code{\link{pnorm}}, #' \code{\link{qnorm}}, \code{\link{rnorm}} #' \code{\link{dt}}, \code{\link{pt}} #' \code{\link{qt}}, \code{\link{rt}} #' #' @examples #' if(interactive()){ #' library(ggplot2) #' library(shiny) #' library(ABACUS) #' # Run shiny app #' shiny_dnorm_dt() #' } #' #' @import shiny #' @import ggplot2 #' @export # Function shiny_dnorm_dt <- function() { shiny::runApp(appDir = system.file("app_dnorm_dt", package = "ABACUS"), launch.browser = TRUE) Sys.setenv("shiny_dnorm_dt" = "") }
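# A minimal sketch of the comparison the app draws: the standard Normal density
# against Student's t density for a chosen degrees of freedom (if (FALSE) guard).
if (FALSE) {
  x <- seq(-4, 4, length.out = 200)
  plot(x, dnorm(x), type = "l", col = "red", ylab = "density")
  lines(x, dt(x, df = 5), col = "blue")  # heavier tails for small df
}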
/scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_dnorm_dt.R
#' Shiny App to Demonstrate One-Sample Student's t-Test #' #' @name shiny_onesampt #' @aliases shiny_onesampt #' @description An interactive Shiny app to demonstrate one-sample Student's t-test. #' @usage shiny_onesampt() #' #' @details The interactive Shiny app demonstrates the principles of the hypothesis testing of means #' in a one-sample design where the population variance is unknown. #' The true population parameters are provided by the user. #' The user changes the hypothesised population mean and other features and explores #' how Student's t-test compares the hypothesised mean #' with the mean of the sample randomly drawn from the population. #' #' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters}, #' \strong{Sample Characteristics}, and \strong{Distribution Function}. #' To use the app at first instance, just click the \code{Update} button. #' To alter the input values, edit the text box or move the point on the slider and #' explore the changes in different tabs (see below). #' #' To obtain identical outcomes in a separate run of the app, #' set a common seed value at the bottom of the left panel and click \code{Update}. #' All subsequent updates will produce identical results provided other inputs are identical. #' The seed value is ignored when the option \code{check the box to update instantly} is selected. #' #' @return The outcomes are presented in several tabs. #' \item{Population}{contains the density plots of the population and #' rug plots of the sample units randomly drawn from the population. #' It also includes the population parameter values chosen by the user.} #' \item{Sample}{contains the dot plot and box plot of the sample drawn #' randomly from the population and rug plot of the sample units. #' It also includes the mean and standard deviation of the random sample.} #' \item{Test Statistic}{presents the plot showing the mean difference #' between the sample mean and hypothesised mean and corresponding 95\% confidence intervals (CI). #' The tab also contains the distribution of the test statistic \code{t} #' with the observed value of the test statistic and probabilities under the given value of the Type 1 error.} #' \item{Summary}{includes the summary of the sampled data and outcomes #' from the one-sample Student's t-test. Different sections are: #' (1) Hypothesis, highlighting the null and alternative hypothesis; #' (2) Sample, tabulating the full sampled data; #' (3) Summary Statistics, summarising the summary information of the sample; #' (4) Test Statistic, presenting the outputs from the one-sample Student's t-test. #' (5) Confidence Interval, highlighting the mean difference and corresponding 95\% confidence intervals (CI).} #' #' @note \url{https://shiny.abdn.ac.uk/Stats/apps/} #' #' @author Mintu Nath #' #' @seealso Function in base R for normal distribution and t distribution including #' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}}, #' \code{\link{dt}}, \code{\link{pt}}, \code{\link{qt}}, \code{\link{rt}} #' The app \code{\link{shiny_onesampz}} performs the hypothesis testing of mean #' when the population variance is known.
#' #' @examples #' if(interactive()){ #' library(ggplot2) #' library(shiny) #' library(ABACUS) #' # Run shiny app #' shiny_onesampt() #' } #' #' @import shiny #' @import ggplot2 #' @export # Function shiny_onesampt <- function() { shiny::runApp(appDir = system.file("app_onesampt", package = "ABACUS"), launch.browser = TRUE) Sys.setenv("shiny_onesampt" = "") }
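# A minimal non-interactive sketch of the test the app demonstrates: a one-sample
# Student's t-test of a random sample against a hypothesised mean (if (FALSE) guard,
# made-up parameter values).
if (FALSE) {
  set.seed(123)
  x <- rnorm(30, mean = 52, sd = 8)  # sample drawn from the "true" population
  t.test(x, mu = 50)                 # hypothesised population mean of 50
}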
/scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_onesampt.R
#' Shiny App to Demonstrate One-Sample Z-Test #' #' @name shiny_onesampz #' @aliases shiny_onesampz #' @description An interactive Shiny app to demonstrate one-sample Z-test. #' @usage shiny_onesampz() #' #' @details The interactive Shiny app demonstrates the principles of the hypothesis testing of means #' in a one-sample design where the population variance is known. #' The true population parameters are provided by the user. #' The user changes the hypothesised population mean and other features and explores #' how the Z-test compares the hypothesised mean #' with the mean of the sample randomly drawn from the population. #' #' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters}, #' \strong{Sample Characteristics}, and \strong{Distribution Function}. #' To use the app at first instance, just click the \code{Update} button. #' To alter the input values, edit the text box or move the point on the slider and #' explore the changes in different tabs (see below). #' #' To obtain identical outcomes in a separate run of the app, #' set a common seed value at the bottom of the left panel and click \code{Update}. #' All subsequent updates will produce identical results provided other inputs are identical. #' The seed value is ignored when the option \code{check the box to update instantly} is selected. #' #' @return The outcomes are presented in several tabs. #' \item{Population}{contains the density plots of the population and #' rug plots of the sample units randomly drawn from the population. #' It also includes the population parameter values chosen by the user.} #' \item{Sample}{contains the dot plot and box plot of the sample drawn #' randomly from the population and rug plot of the sample units. #' It also includes the mean and standard deviation of the random sample.} #' \item{Test Statistic}{contains the plot showing the mean difference #' between the sample mean and hypothesised mean and corresponding 95\% confidence intervals (CI). #' The tab also contains the distribution of the test statistic \code{z} #' with the observed value of the test statistic and probabilities under the given value of the Type 1 error.} #' \item{Summary}{includes the summary of the sampled data and outcomes #' from the one-sample Z-test. Different sections are: #' (1) Hypothesis, highlighting the null and alternative hypothesis; #' (2) Sample, tabulating the full sampled data; #' (3) Summary Statistics, summarising the summary information of the sample; #' (4) Test Statistic, presenting the outputs from the one-sample Z-test. #' (5) Confidence Interval, highlighting the mean difference and corresponding 95\% confidence intervals (CI).} #' #' @note \url{https://shiny.abdn.ac.uk/Stats/apps/} #' #' @author Mintu Nath #' @seealso Function in base R for normal distribution including #' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}}. #' The app \code{\link{shiny_onesampt}} performs the hypothesis testing of mean #' when the population variance is unknown. #' #' @examples #' if(interactive()){ #' library(ggplot2) #' library(shiny) #' library(ABACUS) #' # Run shiny app #' shiny_onesampz() #' } #' #' @import shiny #' @import ggplot2 #' @export # Function shiny_onesampz <- function() { shiny::runApp(appDir = system.file("app_onesampz", package = "ABACUS"), launch.browser = TRUE) Sys.setenv("shiny_onesampz" = "") }
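# A minimal non-interactive sketch of the test the app demonstrates: a one-sample
# Z-test computed by hand (base R has no z.test), assuming the population standard
# deviation is known (if (FALSE) guard, made-up parameter values).
if (FALSE) {
  set.seed(123)
  sigma <- 8                             # known population SD
  x <- rnorm(30, mean = 52, sd = sigma)
  z <- (mean(x) - 50) / (sigma / sqrt(length(x)))
  2 * pnorm(-abs(z))                     # two-sided p value
}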
/scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_onesampz.R
#' Shiny App to Explore Properties of Sampling Distributions #' #' @name shiny_sampling #' @aliases shiny_sampling #' @description An interactive Shiny app to demonstrate properties of the sampling distributions. #' @usage shiny_sampling() #' #' @details The interactive Shiny app demonstrates the properties of the sampling distribution. #' The true population parameter values of the Normal distribution are provided by the user. #' The user draws many samples from the population with the given sample characteristics #' and explores the variability of sample means. #' The app also includes the construction of 95\% confidence interval for all samples. #' Altering the population and sample characteristics, the user can explore #' the influence of these changes on the sampling distribution. #' #' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters}, #' \strong{Sample Characteristics} and \strong{Distribution Function}. #' To use the app at first instance, just click the \code{Update} button. #' To alter the input values, edit the text box or move the point on the slider and #' explore the changes in different tabs (see below). #' #' To obtain identical outcomes in a separate run of the app, #' set a common seed value at the bottom of the left panel and click \code{Update}. #' All subsequent updates will produce identical results provided other inputs are identical. #' The seed value is ignored when the option \code{check the box to update instantly} is selected. #' #' @return The outcomes are presented in several tabs. #' \item{Population & Sample}{contains the density plots of the population and #' dot plot of the sample units for the first sample randomly drawn from the population. #' It also includes the population parameter values chosen by the user as well as #' estimates of sample mean and standard deviation based on the first sample.} #' \item{Sampling Distribution}{contains a panel of 8 dot plots based on the sample drawn #' randomly from the population with given parameters. #' Each plot depicts the mean and standard deviation of the random sample.} #' \item{Sample Estimators}{contains the histogram of the observed sample means and #' the empirical distribution of sample means. It also includes the rug plot of all sample means.} #' \item{Confidence Interval}{contains the plot showing the 95\% confidence intervals (CI) of all samples. #' The plot shows the true population mean as a red horizontal line. #' It also provides the exact number of these estimated CI that include the true population mean.} #' \item{Summary}{includes the summary of the sampled data and outcomes #' from the one-sample z-test. Different sections are: #' (1) Sample, tabulating the full sampled data; #' (2) Sample Distribution, highlighting the expectation of sample mean and sample standard deviation #' as well as standard error of mean; #' (3) Confidence Interval, showing the concept of 95\% confidence intervals (CI) of mean.} #' #' @note \url{https://shiny.abdn.ac.uk/Stats/apps/} #' #' Also note that under the central limit theorem, the distribution of the sample means will follow normal distribution #' whatever the distribution of the variable in the population. #' #' @author Mintu Nath #' @seealso Function in base R for normal distribution including #' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}}, \code{\link{sample}}.
#' #' @examples #' if(interactive()){ #' library(ggplot2) #' library(shiny) #' library(ABACUS) #' # Run shiny app #' shiny_sampling() #' } #' #' @import shiny #' @import ggplot2 #' @export # Function shiny_sampling <- function() { shiny::runApp(appDir = system.file("app_sampling", package = "ABACUS"), launch.browser = TRUE) Sys.setenv("shiny_sampling" = "") }
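# A minimal non-interactive sketch of what the app simulates: repeated sampling,
# the spread of sample means, and 95% CI coverage of the true mean
# (if (FALSE) guard, made-up parameter values).
if (FALSE) {
  set.seed(123)
  mu <- 50; sigma <- 10; n <- 25; nrep <- 100
  means <- replicate(nrep, mean(rnorm(n, mu, sigma)))
  sd(means)                  # close to the standard error sigma / sqrt(n)
  lo <- means - 1.96 * sigma / sqrt(n)
  hi <- means + 1.96 * sigma / sqrt(n)
  mean(lo <= mu & hi >= mu)  # proportion of CIs that cover the true mean
}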
/scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_sampling.R
#' Shiny App to Demonstrate Two-Sample Independent (Unpaired) Student's t-Test #' #' @name shiny_twosampt #' @aliases shiny_twosampt #' @description An interactive Shiny app to demonstrate two-sample independent (unpaired) Student's t-test. #' @usage shiny_twosampt() #' #' @details The interactive Shiny app demonstrates the principles of the hypothesis testing of means #' in a two-sample independent (unpaired) design where the population variances are equal but unknown. #' The true parameter values are provided by the user. #' The user changes sample characteristics, distribution function and simulation features #' and explores the influence of these changes on the hypothesis testing using Student's t-test. #' #' The left panel includes the user inputs for \strong{Simulation Features}, \strong{Population Parameters}, #' \strong{Sample Characteristics}, and \strong{Distribution Function}. #' To use the app at first instance, just click the \code{Update} button. #' To alter the input values, edit the text box or move the point on the slider and #' explore the changes in different tabs (see below). #' #' To obtain identical outcomes in a separate run of the app, #' set a common seed value at the bottom of the left panel and click \code{Update}. #' All subsequent updates will produce identical results provided other inputs are identical. #' The seed value is ignored when the option \code{check the box to update instantly} is selected. #' #' @return The outcomes are presented in several tabs. #' \item{Population}{contains the density plots of two populations and #' rug plots of the sample units randomly drawn from these populations. #' It also includes the population parameter values chosen by the user.} #' \item{Sample}{contains the dot plots and box plots of two samples drawn #' randomly from the two populations and rug plots of the sample units. #' It also includes the mean and standard deviation of two random samples.} #' \item{Test Statistic}{contains the plots showing the mean difference between two groups #' and corresponding 95\% confidence intervals (CI). #' The tab also contains a panel of the distribution of the test statistic \code{t} #' with the observed value of the test statistic and probabilities under the given value of the Type 1 error.} #' \item{Summary}{includes the summary of the sampled data and outcomes #' from the independent two-sample Student's t-test. Different sections are: #' (1) Hypothesis, highlighting the null and alternative hypothesis; #' (2) Sample, tabulating the full sampled data; #' (3) Summary Statistics, summarising the summary information of two samples; #' (4) Test Statistic, presenting the outputs from independent two-sample Student's t-test. #' (5) Confidence Interval, highlighting the mean difference and corresponding 95\% confidence intervals (CI).} #' #' @note \url{https://shiny.abdn.ac.uk/Stats/apps/} #' #' @author Mintu Nath #' #' @seealso Function in base R for normal distribution and t distribution including #' \code{\link{dnorm}}, \code{\link{pnorm}}, \code{\link{qnorm}}, \code{\link{rnorm}}, #' \code{\link{dt}}, \code{\link{pt}}, \code{\link{qt}}, \code{\link{rt}} #' #' @examples #' if(interactive()){ #' library(ggplot2) #' library(shiny) #' library(ABACUS) #' # Run shiny app #' shiny_twosampt() #' } #' #' @import shiny #' @import ggplot2 #' @export # Function shiny_twosampt <- function() { shiny::runApp(appDir = system.file("app_twosampt", package = "ABACUS"), launch.browser = TRUE) Sys.setenv("shiny_twosampt" = "") }
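# A minimal non-interactive sketch of the test the app demonstrates: an independent
# two-sample Student's t-test assuming equal population variances
# (if (FALSE) guard, made-up parameter values).
if (FALSE) {
  set.seed(123)
  g1 <- rnorm(25, mean = 50, sd = 8)
  g2 <- rnorm(25, mean = 55, sd = 8)
  t.test(g1, g2, var.equal = TRUE)
}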
/scratch/gouwar.j/cran-all/cranData/ABACUS/R/shiny_twosampt.R