# optionally, graphs can be plotted with zplot and lines with zlines
# allowing you to go back and forth with bf()
# to use it: copy it into your folder and source it:
# source("zoom.r")
# WARNING: when you zoom on a multiplot window, you can only zoom on the last plot and the other plots will be rescaled
# accordingly; this can be very useful or annoying, but I don't plan to change it soon
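# A minimal illustrative session (after sourcing this file or loading the package):
# plot(rnorm(100), rnorm(100))
# zm()            # start an interactive zoom/navigation session on the current plot
# session.zoom()  # or use the console-menu interface directly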
#=============================
# Navigation "session"
#=============================
# multipancPoint
# @rdname replot
multipancPoint<-function(ancien,fact,move,new,point=NULL,isLog){
# cat("ancien",ancien,"fact",fact,"new:\n")
# print(new)
if(! is.null(fact)&& is.numeric(fact) && length(fact)==1){
if(is.null(point)) point<-mean(ancien)
fact<-1/fact
newRange<- (1-fact)*point+fact*ancien
}else if(!is.null(move) && is.numeric(move) && length(move)==1){
newRange <- .movelim(ancien,move,isLog)
}else{
if(!is.null(new)&& is.numeric(new) && length(new)==2){
newRange <- new
}else{
newRange <- ancien
}
}
if(isLog && newRange[1]<=0){
correction <- (10e-12)-newRange[1]
newRange <- newRange+correction
}
return(newRange);
}
centerOnPoint<-function(ancien,fact=1,new,point=NULL){
currentCenter<-mean(ancien)
if(is.null(point)) point<-currentCenter
fact<-1/fact
newRange<- fact*ancien-currentCenter+point
return(newRange);
}
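# Illustrative example: with fact=1, centerOnPoint() simply translates the
# range so that its center lands on 'point':
# centerOnPoint(ancien=c(0,10), fact=1, new=NULL, point=8)
# # returns c(3,13): 1*c(0,10) - 5 + 8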
keepanc<-function(ancien,fact,new,point=NULL){
# cat("ancien",ancien,"fact",fact,"new:\n")
# print(new)
return(ancien);
}
usenew<-function(ancien,fact,new,point=NULL){
# cat("ancien",ancien,"fact",fact,"new:\n")
# print(new)
return(new);
}
# to avoid repeating oneself, especially on something quickly changing
# in case of need for change here, also check "locator" in zoomplot.zoom()
#' @importFrom utils sessionInfo
is.plot.window<-function(alst,fn){
## for former versions of R
# tmp <- all.equal(.Primitive("plot.window"), fn)
# tmp <- (length(grep("plot.window",deparse(fn)))>0)
version<-sessionInfo()$R.version
Maj<-as.numeric(version$major)
Min<-as.numeric(version$minor)
if(Maj>3 || (Maj==3 && Min>= 0.1)){
tmp <- all.equal("C_plot_window",alst[[1]]$name)
attributes(tmp)$lims<-c(2,3)
}else if(Maj==3 && Min <=0.1){
tmp <- (length(grep("plot.window",deparse(fn)))>0)
attributes(tmp)$lims<-c(1,2)
}else if(Maj<3){
tmp <- all.equal(.Primitive("plot.window"), fn)
attributes(tmp)$lims<-c(1,2)
}
return(tmp)
}
is.locator<-function(alst,fn){
## for former versions of R
## beginning 3.0.1
version<-sessionInfo()$R.version
Maj<-as.numeric(version$major)
Min<-as.numeric(version$minor)
if(Maj>3 || (Maj==3 && Min>= 0.1)){
tmp <- all.equal("C_locator",alst[[1]]$name)
}else if(Maj==3 && Min <=0.1){
tmp <- (length(grep("locator",deparse(fn)))>0)
}else if(Maj<3){
tmp <- all.equal(.Primitive("locator"), fn)
}
return(tmp)
}
# get the limits of the plot in argument
getalst<-function(tmp=recordPlot()[[1]]){
for (i in seq(along = tmp)) {
fn <- tmp[[i]][[1]]
alst <- as.list(tmp[[i]][[2]])
tmp2<- is.plot.window(alst,fn)
if (is.logical(tmp2) && tmp2) {
alstwin<-alst
}
}
return(alstwin);
}
#' Central low level function of the zoom package.
#'
#' This function allows you to replot the current or a saved plot with specific
#' boundaries, a magnification factor, and possibly around a user-defined x/y.
#'
#' This function is not necessarily easy to use by hand. It is designed to work
#' well when called from higher-level functions. End users should always use
#' zm().
#'
#' @param xlim A vector with min and max x
#' @param ylim A vector with min and max y
#' @param fact A scalar giving the magnification factor (>1 brings you closer)
#' @param rp A previously recorded plot with recordPlot(), with all the
#' corresponding warnings in ?recordPlot.
#' @param moveX Expected shift on the X axis.
#' @param moveY Expected shift on the Y axis.
#' @param x x of a fixed point when rescaling, by default the center.
#' @param y y of a fixed point when rescaling, by default the center.
#' @param xlimfn a function using x, y and/or fact to generate new x limits; if NULL and xlim/ylim are not given, multipancPoint will be used
#' @param ylimfn a function using x, y and/or fact to generate new y limits; if NULL, xlimfn will be used
#' @param \dots Additional parameters not implemented, just in case.
#' @return Not guaranteed for now.
#' @note This function is the heart of the zoom package and the one that can be
#' affected by R version changes.
#' It is inspired by the zoomplot function in the TeachingDemos package.
#' @author Corentin M. Barbu
#' @seealso zm, in.zoom
#' @keywords zoom plot
#' @examples
#'
#' plot(rnorm(1000),rnorm(1000))
#' zoomplot.zoom(fact=2,x=0,y=0)
#'
#' @importFrom graphics box
#' @importFrom graphics par
#' @export zoomplot.zoom
zoomplot.zoom <- function (xlim=NULL, ylim = NULL,fact=NULL,moveX=NULL,moveY=NULL,rp=NULL,x=NULL,y=NULL,xlimfn=NULL,ylimfn=NULL,...)
{
# cat("using zoomplot.zoom")
# rp is a recorded plot
# fact is a factor of magnification/outzoom
# fact has priority on xlim/ylim
if(is.null(xlimfn)){
xlimfn <- multipancPoint
ylimfn <- multipancPoint
}
if(is.null(ylimfn)){
ylimfn<-xlimfn
}
if(is.null(rp)){
tmp <- recordPlot()[[1]]
}else{
tmp<-rp[[1]]
}
xlog <- par("xlog")
ylog <- par("ylog")
plotOk<-NULL
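# Walk the display list of the recorded plot: replay every call as-is,
# skip locator() calls (which would block), and intercept plot.window()
# calls to substitute the new axis limits computed by xlimfn/ylimfn.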
for (i in seq(along = tmp)) {
# cat("i:",i,"\n")
fn <- tmp[[i]][[1]]
alst <- as.list(tmp[[i]][[2]])
tmp1 <- is.locator(alst,fn)
if (is.logical(tmp1) && tmp1) {
next # will not like do.call
}
tmp2<- is.plot.window(alst,fn)
if (is.logical(tmp2) && tmp2) {
# print(alst)
# cat("alst orig:",alst[[1]],alst[[2]],"\n")
locx<-attributes(tmp2)$lims[1]
locy<-attributes(tmp2)$lims[2]
alst[[locx]] <- xlimfn(alst[[locx]],fact,moveX,xlim,x,xlog)
alst[[locy]] <- ylimfn(alst[[locy]],fact,moveY,ylim,y,ylog)
}
plotOk<-try(do.call(fn, alst))
}
if(isError(plotOk)){
box() # finish the graph even if errors in between
}
}
#' @importFrom graphics par
is.out.of.plot.click<-function(loc){
plotLim<-par("usr")
if(loc$x<min(plotLim[1:2])
|| loc$x>max(plotLim[1:2])
|| loc$y>max(plotLim[3:4])
|| loc$y<min(plotLim[3:4])){
return(TRUE)
}else{
return(FALSE)
}
}
# to avoid repeating this painful piece
#' @importFrom grDevices dev.cur dev.off dev.print dev.set dev.size getGraphicsEvent
#' @importFrom grDevices dev.cur pdf png recordPlot replayPlot getGraphicsEventEnv
#' @importFrom grDevices dev.cur setGraphicsEventEnv setGraphicsEventHandlers
other.option.session.message<-function(){
devType<-names(dev.cur())
terminate.key<-switch(EXPR=devType,
X11cairo = "Any other click",
X11 = "Any other click",
quartz = "Hit Esc",
RStudioGD = "Click on Finish",
windows = "Middle click","Terminate as in locator()")
cat(terminate.key,"for other options.\n")
}
#' @title Direct access to zoom functionalities.
#'
#' @description Direct selection of a zoom method of the "session" type.
#' Possibly of use in scripts?
#'
#' @aliases in.zoom out.zoom set.zoom sq.zoom
#' @param \dots Extra arguments to zoomplot.zoom.
#' @note Each function starts a different interactive sequence
#' \itemize{
#' \item{inout.zoom(): }{left click within bounds zooms in, outside bounds zooms out}
#' \item{move.to.click.zoom(): }{centers the plot around the left click}
#' \item{in.zoom(): }{each left click zooms in}
#' \item{out.zoom(): }{each left click zooms out}
#' \item{set.zoom(): }{asks for a magnification factor}
#' \item{sq.zoom(): }{lets you click on the two corners of the desired region to zoom to}
#' }
#' @return NULL
#' @author Corentin M. Barbu
#' @seealso zm(), session.zoom().
#' @export in.zoom
#' @importFrom graphics locator
in.zoom<-function(...){
# Ideally this should later center around the selected point
cat("Left click to zoom in\n")
other.option.session.message()
center<-locator(1)
if(length(center$x)==1){
zoomplot.zoom(fact=2,x=center$x,y=center$y,...);
in.zoom()
}
return()
}
#' @rdname in.zoom
#' @importFrom graphics locator
#' @export move.to.click.zoom
# center the plot at the point of click
move.to.click.zoom<-function(...){
cat("Click on the point you want to center the plot arround.\n")
other.option.session.message()
center<-locator(1)
if(length(center$x)==1){
zoomplot.zoom(fact=1,x=center$x,y=center$y,xlimfn=centerOnPoint,...);
move.to.click.zoom(...)
}
return()
}
#' allows interactive in/out zoom in "session" mode
#' @rdname in.zoom
#' @export inout.zoom
inout.zoom<-function(...){
# Ideally this should later center around the selected point
cat("Left click in plot to zoom in\n")
cat("Left click out of plot to zoom out\n")
other.option.session.message()
center<-locator(1)
if(length(center$x)==1){
if(is.out.of.plot.click(center)){
zoomplot.zoom(fact=0.5,...);
}else{
zoomplot.zoom(fact=2,x=center$x,y=center$y,...);
}
inout.zoom(...)
}
return()
}
#' @rdname in.zoom
#' @importFrom graphics locator
#' @export out.zoom
out.zoom<-function(...){
# Ideally this should later center around the selected point
cat("Left click to zoom out\n")
other.option.session.message()
center<-locator(1)
print(center)
if(length(center$x)==1){
zoomplot.zoom(fact=0.5,x=center$x,y=center$y,...);
out.zoom()
}
return()
}
#' @rdname in.zoom
#' @export set.zoom
set.zoom<-function(...){
cat("Enter magnification factor: \n")
f<-scan(n=1)
zoomplot.zoom(fact=f,...);
return()
}
# Double-click detection
# Tests whether the output of locator corresponds to two identical points
is.double.click<-function(loc){
if(length(loc$x)!=2){
cat("1\n")
return(FALSE)
}else{
if(loc$x[1]==loc$x[2] &&
loc$y[1]==loc$y[2]){
return(TRUE)
}else{
cat("2\n")
print(loc)
return(FALSE)
}
}
}
#' @rdname in.zoom
#' @importFrom graphics locator
#' @export sq.zoom
sq.zoom<-function(...){
# use locator to zoom with the mouse (two left clicks)
# in particular, ... can be used to pass a recorded plot rp
cat("Click left over opposite corners of zoom area.\n");
cat("Double left click for zoom out\n")
other.option.session.message()
square<-locator(2)
print(square)
if(length(square)==2){
if(is.double.click(square)){
zoomplot.zoom(fact=0.5,x=square$x,y=square$y,...);
out.zoom()
}else{
xmin<-min(square$x)
xmax<-max(square$x)
ymin<-min(square$y)
ymax<-max(square$y)
zoomplot.zoom(xlim=c(xmin,xmax),ylim=c(ymin,ymax),...)
}
sq.zoom(...)
}
}
orig.zoom<-function(orig){
replayPlot(orig)
return()
}
#' @import tools
print.zoom<-function(orig=NULL,dev=NULL,fileName=NULL,...){
if(is.null(fileName)){
fileName<-file.choose()
}
if(is.null(dev)){
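# Derive the device function from the file extension, e.g. "plot.png" -> png;
# eval(parse(...)) errors on unknown extensions, which isError() catches below.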
if (isError(try(dev<-eval(parse(text=tools::file_ext(fileName))),
silent=TRUE))) {
cat("Error: extension not recognized, try png or pdf\n")
return(1)
}
}
if(identical(dev,pdf)){
dev.print(device=dev,fileName,...)
}else{
devSize<-dev.size(units="px")
dev.print(device=dev,fileName,width=devSize[1],height=devSize[2],...)
}
cat("Successfully printed in",fileName,"\n")
}
png.zoom<-function(orig=NULL,fileName=NULL,...){
print.zoom(dev=png,fileName=fileName,...)
}
pdf.zoom<-function(orig=NULL,fileName=NULL){
print.zoom(dev=pdf,fileName=fileName)
}
#' @rdname navigation.zoom
#' @export session.zoom
session.zoom<-function(...){
orig <- recordPlot()
go_on<-TRUE
inout.zoom(...)
while(go_on){
cat("Do you want to?\n")
cat(" zoom in/out: 1\n")
cat("move arround: 2\n")
cat(" zoom square: 3\n")
cat(" save: 6\n")
cat("back to init: 9\n")
cat(" Exit: Enter\n")
sel<-scan(n=1)
if(length(sel)==0){
go_on<-FALSE;
}else{
if(exists("exec.zoom")){
rm(exec.zoom)
}
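# switch() dispatches on the menu number by position:
# 1 -> inout.zoom, 2 -> move.to.click.zoom, 3 -> sq.zoom,
# 6 -> print.zoom, 9 -> orig.zoom; unused entries fall through to NULL.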
exec.zoom<-switch(sel,
inout.zoom,move.to.click.zoom,
sq.zoom,NULL,
NULL,print.zoom,NULL,NULL,
orig.zoom)
if(!is.null(exec.zoom)){
exec.zoom(orig=orig,...);
}else{
cat("Say it again?\n")
}
}
}
return(recordPlot());
}
# Transform the button event code passed by the event handler into a
# meaningful string.
#
# The event handler returns a code that is hard to understand. I made a lot of
# trials and errors and came up with the corresponding function to triage
# these codes. This may evolve as I better understand this system.
#
# Used for all analysis of the events in navigation.zoom. The returned values
# should be self-explanatory.
#
# @param buttons button event code as passed to event functions by
# getGraphicsEvent()
# @return The returned value is a string that can be:\itemize{
# \item{left }{left button of the mouse}
# \item{right }{right button of the mouse}
# \item{middle }{middle button or scroll wheel *pressed*}
# \item{scrollDown }{scroll wheel turned down}
# \item{scrollUp }{scroll wheel turned up}
# }
# @author Corentin M. Barbu
# @seealso setCallBack
# @keywords event
# @examples
#
# labelButton(c(0,1)) # should return "right"
#
# @export labelButton
labelButton<-function(buttons){
# label the buttons with easy to remember names
label<-""
# cat("buttons:")
# print(buttons)
if(length(buttons)==1){
if(buttons==1){ # middle button
label<-"middle"
}else if(buttons==0){
label<-"left"
}else if(buttons==2){# scroll up
label<-"scrollUp"
}
}else if(length(buttons)>=2){ # right button or scrolling
if(buttons[2]==2){ # scroll down
label<-"scrollDown"
}else if(buttons[2]==1){ # right button
label<-"right"
}else if(buttons[1]==2){ # scroll up button
label<-"scrollUp"
}else if(buttons[1]==1){ # middle button
label<-"middle"
}
}else{
label<-NULL
}
# cat("label:",label,"\n")
return(label)
}
#' @importFrom graphics grconvertX
#' @importFrom graphics grconvertY
#' @importFrom graphics par
setCallBack<-function(..., xlim = NULL, ylim = NULL, xaxs = "r", yaxs = "r"){
startx <- NULL
starty <- NULL
usr <- NULL
rp <- recordPlot()
#---------------------
# Navigation functions
#---------------------
dragmousemove <- function(buttons, x, y) {
devset()
# cat("In dragmousemove\n")
deltax <- diff(grconvertX(c(startx, x), "ndc", "user"))
deltay <- diff(grconvertY(c(starty, y), "ndc", "user"))
if(par("xlog")){
xlim<<-10^usr[1:2]-deltax
if(xlim[1] <=0){
xlim <<- 10^usr[1:2]
}
}else{
xlim<<-usr[1:2]-deltax
}
if(par("ylog")){
ylim <<-10^usr[3:4]-deltay
if(ylim[1] <=0){
ylim <<- 10^usr[3:4]
}
}else{
ylim <<-usr[3:4]-deltay
}
moveX <- diff(c(startx, x))
moveY <- diff(c(starty, y))
zoomplot.zoom(xlim=xlim,ylim=ylim,...)
# zoomplot.zoom(moveX=-moveX,moveY=-moveY)
NULL
}
zoomDyn <- function(buttons, x, y) {
devset()
usr <<- par("usr")
mevent<-labelButton(buttons)
if(!is.null(mevent)){
# cat("mevent:",mevent,"\n")
if(mevent=="scrollDown" || mevent=="middle"){
fact<-0.7
}else if(mevent=="scrollUp" || mevent == "right"){
fact<-1.5
}else {
deltay <- diff(grconvertY(c(starty, y), "ndc", "user"))
fact<-max(min(1+deltay/(usr[2]-usr[1]),10),0.1)
}
xuser<-grconvertX(x, "ndc", "user")
yuser<-grconvertY(y, "ndc", "user")
zoomplot.zoom(fact=fact,x=xuser,y=yuser)
# cat("fact:",fact,"\n")
# cat(usr[1:2],"->",xlim,"\n")
# cat(usr[3:4],"->",ylim,"\n")
}
NULL
}
mouseDownNavig <- function(buttons, x, y) {
startx <<- x
starty <<- y
devset()
usr <<- par("usr")
# cat("buttonPress:",buttons,"\n")
mevent<-labelButton(buttons)
if(!is.null(mevent)){
if(mevent=="scrollDown"){
eventEnv$onMouseMove <- zoomDyn
zoomDyn(buttons,x,y)
}else if(mevent=="scrollUp"){
eventEnv$onMouseMove <- zoomDyn
zoomDyn(buttons,x,y)
}else if(mevent=="middle"){
eventEnv$onMouseMove <- zoomDyn
zoomDyn(buttons,x,y)
# cat("Turn on zoomDyn\n")
}else if(mevent=="left"){
# cat("Turn on dragmousemove\n")
eventEnv$onMouseMove <- dragmousemove
}else if(mevent=="right"){
# cat("Closing...")
# return(invisible(1))
}
}else{ # likely scroll (up or down)
# cat("guess scroll")
# browser()
# eventEnv$onMouseMove <- zoomDyn
# zoomDyn(buttons,x,y)
}
NULL
}
mouseup <- function(buttons, x, y) {
# cat("mouseup:")
# print(buttons)
eventEnv$onMouseMove <- NULL
NULL
}
#---------------------
# Keyboard mode switcher
#---------------------
keydown <- function(key) {
switch(key,
## quit (\033 == ESC)
"\033" =, "ctrl-C" =, "q" = { return(invisible(1)) },
## print to file
"p" = {
message("Entering printing mode:")
eventEnv$prompt <<- "Printing mode"
setGraphicsEventEnv(env=eventEnv)
print.zoom()
},
## show limits
"s" = { message("xlim=c(",
paste0(round(.xlim(), digits=3), collapse=", "), "), ",
"ylim=c(",
paste0(round(.ylim(), digits=3), collapse=", "), ")") },
## restore initial size
"r" = { orig.zoom(rp) },
## zoom in (ctrl-* == [CTRL]+[+])
"ctrl-*" =, "i" =,"+" = { zoomplot.zoom(fact=10/9) },
## zoom out (ctrl-_ == [CTRL]+[-])
"ctrl-_" =, "o" =,"-" = { zoomplot.zoom(fact=9/10) },
## zoom in (x-axis only)
"L" = { zoomplot.zoom(xlim=.zoomXlim(10/9)) },
## zoom out (x-axis only)
"H" = { zoomplot.zoom(xlim=.zoomXlim(9/10)) },
## zoom in (y-axis only)
"K" = { zoomplot.zoom(ylim=.zoomYlim(10/9)) },
## zoom out (y-axis only)
"J" = { zoomplot.zoom(ylim=.zoomYlim(9/10)) },
## move left
"Left" =, "h" = { zoomplot.zoom(moveX=-0.1) },
## move right
"Right" =, "l" = { zoomplot.zoom(moveX=+0.1) },
## move down
"Down" =, "j" = { zoomplot.zoom(moveY=-0.1) },
## move up
"Up" =, "k" = { zoomplot.zoom(moveY=+0.1) },
## default (nothing)
{}
)
return(NULL)
}
#---------------------
# Set event handler
#---------------------
setGraphicsEventHandlers(prompt = "p to print, q to quit",
onMouseDown = mouseDownNavig,
onMouseUp = mouseup,
onMouseMove = NULL,
onKeybd = keydown)
eventEnv <- getGraphicsEventEnv()
devset <- function(){
if (dev.cur() != eventEnv$which) dev.set(eventEnv$which)
}
}
#' Opening of an interactive zoom/navigate session.
#'
#' To launch an interactive session you should use zm() but if you are sure of
#' your device you can launch directly one of these functions.
#'
#' session.zoom launches an interactive console menu to navigate a plot.
#'
#' navigation.zoom allows you to interactively navigate a plot with the mouse.
#'
#' @aliases navigation.zoom session.zoom
#' @param \dots Everything that can be accepted by sq.zoom.
#' @return Returns the final plot, as saved by recordPlot().
#' @author Corentin M. Barbu, Sebastian Gibb
#' @seealso zm().
#' @keywords session navigation
#' @examples
#' \dontrun{
#' plot(rnorm(100),rnorm(100))
#' session.zoom()
#' }
#'
#' @export navigation.zoom
navigation.zoom<-function(...){
message("Mouse:")
if(names(dev.cur())=="windows"){
zoom.in.out.mes<-"Right to zoom in, Middle or Hold Left + click right to zoom out"
}else{
zoom.in.out.mes<-"Right to zoom in\nMiddle to zoom out" # "Scroll to zoom in and out"
}
message(zoom.in.out.mes,"\nHold left mouse button to move")
message("\nKeyboard:")
## keyboard usage
keys <- c("Left/Right (h/l)", "Up/Down (k/j)",
"+ or i / - or o", "L/H", "K/J",
"p", "r", "s")
usage <- c("move left/right", "move up/down",
"zoom in/out", "zoom in/out (x-axis only)",
"zoom in/out (y-axis only)",
"print to file", "reset limits", "show limits")
message(paste(format(keys, justify="left"),
format(usage, justify="right"),
sep=": ", collapse="\n"))
g<-0
while(length(g)!=1 || g!=1){
g<-getGraphicsEvent(consolePrompt="q on the graphic window to quit")
}
out<-recordPlot()
return(out)
}
# try to replot the graph into a Xlib device to allow events handling
# Zoom package utility functions
#
# Different functions, should not be used by end users, definitions may change
# in future versions.
#
# These functions are more or less tightly connected to R internal functions
# that are subject to a lot of changes and should not be relied on by an end
# user. In particular, is.plot.window() is responsible for the delicate task
# of identifying, in a recording of a plot, the part actually responsible
# for setting up the graphic window. This single thing has changed
# three times between 2010 and 2013.
#
# setCallBack() sets up the call handler. It should probably never be called
# alone.
#
# keepanc, usenew and multipancPoint are three functions possibly called by
# zoomplot.zoom() in an interchangeable way; none of them makes use of all the
# arguments. keepanc() and usenew() return ancien or new, as their names
# suggest. multipancPoint() transforms "ancien" into new coordinates,
# magnified by "fact" (ignoring "new") while keeping "point" invariant.
#
# @aliases replot getalst is.plot.window is.locator keepanc usenew
# setCallBack orig.zoom multipancPoint
# @param rp Saved plot as generated by recordPlot().
# @param tmp Usable part (equivalent to rp[[1]]) of a saved plot as generated
# by recordPlot().
# @param alst Arguments part of an item in an recorded plot object.
# @param fn Function part of an item in an recorded plot object.
# @param ancien the former set of coordinates
# @param fact factor of magnification
# @param new new set of coordinates
# @param point coordinates of the pointer
# @param xlim x limits of the plot
# @param ylim y limits of the plot
# @param xaxs style of the x axis see xaxs in par() help
# @param yaxs style of the y axis see yaxs in par() help
# @param orig like rp, orig.zoom is at this point just a convenience wrapper
# for replayPlot()
# @param ... Possible additional arguments to zoomplot.zoom
# @return Can't be relied upon at this point.
# @author Corentin M. Barbu, Sebastian Gibb
# @seealso zm, zoomplot.zoom
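# Illustrative example of multipancPoint(): zooming in by fact=2 on the range
# c(0,10) around the point x=2 halves the span while keeping 2 fixed:
# multipancPoint(ancien=c(0,10), fact=2, move=NULL, new=NULL, point=2, isLog=FALSE)
# # returns c(1,6): (1 - 1/2)*2 + (1/2)*c(0,10)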
replot <- function(rp=NULL) {
if (!interactive()) {
stop("R has to run in interactive mode.")
}
if (is.null(rp)) {
rp <- recordPlot()
}
initDev <- dev.cur()
## choose correct device for each supported platform
if (.Platform$OS.type == "windows") {
cl <- call("windows")
} else {
cl <- call("X11", type="Xlib")
}
initWarn<-options("warn")
on.exit(options(initWarn)) # restore the warning level on any exit path
options(warn=2) # avoid overlooking a warning that may be a deal killer
if (!isError(try(eval(cl), silent=TRUE)) &&
!isError(try(replayPlot(rp), silent=TRUE))) {
message("Replot successful, use ", deparse(cl), " to avoid this step.")
return(!isError(try(setCallBack(), silent=TRUE)))
} else {
if(dev.cur() != initDev) {
dev.off()
}
message("Fall back to classical interface.")
message("Use ", deparse(cl), " to enable full navigation.")
return(FALSE)
}
}
# zm() Main function, choosing between navigation or old "session" interactions
#' Launch interaction on a plot
#'
#' Allows you to zoom/navigate in any open plot. The controls should be intuitive:
#' \itemize{
#' \item{zoom in:}{ scroll up, or right click if no scroll wheel.}
#' \item{zoom out:}{ scroll down, or hold left + right click if no working wheel.}
#' \item{move:}{ left click and move }
#' }
#'
#' By default, zm() tries to open a mouse-interactive session. If the current
#' device is not interactive, it will try to replot the current plot in a
#' \code{X11(type="Xlib")} device. If that fails it will open a console menu
#' based interactive session.
#'
#' Zoom handles multiple plots on a device together. You navigate the
#' last one plotted and all the other plots are navigated according to the
#' last one: that can be pretty amazing too if you want to explore multiple
#' layers at the same time.
#'
#' @param type the type of interaction with the plot. Possible types are:
#' \itemize{
#' \item{session}{ for console menu}
#' \item{navigation}{ for mouse interaction}
#' }
#'
#' Or any short name for these. By default it will try to
#' launch a "navigation" session.
#' @param rp plot to navigate, saved using \code{rp<-recordPlot()}. By default
#' (NULL) will use the current device.
#' @return The recording of the final plot. It can be replotted using replayPlot().
#' The most useful part may be the xlim and ylim of the final plot. They can
#' be obtained simply by calling \code{par("usr")} after \code{zm()} ends.
#' @note This function relies on pretty low-level functions in R that change
#' quite often with new versions. A new version of R can break this package, but I
#' am used to it and fix it quickly.
#'
#' In case you close the device before striking q, just hit Ctrl-C on the
#' command line.
#' @author Corentin M. Barbu
#' @keywords zoom zm navigate navigation plot
#' @examples
#'
#' \dontrun{
#' # basic example
#' plot(rnorm(1000),rnorm(1000)) # could be any plot
#' zm() # navigate the plot
#'
#' # use the same xlim/ylim as ended up in the zoom session
#' xylim<-par("usr") # xmin,xmax,ymin,ymax of the final version of the plot
#' dev.off()
#' plot(rnorm(1000),rnorm(1000),xlim=xylim[1:2],ylim=xylim[3:4])
#'
#' # navigate two layers of data at the same time
#' par(mfrow=c(1,2))
#' plot(1,type="n",xlim=c(-3,3),ylim=c(-3,3),main="First Track")
#' polygon(c(-1,1,1,-1)*2,c(-1,-1,1,1)*2,col="blue")
#' lines(rnorm(100),rnorm(100))
#' plot(1,type="n",xlim=c(-3,3),ylim=c(-3,3),main="Second Track")
#' polygon(c(-1,1,1,-1)*2,c(-1,-1,1,1)*2,col="green")
#' lines(rnorm(100),rnorm(100))
#' zm() # it flickers quite a bit as it needs to replot everything every time...
#'
#' # one might want to use the older interface
#' # if attached to cairo under linux or MacOS
#' # it is also sometimes helpful to just define a square you want to zoom on
#' zm(type="s")
#' }
#'
#' @export zm
zm <- function(type="navigation", rp=NULL) {
if (missing(type)) {
if (isError(try(setCallBack(), silent=TRUE))) {
if (replot(rp=rp)) {
type <- "navigation"
} else {
type <- "session"
}
}
}
if (type == "navigation") {
return(try(navigation.zoom()))
} else {
return(try(session.zoom()))
}
}
# File: /scratch/gouwar.j/cran-all/cranData/zoom/R/zoom.R
#' Helper function to aggregate sentiment variables
#'
#' Used to aggregate the sentiment variables to the individual
#' and meeting levels
#' @param inputData data.frame that has been output from textSentiment function
#' @param meetingId string that indicates the name of the variable containing the meeting ID
#' @param speakerId string that indicates the name of the variable containing the speaker identity
#' @param sentMethod string that indicates what type of
#' sentiment analysis to aggregate--must be either 'aws' or 'syuzhet'
#' @import data.table
#' @return A data.frame giving the sentiment metrics aggregated to the requested level. If only meetingId
#' is specified, metrics are aggregated to that level. If only speakerId is specified, metrics
#' are aggregated to the individual level across any meetings. If both meetingId and speakerId
#' are specified, metrics are aggregated to the level of the individual within meeting.
#' @export
#'
#' @examples
#' agg.out = aggSentiment(inputData=sample_transcript_sentiment_aws,
#' meetingId="batchMeetingId", speakerId = "userId", sentMethod="aws")
#'
#' agg.out = aggSentiment(inputData=sample_chat_sentiment_syu,
#' meetingId="batchMeetingId", speakerId = "userName", sentMethod="syuzhet")
aggSentiment = function(inputData, meetingId=NULL, speakerId=NULL, sentMethod) {
aws_sentClass <- sd <- NULL
sentDt = data.table::data.table(inputData)
if(sentMethod == "aws") {
aws_sentClasses = c("POSITIVE", "NEGATIVE", "MIXED", "NEUTRAL")
awsContVars = paste0("aws_", tolower(aws_sentClasses))
awsClassVars = paste0(awsContVars, "_class")
if(sum(awsContVars %in% names(inputData)) == 0) {
stop("You have requested aws sentiment metrics, but your input data does not include aws output. Either change sentMethod to 'none' or first run textSentiment on your input data and provide the correct output data frame.")
}
sentDt[, (awsClassVars) := lapply(aws_sentClasses, function(x) aws_sentClass == x)]
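# For each grouping below, compute the mean/sd of the continuous sentiment
# scores and the sum/pct of the class indicator columns in a single pass;
# unlist() flattens the nested result into columns named e.g. "aws_positive.mean".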
if(!is.null(meetingId) && !is.null(speakerId)) {
agg1 = data.frame(sentDt[, as.list(unlist(lapply(.SD, function(x) list(mean = mean(x, na.rm=T), sd=sd(x, na.rm=T), sum=sum(x, na.rm=T), pct=sum(x, na.rm=T)/.N)))), by=list(get(meetingId), get(speakerId)), .SDcols=c(awsContVars, awsClassVars)])
names(agg1)[1:2]= c(meetingId, speakerId)
agg1 = agg1[, c(meetingId, speakerId, paste0(awsContVars, ".mean"), paste0(awsContVars, ".sd"), paste0(awsClassVars, ".sum"), paste0(awsClassVars, ".pct"))]
} else if(!is.null(meetingId)) {
agg1 = data.frame(sentDt[, as.list(unlist(lapply(.SD, function(x) list(mean = mean(x, na.rm=T), sd=sd(x, na.rm=T), sum=sum(x, na.rm=T), pct=sum(x, na.rm=T)/.N)))), by=list(get(meetingId)), .SDcols=c(awsContVars, awsClassVars)])
names(agg1)[1]= c(meetingId)
agg1 = agg1[, c(meetingId, paste0(awsContVars, ".mean"), paste0(awsContVars, ".sd"), paste0(awsClassVars, ".sum"), paste0(awsClassVars, ".pct"))]
} else if(!is.null(speakerId)) {
agg1 = data.frame(sentDt[, as.list(unlist(lapply(.SD, function(x) list(mean = mean(x, na.rm=T), sd=sd(x, na.rm=T), sum=sum(x, na.rm=T), pct=sum(x, na.rm=T)/.N)))), by=list(get(speakerId)), .SDcols=c(awsContVars, awsClassVars)])
names(agg1)[1]= c(speakerId)
agg1 = agg1[, c(speakerId, paste0(awsContVars, ".mean"), paste0(awsContVars, ".sd"), paste0(awsClassVars, ".sum"), paste0(awsClassVars, ".pct"))]
} else {
stop("You did not enter either a meetingId or an speakerId")
}
sentOut = agg1
}
if(sentMethod == "syuzhet") {
syuVars = paste0("syu_",c("anger", "anticipation", "disgust", "fear", "joy", "sadness", "surprise", "trust", "negative", "positive"))
if(sum(syuVars %in% names(inputData)) == 0) {
stop("You have requested syuzhet sentiment metrics, but your input data does not include syuzhet output. Either change sentMethod to 'none' or first run textSentiment on your input data and provide the correct output data frame.")
}
if(!is.null(meetingId) && !is.null(speakerId)) {
agg1 = data.frame(sentDt[, as.list(unlist(lapply(.SD, function(x) list(sum=sum(x, na.rm=T), pct=sum(x, na.rm=T)/.N)))), by=list(get(meetingId), get(speakerId)), .SDcols=syuVars])
names(agg1)[1:2]= c(meetingId, speakerId)
agg1 = agg1[, c(meetingId, speakerId, paste0(syuVars, ".sum"), paste0(syuVars, ".pct"))]
} else if(!is.null(meetingId)) {
agg1 = data.frame(sentDt[, as.list(unlist(lapply(.SD, function(x) list(sum=sum(x, na.rm=T), pct=sum(x, na.rm=T)/.N)))), by=list(get(meetingId)), .SDcols=syuVars])
names(agg1)[1]= c(meetingId)
agg1 = agg1[, c(meetingId, paste0(syuVars, ".sum"), paste0(syuVars, ".pct"))]
} else if(!is.null(speakerId)) {
agg1 = data.frame(sentDt[, as.list(unlist(lapply(.SD, function(x) list(sum=sum(x, na.rm=T), pct=sum(x, na.rm=T)/.N)))), by=list(get(speakerId)), .SDcols=syuVars])
names(agg1)[1]= c(speakerId)
agg1 = agg1[, c(speakerId, paste0(syuVars, ".sum"), paste0(syuVars, ".pct"))]
} else {
stop("You did not enter either a meetingId or an speakerId")
}
sentOut = agg1
}
return(sentOut)
}
# File: /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/aggSentiment.R
#' Batch process video files, breaking them into stills
#'
#' #' This helper calls grabVideoStills, which function currently
#' relies on the av package and 'ffmpeg' to split a video file into images.
#' This function will save the images to the director specified by the user.
#'
#' @param batchInfo the batchInfo data.frame that is output from batchProcessZoomOutput
#' @param imageDir the directory where you want the function to write the extracted image files
#' @param overWriteDir logical indicating whether you want to overwrite imageDir if it exists
#' @param sampleWindow an integer indicating how frequently you want to sample
#' images in number of seconds.
#'
#' @return a data.frame that gives information about the batch. Each record
#' corresponds to one video, with:
#' \itemize{
#' \item batchMeetingId - the meeting identifier
#' \item videoExists - boolean indicating whether the video file was there
#' \item imageDir - path to the directory where video images are saved
#' \item sampleWindow - integer with the sampleWindow requested
#' \item numFramesExtracted - the number of image files that were saved
#' }
#' @export
#'
#' @examples
#' vidBatchInfo = batchGrabVideoStills(batchInfo=sample_batch_info,
#' imageDir=tempdir(), overWriteDir=TRUE, sampleWindow=2)
#' \dontrun{
#' vidBatchInfo = batchGrabVideoStills(batchInfo=zoomOut$batchInfo,
#' imageDir="~/Documents/myMeetings/videoImages", overWriteDir=TRUE, sampleWindow=600)
#' }
batchGrabVideoStills = function(batchInfo,imageDir=NULL, overWriteDir=FALSE, sampleWindow){
if(is.null(imageDir)) {
stop("You must provide a value for imageDir so that the function knows where to write the images extracted from the video.")
}
vidBatch = data.frame(batchMeetingId=integer(), videoExists=logical(), sampleWindow=integer(), imageDir=character(), numFramesExtracted=integer())
haveffmpeg = tryCatch(system("ffmpeg -hide_banner -loglevel quiet -version", intern=T), error=function(err) NA)
if(!is.na(haveffmpeg[1])) {
if(nrow(batchInfo) == 1) pbMin=0 else pbMin=1
pb = utils::txtProgressBar(min=pbMin, max=nrow(batchInfo), style=3)
for(m in 1:nrow(batchInfo)) {
utils::setTxtProgressBar(pb, m)
mInfo = batchInfo[m, ]
videoExists = file.exists(file.path(mInfo$dirRoot, paste0(mInfo$fileRoot,"_video.mp4")))
if(videoExists) {
inname = basename(tools::file_path_sans_ext(file.path(mInfo$dirRoot, paste0(mInfo$fileRoot,"_video.mp4"))))
outDir = file.path(imageDir, inname)
gVS = grabVideoStills(inputVideo=file.path(mInfo$dirRoot, paste0(mInfo$fileRoot,"_video.mp4")), imageDir=imageDir,overWriteDir=overWriteDir, sampleWindow=sampleWindow)
vidBatch=rbind(vidBatch, cbind(batchMeetingId=mInfo$batchMeetingId, videoExists=videoExists, imageDir=outDir, sampleWindow=sampleWindow, numFramesExtracted=length(gVS)))
} else {
vidBatch=rbind(vidBatch, cbind(batchMeetingId=mInfo$batchMeetingId, videoExists=videoExists, imageDir=NA, sampleWindow=NA, numFramesExtracted=0))
}
}
close(pb)
} else {
message("Error: No videos can be processed because you do not have a working version of ffmpeg. Please check your installation of ffmpeg.")
}
return(vidBatch)
}
# File: /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/batchGrabVideoStills.R
#' Batch process files that have been downloaded from Zoom
#'
#' Provide the location of a structured batchInput file and this
#' function will process a set of meetings at once.
#'
#' @param batchInput String giving the location of the xlsx file
#' that contains the information for the zoom meetings. All corresponding
#' Zoom downloads for the meetings in the batch must be saved in the same
#' directory as the batchInput file.
#' @param exportZoomRosetta optional string giving the path for exporting the
#' zoomRosetta file to link up unique individual IDs manually. Providing this
#' path will write the zoomRosetta file to that location.
#'
#' @return a list that has a data.frame for each of the elements
#' of a Zoom output that are available in the input directory:
#' \itemize{
#' \item batchInfo - Each row is a meeting included in batchInput. Columns
#' provide information about each meeting.
#' \item meetInfo - Each row is a meeting for which there was a downloaded
#' participants file. Columns provide information about the meeting from the Zoom
#' Cloud recording site.
#' \item partInfo - Each row is a Zoom display name (with display name changes
#' in parentheses). Columns provide information about participants from the Zoom Cloud
#' recording site.
#' \item transcript - Each row is an utterance in the audio transcript. This is the
#' output from processZoomTranscript.
#' \item chat - Each row is a message posted to the chat. This is the output
#' from processZoomChat.
#' \item rosetta - Each row is a unique display name (within meeting) encountered
#' in the batchInput. This is used to reconcile user identities.
#' }
#'
#' @export
#'
#' @examples
#' batchOut = batchProcessZoomOutput(batchInput=system.file('extdata',
#' 'myMeetingsBatch.xlsx', package = 'zoomGroupStats'),
#' exportZoomRosetta=file.path(tempdir(),"_rosetta.xlsx"))
#'
batchProcessZoomOutput = function(batchInput, exportZoomRosetta=NULL) {
if(!file.exists(batchInput)) {
stop("Cannot find the specified batchInput file: ",batchInput)
}
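# batchInput is expected to hold one row per meeting, with the columns
# illustrated by sample_batch_info (batchMeetingId, fileRoot, participants,
# transcript, chat, video, sessionStartDateTime, recordingStartDateTime).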
batchInfo = openxlsx::read.xlsx(batchInput)
batchInfo[,c("participants_processed", "transcript_processed", "chat_processed", "video_processed")] = 0
batchInfo$dirRoot = dirname(batchInput)
### Initialize the output frames that will be filled ###
batchMeetInfo = data.frame(meetingId=character(), meetingTopic=character(), meetingStartTime=character(), meetingEndTime=character(), userEmail=character(), meetingDuration=integer(), numParticipants=integer(), batchMeetingId=character())
batchPartInfo = data.frame(userName=character(), userEmail=character(), userDuration=integer(), userGuest=logical(), batchMeetingId=character())
batchTranscript = data.frame(utteranceId=integer(), utteranceStartSeconds=numeric(), utteranceStartTime=character(), utteranceEndSeconds=numeric(), utteranceEndTime=character(), utteranceTimeWindow=numeric(), userName=character(), utteranceMessage=character(), utteranceLanguage=character(), batchMeetingId=character())
batchTranscript$utteranceStartTime = as.POSIXct(batchTranscript$utteranceStartTime)
batchTranscript$utteranceEndTime = as.POSIXct(batchTranscript$utteranceEndTime)
batchChat = data.frame(messageId=integer(), messageSeconds=numeric(), messageTime=character(), userName=character(), message=character(), messageLanguage=character(), batchMeetingId=character())
batchChat$messageTime = as.POSIXct(batchChat$messageTime)
batchRosetta = data.frame(userName=character(), userEmail=character(), batchMeetingId=character())
message("Processing any correctly named Zoom downloads in ",dirname(batchInput))
if(nrow(batchInfo) == 1) pbMin=0 else pbMin=1
pb = utils::txtProgressBar(min=pbMin, max=nrow(batchInfo), style=3)
for(r in 1:nrow(batchInfo)) {
utils::setTxtProgressBar(pb, r)
zoomOut = processZoomOutput(file.path(dirname(batchInput),batchInfo[r, "fileRoot"]), sessionStartDateTime=batchInfo[r, "sessionStartDateTime"], recordingStartDateTime=batchInfo[r, "recordingStartDateTime"])
if(!is.null(zoomOut$meetInfo)) {
zoomOut$meetInfo$batchMeetingId = batchInfo[r, "batchMeetingId"]
batchMeetInfo = rbind(batchMeetInfo, zoomOut$meetInfo)
}
if(!is.null(zoomOut$partInfo)) {
zoomOut$partInfo$batchMeetingId = batchInfo[r, "batchMeetingId"]
batchPartInfo = rbind(batchPartInfo, zoomOut$partInfo)
batchInfo[r, "participants_processed"] = 1
}
if(!is.null(zoomOut$transcript)) {
zoomOut$transcript$batchMeetingId = batchInfo[r, "batchMeetingId"]
batchTranscript = rbind(batchTranscript, zoomOut$transcript)
batchInfo[r, "transcript_processed"] = 1
}
if(!is.null(zoomOut$chat)) {
zoomOut$chat$batchMeetingId = batchInfo[r, "batchMeetingId"]
batchChat = rbind(batchChat, zoomOut$chat)
batchInfo[r, "chat_processed"] = 1
}
if(!is.null(zoomOut$rosetta) && nrow(zoomOut$rosetta) > 0) {
zoomOut$rosetta$batchMeetingId = batchInfo[r, "batchMeetingId"]
batchRosetta = rbind(batchRosetta, zoomOut$rosetta)
}
}
close(pb)
batchOut = list("batchInfo" = batchInfo, "meetInfo" = batchMeetInfo, "partInfo" = batchPartInfo, "transcript" = batchTranscript, "chat" = batchChat, "rosetta" = batchRosetta)
if(!is.null(exportZoomRosetta)) {
openxlsx::write.xlsx(batchOut$rosetta, exportZoomRosetta)
}
return(batchOut)
}
# File: /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/batchProcessZoomOutput.R
#' Batch analyze faces in videos
#'
#' Using this function you can analyze attributes of facial expressions within
#' a batch of video files. This batch approach requires breaking the videos into
#' still frames in advance by using the batchGrabVideoStills() function.
#'
#' @param batchInfo the batchInfo data.frame that is output from batchProcessZoomOutput
#' @param imageDir the path to the top-level directory of where all the images are stored
#' @param sampleWindow an integer indicating how frequently you have sampled images
#' in number of seconds.
#' @param facesCollectionID name of an 'AWS' collection with identified faces
#'
#' @return data.frame with one record for every face detected in each frame across all meetings. For each face, there is an abundance of information from 'AWS Rekognition'. This output is quite detailed. Note that there will be a varying number of faces per sampled frame in the video. Imagine that you have sampled the meeting and had someone rate each person's face within that sampled moment.
#' @export
#'
#' @examples
#' \dontrun{
#' vidOut = batchVideoFaceAnalysis(batchInfo=zoomOut$batchInfo,
#' imageDir="~/Documents/meetingImages",
#' sampleWindow = 300)
#' }
batchVideoFaceAnalysis = function(batchInfo, imageDir, sampleWindow, facesCollectionID=NA) {
resOut = NULL
for(m in 1:nrow(batchInfo)) {
mInfo = batchInfo[m, ]
message("Processing Meeting ",mInfo$batchMeetingId," ", "(",m," of ",nrow(batchInfo),")")
videoExists = file.exists(file.path(mInfo$dirRoot, paste0(mInfo$fileRoot,"_video.mp4")))
dirExists = dir.exists(file.path(imageDir, paste0(mInfo$fileRoot,"_video")))
if(dirExists) {
vidOut = videoFaceAnalysis(inputVideo=file.path(mInfo$dirRoot, paste0(mInfo$fileRoot,"_video.mp4")), recordingStartDateTime=mInfo$recordingStartDateTime, sampleWindow=sampleWindow, facesCollectionID=facesCollectionID, videoImageDirectory=imageDir)
vidOut$batchMeetingId = mInfo$batchMeetingId
if(is.null(resOut)) {
resOut = vidOut
} else {
resOut = rbind(resOut, vidOut)
}
} else {
message("There is no directory of pre-processed images for the video for this meeting. No analysis was conducted")
}
}
return(resOut)
}
# File: /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/batchVideoFaceAnalysis.R
#' Create a file to aid in adding a unique identifier to link to the zoom user name
#'
#' A major challenge in analyzing virtual meetings is reconciling the display
#' names that Zoom users present in the chat and transcript files. This function outputs a data.frame
#' that can be helpful in manually adding a new unique identifier to use in
#' further data analysis.
#'
#' @param zoomOutput the output from running processZoomOutput
#'
#' @return a data.frame that has unique values for the zoom display name
#' that show up across any files that are available, including
#' participants, transcript, and chat. If the user gives the participants
#' file, it will separate display name changes and include all versions. If
#' there are emails attached to display names, it will include those.
#'
#' @export
#'
#' @examples
#' rosetta.out = createZoomRosetta(processZoomOutput(fileRoot=
#' file.path(system.file('extdata', package = 'zoomGroupStats'),"meeting001")))
#' \dontrun{
#' rosetta.out = createZoomRosetta(processZoomOutput(fileRoot="~/zoomMeetings/meeting001"))
#' }
createZoomRosetta = function(zoomOutput) {
uniqueTranscriptNames = c()
uniqueChatNames = c()
uniquePartNames = c()
if(!is.null(zoomOutput$transcript)) {
uniqueTranscriptNames = unique(zoomOutput$transcript$userName)
}
if(!is.null(zoomOutput$chat)) {
uniqueChatNames = unique(zoomOutput$chat$userName)
}
if(!is.null(zoomOutput$partInfo)) {
uniqParts = zoomOutput$partInfo[,c("userName", "userEmail")]
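# Display names may include a previous name in parentheses when a user renames
# themselves (e.g. "New Name (Old Name)"); extract the parenthesized variant
# and strip it from the base name so both versions get their own row.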
uniqParts$userName2 = lapply(uniqParts$userName, function(x) regmatches(x, gregexpr("(?<=\\().*?(?=\\))", x, perl=T))[[1]])
uniqParts$userName = unlist(gsub("\\s*\\([^\\)]+\\)","",as.character(uniqParts$userName)))
uniqParts$nameChange = unlist(lapply(uniqParts$userName2, function(x) ifelse(length(x) > 0, 1, 0)))
appendNames1 = uniqParts[uniqParts$nameChange==1, c("userName2", "userEmail", "nameChange")]
names(appendNames1)[1] = "userName"
appendNames1$userName = unlist(appendNames1$userName)
appendNames2 = uniqParts[uniqParts$nameChange==1, c("userName", "userEmail", "nameChange")]
uniqParts = unique(rbind(uniqParts[uniqParts$nameChange==0, c("userName", "userEmail", "nameChange")], appendNames1, appendNames2) )
uniquePartNames=unique(uniqParts$userName)
}
allNames = data.frame(userName=unique(c(uniqueTranscriptNames, uniqueChatNames, uniquePartNames)), stringsAsFactors=F)
if(!is.null(zoomOutput$partInfo)) {
allNames = merge(allNames, uniqParts[, c("userName", "userEmail")], by="userName", all.x=T)
}
return(allNames)
}
# File: /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/createZoomRosetta.R
#' Parsed batch info file in a recorded 'Zoom' meeting
#'
#' @format A data frame with 3 rows of 13 variables:
#' \describe{
#' \item{batchMeetingId}{a character meeting identification variable}
#' \item{fileRoot}{the prefix to the files for this particular meeting}
#' \item{participants}{binary indicating whether there is a participants file downloaded}
#' \item{transcript}{binary indicating whether there is a transcript file downloaded}
#' \item{chat}{binary indicating whether there is a chat file downloaded}
#' \item{video}{binary indicating whether there is a video file downloaded}
#' \item{sessionStartDateTime}{start of the actual session as a character YYYY-MM-DD HH:MM:SS}
#' \item{recordingStartDateTime}{start of the actual recording as a character YYYY-MM-DD HH:MM:SS}
#' \item{participants_processed}{binary indicating whether there is a participants file already processed}
#' \item{transcript_processed}{binary indicating whether there is a transcript file already processed}
#' \item{chat_processed}{binary indicating whether there is a chat file already processed}
#' \item{video_processed}{binary indicating whether there is a video file already processed}
#' \item{dirRoot}{character giving the directory in which all files will be found}
#' }
#' @source \url{http://zoomgroupstats.org/}
"sample_batch_info"
#'
#'
#' Parsed spoken language in a 'Zoom' meeting.
#'
#' @format A data frame with 30 rows of 12 variables:
#' \describe{
#' \item{batchMeetingId}{a character meeting identification variable}
#' \item{userName}{'Zoom' display name attached to this speaker}
#' \item{utteranceId}{an incremented numeric identifier for a marked speech utterance}
#' \item{utteranceStartSeconds}{when the utterance started as the number of seconds from the start of the recording}
#' \item{utteranceStartTime}{timestamp for the start of the utterance}
#' \item{utteranceEndSeconds}{when the utterance ended as the number of seconds from the start of the recording}
#' \item{utteranceEndTime}{timestamp for the end of the utterance}
#' \item{utteranceTimeWindow}{duration of the utterance, in seconds}
#' \item{utteranceMessage}{the text of the utterance}
#' \item{utteranceLanguage}{language code of the utterance}
#' \item{userEmail}{character email address}
#' \item{userId}{numeric id of each speaker}
#' }
#' @source \url{http://zoomgroupstats.org/}
"sample_transcript_processed"
#' Parsed spoken language in a 'Zoom' meeting with AWS-based sentiment analysis.
#'
#' @format A data frame with 30 rows of 17 variables:
#' \describe{
#' \item{batchMeetingId}{a character meeting identification variable}
#' \item{utteranceId}{an incremented numeric identifier for a marked speech utterance}
#' \item{userName}{'Zoom' display name attached to this speaker}
#' \item{utteranceStartSeconds}{when the utterance started as the number of seconds from the start of the recording}
#' \item{utteranceStartTime}{timestamp for the start of the utterance}
#' \item{utteranceEndSeconds}{when the utterance ended as the number of seconds from the start of the recording}
#' \item{utteranceEndTime}{timestamp for the end of the utterance}
#' \item{utteranceTimeWindow}{duration of the utterance, in seconds}
#' \item{utteranceMessage}{the text of the utterance}
#' \item{utteranceLanguage}{language code of the utterance}
#' \item{userEmail}{character email address}
#' \item{userId}{numeric id of each speaker}
#' \item{aws_sentClass}{character giving the sentiment classification of this text}
#' \item{aws_positive}{probability that this text is positive}
#' \item{aws_negative}{probability that this text is negative}
#' \item{aws_neutral}{probability that this text is neutral}
#' \item{aws_mixed}{probability that this text is mixed emotion}
#' }
#' @source \url{http://zoomgroupstats.org/}
"sample_transcript_sentiment_aws"
#' Parsed spoken language in a 'Zoom' meeting with syuzhet-based sentiment analysis.
#'
#' @format A data frame with 30 rows of 23 variables:
#' \describe{
#' \item{batchMeetingId}{a character meeting identification variable}
#' \item{utteranceId}{an incremented numeric identifier for a marked speech utterance}
#' \item{userName}{'Zoom' display name attached to this speaker}
#' \item{utteranceStartSeconds}{when the utterance started as the number of seconds from the start of the recording}
#' \item{utteranceStartTime}{timestamp for the start of the utterance}
#' \item{utteranceEndSeconds}{when the utterance ended as the number of seconds from the start of the recording}
#' \item{utteranceEndTime}{timestamp for the end of the utterance}
#' \item{utteranceTimeWindow}{duration of the utterance, in seconds}
#' \item{utteranceMessage}{the text of the utterance}
#' \item{utteranceLanguage}{language code of the utterance}
#' \item{userEmail}{character email address}
#' \item{userId}{numeric id of each speaker}
#' \item{wordCount}{number of words in this utterance}
#' \item{syu_anger}{number of anger words}
#' \item{syu_anticipation}{number of anticipation words}
#' \item{syu_disgust}{number of disgust words}
#' \item{syu_fear}{number of fear words}
#' \item{syu_joy}{number of joy words}
#' \item{syu_sadness}{number of sadness words}
#' \item{syu_surprise}{number of surprise words}
#' \item{syu_trust}{number of trust words}
#' \item{syu_negative}{number of negative words}
#' \item{syu_positive}{number of positive words}
#' }
#' @source \url{http://zoomgroupstats.org/}
"sample_transcript_sentiment_syu"
#' Parsed chat file in a 'Zoom' meeting
#'
#' @format A data frame with 30 rows of 9 variables:
#' \describe{
#' \item{batchMeetingId}{a character meeting identification variable}
#' \item{userName}{'Zoom' display name attached to this speaker}
#' \item{messageId}{an incremented numeric identifier for a marked chat message}
#' \item{messageSeconds}{when the message was posted as the number of seconds from the start of the recording}
#' \item{messageTime}{timestamp for message}
#' \item{message}{text of the message}
#' \item{messageLanguage}{language code of the message}
#' \item{userEmail}{character email address}
#' \item{userId}{numeric id of each speaker}
#' }
#' @source \url{http://zoomgroupstats.org/}
"sample_chat_processed"
#' Parsed chat file in a 'Zoom' meeting with sentiment analysis using AWS
#'
#' @format A data frame with 10 rows of 14 variables:
#' \describe{
#' \item{batchMeetingId}{a character meeting identification variable}
#' \item{messageId}{an incremented numeric identifier for a marked chat message}
#' \item{userName}{'Zoom' display name attached to the messager}
#' \item{messageSeconds}{when the message was posted as the number of seconds from the start of the recording}
#' \item{messageTime}{timestamp for message}
#' \item{message}{text of the message}
#' \item{messageLanguage}{language code of the message}
#' \item{userEmail}{character email address}
#' \item{userId}{numeric id of each speaker}
#' \item{aws_sentClass}{character giving the sentiment classification of this text}
#' \item{aws_positive}{probability that this text is positive}
#' \item{aws_negative}{probability that this text is negative}
#' \item{aws_neutral}{probability that this text is neutral}
#' \item{aws_mixed}{probability that this text is mixed emotion}
#' }
#' @source \url{http://zoomgroupstats.org/}
"sample_chat_sentiment_aws"
#' Parsed chat file in a 'Zoom' meeting with sentiment analysis using syuzhet
#'
#' @format A data frame with 30 rows of 20 variables:
#' \describe{
#' \item{batchMeetingId}{a character meeting identification variable}
#' \item{messageId}{an incremented numeric identifier for a marked chat message}
#' \item{userName}{'Zoom' display name attached to the messager}
#' \item{messageSeconds}{when the message was posted as the number of seconds from the start of the recording}
#' \item{messageTime}{timestamp for message}
#' \item{message}{text of the message}
#' \item{messageLanguage}{language code of the message}
#' \item{userEmail}{character email address}
#' \item{userId}{numeric id of each speaker}
#' \item{wordCount}{number of words in this utterance}
#' \item{syu_anger}{number of anger words}
#' \item{syu_anticipation}{number of anticipation words}
#' \item{syu_disgust}{number of disgust words}
#' \item{syu_fear}{number of fear words}
#' \item{syu_joy}{number of joy words}
#' \item{syu_sadness}{number of sadness words}
#' \item{syu_surprise}{number of surprise words}
#' \item{syu_trust}{number of trust words}
#' \item{syu_negative}{number of negative words}
#' \item{syu_positive}{number of positive words}
#' }
#' @source \url{http://zoomgroupstats.org/}
"sample_chat_sentiment_syu" | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/data.R |
#' Helper function to split a video into still frames
#'
#' This function currently relies on the av package and
#' 'ffmpeg' to split a video file into images. This function will save
#' the images to the directory specified by the user.
#'
#' @param inputVideo full filepath to a video file
#' @param imageDir the directory where you want the function to write the extracted image files
#' @param overWriteDir logical indicating whether you want to overwrite imageDir if it exists
#' @param sampleWindow an integer indicating how frequently you want to sample
#' images in number of seconds.
#'
#' @return a data.frame that gives information about the still frames. Each record is
#' a stillframe, with the following info:
#' \itemize{
#' \item imageSeconds - number of seconds from the start of the video when this image was captured
#' \item imageName - full path to where the image has been saved as a .png
#' }
#' @export
#'
#' @examples
#' vidOut = grabVideoStills(inputVideo=system.file('extdata', "meeting001_video.mp4",
#' package = 'zoomGroupStats'), imageDir=tempdir(), overWriteDir=TRUE, sampleWindow=2)
#' \dontrun{
#' grabVideoStills(inputVideo='myMeeting.mp4',
#' imageDir="~/Documents/myMeetings/videoImages", overWriteDir=TRUE, sampleWindow=45)
#' }
grabVideoStills = function(inputVideo, imageDir=NULL, overWriteDir=FALSE, sampleWindow) {
if(is.null(imageDir) || !dir.exists(imageDir)) {
stop("You must provide an existing directory as imageDir so that the function knows where to write the images extracted from the video.")
}
# Get the full path of inputVideo
inname = basename(tools::file_path_sans_ext(inputVideo))
outDir = file.path(imageDir, inname)
outpath = file.path(outDir, "image_%6d.png")
if(dir.exists(file.path(outDir)) && overWriteDir) {
unlink(outDir, recursive = TRUE)
}
if(!dir.exists(file.path(imageDir))) {
stop("You did not provide a valid path for the image directory.")
}
haveffmpeg = tryCatch(system("ffmpeg -hide_banner -loglevel quiet -version", intern=T), error=function(err) NA)
if(!is.na(haveffmpeg[1])) {
dir.create(outDir)
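# Build the ffmpeg call: the fps video filter at 1.0/sampleWindow emits one
# frame every sampleWindow seconds, written as sequentially numbered PNGs
# (image_%6d.png) into outDir.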
ffCmd = paste("ffmpeg -i ", inputVideo, " -vf fps=1.0/",sampleWindow, " ", outpath, " -hide_banner -nostdin -loglevel error", sep="")
message("Processing ", basename(inputVideo), " using ffmpeg. Note that processing videos can be time intensive for long duration videos.")
o = system(ffCmd, intern=T)
# How many images are in the directory:
avOut = list.files(path=outDir, pattern="*.png", full.names=T)
if(length(avOut) > 1) {
imageSeconds = c(sampleWindow/2, sampleWindow/2+(1:(length(avOut)-1))*sampleWindow)
} else {
imageSeconds = sampleWindow/2
}
imageInfo = data.frame(imageSeconds=imageSeconds, imageName=avOut)
} else {
message("Error: No videos can be processed because you do not have a working version of ffmpeg. Please check your installation of ffmpeg.")
imageInfo = data.frame(imageSeconds=NA, imageName=NA)
}
return(imageInfo)
}
# File: /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/grabVideoStills.R
#' Helper function to add unique identifiers to processed Zoom downloads
#'
#' Import an edited zoomRosetta file that tells how to
#' link up Zoom display names to some unique individual
#' identifier
#'
#' @param zoomOutput the output of batchProcessZoomOutput
#' @param zoomRosetta the path to an edited zoomRosetta xlsx
#' @param meetingId the name of the meetingId you want to use
#'
#' @return returns zoomOutput with identifiers in zoomRosetta
#' merged to any available data.frames in the zoomOutput file
#' @export
#'
#' @examples
#' batchOutIds = importZoomRosetta(zoomOutput=
#' batchProcessZoomOutput(batchInput=system.file('extdata',
#' 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')),
#' zoomRosetta=system.file('extdata',
#' 'myMeetingsBatch_rosetta_edited.xlsx', package = 'zoomGroupStats'),
#' meetingId="batchMeetingId")
#'
#' \dontrun{
#' batchOutIds = importZoomRosetta(zoomOutput=batchOut, zoomRosetta="myEditedRosetta.xlsx",
#' meetingId="batchMeetingId")
#' }
importZoomRosetta = function(zoomOutput, zoomRosetta, meetingId) {
zoomRosettaUpdate = openxlsx::read.xlsx(zoomRosetta)
zoomOutput$rosetta=zoomRosettaUpdate
if(!is.null(zoomOutput$transcript)) {
zoomOutput$transcript = merge(zoomOutput$transcript, zoomRosettaUpdate, by=c(meetingId, "userName"), all.x=T)
zoomOutput$transcript = zoomOutput$transcript[
with(zoomOutput$transcript,
order(zoomOutput$transcript[,meetingId], zoomOutput$transcript$utteranceId)), ]
}
if(!is.null(zoomOutput$chat)) {
zoomOutput$chat = merge(zoomOutput$chat, zoomRosettaUpdate, by=c(meetingId, "userName"), all.x=T)
zoomOutput$chat = zoomOutput$chat[
with(zoomOutput$chat,
order(zoomOutput$chat[,meetingId], zoomOutput$chat$messageId)), ]
}
if(!is.null(zoomOutput$partInfo)) {
zoomOutput$partInfo = merge(zoomOutput$partInfo[,c(meetingId, "userName", "userGuest")], zoomRosettaUpdate, by=c(meetingId, "userName"), all.x=T)
}
return(zoomOutput)
}
# File: /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/importZoomRosetta.R
#' Helper function that creates temporal windows in datasets
#'
#' This creates a set of temporal windows of specified size so that metrics
#' can be computed within those windows.
#'
#' @param inputData data.frame that has data over time, usually within a single meeting
#' @param timeVar name of a numeric column that contains the time variable you want to use
#' @param windowSize numeric value giving the length of time window
#'
#' @return list with two data.frames:
#' \itemize{
#' \item windowedData - inputData with the temporal window identifying information included
#' \item allWindows - contains the full set of temporal windows and identifying information. This is valuable because inputData may not have records within all of the possible temporal windows
#' }
#' @export
#'
#' @examples
#' win.out = makeTimeWindows(sample_transcript_processed,
#' timeVar="utteranceStartSeconds", windowSize=10)
makeTimeWindows = function(inputData, timeVar, windowSize) {
tempTimeVarHelp <- tempTimeVar <- NULL
inputData$tempTimeVar = floor(inputData[,timeVar])
# Create a set of windows
windowStart = windowSize*(1:ceiling(max(inputData[,timeVar])/windowSize)-1)
windowEnd = windowSize*(1:ceiling((max(inputData[,timeVar])/windowSize)))-1
windowId = 1:length(windowEnd)
windows = data.table::data.table(cbind(windowId, windowStart, windowEnd))
# Join
inputData.dt = data.table::data.table(inputData)
inputData.dt[, tempTimeVarHelp := tempTimeVar]
data.table::setkey(windows, windowStart, windowEnd)
inputMrg = data.frame(data.table::foverlaps(inputData.dt, windows, by.x=c('tempTimeVar', 'tempTimeVarHelp'),
by.y=c('windowStart', 'windowEnd')))
inputMrg[,timeVar] = inputMrg$tempTimeVar
inputMrg[,c("tempTimeVar", "tempTimeVarHelp")] = NULL
windows = data.frame(windows)
return(list("windowedData" = inputMrg, "allWindows" = windows))
}
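# A short worked example of the windowing logic above, using the packaged
# sample transcript data. With windowSize=10, windows span seconds [0,9],
# [10,19], [20,29], ...; an utterance starting at second 23 therefore lands
# in windowId 3.
# win.out = makeTimeWindows(sample_transcript_processed,
# timeVar="utteranceStartSeconds", windowSize=10)
# head(win.out$windowedData[, c("utteranceStartSeconds", "windowId")])
# head(win.out$allWindows)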
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/makeTimeWindows.R |
#' Process a Zoom chat file
#'
#' Parses the data from the chatfile that is downloaded from the Zoom Cloud recording
#' site. Note that this is the file that accompanies a recording. This is not the file
#' that you might download directly within a given Zoom session, nor is it the one
#' that is saved locally on your computer. This is the file that you can access
#' after a session if you record in the cloud.
#'
#' @param fname String that is the path to the downloaded Zoom .txt chat file
#' @param sessionStartDateTime String that is the start of the session in YYYY-MM-DD HH:MM:SS
#' @param languageCode String denoting the language
#'
#' @return data.frame where each record is a message submission in the chat, containing columns:
#' \itemize{
#' \item messageId - Numeric identifier for each message, only unique within a given meeting
#' \item messageSeconds - When message was posted, in number of seconds from start of session
#' \item messageTime - When message was posted as POSIXct, using the supplied sessionStartDateTime
#' \item userName - Display name of user who posted the message
#' \item message - Text of the message that was posted
#' \item messageLanguage - Language code for the message
#' }
#' @export
#'
#' @examples
#' ch.out = processZoomChat(
#' fname=system.file('extdata', "meeting001_chat.txt", package = 'zoomGroupStats'),
#' sessionStartDateTime = '2020-04-20 13:30:00',
#' languageCode = 'en')
processZoomChat = function(fname, sessionStartDateTime="1970-01-01 00:00:00", languageCode="en") {
# NOTE: Need to fix this to remove any stray tabs from this file before bringing it in. I have run into a few edge cases where participants use tabs in their messages and it screws up the file. Need to experiment with this and introduce (likely) a brute force parser for this file.
ch = utils::read.delim(fname, sep="\t", stringsAsFactors=F, header=F, col.names=c("messageIncrement", "userName", "message"), quote="")
####################################
# Chat transcripts do not handle soft returns well (i.e., if the same person uses a soft line break
# for multiple lines in a single message that is submitted to the system).
# This is a crude way to identify them based on someone having an invalid message time.
# For now, will assign that text to the last marked user name in the dataset,
# pasting the messages together into a single line (separated by a space).
# Create a flag to mark erroneous records based on the message time variable. This should be made stronger
# and cleaner eventually
ch$flag = ifelse(!(grepl('(?:[01]\\d|2[0123]):(?:[012345]\\d):(?:[012345]\\d)', ch$messageIncrement)) | ch$userName=="", TRUE, FALSE)
# Assign the value in the message_increment variable to the message variable. This is because
# the parsing of the file is screwed up when there are soft returns in someone's chat message
ch$message = ifelse(ch$flag, ch$messageIncrement, ch$message)
# Go through the records from the bottom up to paste the message on the one it
# should be part of
for(i in nrow(ch):1) {
if(ch[i,"flag"]) {
ch[(i-1), "message"] = paste(ch[(i-1), "message"], ch[i, "message"], sep=" ")
}
}
# now drop the unnecessary records
ch = ch[!ch$flag, ]
# Apply date and time
sessionStartDateTime = as.POSIXct(sessionStartDateTime, tz=Sys.timezone())
ch$messageSeconds = as.numeric(lubridate::seconds(lubridate::hms(ch$messageIncrement)))
ch$messageTime = sessionStartDateTime + ch$messageSeconds
# get rid of whitespace at the beginning and end
ch$message = gsub("^\\s+|\\s+$", "", ch$message)
# Add a language variable, which is user-supplied for now
ch$messageLanguage = languageCode
# Add a simple numeric incrementing identifier for the messages that people submitted
ch$messageId = 1:nrow(ch)
# Get rid of the superfluous colon at the end of the usernames
ch$userName = substr(ch$userName, 1, nchar(ch$userName)-1)
# Mark as unidentified any user with a blank username
ch$userName = ifelse(ch$userName == "" | is.na(ch$userName), "UNIDENTIFIED", ch$userName)
# Clean up the ordering of variables that are returned and return
return(ch[, c("messageId", "messageSeconds", "messageTime", "userName", "message", "messageLanguage")])
} | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/processZoomChat.R |
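# A hedged illustration of the parsing above. The chat export is tab-delimited;
# a raw line like the following (layout assumed from the parser, <tab> marks a tab):
# 00:12:34<tab>Ringo Starr:<tab>See you all next week
# yields messageSeconds = 754 (12*60 + 34), userName = "Ringo Starr" (trailing
# colon stripped), and message = "See you all next week".
# ch.out = processZoomChat(fname="meeting001_chat.txt",
# sessionStartDateTime="2020-04-20 13:30:00", languageCode="en")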
#' Wrapper function to process the raw files from Zoom in a single call
#'
#' The user provides a fileRoot that is used for a given meeting. Output
#' files should be named as fileRoot_chat.txt; fileRoot_transcript.vtt;
#' and fileRoot_participants.csv. Any relevant files will be processed.
#' @param fileRoot string giving the path to the files and the root
#' @param rosetta boolean indicating whether to produce the rosetta file
#' @param sessionStartDateTime string giving the start of the session in YYYY-MM-DD HH:MM:SS
#' @param recordingStartDateTime string giving the start of the recording in YYYY-MM-DD HH:MM:SS
#' @param languageCode string giving the language code
#'
#' @return a named list containing data.frames for each of the available files:
#' \itemize{
#' \item meetInfo - A single row with info for the meeting that is in the
#' participants file. Columns provide information about the meeting from the Zoom
#' Cloud recording site.
#' \item partInfo - Each row is a Zoom display name (with display name changes
#' in parentheses). Columns provide information about participants from the Zoom Cloud
#' recording site.
#' \item transcript - Each row is an utterance in the audio transcript. This is the
#' output from processZoomTranscript.
#' \item chat - Each row is a message posted to the chat. This is the output
#' from processZoomChat.
#' \item rosetta - Each row is a unique display name (within meeting) encountered
#' in the batchInput. This is used to reconcile user identities.
#' }
#' @export
#'
#' @examples
#' zoomOut = processZoomOutput(fileRoot=file.path(
#' system.file('extdata', package = 'zoomGroupStats'),"meeting001"
#' ), rosetta=TRUE)
#' \dontrun{
#' zoomOut = processZoomOutput(fileRoot="~/zoomMeetings/myMeeting", rosetta=TRUE)
#' }
processZoomOutput = function(fileRoot, rosetta=TRUE, sessionStartDateTime="1970-01-01 00:00:00", recordingStartDateTime="1970-01-01 00:00:00", languageCode="en") {
out.list = list()
participantsFile = paste(fileRoot, "_participants.csv", sep="")
chatFile = paste(fileRoot, "_chat.txt", sep="")
transcriptFile = paste(fileRoot, "_transcript.vtt", sep="")
if(file.exists(participantsFile)) {
outInfo = processZoomParticipantsInfo(participantsFile)
out.list[["meetInfo"]] = outInfo[[1]]
out.list[["partInfo"]] = outInfo[[2]]
# If there is a participants file, but there is no specified timestamps, use the datetime information included in it
if(sessionStartDateTime == "1970-01-01 00:00:00") {
sessionStartDateTime = outInfo[[1]]$meetingStartTime
}
if(recordingStartDateTime == "1970-01-01 00:00:00") {
recordingStartDateTime = outInfo[[1]]$meetingStartTime
}
}
if(file.exists(chatFile)) {
out.list[["chat"]] = processZoomChat(fname=chatFile, sessionStartDateTime=sessionStartDateTime, languageCode=languageCode)
}
if(file.exists(transcriptFile)) {
out.list[["transcript"]] = processZoomTranscript(fname=transcriptFile, recordingStartDateTime=recordingStartDateTime, languageCode=languageCode)
}
if(rosetta) {
out.list[["rosetta"]] = createZoomRosetta(out.list)
}
return(out.list)
} | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/processZoomOutput.R |
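# A brief sketch of the file naming convention that processZoomOutput() above
# relies on. With a hypothetical fileRoot of "~/zoomMeetings/meeting001", the
# function looks for:
# ~/zoomMeetings/meeting001_participants.csv
# ~/zoomMeetings/meeting001_chat.txt
# ~/zoomMeetings/meeting001_transcript.vtt
# Missing files are simply skipped, so a meeting with only a transcript still
# yields a usable (smaller) output list.
# zoomOut = processZoomOutput(fileRoot="~/zoomMeetings/meeting001", rosetta=TRUE)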
#' Process participant information from a Zoom meeting export
#'
#' This function parses the information from the downloadable meeting information file in Zoom's Reports section.
#' The function presumes that you have checked the box to
#' include the meeting information in the file.
#' That means that there is a header (2 rows) containing the Zoom meeting information.
#' Following that header are four columns:
#' Name of user, user email, total duration, and guest.
#'
#' @param inputPath character giving the path to the downloaded Zoom participants .csv file
#'
#' @return list of two data.frames with parsed information from the downloadable
#' Zoom participants file
#' \itemize{
#' \item meetInfo - provides the meeting level information that Zoom Cloud gives
#' \item partInfo - provides the participant level information that Zoom Cloud gives
#'}
#' @export
#'
#' @examples
#' partInfo = processZoomParticipantsInfo(
#' system.file('extdata', "meeting001_participants.csv", package = 'zoomGroupStats')
#' )
processZoomParticipantsInfo = function(inputPath) {
meetInfo = utils::read.table(inputPath, header=F, nrows=1, skip=1, sep=",", stringsAsFactors=F, colClasses=c(rep("character", 5), "numeric", "numeric", "character"))
meetInfo = meetInfo[,1:7]
names(meetInfo) = c("meetingId", "meetingTopic", "meetingStartTime", "meetingEndTime", "userEmail", "meetingDuration", "numParticipants")
# Change the date column to something more useable in the other functions
meetInfo$meetingStartTime = as.character(lubridate::parse_date_time(meetInfo$meetingStartTime, "%m/%d/%Y %I:%M:%S %p", tz=Sys.timezone()))
meetInfo$meetingEndTime = as.character(lubridate::parse_date_time(meetInfo$meetingEndTime, "%m/%d/%Y %I:%M:%S %p", tz=Sys.timezone()))
partInfo = data.frame(utils::read.delim(inputPath, header=T, skip=3, sep=",", stringsAsFactors=F))
partInfo = partInfo[,1:4]
names(partInfo) = c("userName", "userEmail", "userDuration", "userGuest")
outInfo = list(meetInfo = meetInfo, partInfo = partInfo)
return(outInfo)
} | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/processZoomParticipantsInfo.R |
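# A hedged sketch of the participants file layout assumed by the parser above.
# Row 2 holds the meeting-level info (read with skip=1, nrows=1); row 4 onward
# holds one row per participant (read with skip=3). The labels below are
# illustrative, not Zoom's exact strings:
# Meeting ID,Topic,Start Time,End Time,User Email,Duration (Minutes),Participants
# 987654321,Weekly Sync,04/20/2020 01:30:00 PM,04/20/2020 02:30:00 PM,host@x.com,60,5
# Name (Original Name),User Email,Total Duration (Minutes),Guest
# Ringo,ringo@x.com,60,Yes
# partInfo = processZoomParticipantsInfo("meeting001_participants.csv")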
#' Process Zoom transcript file
#'
#' This function parses the data from the transcript file (.vtt) that is downloaded from the Zoom website.
#' NOTE: This is the file that accompanies a Zoom Cloud recording.
#'
#' @param fname String that is the path to the exported Zoom .vtt transcript file
#' @param recordingStartDateTime String that is the timestamp when the recording was started in YYYY-MM-DD HH:MM:SS
#' @param languageCode String denoting the language
#'
#' @return data.frame where each record is an utterance in the transcript, with columns:
#' \itemize{
#' \item utteranceId - Numeric identifier for each utterance in the transcript
#' \item utteranceStartSeconds - number of seconds from the start of the recording when utterance began
#' \item utteranceStartTime - POSIXct timestamp of the start of the utterance, using recordingStartDateTime as the zero
#' \item utteranceEndSeconds - number of seconds from the start of the recording when utterance ended
#' \item utteranceEndTime - POSIXct timestamp of the end of the utterance, using recordingStartDateTime as the zero
#' \item utteranceTimeWindow - number of seconds that this utterance lasted
#' \item userName - Zoom display name of the person who spoke this utterance
#' \item utteranceMessage - transcribed spoken words of this utterance
#' \item utteranceLanguage - language code for this utterance
#' }
#' @export
#'
#' @examples
#' tr.out = processZoomTranscript(
#' fname=system.file('extdata', 'meeting001_transcript.vtt', package = 'zoomGroupStats'),
#' recordingStartDateTime = '2020-04-20 13:30:00', languageCode = 'en')
processZoomTranscript = function(fname, recordingStartDateTime="1970-01-01 00:00:00", languageCode="en") {
# Parse the transcript file -- vtt is a structured format.
f = readLines(fname)
# there are three main pieces of data for each marked "utterance" - an id, a window of time, and the text
utteranceId = as.integer(f[seq(3,length(f), 4)])
utteranceWindow = f[seq(4,length(f), 4)]
utteranceText = f[seq(5,length(f), 4)]
# Parse the time window into two separate elements
utteranceStartTime = unlist(strsplit(utteranceWindow, " --> "))[seq(1, length(utteranceWindow)*2, 2)]
utteranceEndTime = unlist(strsplit(utteranceWindow, " --> "))[seq(2, length(utteranceWindow)*2, 2)]
utteranceStartSeconds = as.numeric(lubridate::seconds(lubridate::hms(utteranceStartTime)))
utteranceEndSeconds = as.numeric(lubridate::seconds(lubridate::hms(utteranceEndTime)))
# Now turn these into actual datetime values
recordingStartDateTime = as.POSIXct(recordingStartDateTime, tz=Sys.timezone())
utteranceStartTime = recordingStartDateTime + utteranceStartSeconds
utteranceEndTime = recordingStartDateTime + utteranceEndSeconds
# Create a time window (in seconds) for the utterances -- how long is each in seconds
utteranceTimeWindow = as.numeric(difftime(utteranceEndTime, utteranceStartTime, units="secs"))
# Parse the utterance message itself
utteranceMessage = substring(utteranceText, regexpr("[:]", utteranceText)+2)
# Get the user name that spoke the text
userName = substr(utteranceText, 1, regexpr("[:]", utteranceText)-1)
# Prepare the output file
res.out = data.frame(utteranceId, utteranceStartSeconds, utteranceStartTime, utteranceEndSeconds, utteranceEndTime, utteranceTimeWindow, userName, utteranceMessage, stringsAsFactors=F)
# Mark as unidentified any user with a blank username
res.out$userName = ifelse(res.out$userName == "" | is.na(res.out$userName), "UNIDENTIFIED", res.out$userName)
# Add the language code
res.out$utteranceLanguage = languageCode
return(res.out)
}
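# A hedged sketch of the .vtt structure assumed by the indexing above: a
# two-line header followed by repeating four-line blocks (id, time window,
# text, blank line). For example:
# WEBVTT
#
# 1
# 00:00:03.000 --> 00:00:05.500
# Ringo Starr: Good morning, everyone.
#
# For this block, utteranceStartSeconds = 3, utteranceEndSeconds = 5.5,
# utteranceTimeWindow = 2.5, userName = "Ringo Starr", and
# utteranceMessage = "Good morning, everyone."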
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/processZoomTranscript.R |
#' Analyze conversation attributes
#'
#' This function takes in the output of one of the other functions (either processZoomChat or processZoomTranscript)
#' and produces a set of conversation measures.
#' @param inputData data.frame that is the output of either processZoomChat or processZoomTranscript
#' @param inputType string of either 'transcript' or 'chat'
#' @param meetingId string giving the name of the variable with the meetingId
#' @param speakerId string giving the name of the identifier for the individual who made this contribution
#' @param sentMethod string giving the type of sentiment analysis to include, one of 'none', 'aws', or 'syuzhet'
#'
#' @return
#' A list of two data.frames, with names conditional on your choice to analyze
#' a parsed transcript file or a parsed chat file. The first list item contains
#' statistics at the corpus level. The second list item contains statistics
#' at the speaker/messager level of analysis.
#' @export
#'
#' @examples
#' convo.out = textConversationAnalysis(inputData=sample_transcript_processed,
#' inputType='transcript', meetingId='batchMeetingId',
#' speakerId='userName', sentMethod="none")
#'
#' convo.out = textConversationAnalysis(inputData=sample_transcript_sentiment_syu,
#' inputType='transcript', meetingId='batchMeetingId',
#' speakerId='userName', sentMethod="syuzhet")
#'
#' convo.out = textConversationAnalysis(inputData=sample_chat_sentiment_aws,
#' inputType='chat', meetingId='batchMeetingId',
#' speakerId='userName', sentMethod="aws")
#'
#' \dontrun{
#' convo.out = textConversationAnalysis(inputData=sample_transcript_sentiment_aws,
#' inputType='transcript', meetingId='batchMeetingId',
#' speakerId='userName', sentMethod="aws")
#'
#' convo.out = textConversationAnalysis(inputData=sample_transcript_sentiment_syu,
#' inputType='transcript', meetingId='batchMeetingId',
#' speakerId='userName', sentMethod="syuzhet")
#'
#' convo.out = textConversationAnalysis(inputData=sample_chat_processed,
#' inputType='chat', meetingId='batchMeetingId',
#' speakerId='userName', sentMethod="none")
#'
#' convo.out = textConversationAnalysis(inputData=sample_chat_sentiment_aws,
#' inputType='chat', meetingId='batchMeetingId',
#' speakerId='userName', sentMethod="aws")
#'
#' convo.out = textConversationAnalysis(inputData=sample_chat_sentiment_syu,
#' inputType='chat',meetingId='batchMeetingId',
#' speakerId='userName', sentMethod="syuzhet")
#' }
textConversationAnalysis = function(inputData, inputType, meetingId, speakerId, sentMethod="none") {
utteranceStartTime<-utteranceStartSeconds<-utteranceEndTime<-utteranceTimeWindow<-utteranceGap<-sd<-messageTime<-messageNumChars<-messageGap<-NULL
########################################
# IF THE USER REQUESTED AN ANALYSIS OF A TRANSCRIPT FILE, DO THE FOLLOWING
########################################
if(inputType=="transcript") {
inputData = dplyr::arrange(inputData, get(meetingId), utteranceStartSeconds)
########################################
# Create a base transcript-level output for this transcript
########################################
# Calculate the gap between one utterance and the prior one
inputData$utteranceGap = as.numeric(difftime(inputData$utteranceStartTime, dplyr::lag(inputData$utteranceEndTime), units="secs"))
# Make sure this is missing if the proximal utterances are from different meetings
inputData$utteranceGap = ifelse(dplyr::lag(inputData[,meetingId]) != inputData[,meetingId], NA,inputData$utteranceGap)
raw.dt = data.table::data.table(inputData)
agg.tr = data.frame(raw.dt[, list(transcriptStartTime = min(utteranceStartTime), transcriptEndTime = max(utteranceEndTime),
utteranceTimeWindow_sum = sum(utteranceTimeWindow), utteranceTimeWindow_x = mean(utteranceTimeWindow, na.rm=T), utteranceTimeWindow_sd = sd(utteranceTimeWindow, na.rm=T), utteranceGap_x = mean(utteranceGap, na.rm=T), utteranceGap_sd = sd(utteranceGap, na.rm=T), numUtterances = .N, numUniqueSpeakers = uniqueN(get(speakerId))), by=list(get(meetingId))])
names(agg.tr)[1] = meetingId
agg.tr$totalTranscriptTime = as.numeric(difftime(agg.tr$transcriptEndTime, agg.tr$transcriptStartTime, units="secs"))
agg.tr$silentTime_sum = agg.tr$totalTranscriptTime-agg.tr$utteranceTimeWindow_sum
agg.tr$burstinessRaw = (agg.tr$utteranceGap_sd - agg.tr$utteranceGap_x) / (agg.tr$utteranceGap_sd + agg.tr$utteranceGap_x)
########################################
# Create a base individual-level output for this transcript
########################################
agg.ind = data.frame(raw.dt[, list(firstUtteranceTime = min(utteranceStartTime), lastUtteranceTime = max(utteranceStartTime),
utteranceTimeWindow_sum = sum(utteranceTimeWindow), utteranceTimeWindow_x = mean(utteranceTimeWindow, na.rm=T), utteranceTimeWindow_sd = sd(utteranceTimeWindow, na.rm=T), utteranceGap_x = mean(utteranceGap, na.rm=T), utteranceGap_sd = sd(utteranceGap, na.rm=T), numUtterances = .N), by=list(get(meetingId), get(speakerId))])
names(agg.ind)[1:2] = c(meetingId, speakerId)
########################################
# Address the sentiment analysis
########################################
if(sentMethod %in% c("aws", "syuzhet")) {
aggSentiment.tr = aggSentiment(inputData, meetingId=meetingId, speakerId=NULL, sentMethod)
aggSentiment.ind = aggSentiment(inputData, meetingId=meetingId, speakerId=speakerId, sentMethod)
text.out.tr = merge(agg.tr, aggSentiment.tr, by=meetingId)
text.out.ind = merge(agg.ind, aggSentiment.ind, by=c(meetingId, speakerId))
} else {
text.out.tr = agg.tr
text.out.ind = agg.ind
}
text.out.tr = dplyr::arrange(text.out.tr, get(meetingId))
text.out.ind = dplyr::arrange(text.out.ind, get(meetingId), get(speakerId))
res.out = list("transcriptlevel" = text.out.tr, "speakerlevel" = text.out.ind)
########################################
# IF THE USER REQUESTED AN ANALYSIS OF A CHAT FILE, DO THE FOLLOWING
########################################
} else if(inputType=="chat") {
inputData = dplyr::arrange(inputData, get(meetingId), messageTime)
########################################
# Create a base chat-level output
########################################
inputData$messageNumChars = nchar(inputData$message)
# Calculate the gap between one message and the prior one
inputData$messageGap = as.numeric(difftime(inputData$messageTime, dplyr::lag(inputData$messageTime), units="secs"))
# Make sure this is missing if the proximal utterances are from different meetings
inputData$messageGap = ifelse(dplyr::lag(inputData[,meetingId]) != inputData[,meetingId], NA,inputData$messageGap)
raw.dt = data.table::data.table(inputData)
agg.ch = data.frame(raw.dt[, list(chatStartTime = min(messageTime), chatEndTime = max(messageTime), messageNumChars_sum = sum(messageNumChars), messageNumChars_x = mean(messageNumChars), messageNumChars_sd = sd(messageNumChars), messageGap_x = mean(messageGap, na.rm=T), messageGap_sd = sd(messageGap, na.rm=T), numUniqueMessagers = uniqueN(get(speakerId)), numMessages = .N), by=list(get(meetingId))])
names(agg.ch)[1] = meetingId
agg.ch$totalChatTime = as.numeric(difftime(agg.ch$chatEndTime, agg.ch$chatStartTime, units="secs"))
agg.ch$burstinessRaw = (agg.ch$messageGap_sd - agg.ch$messageGap_x) / (agg.ch$messageGap_sd + agg.ch$messageGap_x)
########################################
# Create a base individual-level output for this transcript
########################################
agg.ind = data.frame(raw.dt[, list(numMessages = .N, firstMessageTime = min(messageTime), lastMessageTime = max(messageTime), messageNumChars_sum = sum(messageNumChars, na.rm=T), messageNumChars_x = mean(messageNumChars, na.rm=T), messageNumChars_sd = sd(messageNumChars, na.rm=T), messageGap_x = mean(messageGap, na.rm=T), messageGap_sd = sd(messageGap, na.rm=T)), by=list(get(meetingId), get(speakerId))])
names(agg.ind)[1:2] = c(meetingId, speakerId)
########################################
# Address the sentiment analysis
########################################
if(sentMethod %in% c("aws", "syuzhet")) {
aggSentiment.ch = aggSentiment(inputData, meetingId = meetingId, speakerId = NULL, sentMethod)
aggSentiment.ind = aggSentiment(inputData, meetingId = meetingId, speakerId = speakerId, sentMethod)
text.out.ch = merge(agg.ch, aggSentiment.ch, by=meetingId)
text.out.ind = merge(agg.ind, aggSentiment.ind, by=c(meetingId, speakerId))
} else {
text.out.ch = agg.ch
text.out.ind = agg.ind
}
text.out.ch = dplyr::arrange(text.out.ch, get(meetingId))
text.out.ind = dplyr::arrange(text.out.ind, get(meetingId), get(speakerId))
res.out = list("chatlevel" = text.out.ch, "userlevel" = text.out.ind)
}
return(res.out)
} | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/textConversationAnalysis.R |
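# A note on the burstinessRaw metric computed above: it is the common
# burstiness index (sd - mean)/(sd + mean) applied to the gaps between
# contributions. Values near -1 suggest evenly paced exchange, values near 0
# randomly spaced contributions, and values approaching 1 highly bursty
# conversation. A usage sketch with the packaged sample data:
# convo.out = textConversationAnalysis(inputData=sample_transcript_processed,
# inputType="transcript", meetingId="batchMeetingId",
# speakerId="userName", sentMethod="none")
# convo.out$transcriptlevel[, c("batchMeetingId", "utteranceGap_x", "burstinessRaw")]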
#' Conduct a sentiment analysis on text data
#'
#' This function takes in the output of the chat and transcript functions. It then
#' conducts a sentiment analysis on an identified chunk of text
#' and returns the values.
#' To use the 'aws' option, you must have an AWS account with privileges for the Comprehend service.
#' However you authenticate for AWS, you should do so before calling the function
#' with this option in sentMethods.
#'
#' @param inputData data.frame that has been output by either the processZoomTranscript or processZoomChat functions
#' @param idVars vector with the names of variables that give the unique identifiers for this piece of text. Usually this will be the meeting id variable and the text id variable (e.g., utteranceId, messageId)
#' @param textVar name of variable that contains the text
#' @param sentMethods a vector specifying the types of sentiment analysis to run; currently
#' either "aws" or "syuzhet"
#' @param appendOut boolean indicating whether you want the sentiment results
#' merged to the inputData in your output
#' @param languageCodeVar name of variable that contains the language code
#'
#' @return returns a list containing as data.frames the output of the sentiment analyses
#' that were requested in sentMethods. For each output data.frame, the first columns
#' are the idVars specified to enable combining back with the original inputData
#' @export
#'
#' @examples
#' sent.out = textSentiment(inputData=sample_chat_processed,
#' idVars=c('batchMeetingId', 'messageId'),
#' textVar='message', sentMethods='syuzhet',appendOut=TRUE,
#' languageCodeVar='messageLanguage')
#'
#' \dontrun{
#' sent.out = textSentiment(inputData=sample_transcript_processed,
#' idVars=c('batchMeetingId','utteranceId'),
#' textVar='utteranceMessage', sentMethods=c('aws','syuzhet'),
#' appendOut=TRUE, languageCodeVar='utteranceLanguage')
#' }
#'
textSentiment = function(inputData, idVars, textVar, sentMethods, appendOut=FALSE, languageCodeVar){
aws_sentClass <- NULL
if(length(idVars[!(idVars %in% names(inputData))]) > 0) {
stop("One or more idVars is not in inputData: ",paste(idVars[!(idVars %in% names(inputData))], sep=", "))
}
returnList = list()
if("aws" %in% sentMethods) {
message("Running AWS machine learning-based sentiment analysis")
if(nrow(inputData) >= 100) {
message("Your input data has ",nrow(inputData), " records. Conducting a sentiment analysis on a dataset of this size using aws will take time. Be patient while the function does its work.")
}
svc = paws::comprehend()
# Call Comprehend once per record, using that record's own text and language code
aws.o = pbapply::pblapply(1:nrow(inputData), function(r) svc$detect_sentiment(Text = inputData[r, textVar], LanguageCode = inputData[r, languageCodeVar]))
aws.o.data = do.call(rbind, lapply(aws.o, FUN=function(x) {
aws_sentClass = x$Sentiment
aws_positive = x$SentimentScore$Positive
aws_negative = x$SentimentScore$Negative
aws_neutral = x$SentimentScore$Neutral
aws_mixed = x$SentimentScore$Mixed
return(data.frame(aws_sentClass, aws_positive, aws_negative, aws_neutral, aws_mixed))
}))
awsOutput = cbind(inputData[,c(idVars)], aws.o.data)
names(awsOutput)[1:length(idVars)] = idVars
if(appendOut) {
awsOutput = merge(inputData, awsOutput, by=idVars)
awsOutput = dplyr::arrange(awsOutput, get(idVars[1]), get(idVars[2]))
}
returnList[["aws"]] = awsOutput
}
if("syuzhet" %in% sentMethods) {
message("Running syuzhet lexicon-based sentiment analysis")
if(nrow(inputData) >= 1000) {
message("Your input data has ",nrow(inputData), " records. Conducting a sentiment analysis on a dataset of this size, even with the lexicon-based syuzhet, will take time. Be patient while the function does its work.")
}
syuzhetData = inputData
syuzhetData[,textVar] = gsub("[^[:alnum:][:space:]']", "", syuzhetData[,textVar])
syuzhetData$wordCount = stringr::str_count(syuzhetData[,textVar], '\\w+')
syu.o.data = do.call(rbind, pbapply::pblapply(syuzhetData[,textVar], syuzhet::get_nrc_sentiment))
names(syu.o.data) = paste("syu", names(syu.o.data), sep="_")
syuOutput = cbind(syuzhetData[,c(idVars, "wordCount")], syu.o.data)
names(syuOutput)[1:length(idVars)] = idVars
if(appendOut) {
syuOutput = merge(inputData, syuOutput, by=idVars)
syuOutput = dplyr::arrange(syuOutput, get(idVars[1]), get(idVars[2]))
}
returnList[["syuzhet"]] = syuOutput
}
return(returnList)
}
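# A hedged sketch of authenticating with AWS before requesting sentMethods='aws'
# above. paws reads the standard AWS environment variables; the values below
# are placeholders, and other paws-supported authentication routes work too.
# Sys.setenv("AWS_ACCESS_KEY_ID" = "<your key id>",
# "AWS_SECRET_ACCESS_KEY" = "<your secret key>",
# "AWS_REGION" = "us-east-1")
# sent.out = textSentiment(inputData=sample_chat_processed,
# idVars=c("batchMeetingId", "messageId"), textVar="message",
# sentMethods="aws", appendOut=TRUE, languageCodeVar="messageLanguage")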
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/textSentiment.R |
#' Simple conversational turn-taking analysis
#'
#' Generate a very basic analysis of the conversational turn taking in
#' either a Zoom transcript or a Zoom chat file.
#'
#' @param inputData data.frame output from either processZoomChat or processZoomTranscript
#' @param inputType string of either 'chat' or 'transcript'
#' @param meetingId string giving the name of the meeting identifier
#' @param speakerId string giving the name of the variable with the identity of the speaker
#'
#' @return list of four data.frames giving different levels of analysis for turn taking:
#' \itemize{
#' \item rawTurn - This data.frame gives a dataset with a
#' lagged column so that you could calculate custom metrics
#' \item aggTurnsDyad - This gives a dyad-level dataset so that
#' you know whose speech patterns came before whose
#' \item aggTurnsSpeaker - This gives a speaker-level dataset
#' with metrics that you could use to assess each given
#' person's influence on the conversation
#' \item aggTurnsSpeaker_noself - This is a replication of
#' the aggTurnsSpeaker dataset, but it excludes turns where
#' a speaker self-follows (i.e., Speaker A => Speaker A)
#' }
#' @export
#'
#' @examples
#' turn.out = turnTaking(inputData=sample_transcript_processed,
#' inputType='transcript', meetingId='batchMeetingId',
#' speakerId='userName')
#'
#' turn.out = turnTaking(inputData=sample_chat_processed,
#' inputType='chat', meetingId='batchMeetingId',
#' speakerId='userName')
#'
turnTaking = function(inputData, inputType, meetingId, speakerId) {
turnGap<-sd<-speakerCurrent<-speakerBefore<-numTurns<-turnGap_x<-NULL
## This should be done on a meeting-by-meeting basis. Will do in a crude brute force way for now
uniqueMeets = unique(inputData[,meetingId])
if(length(uniqueMeets) == 1) pbMin=0 else pbMin=1
pb = utils::txtProgressBar(min=pbMin, max=length(uniqueMeets), style=3)
for(m in 1:length(uniqueMeets)) {
utils::setTxtProgressBar(pb, m)
meetData = inputData[inputData[,meetingId] == uniqueMeets[m], ]
# Get the names of the unique speakers in this file
uniqueSpeakers = sort(unique(meetData[,speakerId]))
#Create lagged variables
meetData$speakerCurrent = meetData[,speakerId]
if(inputType == "transcript") {
meetData = meetData[order(meetData$utteranceEndSeconds), ]
meetData[, c("speakerBefore", "priorUtteranceEndSeconds")] = dplyr::lag(meetData[, c("speakerCurrent", "utteranceEndSeconds")])
meetData$turnGap = meetData$utteranceStartSeconds - meetData$priorUtteranceEndSeconds
} else if(inputType == "chat") {
meetData = meetData[order(meetData$messageTime), ]
meetData[, c("speakerBefore", "priorMessageTime")] = dplyr::lag(meetData[, c("speakerCurrent", "messageTime")])
meetData$turnGap = as.numeric(difftime(meetData$messageTime, meetData$priorMessageTime, units="secs"))
}
turnDyd = meetData[,c("speakerCurrent", "speakerBefore", "turnGap")]
turnDyd.dt = data.table::data.table(turnDyd)
turnDyd.agg = data.frame(turnDyd.dt[, list(numTurns = .N, turnGap_x = mean(turnGap, na.rm=T), turnGap_sd = sd(turnGap, na.rm=T)), by=list(speakerCurrent, speakerBefore)])
# Add zeros for pairs that didn't occur
for(b in uniqueSpeakers) {
for(c in uniqueSpeakers) {
if(nrow(turnDyd.agg[turnDyd.agg$speakerBefore == b & turnDyd.agg$speakerCurrent == c, ]) == 0) {
turnDyd.agg[nrow(turnDyd.agg)+1, ] = c(c, b, 0, NA, NA)
}
}
}
turnDyd.agg[,3:5] = lapply(turnDyd.agg[,3:5], as.numeric)
######## Create an individual level dataset focused for now on influence ########
turnDyd.dt2 = data.table::data.table(turnDyd.agg)
turnDyd.agg2 = data.frame(turnDyd.dt2[!is.na(speakerBefore), list(turnsAfterSpeaker = sum(numTurns), turnGapAfterSpeaker_x = mean(turnGap_x, na.rm=T), turnGapAfterSpeaker_sd = sd(turnGap_x, na.rm=T)), list(speakerBefore)])
totalTurns = sum(turnDyd.agg[!is.na(turnDyd.agg$speakerBefore), "numTurns"])
turnDyd.agg2$turnsAfterSpeaker_pct = turnDyd.agg2$turnsAfterSpeaker/totalTurns
# Do a version of this that excludes the self references
turnDyd.agg_noself = data.frame(turnDyd.dt2[!is.na(speakerBefore) & (speakerCurrent != speakerBefore), list(turnsAfterSpeaker = sum(numTurns), turnGapAfterSpeaker_x = mean(turnGap_x, na.rm=T), turnGapAfterSpeaker_sd = sd(turnGap_x, na.rm=T)), list(speakerBefore)])
totalTurns_noself = sum(turnDyd.agg[!is.na(turnDyd.agg$speakerBefore) & (turnDyd.agg$speakerCurrent != turnDyd.agg$speakerBefore), "numTurns"])
turnDyd.agg_noself$turnsAfterSpeaker_pct = turnDyd.agg_noself$turnsAfterSpeaker/totalTurns_noself
if(nrow(turnDyd) > 0) {
turnDyd[,meetingId] = uniqueMeets[m]
}
if(nrow(turnDyd.agg) > 0){
turnDyd.agg[,meetingId] = uniqueMeets[m]
}
if(nrow(turnDyd.agg2) > 0){
turnDyd.agg2[,meetingId] = uniqueMeets[m]
}
if(nrow(turnDyd.agg_noself) > 0){
turnDyd.agg_noself[,meetingId] = uniqueMeets[m]
}
if(m == 1) {
res1 = turnDyd
res2 = turnDyd.agg
res3 = turnDyd.agg2
res4 = turnDyd.agg_noself
} else {
res1 = rbind(res1, turnDyd)
res2 = rbind(res2, turnDyd.agg)
res3 = rbind(res3, turnDyd.agg2)
res4 = rbind(res4, turnDyd.agg_noself)
}
}
close(pb)
## output a few things
o.list = list("rawTurns" = res1, "aggTurnsDyad" = res2, "aggTurnsSpeaker" = res3, "aggTurnsSpeaker_noself" = res4)
return(o.list)
} | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/turnTaking.R |
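# A hedged sketch of using the output above to rank speakers by a simple
# influence measure: the share of turns that immediately follow them,
# excluding self-follows.
# turn.out = turnTaking(inputData=sample_transcript_processed,
# inputType="transcript", meetingId="batchMeetingId", speakerId="userName")
# infl = turn.out$aggTurnsSpeaker_noself
# infl[order(-infl$turnsAfterSpeaker_pct),
# c("batchMeetingId", "speakerBefore", "turnsAfterSpeaker_pct")]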
#' Analyze the facial features within an exported Zoom video file
#'
#' Using this function you can analyze attributes of facial expressions within
#' a video file. There are two ways to supply the video information. First, you
#' can provide the actual video file. The function will then break it down
#' into still frames using the grabVideoStills() function. Second, you can use
#' the videoImageDirectory argument to give the location of a directory where
#' images have been pre-saved.
#'
#' @param inputVideo string path to the video file (the gallery-view video is ideal)
#' @param recordingStartDateTime YYYY-MM-DD HH:MM:SS of the start of the recording
#' @param sampleWindow number of seconds between sampled frames in the analysis
#' @param facesCollectionID name of an 'AWS' collection with identified faces
#' @param videoImageDirectory path to a directory that either contains image files or where you want to save image files
#' @param grabVideoStills logical indicating whether you want the function to split the video file or not
#' @param overWriteDir logical indicating whether to overwrite videoImageDirectory if it exists
#'
#' @return data.frame with one record for every face detected in each frame. For each face, there is an abundance of information from 'AWS Rekognition'. This output is quite detailed. Note that there will be a varying number of faces per sampled frame in the video. Imagine that you have sampled the meeting and had someone rate each person's face within that sampled moment.
#' @export
#'
#' @examples
#' \dontrun{
#' vid.out = videoFaceAnalysis(inputVideo="meeting001_video.mp4",
#' recordingStartDateTime="2020-04-20 13:30:00",
#' sampleWindow=1, facesCollectionID="group-r",
#' videoImageDirectory="~/Documents/meetingImages",
#' grabVideoStills=FALSE, overWriteDir=FALSE)
#' }
videoFaceAnalysis = function(inputVideo, recordingStartDateTime, sampleWindow, facesCollectionID=NA, videoImageDirectory=NULL, grabVideoStills=FALSE, overWriteDir=FALSE) {
svc = paws::rekognition()
recordingStartDateTime = as.POSIXct(recordingStartDateTime)
if(grabVideoStills) {
grabVidOut = grabVideoStills(inputVideo=inputVideo, imageDir=videoImageDirectory,overWriteDir=overWriteDir, sampleWindow=sampleWindow)
}
videoImageDirectory = file.path(videoImageDirectory, basename(tools::file_path_sans_ext(inputVideo)))
# Get any images associated with this video
imgFiles = list.files(path=videoImageDirectory, full.names=T)
# These are empty lists to use to save the information
df.o = list()
inf = list()
# Now loop through the images that are part of this video (which were already extracted)
if(length(imgFiles) == 0) {
message("There are no images extracted for this video file")
res.out = data.frame(matrix(nrow=0, ncol=23)) # 23 columns, matching col.names assigned below
} else {
message("Analyzing faces in sampled frames. For lots of images and lots of faces, this can be time intensive.")
if(length(imgFiles) == 1) pbMin=0 else pbMin=1
pb = utils::txtProgressBar(min=pbMin, max=length(imgFiles), style=3)
for(i in 1:length(imgFiles)) {
utils::setTxtProgressBar(pb, i)
# Pull the image and its information
img = magick::image_read(imgFiles[i])
inf[[i]] = magick::image_info(img)
# Adjust the timestamping of the images: the first frame is sampled at 0 + sampleWindow/2, and each subsequent frame increments by sampleWindow
if(i == 1) {
imgTimestamp = sampleWindow/2
} else {
imgTimestamp = sampleWindow/2 + (i-1)*sampleWindow
}
# Detect faces in this frame
df.o[[i]] = svc$detect_faces(Image=list(Bytes=imgFiles[i]), Attributes="ALL")
# Get the details of any faces detected in this frame
faces = df.o[[i]]$FaceDetails
# If there are no faces in the image, then create a blank results record, with just the image id
if(length(faces) == 0) {
res.line = matrix(nrow=1,ncol=23)
res.line[1,1] = imgFiles[i]
res.line[1, 21] = imgTimestamp
} else {
# Otherwise, if there are faces in the image, go through each face to get its info
# create a matrix to hold the info
res.line = matrix(nrow=length(faces), ncol=23)
# Loop through each face and analyze it
for(face.num in 1:length(faces)) {
fd = faces[[face.num]]
res.line[face.num,1] = imgFiles[i]
res.line[face.num,2] = face.num
res.line[face.num,3] = fd$AgeRange$Low
res.line[face.num,4] = fd$AgeRange$High
res.line[face.num,5] = fd$Smile$Value
res.line[face.num,6] = fd$Eyeglasses$Value
res.line[face.num,7] = fd$Sunglasses$Value
res.line[face.num,8] = fd$Gender$Value
res.line[face.num,9] = fd$Beard$Value
res.line[face.num,10] = fd$Mustache$Value
res.line[face.num,11] = fd$EyesOpen$Value
res.line[face.num,12] = fd$MouthOpen$Value
# Make an emotions table for this image
for(e in fd$Emotions) {
if(e$Type == "CONFUSED") res.line[face.num,13] = e$Confidence
else if(e$Type == "CALM") res.line[face.num,14] = e$Confidence
else if(e$Type == "HAPPY") res.line[face.num,15] = e$Confidence
else if(e$Type == "DISGUSTED") res.line[face.num,16] = e$Confidence
else if(e$Type == "ANGRY") res.line[face.num,17] = e$Confidence
else if(e$Type == "FEAR") res.line[face.num,18] = e$Confidence
else if(e$Type == "SAD") res.line[face.num,19] = e$Confidence
else if(e$Type == "SURPRISED") res.line[face.num,20] = e$Confidence
}
res.line[face.num, 21] = imgTimestamp
# if the user specified a face collection, go into it to see if the face has an identity
# Including the confidence value because it sometimes couldn't tell it was a face
# at low levels of confidence
if(!is.na(facesCollectionID) && fd$Confidence > 90) {
# Identify the coordinates of the face. Note that AWS returns the bounding box as proportions
# of the total image size, which is why the image info object above is needed
box = fd$BoundingBox
imageWidth=inf[[i]]$width
imageHeight=inf[[i]]$height
x1 = box$Left*imageWidth
y1 = box$Top*imageHeight
x2 = x1 + box$Width*imageWidth
y2 = y1 + box$Height*imageHeight
# Crop out just this particular face out of the video
img.crop = magick::image_crop(img, paste(box$Width*imageWidth,"x",box$Height*imageHeight,"+",x1,"+",y1, sep=""))
img.crop = magick::image_write(img.crop, path = NULL, format = "png")
# Search in a specified collection to see if we can label the identity of the face is in this crop
faceRec = try(svc$search_faces_by_image(CollectionId=facesCollectionID,Image=list(Bytes=img.crop), FaceMatchThreshold=70), silent=T)
if(is.character(faceRec)) {
res.line[face.num, 22] = "IDENTITY NOT RECOGNIZED"
} else {
if(length(faceRec$FaceMatches) > 0) {
res.line[face.num, 22] = faceRec$FaceMatches[[1]]$Face$ExternalImageId
res.line[face.num, 23] = faceRec$FaceMatches[[1]]$Face$Confidence
} else {
res.line[face.num, 22] = "IDENTITY NOT RECOGNIZED"
}
}
} else {
res.line[face.num, 22] = "IDENTITY NOT RECOGNIZED"
}
# Close the face loop
}
# Close the else
}
if(i == 1) {
raw.out = res.line
} else {
raw.out = rbind(raw.out, res.line)
}
# Close the image loop
}
close(pb)
res.out = data.frame(raw.out, stringsAsFactors=F)
}
col.character=c(1,8,22)
col.numeric = c(2:4, 13:21, 23)
col.boolean = c(5:7,9:12)
col.names = c("frameId", "faceId", "ageLow", "ageHigh", "smile", "eyeglasses", "sunglasses", "gender", "beard", "mustache", "eyesopen", "mouthopen", "confused", "calm", "happy", "disgusted", "angry", "fear", "sad", "surprised", "imgSeconds", "identifiedPerson", "identifiedConfidence")
res.out[,col.character] = lapply(res.out[,col.character], as.character)
res.out[,col.numeric] = lapply(res.out[,col.numeric], as.numeric)
res.out[,col.boolean] = lapply(res.out[,col.boolean], as.logical)
names(res.out) = col.names
res.out$imgTimestamp = recordingStartDateTime + res.out$imgSeconds
res.out = res.out[, c(1,21,22,23, 24, 2:20)]
return(res.out)
} | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/videoFaceAnalysis.R |
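# A hedged sketch of rolling the face-level output above up to a
# person-by-meeting emotion profile. Column names come from the function's
# return value; averaging the AWS confidence values is an illustrative
# choice, not a prescription.
# vid.out = videoFaceAnalysis(inputVideo="meeting001_video.mp4",
# recordingStartDateTime="2020-04-20 13:30:00", sampleWindow=60,
# facesCollectionID="group-r", videoImageDirectory="~/Documents/meetingImages")
# face.dt = data.table::data.table(vid.out)
# face.dt[identifiedPerson != "IDENTITY NOT RECOGNIZED",
# list(happy_x = mean(happy, na.rm=TRUE), calm_x = mean(calm, na.rm=TRUE),
# numFrames = .N), by=identifiedPerson]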
#' Run a windowed analysis on either a Zoom transcript or chat
#'
#' This function conducts a temporal window analysis on the conversation in
#' either a Zoom transcript or chat. It replicates the textConversationAnalysis
#' function across a set of windows at a window size specified by the user.
#' @param inputData data.frame output of either processZoomTranscript or processZoomChat
#' @param inputType string of either 'chat' or 'transcript'
#' @param meetingId string giving the column with the meeting identifier
#' @param speakerId string giving the name of the identifier for the individual who made this contribution
#' @param sentMethod string giving the type of sentiment analysis to include, one of 'none', 'aws', or 'syuzhet'
#' @param timeVar name of variable giving the time marker to be used.
#' For transcript, either use 'utteranceStartSeconds' or 'utteranceEndSeconds';
#' for chat use 'messageSeconds'
#' @param windowSize integer value of the duration of the window in number of seconds
#'
#' @return list with two data.frames. In the first (windowlevel), each row is a temporal window.
#' In the second (speakerlevel), each row is a user's metrics within a given temporal window.
#' @export
#'
#' @examples
#' win.text.out = windowedTextConversationAnalysis(inputData=sample_transcript_sentiment_aws,
#' inputType="transcript", meetingId="batchMeetingId", speakerId="userName", sentMethod="aws",
#' timeVar="utteranceStartSeconds", windowSize=600)
windowedTextConversationAnalysis = function(inputData, inputType, meetingId, speakerId, sentMethod="none", timeVar="automatic", windowSize) {
if(timeVar=="automatic") {
if(inputType=="transcript") timeVar="utteranceStartSeconds"
else if(inputType=="chat") timeVar="messageSeconds"
}
inputData = dplyr::arrange(inputData, get(meetingId), get(timeVar))
##### WINDOWING NEEDS TO HAPPEN IN A MEETING-BY-MEETING SEQUENCE #####
uniqueMeetings = unique(inputData[,meetingId])
if(length(uniqueMeetings) == 1) pbMin=0 else pbMin=1
pb = utils::txtProgressBar(min=pbMin, max=length(uniqueMeetings), style=3)
for(m in 1:length(uniqueMeetings)) {
utils::setTxtProgressBar(pb, m)
thisMeetingId = uniqueMeetings[m]
meetingData = inputData[inputData[,meetingId] == thisMeetingId, ]
# Add the windowing indicators to the inputData
t.out.windowed = makeTimeWindows(inputData=meetingData, timeVar=timeVar, windowSize=windowSize)
meetingData = t.out.windowed[[1]]
fullSet = cbind(t.out.windowed[[2]], sort(rep(unique(meetingData[, speakerId]), max(t.out.windowed[[2]]$windowId))))
fullSet = fullSet[order(fullSet$windowId), ]
names(fullSet)[4] = speakerId
fullSet[,meetingId] = thisMeetingId
# Now, go through each of the time windows and run the
# conversation analysis
grp.res.out = NULL
ind.res.out = NULL
for(win in 1:max(fullSet$windowId)) {
windowed.input = meetingData[meetingData$windowId == win, ]
# run the analysis if there are any pieces of text in this window
if(nrow(windowed.input) > 0) {
res.line = textConversationAnalysis(inputData=windowed.input, inputType=inputType, meetingId=meetingId, speakerId=speakerId, sentMethod=sentMethod)
grp.res.line = res.line[[1]]
grp.res.line$windowId = win
ind.res.line = res.line[[2]]
ind.res.line$windowId = win
if(!exists("grp.res.out")) {
grp.res.out = grp.res.line
ind.res.out = ind.res.line
} else {
grp.res.out = rbind(grp.res.out, grp.res.line)
ind.res.out = rbind(ind.res.out, ind.res.line)
}
}
}
if(inputType=="transcript") {
ind.fixVars = c("utteranceTimeWindow_sum", "numUtterances")
grp.fixVars = c(ind.fixVars, "totalTranscriptTime", "silentTime_sum", "numUniqueSpeakers")
} else if(inputType=="chat") {
ind.fixVars = c("numMessages", "messageNumChars_sum")
grp.fixVars = c(ind.fixVars, "numUniqueMessagers", "totalChatTime")
}
# Add these to the full time window datasets
indFull = merge(fullSet, ind.res.out, by=c(meetingId, speakerId, "windowId"), all.x=T)
indFull[,ind.fixVars] = lapply(indFull[,ind.fixVars], function(x) ifelse(is.na(x), 0, x))
grp1 = merge(t.out.windowed[[2]], grp.res.out, by=c("windowId"), all.x=T)
grp1[,meetingId] = thisMeetingId
grp1[, grp.fixVars] = lapply(grp1[,grp.fixVars], function(x) ifelse(is.na(x), 0, x))
### that concludes the single meeting ###
if(!exists("full.grp")) {
full.grp = grp1
full.ind = indFull
} else {
full.grp = rbind(full.grp, grp1)
full.ind = rbind(full.ind, indFull)
}
}
close(pb)
return(list("windowlevel" = full.grp, "speakerlevel" = full.ind))
}
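# A hedged usage sketch for the function above: track how total speaking time
# and participation move across 10-minute windows of each meeting.
# win.text.out = windowedTextConversationAnalysis(
# inputData=sample_transcript_sentiment_aws, inputType="transcript",
# meetingId="batchMeetingId", speakerId="userName", sentMethod="aws",
# timeVar="utteranceStartSeconds", windowSize=600)
# win.text.out$windowlevel[, c("windowId", "numUtterances",
# "utteranceTimeWindow_sum", "numUniqueSpeakers")]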
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/R/windowedTextConversationAnalysis.R |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/inst/doc/part01-configure-zoom.R |
---
title: "Part 1: Configuring Zoom to Capture Useful Data"
author: "Andrew P. Knight"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Part 1: Configuring Zoom to Capture Useful Data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
The first part of this guide will provide recommended practices and processes to use when you are collecting data through Zoom to be used in research. Like any aspect of research, careful and thoughtful upfront planning pays dividends when using Zoom. Because `zoomGroupStats` relies on [Zoom Cloud recording](https://support.zoom.us/hc/en-us/articles/203741855) features, this guide will focus specifically on practices to use when recording your data to the cloud. However, even if you are recording virtual meetings locally, the same basic principles will likely apply.
This guide will not provide step-by-step instructions for how to operate Zoom. For detailed guidance on using Zoom, you should consult the [Zoom Help Center](https://support.zoom.us/hc/en-us).
Hopefully you are reading this guide *before* you have started collecting data. It is before collecting data that you have the best chance to minimize undesirable variation and maximize your options for using the data that you collect. Collecting data through virtual meetings is complicated and requires a thoughtful process. To give yourself the best downstream outcomes, take time upfront--before running any meetings at all--to configure your Zoom subscription. In particular, consider the following recommendations:
## Develop a standardized protocol
Before launching data collection, create and document a standard process for yourself and any collaborators to follow. This is especially important if you will be depending on others (e.g., collaborators, research assistants, participants themselves) to capture virtual meetings. A standardized protocol will ensure consistency in your raw Zoom output across multiple meetings. As examples, consider:
1. [Sample of a guide given to those charged with recording meetings](https://docs.google.com/presentation/d/1B9Cdc-tdB4mKYjIXQ7R-RgF5-HEazU_Daiik_1GH8WY/edit?usp=sharing)
1. [Sample video guide for how to set up Zoom recording features](https://youtu.be/Y82nf9lfeQU)
1. [Sample video guide for recording the meeting itself](https://youtu.be/HbbKcmbaLYI)
## Maximize degrees of freedom
When configuring your Zoom subscription and preparing to record virtual meetings, I recommend providing yourself the most flexibility upfront. You can always subset and focus on some elements downstream. But, if you don't capture something upfront, you'll lose those options downstream. In particular:
1. If using cloud-based recording, select all possible recording options (of different views). This gives you the ability to make selective decisions after you've run the meeting.
1. Select options that enhance the recording for third-party video editing.
1. Make sure to select the option to have Zoom produce an audio transcript.
1. Make other option selections in a manner consistent with your research goals (e.g., having names on videos, having video time stamped).
## Require users to be registered in Zoom
A major challenge when collecting large scale data with Zoom recordings is the absence of a persistent individual identifier that is linked to the wide range of display names that people use. There are a few ways that this can contribute to data integrity issues. To illustrate some of these challenges, consider a few simple examples:
* Ringo Starr logs into a Zoom meeting on Monday using his corporate account, which has a default display name of *Richard Starkey*. On Tuesday, he logs into a meeting using his personal account, which has a default display name of *Ringo*. In a dataset containing both these meetings, the single person Ringo Starr would appear as two different individuals. Moreover, if linking to an external dataset, it is possible that neither *Richard Starkey* nor *Ringo* appear.
* Ringo Starr logs into a Zoom meeting on Monday using his corporate account, with the default display name of *Richard Starkey*. Halfway through the meeting, he changes his display name to *Ringo*. In a dataset of just this one meeting, there could be two display names for him, which would be interpreted as different people.
* Ringo Starr logs into a Zoom meeting on Monday with his fan club using his personal account, with the display name of *Ringo*. Two of his superfans also have the display name *Ringo*. In a dataset for this meeting, three truly distinct individuals would appear as the same person.
To properly study human behavior, we need to have a valid linkage between an individual's behavior (e.g., face in video feed, spoken words, text chat messages) and their identity. When conducting research with Zoom, it is further critical to know which individual person logged into which virtual meeting. `zoomGroupStats` does provide functions for addressing this challenge after you have collected data. However, to save yourself considerable time, take steps before you collect data to actively minimize user identity confusion:
1. If possible, require users to access meetings through an account registered with Zoom.
1. If possible, require users to access Zoom using a known registered account (e.g., one with your institution).
1. If neither of these is possible, add guidance to your standardized protocol for meeting participants to manually change their display names to some standardized format.
## Capture timestamps to sync up data streams
One significant strength of using virtual meetings for research is that you gain the ability to unobtrusively capture streams of human behavior over time. Collecting data streams over time, though, brings distinct challenges. One of the biggest challenges to overcome is compiling precise information on *when* things happen.
Within Zoom, there are two important baseline events for which you must capture precise timing information:
* **When did the session begin?** This is the moment in time when the Zoom session was launched. In reality, this is the starting point for when people in this virtual meeting were able to interact with one another.
* **When did the recording of the session begin?** This is the moment in time when the Zoom recording was launched. This is necessarily at a time equal to (if the option to launch recording at the start of the session is selected in Zoom) or later than the time that the session began.
The reason that it is so critical to capture this information is that some Zoom outputs (e.g., chat) use the start of the session as the zero point, whereas others (e.g., transcript) use the start of the recording as the zero point. In order to properly sync up data streams, it is important to convert Zoom's data streams to true clock time.
Keep careful records about these events by using a [spreadsheet like this template](https://github.com/andrewpknight/zoomGroupStats/blob/main/inst/extdata/myMeetingsBatch.xlsx). It is, of course, inevitable that you will fail to capture some of this information. In the event that you do not capture the timestamp for the start of the session, this can be accessed through the participants information in Zoom's Cloud recording system. If you did not capture the timestamp for the start of the recording, you might be able to extract this from the inset timestamp in video files associated with the session.
## Next Steps
In [Part 2](http://zoomgroupstats.org/articles/part02-process-zoom-files.html) of the guide, you will learn how to organize the files that you download from Zoom and use `zoomGroupStats` to turn your downloads into datasets.
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/inst/doc/part01-configure-zoom.Rmd |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval=FALSE--------------------------------------------------------------
# # Install zoomGroupStats from CRAN
# install.packages("zoomGroupStats")
#
# # Install the development version of zoomGroupStats
# devtools::install_github("andrewpknight/zoomGroupStats")
## ----setup--------------------------------------------------------------------
library(zoomGroupStats)
## ---- eval=FALSE--------------------------------------------------------------
# batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx")
## ----error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'----
batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
## -----------------------------------------------------------------------------
str(batchOut$batchInfo)
## -----------------------------------------------------------------------------
str(batchOut$meetInfo)
## -----------------------------------------------------------------------------
str(batchOut$partInfo)
## -----------------------------------------------------------------------------
str(batchOut$transcript)
## -----------------------------------------------------------------------------
str(batchOut$chat)
## -----------------------------------------------------------------------------
str(batchOut$rosetta)
## ---- eval=FALSE--------------------------------------------------------------
# batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx", exportZoomRosetta="./myMeetings_rosetta_original.xlsx")
## ---- eval=FALSE--------------------------------------------------------------
# batchOutIds = importZoomRosetta(zoomOutput=batchOut, zoomRosetta="./myEditedRosetta.xlsx",
# meetingId="batchMeetingId")
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/inst/doc/part02-process-zoom-files.R |
---
title: "Part 2: Turning Zoom Downloads into Datasets"
author: "Andrew P. Knight"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Part 2: Turning Zoom Downloads into Datasets}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
In the second part of this guide, you will learn how to turn downloadable files from Zoom recordings into datasets that you can analyze in `R` using `zoomGroupStats`. Again, because `zoomGroupStats` relies on [Zoom Cloud recording](https://support.zoom.us/hc/en-us/articles/203741855) features, this guide will focus specifically on the files that you download from the Zoom Cloud. However, the functions in `zoomGroupStats` can also be used on meetings that you record locally.
This guide will progress from instructions on which files to download from Zoom to how to convert those files into usable datasets. In its most basic form, a research dataset using virtual meetings can be structured as individual meeting participants who are nested within virtual meetings. Individual participants can attend multiple virtual meetings; indeed, individual participants *could* attend multiple virtual meetings at the same time. Individuals can further be members of multiple other organizational units (e.g., groups, teams, departments, organizations). To build a basic, clean dataset, though, what you must know is which individual human being was logged into which virtual meeting.
## How to Download Files from Zoom
Depending on your research objectives, you will need different output files from Zoom. The components that are currently supported and used in this package are:
1. A *participants* file. This is a .csv file that contains meta-data about your meeting. This is an essential file that should be downloaded for each meeting that you wish to process. To download it:
* Log into your Zoom account through a web browser
    * Navigate to the *Reports* section
* Click *Usage Reports*
* Scroll to the *Participants* column for your focal meeting
* Click the linked number of participants
* Check "export with meeting data" and "show unique users"
* Click *Export*
1. A *transcript* file. This is a .vtt file that contains Zoom's cloud transcription of the spoken audio during the recorded meeting.
* Navigate to the *Recordings* page
* Click the link indicating the number of items
* Download the Audio Transcript file
1. A *chat* file. This is a .txt file that contains the record of public chat messages posted during the recorded meeting. Download it from the same page where you accessed the *transcript*.
1. An *audio* file. This is an .mp4 file that contains the recorded audio during the session. Download it from the same page as the items above.
1. Several different *video* file options. [Zoom's Help Center describes the nature of these different formats](https://support.zoom.us/hc/en-us/articles/360025561091-Recording-layouts). For analyzing facial expressions, one of the most useful is the gallery style video.
## Naming the Downloaded Files from Zoom
`zoomGroupStats` is designed to batch process virtual meetings. Batch processing simply means that you are combining several meetings into a single dataset. Even if you are only analyzing a single meeting, though, it is useful to treat that single meeting as a batch of one. This will help in creating an extensible dataset that you could add to in the future.
To organize your data for a batch run, you must rename the files in a systematic way and save them in a single directory. To name the files, imagine that each file will have a "prefix" and a "suffix". The prefix will be an identifier for what meeting it came from and the suffix will be an identifier for what data source is in the file. Whereas you can use whatever prefix you choose, the suffix must be standardized as follows:
| Suffix | Description |
|:---------------|:------------------------------------------------|
| _chat.txt | Used for the chat file for meetings |
| _transcript.vtt | Used for the transcript file for meetings |
| _participants.csv | Used for the participants file for meetings |
| _video.mp4 | Used for a video file for meetings |
Imagine a sample batch run, then, which includes all four elements for three meetings. The prefixes for the meetings could be "meeting001", "meeting002", and "meeting003". This would yield a total of 12 files, named as follows:
* meeting001_chat.txt
* meeting001_transcript.vtt
* meeting001_participants.csv
* meeting001_video.mp4
* meeting002_chat.txt
* meeting002_transcript.vtt
* meeting002_participants.csv
* meeting002_video.mp4
* meeting003_chat.txt
* meeting003_transcript.vtt
* meeting003_participants.csv
* meeting003_video.mp4
All 12 files should be saved in a single directory.
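Before moving on, it can help to verify that everything in the directory follows this naming scheme. Here is a minimal sketch in base `R`, assuming your files are saved in a `./myMeetings` directory:
```{r, eval=FALSE}
# List the Zoom downloads that match the required suffixes (path is an example)
zoomFiles = list.files("./myMeetings",
  pattern="_(chat\\.txt|transcript\\.vtt|participants\\.csv|video\\.mp4)$")

# Tally each suffix; the counts should match the number of meetings
# for which you downloaded that file type
table(sub(".*_", "", zoomFiles))
```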
## Prepare a Batch Spreadsheet
Next, you will prepare a spreadsheet in .xlsx format and save it within the same directory where your Zoom downloads are. This spreadsheet tells `zoomGroupStats` how to find your files and what information to process. Practically, this spreadsheet is also helpful for organizing your data and keeping track of what you have downloaded.
You must use the following structure and column headers for your spreadsheet:
| batchMeetingId | fileRoot | participants | transcript | chat | video | sessionStartDateTime | recordingStartDateTime |
|:---------------|:---------|:-------------|:-----------|:-----|:------|:--------|:--------|
| 00000000001 | /myMeetings/meeting001 | 1 | 1 | 1 | 0 | 2020-09-04 15:00:00 | 2020-09-04 15:03:30 |
| 00000000002 | /myMeetings/meeting002 | 1 | 1 | 1 | 0 | 2020-09-05 15:00:00 | 2020-09-05 15:03:04 |
| 00000000003 | /myMeetings/meeting003 | 1 | 1 | 1 | 0 | 2020-09-06 15:00:00 | 2020-09-06 15:03:07 |
The first row in the file is the header, which should mirror the example above. Each subsequent row provides the information for a single meeting. [Here is a sample that you could use](https://github.com/andrewpknight/zoomGroupStats/blob/main/inst/extdata/myMeetingsBatch.xlsx), replacing any rows after the header with your own information.
| Column Name | Description |
|:---------------|:------------------------------------------------|
| batchMeetingId | A string identifier for this particular meeting |
| fileRoot | A string that gives the full path and prefix where the files from this meeting can be found. The final part of this string (e.g., meeting001 above) is how you have named the files downloaded for this meeting. |
| participants | Binary - 0 if you did not download the participants file, 1 if you did |
| transcript | Binary - 0 if you did not download the transcript file, 1 if you did |
| chat | Binary - 0 if you did not download the chat file, 1 if you did |
| video | Binary - 0 if you did not download a video file, 1 if you did |
| sessionStartDateTime | A string giving the timestamp for when the meeting began as YYYY-MM-DD HH:MM:SS |
| recordingStartDateTime | A string giving the timestamp for when the recording of the meeting began as YYYY-MM-DD HH:MM:SS |
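If you would rather build this spreadsheet programmatically, here is a minimal sketch using the `openxlsx` package; the meeting identifiers, paths, and timestamps below are placeholders that you would replace with your own information:
```{r, eval=FALSE}
library(openxlsx)

# Assemble the batch instructions as a data.frame, one row per meeting
batchSheet = data.frame(
  batchMeetingId = c("00000000001", "00000000002", "00000000003"),
  fileRoot = c("/myMeetings/meeting001", "/myMeetings/meeting002",
               "/myMeetings/meeting003"),
  participants = 1, transcript = 1, chat = 1, video = 0,
  sessionStartDateTime = c("2020-09-04 15:00:00", "2020-09-05 15:00:00",
                           "2020-09-06 15:00:00"),
  recordingStartDateTime = c("2020-09-04 15:03:30", "2020-09-05 15:03:04",
                             "2020-09-06 15:03:07"),
  stringsAsFactors = FALSE
)

# Save the spreadsheet in the same directory as your Zoom downloads
write.xlsx(batchSheet, "./myMeetings/myMeetingsBatch.xlsx")
```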
## Process your Batch
If you have followed the steps above, turning your Zoom sessions into data should be straightforward using the `zoomGroupStats` package. You should begin by installing the latest version of `zoomGroupStats`, either from CRAN or, for the development version, through the `install_github` function from the `devtools` package:
```{r, eval=FALSE}
# Install zoomGroupStats from CRAN
install.packages("zoomGroupStats")
# Install the development version of zoomGroupStats
devtools::install_github("andrewpknight/zoomGroupStats")
```
After you have installed the package, you can then load it into your environment as usual:
```{r setup}
library(zoomGroupStats)
```
The first step in turning downloads into data is to process your batch using the `batchProcessZoomOutput` function.
```{r, eval=FALSE}
batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx")
```
```{r error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'}
batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
```
This function will iterate through the meetings listed in the `batchInput` file that you created above. For each meeting, the function will detect any of the named files described above (except the video file) and do an initial processing of them. The function currently ignores the video file because video processing can take a considerable amount of time. [Processing video data will be covered in Part 4](http://zoomgroupstats.org/articles/part04-analyze-zoom-video-data.html).
The output, stored in this example in `batchOut`, will be a multi-item list that contains any data that were available according to the batch instructions. Currently, the following items can be included, if raw files are available:
### batchInfo
This is information about the batch, drawn from the input batch file that you have supplied. It is helpful to have this information stored for later analysis of the different components of a Zoom recording.
```{r}
str(batchOut$batchInfo)
```
### meetInfo & partInfo
These are based on information extracted from the participants file downloaded from Zoom. If a meeting has a *_participants.csv file, these will be included. They provide meta-data about the full set of virtual meetings. These are useful files for nailing down the structure of your full dataset (in terms of individuals nested within meetings) and for creating a unique individual identifier for the people in your dataset.
`meetInfo` is a data.frame that gives information pulled about the meetings from the participants file downloaded from Zoom. Each row in this file is a meeting:
```{r}
str(batchOut$meetInfo)
```
`partInfo` is a data.frame that gives the participants in each meeting and any information available in the participants file downloaded from Zoom:
```{r}
str(batchOut$partInfo)
```
### transcript & chat
These two items provide parsed text data from the transcribed audio spoken during the meeting and the text-based chat in the meeting. These files are the basis of any further text-based analysis. One important thing to note in processing transcripts is that Zoom timestamps the transcript file anchored on the start of the recording--not at the start of the session itself. The issue here is that a recording may not begin until a period of time after the launch of the session. This means that you could have a transcript file out of sync with the chat file, which begins its timestamp at the start of the session. Resolving this issue requires careful records of when the recording was started or setting up your session to automatically record.
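One way to reconcile the two clocks, sketched below, is to compute each meeting's lag between the session launch and the recording start (both recorded in your batch spreadsheet) and shift the transcript timestamps onto the session clock. This sketch assumes that `batchInfo` retains the `sessionStartDateTime` and `recordingStartDateTime` columns from your batch spreadsheet and that the parsed transcript includes an `utteranceStartSeconds` column:
```{r, eval=FALSE}
# Seconds between launching the session and starting the recording
info = batchOut$batchInfo
info$recordingLag = as.numeric(difftime(as.POSIXct(info$recordingStartDateTime),
                                        as.POSIXct(info$sessionStartDateTime),
                                        units="secs"))

# Shift transcript times so they share the chat file's zero point
tr = merge(batchOut$transcript, info[, c("batchMeetingId", "recordingLag")],
           by="batchMeetingId")
tr$utteranceStartSecondsSession = tr$utteranceStartSeconds + tr$recordingLag
```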
`transcript` is a data.frame that is the parsed audio transcript file. Each row represents a single marked "utterance" using Zoom's cloud-based transcription algorithm. Utterances are marked as a function of pauses in speech and/or speaker changes.
```{r}
str(batchOut$transcript)
```
`chat` is a data.frame that is the parsed text-based chat file. Each row represents a single chat message submitted by a user. Non-ASCII characters will not be correctly rendered in the message text.
```{r}
str(batchOut$chat)
```
### rosetta
The final element is called "rosetta" because it will help you deal with one of the most vexing challenges in analyzing virtual meetings with a large number of participants: The lack of a persistent and unique individual identifier for participants. Because the transcript and chat files rely on people's Zoom display names--and because people can change their display names--you will frequently encounter duplicate names and/or multiple identifiers for the same person.
From a data structure perspective, what is most important to know is which individual person logged into which virtual meeting. Unfortunately, complications in how Zoom identifies individuals require careful human attention to this issue. To illustrate, consider the following exaggerated example. Arun Jah is an attendee in Meeting A. Arun logs in on his laptop through his corporate account. But, he also logs in on his iPad through his family account to the same meeting. After a few minutes, Arun realizes that his iPad account has his child's name displayed (the child was using it for virtual school). So, he changes his display name. Halfway through the meeting, Arun needs to get in his car to go pick up his child. So, he logs into the meeting on his smartphone. In a raw Zoom dataset, the person "Arun" will show up as four different names--(1) his corporate name that was not changed; (2) his child's name on his iPad; (3) the name he changed his iPad to; and, (4) his mobile phone. Clearly, to properly study human behavior, all of the actions associated with these four names should be attached to "Arun".
To address this problem, the `rosetta` file compiles every unique display name (by meeting) encountered across the `participants`, `chat`, and `transcript` files.
```{r}
str(batchOut$rosetta)
```
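Before exporting and editing this file, you can get a quick sense of how much reconciliation work lies ahead. Here is a minimal sketch in base `R`, assuming the display-name column in `rosetta` is named `userName`:
```{r, eval=FALSE}
# Display names that appear more than once across meetings -- likely
# candidates for being the same person in multiple meetings
nameCounts = table(batchOut$rosetta$userName)
sort(nameCounts[nameCounts > 1], decreasing=TRUE)
```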
## Add a Unique Individual Identifier to All Elements
I have found that exporting the `rosetta` file and manually attaching a unique individual identifier (e.g., a number) is necessary to ensure that the right data are attached to the right people. In terms of process, here is what I do:
* Export the `rosetta` file. You can do this in your initial `batchProcessZoomOutput` call as follows:
```{r, eval=FALSE}
batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx", exportZoomRosetta="./myMeetings_rosetta_original.xlsx")
```
* Copy the file that you have exported and save it with a new name (e.g., myMeetings_rosetta_edited.xlsx). This will prevent you from losing your manual work if you re-run the command above and overwrite the file.
* Add a new column to the new file that contains a unique user identifier. Name the column something like 'indivId'. In this column you will enter a persistent individual identifier for individuals in your dataset.
* Manually review the file, entering the correct unique identifier for each Zoom display name. I tend to sort the file by the Zoom display name column first. This increases the chances of catching the same person who shows up in multiple meetings or under slightly different names (e.g., Ringo, Ringo Starr). For the unique identifier, I recommend using either a master numeric identifier or an email address that connects to other data streams that you might have.
* Import the reviewed and edited rosetta file. The following command will import the file and add your new identifier to each of the elements that are included in `batchOut`. Going forward, you should use the new individual identifier (e.g., `indivId`) in your analyses. Together with the meeting identifier (e.g., `batchMeetingId`), you will be able to link records to other relevant data that you have collected (e.g., survey data).
```{r, eval=FALSE}
batchOutIds = importZoomRosetta(zoomOutput=batchOut, zoomRosetta="./myEditedRosetta.xlsx",
meetingId="batchMeetingId")
```
Running the `importZoomRosetta` command will attach the new unique identifier that you have created to each of the datasets generated earlier from the available files in your directory.
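To confirm that the merge worked, you can spot-check that the new identifier now appears alongside the display names. In this sketch, `indivId` is whatever column name you chose when editing the rosetta file:
```{r, eval=FALSE}
head(batchOutIds$transcript[, c("batchMeetingId", "userName", "indivId")])
```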
## Next Steps
Following the process above, you should have a single R object with a tremendous amount of information. [Part 3 of this guide will cover how to analyze the conversational data in this R object](http://zoomgroupstats.org/articles/part03-analyze-zoom-conversation-data.html). | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/inst/doc/part02-process-zoom-files.Rmd |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'----
library(zoomGroupStats)
batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
## ---- eval=TRUE---------------------------------------------------------------
# Three records from the sample transcript dataset
head(batchOut$transcript, 3)
## ---- eval=TRUE---------------------------------------------------------------
# Three records from the sample chat dataset
head(batchOut$chat, 3)
## ---- eval=FALSE--------------------------------------------------------------
# # You can request both sentiment analysis methods by including them in sentMethods
# transcriptSent = textSentiment(inputData=batchOut$transcript, idVars=c('batchMeetingId','utteranceId'), textVar='utteranceMessage', sentMethods=c('aws', 'syuzhet'), appendOut=FALSE, languageCodeVar='utteranceLanguage')
#
# # This does only the aws sentiment analysis on a chat file
# chatSent = textSentiment(inputData=batchOut$chat, idVars=c('batchMeetingId', 'messageId'), textVar='message', sentMethods=c('aws'), appendOut=FALSE, languageCodeVar='messageLanguage')
## ---- eval=TRUE---------------------------------------------------------------
# This does only the syuzhet analysis on the transcript and does not append it to the input dataset
transcriptSent = textSentiment(inputData=batchOut$transcript, idVars=c('batchMeetingId','utteranceId'), textVar='utteranceMessage', sentMethods=c('syuzhet'), appendOut=FALSE, languageCodeVar='utteranceLanguage')
head(transcriptSent$syuzhet)
## ---- eval=TRUE---------------------------------------------------------------
# This does only the syuzhet sentiment analysis on a chat file and appends it to the input dataset
chatSent = textSentiment(inputData=batchOut$chat, idVars=c('batchMeetingId', 'messageId'), textVar='message', sentMethods=c('syuzhet'), appendOut=TRUE, languageCodeVar='messageLanguage')
head(chatSent$syuzhet)
## ---- eval=TRUE---------------------------------------------------------------
# Analyze the transcript, without the sentiment metrics
convoTrans = textConversationAnalysis(inputData=batchOut$transcript, inputType='transcript', meetingId='batchMeetingId', speakerId='userName')
## ---- eval=TRUE---------------------------------------------------------------
# This is output at the meeting level. (Note that the values across meetings are equivalent because the sample dataset is a replication of the same meeting multiple times.)
head(convoTrans$transcriptlevel)
## ---- eval=TRUE---------------------------------------------------------------
# This is output at the speaker level
head(convoTrans$speakerlevel)
## ---- eval=TRUE---------------------------------------------------------------
# Analyze the conversation within the chat file, including the sentiment metrics
convoChat = textConversationAnalysis(inputData=chatSent$syuzhet, inputType='chat', meetingId='batchMeetingId', speakerId='userName', sentMethod="syuzhet")
## ---- eval=TRUE---------------------------------------------------------------
# This is output at the meeting level
head(convoChat$chatlevel)
## ---- eval=TRUE---------------------------------------------------------------
# This is output at the speaker level
head(convoChat$userlevel)
## ---- eval=TRUE---------------------------------------------------------------
win.convo.out = windowedTextConversationAnalysis(inputData=batchOut$transcript, inputType='transcript', meetingId='batchMeetingId', speakerId='userName', sentMethod="none", timeVar="utteranceStartSeconds", windowSize=300)
## ---- eval=TRUE---------------------------------------------------------------
# View the window-level output
head(win.convo.out$windowlevel)
## ---- eval=TRUE---------------------------------------------------------------
# View the output for speakers within windows
head(win.convo.out$speakerlevel)
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/inst/doc/part03-analyze-zoom-conversation-data.R |
---
title: "Part 3: Analyzing Conversations in Zoom"
author: "Andrew P. Knight"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Part 3: Analyzing Conversations in Zoom}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r, error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'}
library(zoomGroupStats)
batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
```
## Introduction
Virtual meetings afford granular data on how people communicate with one another. With this granularity comes both opportunities and challenges. With respect to opportunities, researchers can use virtual meetings to track the flow and content of communications on a second-by-second basis. This enables deriving insights into who talks to whom and how different people may communicate with one another in distinct ways.
With respect to challenges, granular data often requires researchers--who may be used to smaller sample datasets--to contend with incredibly large volumes of information. For example, if a researcher were to collect data from 100 60-minute group meetings, this would yield 6000 minutes of recorded speech. Depending on the rate of speech, this could amount to 50,000 to 60,000 spoken utterances.
Because virtual meetings present data on events in time (e.g., a spoken sentence or chat message) made by an individual within a virtual meeting, they require close attention to levels of analysis. For many questions, researchers are not interested in the fine-grained events; rather, researchers are interested in using those fine-grained events to measure attributes of individuals or groups within certain segments of time. `zoomGroupStats` provides basic functionality to derive these kinds of aggregated metrics. Whether a given aggregation is appropriate for assessing some construct, however, will fundamentally depend on the phenomenon under investigation.
## Overview of text-based data in Zoom
`zoomGroupStats` provides functions for analyzing conversations that occur through two channels in a virtual meeting:
* **transcribed spoken language**: Zoom's cloud recording feature includes an option for transcription of language transmitted through meeting participants' microphones. This transcription is performed by otter.ai.
* **text-based chat messages**: During a Zoom meeting, users can send chat messages. In the chat file included in a Zoom Cloud recording, only publicly facing messages are captured. These are the messages that users send to the group as a whole. If users send one another direct/private messages, these are not captured in the downloadable file.
### Cleaning and modifying text-based data
Text analysis is a dynamic area brimming with innovation. `zoomGroupStats` does not currently include direct functions for cleaning or modifying the text that is captured in either a transcript or a chat file. Because, however, the text for each of these files is stored in a variable, it is straightforward to use functions from other packages to clean or otherwise modify text before conducting conversation analysis.
Depending on your research questions and the scale of your dataset, you may wish to manually review and correct the transcribed audio content. Just like transcriptions done by humans, transcriptions produced by otter.ai will have errors. A manual review of a transcription can correct for these errors and provide a sharper analysis of text.
### About the Zoom `transcript` file
If you have followed the steps outlined in [Part 2](http://zoomgroupstats.org/articles/part02-process-zoom-files.html), you will have a single list object from your batch analysis. Within this list you will find a `transcript` dataset. `transcript` is a data.frame that is the parsed audio transcript file. Each row represents a single marked "utterance" using Zoom's cloud-based transcription algorithm. Utterances are marked as a function of pauses in speech and/or speaker changes.
```{r, eval=TRUE}
# Three records from the sample transcript dataset
head(batchOut$transcript, 3)
```
Each row contains identifying information for the utterance, including what meeting it was in (`batchMeetingId`), who said it (`userName`), when it was said (`utteranceStartTime`, `utteranceStartSeconds`), and how long it lasted (`utteranceTimeWindow`). There is also an indicator of the language for the utterance (`utteranceLanguage`). This indicator is used with some text analysis packages.
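For a quick descriptive check before running the higher-level functions described below, you can aggregate these utterance records yourself. Here is a minimal sketch in base `R`, assuming `utteranceTimeWindow` is recorded in seconds:
```{r, eval=FALSE}
# Total speaking time (in seconds) for each speaker within each meeting
aggregate(utteranceTimeWindow ~ batchMeetingId + userName,
          data=batchOut$transcript, FUN=sum)
```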
### About the Zoom `chat` file
Also included in your batch output file will be a `chat` dataset. `chat` is a data.frame that is the parsed text-based chat file. Each row represents a single chat message submitted by a user. Note that non-ASCII characters will not be correctly rendered in the message text.
```{r, eval=TRUE}
# Three records from the sample chat dataset
head(batchOut$chat, 3)
```
Like `transcript`, each row contains identifying information for the chat message, including what meeting it was in (`batchMeetingId`), who posted it (`userName`), and when it was posted (`messageTime`, `messageSeconds`). There is also an indicator of the language for the message (`messageLanguage`).
## Analyzing conversations in `transcript` and `chat`
`zoomGroupStats` includes functions that aid in deriving common conversation metrics at several levels of analysis. Each of the functions can be applied to either the `chat` or the `transcript` file.
### Performing sentiment analysis
Sentiment analysis--the assessment and/or classification of language according to its emotional tone--is among the most ubiquitous kinds of text analysis. `zoomGroupStats` provides the ability to perform sentiment analysis on the utterances or messages in `transcript` and `chat` files. Because this type of analysis scores pieces of text, I recommend conducting this analysis first. The sentiment metrics can then be included in downstream conversation analyses that aggregate aspects of the conversation to the individual or meeting levels.
Using the `textSentiment()` function, there are two different types of sentiment analysis that you can request:
* **syuzhet** - This is a lexicon-based analysis using the `syuzhet` package. A lexicon-based analysis uses pre-existing dictionaries to measure the sentiment of individual words in a piece of text. In essence, this approach is a word-counting method.
* **aws** - This is an approach that relies on machine learning through Amazon Web Services. Rather than focusing on individual words, this method draws upon a trained model that assesses attributes of the text as a whole. To request this type of analysis, [you must have appropriately configured your AWS credentials](https://github.com/paws-r/paws/blob/main/docs/credentials.md).
In deciding which method to use, you should consider your research objectives. In general, the aws method will provide greater validity for assessing sentiment. However, it also will take longer to run and, for larger datasets, will incur financial costs.
```{r, eval=FALSE}
# You can request both sentiment analysis methods by including them in sentMethods
transcriptSent = textSentiment(inputData=batchOut$transcript, idVars=c('batchMeetingId','utteranceId'), textVar='utteranceMessage', sentMethods=c('aws', 'syuzhet'), appendOut=FALSE, languageCodeVar='utteranceLanguage')
# This does only the aws sentiment analysis on a chat file
chatSent = textSentiment(inputData=batchOut$chat, idVars=c('batchMeetingId', 'messageId'), textVar='message', sentMethods=c('aws'), appendOut=FALSE, languageCodeVar='messageLanguage')
```
The results of textSentiment come as a named list, with items for `aws` and/or `syuzhet`:
```{r, eval=TRUE}
# This does only the syuzhet analysis on the transcript and does not append it to the input dataset
transcriptSent = textSentiment(inputData=batchOut$transcript, idVars=c('batchMeetingId','utteranceId'), textVar='utteranceMessage', sentMethods=c('syuzhet'), appendOut=FALSE, languageCodeVar='utteranceLanguage')
head(transcriptSent$syuzhet)
```
The `appendOut` option in `textSentiment()` gives you the ability to merge the sentiment metrics back into the original input data. I usually do this so that I can incorporate these metrics into downstream conversation analyses.
```{r, eval=TRUE}
# This does only the syuzhet sentiment analysis on a chat file and appends it to the input dataset
chatSent = textSentiment(inputData=batchOut$chat, idVars=c('batchMeetingId', 'messageId'), textVar='message', sentMethods=c('syuzhet'), appendOut=TRUE, languageCodeVar='messageLanguage')
head(chatSent$syuzhet)
```
Note that I have not included the `aws` output in this vignette because it requires a call to a third-party service.
### Performing conversation analysis
Conversation analysis entails using the exchanges of communications among meeting members to assess attributes of individuals, dyads, and groups. `zoomGroupStats` currently includes two basic kinds of conversation analysis.
The `textConversationAnalysis()` function will provide a descriptive assessment of either the `transcript` or the `chat` file.
```{r, eval=TRUE}
# Analyze the transcript, without the sentiment metrics
convoTrans = textConversationAnalysis(inputData=batchOut$transcript, inputType='transcript', meetingId='batchMeetingId', speakerId='userName')
```
`textConversationAnalysis()` provides a list with output at two levels of analysis--the meeting level (first item) and the speaker level (second item). These items are named according to the type of input that you have provided.
```{r, eval=TRUE}
# This is output at the meeting level. (Note that the values across meetings are equivalent because the sample dataset is a replication of the same meeting multiple times.)
head(convoTrans$transcriptlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier that you specified |
| transcriptStartTime | When the first utterance was recorded |
| transcriptEndTime | When the last utterance ended |
| utteranceTimeWindow_sum | Total number of seconds of speaking time |
| utteranceTimeWindow_x | Mean duration, in seconds, of utterances |
| utteranceTimeWindow_sd | Standard deviation of the duration, in seconds, of utterances |
| utteranceGap_x | Mean duration, in seconds, of silent time between consecutive utterances |
| utteranceGap_sd | Standard deviation of the duration, in seconds, of silent time between consecutive utterances |
| numUtterances | Count of the number of utterances in the meeting |
| numUniqueSpeakers | Count of the number of unique speakers in the meeting. Note that this includes any utterances for which the speaker is UNIDENTIFIED. |
| silentTime_sum | Total number of seconds of silent time |
| burstinessRaw | A measure of the concentration of utterances in time |
```{r, eval=TRUE}
# This is output at the speaker level
head(convoTrans$speakerlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier that you specified |
| userName | The speaker identifier that you specified |
| firstUtteranceTime | Timestamp for this person's first utterance |
| lastUtteranceTime | Timestamp for this person's last utterance |
| utteranceTimeWindow_sum | Total number of seconds of this person's speaking time |
| utteranceTimeWindow_x | Mean duration, in seconds, of this person's utterances |
| utteranceTimeWindow_sd | Standard deviation of the duration, in seconds, of this person's utterances |
| utteranceGap_x | Mean duration, in seconds, of silent time before this person speaks after a prior utterance|
| utteranceGap_sd | Standard deviation of the duration, in seconds, of silent time before this person speaks after a prior utterance |
| numUtterances | Count of the number of utterances this person made in this meeting |
If you have already conducted a sentiment analysis using the `textSentiment()` function, you can further include those attributes. Note that currently you can only analyze one sentiment analysis method at a time. For example, here is a request for an analysis of the chat file:
```{r, eval=TRUE}
# Analyze the conversation within the chat file, including the sentiment metrics
convoChat = textConversationAnalysis(inputData=chatSent$syuzhet, inputType='chat', meetingId='batchMeetingId', speakerId='userName', sentMethod="syuzhet")
```
The names of the items in the list output for `chat` are `chatlevel` and `userlevel`:
```{r, eval=TRUE}
# This is output at the meeting level
head(convoChat$chatlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier that you specified |
| chatStartTime | The time of the first chat message in this meeting |
| chatEndTime | The time of the last chat message in this meeting |
| messageNumChars_sum | Total number of characters chatted in meeting |
| messageNumChars_x | Mean number of characters per message chatted in meeting |
| messageNumChars_sd | Standard deviation of the number of characters per message chatted in meeting |
| messageGap_x | Mean duration, in seconds, of time between chat messages in this meeting |
| messageGap_sd | Standard deviation of the duration, in seconds, of time between chat messages in this meeting |
| numUniqueMessagers | Number of individuals who sent chat messages in this meeting |
| numMessages | Total number of messages sent in this meeting |
| totalChatTime | Amount of time between first and last messages |
| burstinessRaw | Measure of the concentration of chat messages in time |
| ... | Additional variables depend on the type of sentiment analysis you may have requested. |
```{r, eval=TRUE}
# This is output at the speaker level
head(convoChat$userlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier that you specified |
| userName | The individual identifier you specified |
| firstMessageTime | The time of this person's first chat message in this meeting |
| lastMessageTime | The time of this person's last chat message in this meeting |
| messageNumChars_sum | Total number of characters this person chatted in meeting |
| messageNumChars_x | Mean number of characters per message this person chatted in meeting |
| messageNumChars_sd | Standard deviation of the number of characters per message this person chatted in meeting |
| messageGap_x | Mean duration, in seconds, of time before this person sends a chat message after a prior message |
| messageGap_sd | Standard deviation of the duration, in seconds, of time before this person sends a chat message after a prior message |
| ... | Additional variables depend on the type of sentiment analysis you may have requested. |
## Windowed conversation analysis
One of the unique strengths of collecting data using virtual meetings is the ability to assess *dynamics*--how meeting characteristics and participants' behavior changes over time. Beyond analyzing the raw events over time, `zoomGroupStats` enables you to run the textConversationAnalysis above within temporal windows in a given meeting. By windowing, and aggregating data within the window, you can derive more reliable indicators of attributes than relying solely on the raw events.
For example, using the following function call, you could analyze how conversation attributes--who is speaking a lot, what is the sentiment of speech--change throughout a meeting, in 5-minute (`windowSize=300` seconds) increments.
```{r, eval=TRUE}
win.convo.out = windowedTextConversationAnalysis(inputData=batchOut$transcript, inputType='transcript', meetingId='batchMeetingId', speakerId='userName', sentMethod="none", timeVar="utteranceStartSeconds", windowSize=300)
```
The output of `windowedTextConversationAnalysis` is a list with two data.frames as items:
```{r, eval=TRUE}
# View the window-level output
head(win.convo.out$windowlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| windowId | Incrementing numeric identifier for the temporal window |
| windowStart | Number of seconds from start of transcript when this window begins |
| windowEnd | Number of seconds from start of transcript when this window ends |
| ... | All other variables correspond to the textConversationAnalysis output; but, they are calculated within a given temporal window |
```{r, eval=TRUE}
# View the output for speakers within windows
head(win.convo.out$speakerlevel)
```
This output will provide a record for each possible speaker within each possible window. This is done so that valid zeros (e.g., no speaking) are represented in the dataset.
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | Meeting identifier requested |
| userName | Speaker identifier requested |
| windowId | Incrementing numeric identifier for the temporal window |
| windowStart | Number of seconds from start of transcript when this window begins |
| windowEnd | Number of seconds from start of transcript when this window ends |
| ... | All other variables correspond to the textConversationAnalysis output; but, they are calculated within a given temporal window |
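Because every speaker-window combination is present, this output lends itself to plotting participation over time. Here is a minimal sketch using base graphics that simply selects the first meeting and first speaker in the output; it assumes the `utteranceTimeWindow_sum` column carries over from the `textConversationAnalysis()` output as described above:
```{r, eval=FALSE}
# Track one speaker's talk time across the windows of one meeting
spk = win.convo.out$speakerlevel
oneMeeting = spk[spk$batchMeetingId == spk$batchMeetingId[1], ]
oneSpeaker = oneMeeting[oneMeeting$userName == oneMeeting$userName[1], ]

plot(oneSpeaker$windowId, oneSpeaker$utteranceTimeWindow_sum, type="b",
     xlab="Window", ylab="Speaking time (seconds)")
```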
## Next Steps
In the [final part of this guide](http://zoomgroupstats.org/articles/part04-analyze-zoom-video-data.html), you will learn how to process and analyze video files downloaded from Zoom sessions. | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/inst/doc/part03-analyze-zoom-conversation-data.Rmd |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval=FALSE--------------------------------------------------------------
# batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx")
## ---- eval=FALSE, error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'----
# batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
## ---- eval=FALSE--------------------------------------------------------------
# batchGrabVideoStills(batchInfo=batchOut$batchInfo, imageDir="~/Documents/myMeetings/videoImages", sampleWindow=60)
## ---- eval=FALSE--------------------------------------------------------------
# vidOut = batchVideoFaceAnalysis(batchInfo=batchOut$batchInfo, imageDir="~/Documents/meetingImages", sampleWindow=60, facesCollectionID="group-r")
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/inst/doc/part04-analyze-zoom-video-data.R |
---
title: "Part 4: Analyzing Video Data from Zoom"
author: "Andrew P. Knight"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Part 4: Analyzing Video Data from Zoom}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction to Analyzing Video from Zoom
One of the most valuable raw outputs from Zoom is the video recording of the session. Indeed, the video recording is, itself, the source of the transcript [that we considered in Part 03](http://zoomgroupstats.org/articles/part03-analyze-zoom-conversation-data.html). But, beyond spoken language, video offers the potential to examine participants' nonverbal behavior and a wide range of actions and interactions during virtual meetings. As discussed in [Part 01](http://zoomgroupstats.org/articles/part01-configure-zoom.html), you should attend carefully to your Zoom configuration before beginning to collect data. My recommendation is to select all of the video options that are available and optimize your recordings for 3rd party processing.
If you do select all of the recording options, you will be able to download several files from Zoom after a virtual meeting is complete and the recording is available:
* A gallery view: This gives a tiled view of meeting participants who have their camera on. It is important to note that participants' placement on the screen (i.e., the location of their tiles) is dynamic and will change depending on who has a camera on in the meeting at any given point in time.
* An active speaker view: This displays the camera feed of a single participant at a time. The participant who is featured is the one that Zoom has classified as the active speaker at that moment.
* Shared screen views: There are a few different shared screen views. One gives the active speaker as a small thumbnail within a larger field showing the shared screen.
Depending on your research questions, different video formats will be most relevant. For my own research, which focuses on group dynamics, I prefer the gallery view. This enables measuring the nonverbal behavior of all participants throughout the entirety of a virtual meeting (assuming they have their camera on).
`zoomGroupStats` currently supports one video per meeting (named as *prefix*_video.mp4), where *prefix* is the file naming convention that you included in your [batch processing template](https://github.com/andrewpknight/zoomGroupStats/blob/main/inst/extdata/myMeetingsBatch.xlsx). That is, for each meeting, you can include a single video file in your batch processing folder. You may still find it valuable to download other views (e.g., active speaker)--just use a different suffix for those filenames.
## Parsing Zoom Video feed
Analyzing videos is computationally and, consequently, time intensive. Think of a video as a series of still images that have been strung together--much like a flip book. When analyzing videos, we are breaking down a video into its still frames and, then, analyzing those still frames. Depending on the duration and quality (e.g., frame rate, size), there could be hundreds of thousands of images within your video. For example, if your video has a frame rate of 60 frames per second of footage, that would yield 3,600 frames for a single *minute* of video.
Given the time that it can take to analyze many videos, I "parse" or pre-process the video files from Zoom meetings before analyzing them. By "parse", I mean that I sample and save a sequence of still frames from each video that can be used in downstream analyses. This process is akin to how we parsed the transcript or chat files from Zoom meetings in [Part 03](http://zoomgroupstats.org/articles/part03-analyze-zoom-conversation-data.html). I usually do this at the start of a project, at a time when I will not be using my computer (e.g., before going to sleep), so that the batch parsing function can run throughout the night.
The `batchGrabVideoStills()` function presumes that you have followed the batch process setup described in [Part 02 of this guide](http://zoomgroupstats.org/articles/part02-process-zoom-files.html). If you have done this, you have a series of video files saved in the same directory as your batch input file.
```{r, eval=FALSE}
batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx")
```
And, the function presumes that you have run the `batchProcessZoomOutput()` function to generate the `batchOut` object:
```{r, eval=FALSE, error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'}
batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
```
With this setup complete, you can run the `batchGrabVideoStills` function:
```{r, eval=FALSE}
batchGrabVideoStills(batchInfo=batchOut$batchInfo, imageDir="~/Documents/myMeetings/videoImages", sampleWindow=60)
```
This function call will iterate through the meetings in your batch and, for each meeting where there is a video, sample a still frame from the video every 60 seconds. Because of a quirk with video still frames, the function will actually sample the first image at `sampleWindow/2` and then each subsequent image every `sampleWindow`. The function saves these images in a new directory that it creates within the same directory where the videos are saved.
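For example, you can preview the timestamps (in seconds) at which frames would be sampled from a video of a given duration. Here is a minimal sketch for a hypothetical 30-minute video:
```{r, eval=FALSE}
sampleWindow = 60      # seconds between sampled frames
videoDuration = 1800   # a 30-minute video, in seconds

# First frame at sampleWindow/2, then one frame every sampleWindow
frameTimes = seq(sampleWindow/2, videoDuration, by=sampleWindow)
head(frameTimes)       # 30, 90, 150, ...
length(frameTimes)     # number of stills that would be extracted
```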
In addition to the images that it saves, `batchGrabVideoStills` will return a data frame with information about what it did for each meeting:
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier |
| videoExists | TRUE or FALSE indicating whether there was a video for this meeting |
| imageDir |path to the directory where video images are saved |
| sampleWindow | The window requested for sampling frames |
| numFramesExtracted | The total number of image files that were saved for this meeting |
## Analyzing attributes of detected faces
With the still frames sampled from the video, you can now progress to analyzing attributes of the video. The current version of `zoomGroupStats` includes one function--`batchVideoFaceAnalysis()`--for analyzing the attributes of faces detected within the video. This function relies on the Rekognition service from Amazon Web Services. To use this function, [you must have appropriately configured your AWS credentials](https://github.com/paws-r/paws/blob/main/docs/credentials.md).
```{r, eval=FALSE}
vidOut = batchVideoFaceAnalysis(batchInfo=batchOut$batchInfo, imageDir="~/Documents/meetingImages", sampleWindow=60, facesCollectionID="group-r")
```
This function will iterate through a set of meetings. For each meeting, it will iterate through the extracted images from the video of the meeting. Within each image, it will detect human faces and measure attributes of those faces using Rekognition. If you create an identified collection of users in advance, you can use this collection to detect the identity of any faces in the video.
The function returns a data.frame that is at the face-image level of analysis. That is, each record in the data.frame corresponds to a face detected within an image, which is itself nested within a given meeting. As an example, imagine that you have a recording of a 60 minute meeting for which you have extracted 60 still frames (e.g., one every minute). Imagine that there were 5 participants in this meeting and that the participants kept their camera on throughout the meeting. This should yield approximately 300 records of data. Note that there will likely be fewer records simply because participants might have stepped away from their camera or been unrecognizable at a given point in the video.
For each detected face, there is an abundance of information from AWS Rekognition. There are estimates of participants' age and gender, facial attributes (e.g., glasses, facial hair), and estimates of emotional expressions. If you provided an AWS collection of identified images, the identity of the participant will also be included, along with a confidence level.
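Because the output is at the face-image level, simple tabulations can give a first look at camera usage across your meetings. Here is a minimal sketch, assuming the output carries the `batchMeetingId` identifier used throughout the batch output:
```{r, eval=FALSE}
# Number of face detections per meeting
table(vidOut$batchMeetingId)
```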
## Next Steps
At this point in the guide, the next steps are on my end. You should expect the functionality of `zoomGroupStats` to continue to extend, as I build out new functions and make the existing functions more efficient. You should also expect this guide to extend. A subsequent topic that I will include, for example, is how to best analyze Zoom Recordings captured locally (i.e., not with the Zoom Cloud). If you have ideas or suggestions for functionality--or, if you have used the functions and encountered bugs or problems--[please reach out](mailto:knightap@wustl.edu). With many eyes, all bugs are shallow. | /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/inst/doc/part04-analyze-zoom-video-data.Rmd |
---
title: "Part 1: Configuring Zoom to Capture Useful Data"
author: "Andrew P. Knight"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Part 1: Configuring Zoom to Capture Useful Data}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
The first part of this guide will provide recommended practices and processes to use when you are collecting data through Zoom to be used in research. Like any aspect of research, careful and thoughtful upfront planning pays dividends when using Zoom. Because `zoomGroupStats` relies on [Zoom Cloud recording](https://support.zoom.us/hc/en-us/articles/203741855) features, this guide will focus specifically on practices to use when recording your data to the cloud. However, even if you are recording virtual meetings locally, the same basic principles will likely apply.
This guide will not provide step-by-step instructions for how to operate Zoom. For detailed guidance on using Zoom, you should consult the [Zoom Help Center](https://support.zoom.us/hc/en-us).
Hopefully you are reading this guide *before* you have started collecting data. It is before collecting data that you have the best chance to minimize undesirable variation and maximize your options for using the data that you collect. Collecting data through virtual meetings is complicated and requires a thoughtful process. To give yourself the best downstream outcomes, take time upfront--before running any meetings at all--to configure your Zoom subscription. In particular, consider the following recommendations:
## Develop a standardized protocol
Before launching data collection, create documentation of a standard process for yourself and any collaborators to follow. This is especially important if you will be depending on others (e.g., collaborators, research assistants, participants themselves) to capture virtual meetings. A standardized protocol will ensure consistency in your raw Zoom output across multiple meetings. As examples, consider:
1. [Sample of a guide given to those charged with recording meetings](https://docs.google.com/presentation/d/1B9Cdc-tdB4mKYjIXQ7R-RgF5-HEazU_Daiik_1GH8WY/edit?usp=sharing)
1. [Sample video guide for how to set up Zoom recording features](https://youtu.be/Y82nf9lfeQU)
1. [Sample video guide for recording the meeting itself](https://youtu.be/HbbKcmbaLYI)
## Maximize degrees of freedom
When configuring your Zoom subscription and preparing to record virtual meetings, I recommend providing yourself the most flexibility upfront. You can always subset and focus on some elements downstream. But, if you don't capture something upfront, you'll lose those options downstream. In particular:
1. If using cloud-based recording, select all possible recording options (of different views). This gives you the ability to make selective decisions after you've run the meeting.
1. Select options that enhance the recording for 3rd party video editing.
1. Make sure to select the option to have Zoom produce an audio transcript.
1. Make other option selections in a manner consistent with your research goals (e.g., having names on videos, having video time stamped).
## Require users to be registered in Zoom
A major challenge when collecting large scale data with Zoom recordings is the absence of a persistent individual identifier that is linked to the wide range of display names that people use. There are a few ways that this can contribute to data integrity issues. To illustrate some of these challenges, consider a few simple examples:
* Ringo Starr logs into a Zoom meeting on Monday using his corporate account, which has a default display name of *Richard Starkey*. On Tuesday, he logs into a meeting using his personal account, which has a default display name of *Ringo*. In a dataset containing both these meetings, the single person Ringo Starr would appear as two different individuals. Moreover, if linking to an external dataset, it is possible that neither *Richard Starkey* nor *Ringo* appear.
* Ringo Starr logs into a Zoom meeting on Monday using his corporate account, with the default display name of *Richard Starkey*. Halfway through the meeting, he changes his display name to *Ringo*. In a dataset of just this one meeting, there could be two display names for him, which would be interpreted as different people.
* Ringo Starr logs into a Zoom meeting on Monday with his fan club using his personal account, with the display name of *Ringo*. Two of his superfans also have the display name *Ringo*. In a dataset for this meeting, three truly distinct individuals would appear as the same person.
To properly study human behavior, we need to have a valid linkage between an individual's behavior (e.g., face in video feed, spoken words, text chat messages) and their identity. When conducting research with Zoom, it is further critical to know which individual person logged into which virtual meeting. `zoomGroupStats` does provide functions for addressing this challenge after you have collected data. However, to save yourself considerable time, take steps before you collect data to actively minimize user identity confusion:
1. If possible, require users to access meetings through an account registered with Zoom.
1. If possible, require users to access Zoom using a known registered account (e.g., one with your institution).
1. If neither of these is possible, add guidance to your standardized protocol for meeting participants to manually change their display names to some standardized format.
## Capture timestamps to sync up data streams
One significant strength of using virtual meetings for research is that you gain the ability to unobtrusively capture streams of human behavior over time. Collecting datastreams throughout time, though, brings distinct challenges. One of the most significant challenges to overcome is compiling precise information on *when* things happen.
Within Zoom, there are two important baseline events for which you must capture precise timing information:
* **When did the session begin?** This is the moment in time when the Zoom session was launched. In reality, this is the starting point for when people in this virtual meeting were able to interact with one another.
* **When did the recording of the session begin?** This is the moment in time when the Zoom recording was launched. This is necessarily at a time equal to (if the option to launch recording at the start of the session is selected in Zoom) or later than the time that the session began.
The reason that it is so critical to capture this information is that some Zoom outputs (e.g., chat) use the start of the session as the zero point, whereas others (e.g., transcript) use the start of the recording as the zero point. In order to properly sync up data streams, it is important to convert Zoom's datastreams to true clock time.
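As a concrete illustration, here is a minimal sketch in `R` of how the two zero points map onto clock time; the timestamps and offsets below are placeholders:
```{r, eval=FALSE}
sessionStart = as.POSIXct("2020-09-04 15:00:00")    # when the session launched
recordingStart = as.POSIXct("2020-09-04 15:03:30")  # when the recording began

# A chat message posted 120 seconds into the session
sessionStart + 120

# An utterance transcribed 120 seconds into the recording lands
# 3.5 minutes later in clock time
recordingStart + 120
```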
Keep careful records about these events by using a [spreadsheet like this template](https://github.com/andrewpknight/zoomGroupStats/blob/main/inst/extdata/myMeetingsBatch.xlsx). It is, of course, inevitable that you will fail to capture some of this information. In the event that you do not capture the timestamp for the start of the session, this can be accessed through the participants information in Zoom's Cloud recording system. If you did not capture the timestamp for the start of the recording, you might be able to extract this from the inset timestamp in video files associated with the session.
## Next Steps
In [Part 2](http://zoomgroupstats.org/articles/part02-process-zoom-files.html) of the guide, you will learn how to organize the files that you download from Zoom and use `zoomGroupStats` to turn your downloads into datasets.
| /scratch/gouwar.j/cran-all/cranData/zoomGroupStats/vignettes/part01-configure-zoom.Rmd |
---
title: "Part 2: Turning Zoom Downloads into Datasets"
author: "Andrew P. Knight"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Part 2: Turning Zoom Downloads into Datasets}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
In the second part of this guide, you will learn how to turn downloadable files from Zoom recordings into datasets that you can analyze in `R` using `zoomGroupStats`. Again, because `zoomGroupStats` relies on [Zoom Cloud recording](https://support.zoom.us/hc/en-us/articles/203741855) features, this guide will focus specifically on the files that you download from the Zoom Cloud. However, the functions in `zoomGroupStats` can also be used on meetings that you record locally.
This guide will progress from instructions on which files to download from Zoom to how to convert those files into usable datasets. In its most basic form, a research dataset using virtual meetings can be structured as individual meeting participants who are nested within virtual meetings. Individual participants can attend multiple virtual meetings; indeed, individual participants *could* attend multiple virtual meetings at the same time. Individuals can further be members of multiple other organizational units (e.g., groups, teams, departments, organizations). To build a basic, clean dataset, though, what you must know is which individual human being was logged into which virtual meeting.
## How to Download Files from Zoom
Depending on your research objectives, you will need different output files from Zoom. The components that are currently supported and used in this package are:
1. A *participants* file. This is a .csv file that contains meta-data about your meeting. This is an essential file that should be downloaded for each meeting that you wish to process. To download it:
* Log into your Zoom account through a web browser
    * Navigate to the *Reports* section
* Click *Usage Reports*
* Scroll to the *Participants* column for your focal meeting
* Click the linked number of participants
* Check "export with meeting data" and "show unique users"
* Click *Export*
1. A *transcript* file. This is a .vtt file that contains Zoom's cloud transcription of the spoken audio during the recorded meeting.
* Navigate to the *Recordings* page
* Click the link indicating the number of items
* Download the Audio Transcript file
1. A *chat* file. This is a .txt file that contains the record of public chat messages posted during the recorded meeting. Download it from the same page where you accessed the *transcript*.
1. An *audio* file. This is an .mp4 file that contains the recorded audio during the session. Download it from the same page as the items above.
1. Several different *video* file options. [Zoom's Help Center describes the nature of these different formats](https://support.zoom.us/hc/en-us/articles/360025561091-Recording-layouts). For analyzing facial expressions, one of the most useful is the gallery style video.
## Naming the Downloaded Files from Zoom
`zoomGroupStats` is designed to batch process virtual meetings. Batch processing simply means that you are combining several meetings into a single dataset. Even if you are only analyzing a single meeting, though, it is useful to treat that single meeting as a batch of one. This will help in creating an extensible dataset that you could add to in the future.
To organize your data for a batch run, you must rename the files in a systematic way and save them in a single directory. To name the files, imagine that each file will have a "prefix" and a "suffix". The prefix will be an identifier for what meeting it came from and the suffix will be an identifier for what data source is in the file. Whereas you can use whatever prefix you choose, the suffix must be standardized as follows:
| Suffix | Description |
|:---------------|:------------------------------------------------|
| _chat.txt | Used for the chat file for meetings |
| _transcript.vtt | Used for the transcript file for meetings |
| _participants.csv | Used for the participants file for meetings |
| _video.mp4 | Used for a video file for meetings |
Imagine a sample batch run, then, which includes all four elements for three meetings. The prefixes for the meetings could be "meeting001", "meeting002", and "meeting003". This would yield a total of 12 files, named as follows:
* meeting001_chat.txt
* meeting001_transcript.vtt
* meeting001_participants.csv
* meeting001_video.mp4
* meeting002_chat.txt
* meeting002_transcript.vtt
* meeting002_participants.csv
* meeting002_video.mp4
* meeting003_chat.txt
* meeting003_transcript.vtt
* meeting003_participants.csv
* meeting003_video.mp4
All 12 files should be saved in a single directory.
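Before moving on, you may want to verify your naming. The following sketch (using only base R; the directory path is a placeholder) lists any files that do not end in one of the expected suffixes:
```{r, eval=FALSE}
# List everything in your batch directory
myFiles = list.files("./myMeetings")

# The four suffixes that zoomGroupStats expects
suffixPattern = "_(chat\\.txt|transcript\\.vtt|participants\\.csv|video\\.mp4)$"

# Any file names printed here do not follow the naming convention
myFiles[!grepl(suffixPattern, myFiles)]
```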
## Prepare a Batch Spreadsheet
Next, you will prepare a spreadsheet in .xlsx format and save it within the same directory where your Zoom downloads are. This spreadsheet tells `zoomGroupStats` how to find your files and what information to process. Practically, this spreadsheet is also helpful for organizing your data and keeping track of what you have downloaded.
You must use the following structure and column headers for your spreadsheet:
| batchMeetingId | fileRoot | participants | transcript | chat | video | sessionStartDateTime | recordingStartDateTime |
|:---------------|:---------|:-------------|:-----------|:-----|:------|:--------|:--------|
| 00000000001 | /myMeetings/meeting001 | 1 | 1 | 1 | 0 | 2020-09-04 15:00:00 | 2020-09-04 15:03:30 |
| 00000000002 | /myMeetings/meeting002 | 1 | 1 | 1 | 0 | 2020-09-05 15:00:00 | 2020-09-05 15:03:04 |
| 00000000003 | /myMeetings/meeting003 | 1 | 1 | 1 | 0 | 2020-09-06 15:00:00 | 2020-09-06 15:03:07 |
The first row in the file is the header, which should mirror the example above. Each subsequent row provides the information for a single meeting. [Here is a sample that you could use](https://github.com/andrewpknight/zoomGroupStats/blob/main/inst/extdata/myMeetingsBatch.xlsx), replacing any rows after the header with your own information.
| Column Name | Description |
|:---------------|:------------------------------------------------|
| batchMeetingId | A string identifier for this particular meeting |
| fileRoot | A string that gives the full path and prefix where the files from this meeting can be found. The final part of this string (e.g., meeting001 above) is how you have named the files downloaded for this meeting. |
| participants | Binary - 0 if you did not download the participants file, 1 if you did |
| transcript | Binary - 0 if you did not download the transcript file, 1 if you did |
| chat | Binary - 0 if you did not download the chat file, 1 if you did |
| video | Binary - 0 if you did not download a video file, 1 if you did |
| sessionStartDateTime | A string giving the timestamp for when the meeting began as YYYY-MM-DD HH:MM:SS |
| recordingStartDateTime | A string giving the timestamp for when the recording of the meeting began as YYYY-MM-DD HH:MM:SS |
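If you would rather build this spreadsheet programmatically than by hand, here is a minimal sketch using the `openxlsx` package (the identifiers, paths, and timestamps are placeholders that you would replace with your own):
```{r, eval=FALSE}
batchSheet = data.frame(
  batchMeetingId = c("00000000001", "00000000002"),
  fileRoot = c("/myMeetings/meeting001", "/myMeetings/meeting002"),
  participants = c(1, 1),
  transcript = c(1, 1),
  chat = c(1, 1),
  video = c(0, 0),
  sessionStartDateTime = c("2020-09-04 15:00:00", "2020-09-05 15:00:00"),
  recordingStartDateTime = c("2020-09-04 15:03:30", "2020-09-05 15:03:04")
)

# Save the spreadsheet in the same directory as your Zoom downloads
openxlsx::write.xlsx(batchSheet, "./myMeetings/myMeetingsBatch.xlsx")
```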
## Process your Batch
If you have followed the steps above, turning your Zoom sessions into data should be straightforward using the `zoomGroupStats` package. You should begin by installing the latest version of `zoomGroupStats`, either the released version from CRAN or the development version via the `install_github` function from the `devtools` package:
```{r, eval=FALSE}
# Install zoomGroupStats from CRAN
install.packages("zoomGroupStats")
# Install the development version of zoomGroupStats
devtools::install_github("andrewpknight/zoomGroupStats")
```
After you have installed the package, you can then load it into your environment as usual:
```{r setup}
library(zoomGroupStats)
```
The first step in turning downloads into data is to process your batch using the `batchProcessZoomOutput` function.
```{r, eval=FALSE}
batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx")
```
```{r error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'}
batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
```
This function will iterate through the meetings listed in the `batchInput` file that you created above. For each meeting, the function will detect any of the named files described above (except the video file) and do an initial processing of them. The function currently ignores the video file because video processing can take a considerable amount of time. [Processing video data will be covered in Part 4](http://zoomgroupstats.org/articles/part04-analyze-zoom-video-data.html).
The output, stored in this example in `batchOut`, will be a multi-item list that contains any data that were available according to the batch instructions. Currently, the following items can be included, if raw files are available:
### batchInfo
This is information about the batch, drawn from the input batch file that you have supplied. It is helpful to have this information stored for later analysis of the different components of a Zoom recording.
```{r}
str(batchOut$batchInfo)
```
### meetInfo & partInfo
These are based on information extracted from the participants file downloaded from Zoom. If a meeting has a *_participants.csv file, these will be included. They provide meta-data about the full set of virtual meetings. These are useful files for nailing down the structure of your full dataset (in terms of individuals nested within meetings) and for creating a unique individual identifier for the people in your dataset.
`meetInfo` is a data.frame that gives information pulled about the meetings from the participants file downloaded from Zoom. Each row in this file is a meeting:
```{r}
str(batchOut$meetInfo)
```
`partInfo` is a data.frame that gives the participants in each meeting and any information available in the participants file downloaded from Zoom
```{r}
str(batchOut$partInfo)
```
### transcript & chat
These two items provide parsed text data from the transcribed audio spoken during the meeting and the text-based chat in the meeting. These files are the basis of any further text-based analysis. One important thing to note in processing transcripts is that Zoom timestamps the transcript file anchored on the start of the recording--not at the start of the session itself. The issue here is that a recording may not begin until a period of time after the launch of the session. This means that you could have a transcript file out of sync with the chat file, which begins its timestamp at the start of the session. Resolving this issue requires careful records of when the recording was started or setting up your session to automatically record.
`transcript` is a data.frame that is the parsed audio transcript file. Each row represents a single marked "utterance" using Zoom's cloud-based transcription algorithm. Utterances are marked as a function of pauses in speech and/or speaker changes.
```{r}
str(batchOut$transcript)
```
`chat` is a data.frame that is the parsed text-based chat file. Each row represents a single chat message submitted by a user. Non-ASCII characters will not be correctly rendered in the message text.
```{r}
str(batchOut$chat)
```
### rosetta
The final element is called "rosetta" because it will help you deal with one of the most vexing challenges in analyzing virtual meetings with a large number of participants: The lack of a persistent and unique individual identifier for participants. Because the transcript and chat files rely on people's Zoom display names--and because people can change their display names--you will frequently encounter duplicate names and/or multiple identifiers for the same person.
From a data structure perspective, what is most important to know is which individual person logged into which virtual meeting. Unfortunately, complications in how Zoom identifies individuals require careful human attention to this issue. To illustrate, consider the following exaggerated example. Arun Jah is an attendee in Meeting A. Arun logs in on his laptop through his corporate account. But, he also logs in on his iPad through his family account to the same meeting. After a few minutes, Arun realizes that his iPad account has his child's name displayed (the child was using it for virtual school). So, he changes his display name. Halfway through the meeting, Arun needs to get in his car to go pick up his child. So, he logs into the meeting on his smartphone. In a raw Zoom dataset, the person "Arun" will show up as four different names--(1) his corporate name that was not changed; (2) his child's name on his iPad; (3) the name he changed his iPad to; and, (4) his mobile phone. Clearly, to properly study human behavior, all of the actions associated with these four names should be attached to "Arun".
To address this problem, the `rosetta` file compiles every unique display name (by meeting) encountered across the `participants`, `chat`, and `transcript` files.
```{r}
str(batchOut$rosetta)
```
## Add a Unique Individual Identifier to All Elements
I have found that exporting the `rosetta` file and manually attaching a unique individual identifier (e.g., a number) is a necessary process to ensure that the right data are attached to the right people. In terms of process, here is what I do:
* Export the `rosetta` file. You can do this in your initial `batchProcessZoomOutput` call as follows:
```{r, eval=FALSE}
batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx", exportZoomRosetta="./myMeetings_rosetta_original.xlsx")
```
* Copy the file that you have exported and save it with a new name (e.g., myMeetings_rosetta_edited.xlsx). This will prevent you from losing your manual work if you re-run the command above and overwrite the file.
* Add a new column to the new file that contains a unique user identifier. Name the column something like 'indivId'. In this column you will enter a persistent individual identifier for individuals in your dataset.
* Manually review the file, entering the unique identifier that is correct for each Zoom display name. I tend to sort the file by the Zoom display name column first. This increases the chances of catching the same person who shows up in multiple meetings or under slightly different names (e.g., Ringo, Ringo Starr). For the unique identifier, I recommend using either a master numeric identifier or an email address that connects to other data streams that you might have.
* Import the reviewed and edited rosetta file. The following command will import the file and add your new identifier to each of the elements that are included in `batchOut`. Going forward, you should use the new individual identifier (e.g., `indivId`) in your analyses. Together with the meeting identifier (e.g., `batchMeetingId`), you will be able to link records to other relevant data that you have collected (e.g., survey data).
```{r, eval=FALSE}
batchOutIds = importZoomRosetta(zoomOutput=batchOut, zoomRosetta="./myEditedRosetta.xlsx",
meetingId="batchMeetingId")
```
Running the `importZoomRosetta` command will attach the new unique identifier that you have created to each of the datasets that were built from the available files in your directory.
## Next Steps
Following the process above, you should have a single R object with a tremendous amount of information. [Part 3 of this guide will cover how to analyze the conversational data in this R object](http://zoomgroupstats.org/articles/part03-analyze-zoom-conversation-data.html).
---
title: "Part 3: Analyzing Conversations in Zoom"
author: "Andrew P. Knight"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Part 3: Analyzing Conversations in Zoom}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r, error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'}
library(zoomGroupStats)
batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
```
## Introduction
Virtual meetings afford granular data on how people communicate with one another. With this granularity come both opportunities and challenges. With respect to opportunities, researchers can use virtual meetings to track the flow and content of communications on a second-by-second basis. This enables deriving insights into who talks to whom and how different people may communicate with one another in distinct ways.
With respect to challenges, granular data often requires researchers--who may be used to smaller sample datasets--to contend with incredibly large volumes of information. For example, if a researcher were to collect data from 100 60-minute group meetings, this would yield 6,000 minutes of recorded speech. Depending on the rate of speech, this could amount to 50,000 to 60,000 spoken utterances.
Because virtual meetings present data on events in time (e.g., a spoken sentence or chat message) made by an individual within a virtual meeting, they require close attention to levels of analysis. For many questions, researchers are not interested in the fine-grained events; rather, researchers are interested in using those fine-grained events to measure attributes of individuals or groups within certain segments of time. `zoomGroupStats` provides basic functionality to derive these kinds of aggregated metrics. Whether a given aggregation is appropriate for assessing some construct, however, will fundamentally depend on the phenomenon under investigation.
## Overview of text-based data in Zoom
`zoomGroupStats` provides functions for analyzing conversations that occur through two channels in a virtual meeting:
* **transcribed spoken language**: Zoom's cloud recording feature includes an option for transcription of language transmitted through meeting participants' microphones. This transcription is performed by otter.ai.
* **text-based chat messages**: During a Zoom meeting, users can send chat messages. In the chat file included in a Zoom Cloud recording, only publicly facing messages are captured. These are the messages that users send to the group as a whole. If users send one another direct/private messages, these are not captured in the downloadable file.
### Cleaning and modifying text-based data
Text analysis is a dynamic area brimming with innovation. `zoomGroupStats` does not currently include direct functions for cleaning or modifying the text that is captured in either a transcript or a chat file. However, because the text for each of these files is stored in a variable, it is straightforward to use functions from other packages to clean or otherwise modify text before conducting conversation analysis.
Depending on your research questions and the scale of your dataset, you may wish to manually review and correct the transcribed audio content. Just like transcriptions done by humans, transcriptions produced by otter.ai will have errors. A manual review of a transcription can correct for these errors and provide a sharper analysis of text.
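As an example, here is a minimal cleaning sketch using base R. It assumes the `batchOut` object from Part 2 and works on a copy so the raw transcription is preserved (whether lowercasing and stripping punctuation is appropriate will depend on your research question):
```{r, eval=FALSE}
cleanedTranscript = batchOut$transcript

# Lowercase the utterance text and strip punctuation before analysis
cleanedTranscript$utteranceMessage = tolower(cleanedTranscript$utteranceMessage)
cleanedTranscript$utteranceMessage = gsub("[[:punct:]]", "", cleanedTranscript$utteranceMessage)
```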
### About the Zoom `transcript` file
If you have followed the steps outlined in [Part 2](http://zoomgroupstats.org/articles/part02-process-zoom-files.html), you will have a single list object from your batch analysis. Within this list you will find a `transcript` dataset. `transcript` is a data.frame that is the parsed audio transcript file. Each row represents a single marked "utterance" using Zoom's cloud-based transcription algorithm. Utterances are marked as a function of pauses in speech and/or speaker changes.
```{r, eval=TRUE}
# Three records from the sample transcript dataset
head(batchOut$transcript, 3)
```
Each row contains identifying information for the utterance, including what meeting it was in (`batchMeetingId`), who said it (`userName`), when it was said (`utteranceStartTime`, `utteranceStartSeconds`), and how long it lasted (`utteranceTimeWindow`). There is also an indicator of the language for the utterance (`utteranceLanguage`). This indicator is used with some text analysis packages.
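Because each row is a single utterance, you can aggregate these records to whatever level of analysis suits your question. As a simple sketch (not run here), the following `dplyr` pipeline collapses utterances to one row per speaker per meeting; the functions described below perform this kind of aggregation, and more, for you:
```{r, eval=FALSE}
library(dplyr)

# One row per speaker per meeting, with utterance counts and speaking time
batchOut$transcript %>%
  group_by(batchMeetingId, userName) %>%
  summarise(
    numUtterances = n(),
    totalSpeakingTime = sum(utteranceTimeWindow),
    .groups = "drop"
  )
```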
### About the Zoom `chat` file
Also included in your batch output file will be a `chat` dataset. `chat` is a data.frame that is the parsed text-based chat file. Each row represents a single chat message submitted by a user. Note that non-ASCII characters will not be correctly rendered in the message text.
```{r, eval=TRUE}
# Three records from the sample chat dataset
head(batchOut$chat, 3)
```
Like `transcript`, each row contains identifying information for the chat message, including what meeting it was in (`batchMeetingId`), who posted it (`userName`), and when it was posted (`messageTime`, `messageSeconds`). There is also an indicator of the language for the message (`messageLanguage`).
## Analyzing conversations in `transcript` and `chat`
`zoomGroupStats` includes functions that aid in deriving common conversation metrics at several levels of analysis. Each of the functions can be applied to either the `chat` or the `transcript` file.
### Performing sentiment analysis
Sentiment analysis--the assessment and/or classification of language according to its emotional tone--is among the most ubiquitous kinds of text analysis. `zoomGroupStats` provides the ability to perform sentiment analysis on the utterances or messages in `transcript` and `chat` files. Because this type of analysis scores pieces of text, I recommend conducting this analysis first. The sentiment metrics can then be included in downstream conversation analyses that aggregate aspects of the conversation to the individual or meeting levels.
Using the `textSentiment()` function, there are two different types of sentiment analysis that you can request:
* **syuzhet** - This is a lexicon-based analysis using the `syuzhet` package. A lexicon-based analysis uses pre-existing dictionaries to measure the sentiment of individual words in a piece of text. In essence, this approach is a word-counting method.
* **aws** - This is an approach that relies on machine learning through Amazon Web Services. Rather than focusing on individual words, this method draws upon a trained model that assesses attributes of the text as a whole. To request this type of analysis, [you must have appropriately configured your AWS credentials](https://github.com/paws-r/paws/blob/main/docs/credentials.md).
In deciding which method to use, you should consider your research objectives. In general, the aws method will provide greater validity for assessing sentiment. However, it also will take longer to run and, for larger datasets, will incur financial costs.
```{r, eval=FALSE}
# You can request both sentiment analysis methods by including them in sentMethods
transcriptSent = textSentiment(inputData=batchOut$transcript, idVars=c('batchMeetingId','utteranceId'), textVar='utteranceMessage', sentMethods=c('aws', 'syuzhet'), appendOut=FALSE, languageCodeVar='utteranceLanguage')
# This does only the aws sentiment analysis on a chat file
chatSent = textSentiment(inputData=batchOut$chat, idVars=c('batchMeetingId', 'messageId'), textVar='message', sentMethods=c('aws'), appendOut=FALSE, languageCodeVar='messageLanguage')
```
The results of textSentiment come as a named list, with items for `aws` and/or `syuzhet`:
```{r, eval=TRUE}
# This does only the syuzhet analysis on the transcript and does not append it to the input dataset
transcriptSent = textSentiment(inputData=batchOut$transcript, idVars=c('batchMeetingId','utteranceId'), textVar='utteranceMessage', sentMethods=c('syuzhet'), appendOut=FALSE, languageCodeVar='utteranceLanguage')
head(transcriptSent$syuzhet)
```
The `appendOut` option in `textSentiment()` gives you the ability to merge the sentiment metrics back to the original input data. I usually do this so that I can incorporate these metrics into downstream conversation analyses.
```{r, eval=TRUE}
# This does only the syuzhet sentiment analysis on a chat file and appends it to the input dataset
chatSent = textSentiment(inputData=batchOut$chat, idVars=c('batchMeetingId', 'messageId'), textVar='message', sentMethods=c('syuzhet'), appendOut=TRUE, languageCodeVar='messageLanguage')
head(chatSent$syuzhet)
```
Note that I have not included the `aws` output in this vignette because it requires a call to a third-party service.
### Performing conversation analysis
Conversation analysis entails using the exchanges of communications among meeting members to assess attributes of individuals, dyads, and groups. `zoomGroupStats` currently includes two basic kinds of conversation analysis.
The `textConversationAnalysis()` function will provide a descriptive assessment of either the `transcript` or the `chat` file.
```{r, eval=TRUE}
# Analyze the transcript, without the sentiment metrics
convoTrans = textConversationAnalysis(inputData=batchOut$transcript, inputType='transcript', meetingId='batchMeetingId', speakerId='userName')
```
`textConversationAnalysis()` provides a list with output at two levels of analysis--the meeting level (first item) and the speaker level (second item). These items are named according to the type of input that you have provided.
```{r, eval=TRUE}
# This is output at the meeting level. (Note that the values across meetings are equivalent because the sample dataset is a replication of the same meeting multiple times.)
head(convoTrans$transcriptlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier that you specified |
| transcriptStartTime | When the first utterance was recorded |
| transcriptEndTime | When the last utterance ended |
| utteranceTimeWindow_sum | Total number of seconds of speaking time |
| utteranceTimeWindow_x | Mean duration, in seconds, of utterances |
| utteranceTimeWindow_sd | Standard deviation of the duration, in seconds, of utterances |
| utteranceGap_x | Mean duration, in seconds, of silent time between consecutive utterances |
| utteranceGap_sd | Standard deviation of the duration, in seconds, of silent time between consecutive utterances |
| numUtterances | Count of the number of utterances in the meeting |
| numUniqueSpeakers | Count of the number of unique speakers in the meeting. Note that this includes any utterances for which the speaker is UNIDENTIFIED. |
| silentTime_sum | Total number of seconds of silent time |
| burstinessRaw | A measure of the concentration of utterances in time |
```{r, eval=TRUE}
# This is output at the speaker level
head(convoTrans$speakerlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier that you specified |
| userName | The speaker identifier that you specified |
| firstUtteranceTime | Timestamp for this person's first utterance |
| lastUtteranceTime | Timestamp for this person's last utterance |
| utteranceTimeWindow_sum | Total number of seconds of this person's speaking time |
| utteranceTimeWindow_x | Mean duration, in seconds, of this person's utterances |
| utteranceTimeWindow_sd | Standard deviation of the duration, in seconds, of this person's utterances |
| utteranceGap_x | Mean duration, in seconds, of silent time before this person speaks after a prior utterance|
| utteranceGap_sd | Standard deviation of the duration, in seconds, of silent time before this person speaks after a prior utterance |
| numUtterances | Count of the number of utterances this person made in this meeting |
If you have already conducted a sentiment analysis using the `textSentiment()` function, you can further include those attributes. Note that currently you can only analyze one sentiment analysis method at a time. For example, here is a request for an analysis of the chat file:
```{r, eval=TRUE}
# Analyze the conversation within the chat file, including the sentiment metrics
convoChat = textConversationAnalysis(inputData=chatSent$syuzhet, inputType='chat', meetingId='batchMeetingId', speakerId='userName', sentMethod="syuzhet")
```
The names of the items in the list output for `chat` are `chatlevel` and `userlevel`:
```{r, eval=TRUE}
# This is output at the meeting level
head(convoChat$chatlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier that you specified |
| chatStartTime | The time of the first chat message in this meeting |
| chatEndTime | The time of the last chat message in this meeting |
| messageNumChars_sum | Total number of characters chatted in meeting |
| messageNumChars_x | Mean number of characters per message chatted in meeting |
| messageNumChars_sd | Standard deviation of the number of characters per message chatted in meeting |
| messageGap_x | Mean duration, in seconds, of time between chat messages in this meeting |
| messageGap_sd | Standard deviation of the duration, in seconds, of time between chat messages in this meeting |
| numUniqueMessagers | Number of individuals who sent chat messages in this meeting |
| numMessages | Total number of messages sent in this meeting |
| totalChatTime | Amount of time between first and last messages |
| burstinessRaw | Measure of the concentration of chat messages in time |
| ... | Additional variables depend on the type of sentiment analysis you may have requested. |
```{r, eval=TRUE}
# This is output at the speaker level
head(convoChat$userlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier that you specified |
| userName | The individual identifier you specified |
| firstMessageTime | The time of this person's first chat message in this meeting |
| lastMessageTime | The time of this person's last chat message in this meeting |
| messageNumChars_sum | Total number of characters this person chatted in meeting |
| messageNumChars_x | Mean number of characters per message this person chatted in meeting |
| messageNumChars_sd | Standard deviation of the number of characters per message this person chatted in meeting |
| messageGap_x | Mean duration, in seconds, of time before this person sends a chat message after a prior message |
| messageGap_sd | Standard deviation of the duration, in seconds, of time before this person sends a chat message after a prior message |
| ... | Additional variables depend on the type of sentiment analysis you may have requested. |
## Windowed conversation analysis
One of the unique strengths of collecting data using virtual meetings is the ability to assess *dynamics*--how meeting characteristics and participants' behavior change over time. Beyond analyzing the raw events over time, `zoomGroupStats` enables you to run the `textConversationAnalysis()` above within temporal windows in a given meeting. By windowing, and aggregating data within the window, you can derive more reliable indicators of attributes than relying solely on the raw events.
For example, using the following function call, you could analyze how conversation attributes--who is speaking a lot, what is the sentiment of speech--change throughout a meeting, in 5-minute (`windowSize=300` seconds) increments.
```{r, eval=TRUE}
win.convo.out = windowedTextConversationAnalysis(inputData=batchOut$transcript, inputType='transcript', meetingId='batchMeetingId', speakerId='userName', sentMethod="none", timeVar="utteranceStartSeconds", windowSize=300)
```
The output of `windowedTextConversationAnalysis` is a list with two data.frames as items:
```{r, eval=TRUE}
# View the window-level output
head(win.convo.out$windowlevel)
```
| Variable | Description |
|:---------------|:-----------------------------------------|
| windowId | Incrementing numeric identifier for the temporal window |
| windowStart | Number of seconds from start of transcript when this window begins |
| windowEnd | Number of seconds from start of transcript when this window ends |
| ... | All other variables correspond to the textConversationAnalysis output; but, they are calculated within a given temporal window |
```{r, eval=TRUE}
# View the output for speakers within windows
head(win.convo.out$speakerlevel)
```
This output will provide a record for each possible speaker within each possible window. This is done so that valid zeros (e.g., no speaking) are represented in the dataset.
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | Meeting identifier requested |
| userName | Speaker identifier requested |
| windowId | Incrementing numeric identifier for the temporal window |
| windowStart | Number of seconds from start of transcript when this window begins |
| windowEnd | Number of seconds from start of transcript when this window ends |
| ... | All other variables correspond to the textConversationAnalysis output; but, they are calculated within a given temporal window |
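These window-by-speaker records lend themselves to simple visualizations of meeting dynamics. As a minimal sketch (assuming you have `ggplot2` installed), you could plot each speaker's total speaking time across windows, with one panel per meeting:
```{r, eval=FALSE}
library(ggplot2)

# Speaking time per speaker across temporal windows, faceted by meeting
ggplot(win.convo.out$speakerlevel,
       aes(x = windowStart, y = utteranceTimeWindow_sum, color = userName)) +
  geom_line() +
  facet_wrap(~batchMeetingId)
```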
## Next Steps
In the [final part of this guide](http://zoomgroupstats.org/articles/part04-analyze-zoom-video-data.html), you will learn how to process and analyze video files downloaded from Zoom sessions.
---
title: "Part 4: Analyzing Video Data from Zoom"
author: "Andrew P. Knight"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Part 4: Analyzing Video Data from Zoom}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction to Analyzing Video from Zoom
One of the most valuable raw outputs from Zoom is the video recording of the session. Indeed, the video recording is, itself, the source of the transcript [that we considered in Part 03](http://zoomgroupstats.org/articles/part03-analyze-zoom-conversation-data.html). But, beyond spoken language, video offers the potential to examine participants' nonverbal behavior and a wide range of actions and interactions during virtual meetings. As discussed in [Part 01](http://zoomgroupstats.org/articles/part01-configure-zoom.html), you should attend carefully to your Zoom configuration before beginning to collect data. My recommendation is to select all of the video options that are available and optimize your recordings for 3rd party processing.
If you do select all of the recording options, you will be able to download several files from Zoom after a virtual meeting is complete and the recording is available:
* A gallery view: This gives a tiled view of meeting participants who have their camera on. It is important to note that participants' placement on the screen (i.e., the location of their tiles) is dynamic and will change depending on who has a camera on in the meeting at any given point in time.
* An active speaker view: This displays the camera feed of a single participant at a time. The participant who is featured is the one that Zoom has classified as the active speaker at that moment.
* Shared screen views: There are a few different shared screen views. One gives the active speaker as a small thumbnail within a larger field showing the shared screen.
Depending on your research questions, different video formats will be most relevant. For my own research, which focuses on group dynamics, I prefer the gallery view. This enables measuring the nonverbal behavior of all participants throughout the entirety of a virtual meeting (assuming they have their camera on).
`zoomGroupStats` currently supports one video per meeting (named as *prefix*_video.mp4), where *prefix* is the file naming convention that you included in your [batch processing template](https://github.com/andrewpknight/zoomGroupStats/blob/main/inst/extdata/myMeetingsBatch.xlsx). That is, for each meeting, you can include a single video file in your batch processing folder. You may still find it valuable to download other views (e.g., active speaker)--just add a suffix to the filename.
## Parsing Zoom Video feed
Analyzing videos is computationally and, consequently, time intensive. Think of a video as a series of still images that have been strung together--much like a flip book. When analyzing videos, we are breaking down a video into its still frames and, then, analyzing those still frames. Depending on the duration and quality (e.g., frame rate, size), there could be hundreds of thousands of images within your video. For example, if your video has a frame rate of 60 frames per second of footage, that would yield 3,600 frames for a single *minute* of video.
Given the time that it can take to analyze many videos, I "parse" or pre-process the video files from Zoom meetings before analyzing them. By "parse", I mean that I sample and save a sequence of still frames from each video that can be used in downstream analyses. This process is akin to how we parsed the transcript or chat files from Zoom meetings in [Part 03](http://zoomgroupstats.org/articles/part03-analyze-zoom-conversation-data.html). I usually do this at the start of a project, at a time when I will not be using my computer (e.g., before going to sleep). When I launch the batch parsing function at night, it can run while I am sleeping.
The `batchGrabVideoStills()` function presumes that you have followed the batch process setup described in [Part 02 of this guide](http://zoomgroupstats.org/articles/part02-process-zoom-files.html). If you have done this, you have a series of video files saved in the same directory as your batch input file.
```{r, eval=FALSE}
batchOut = batchProcessZoomOutput(batchInput="./myMeetingsBatch.xlsx")
```
And, the function presumes that you have run the `batchProcessZoomOutput()` function to generate the `batchOut` object:
```{r, eval=FALSE, error=FALSE, message=FALSE, warning=FALSE, include=FALSE, results='hide'}
batchOut = invisible(batchProcessZoomOutput(batchInput=system.file('extdata', 'myMeetingsBatch.xlsx', package = 'zoomGroupStats')))
```
With this setup complete, you can run the `batchGrabVideoStills` function:
```{r, eval=FALSE}
batchGrabVideoStills(batchInfo=batchOut$batchInfo, imageDir="~/Documents/myMeetings/videoImages", sampleWindow=60)
```
This function call will iterate through the meetings in your batch and, for each meeting where there is a video, sample a still frame from the video every 60 seconds. Because of a quirk with video still frames, the function will actually sample the first image at `sampleWindow/2` and then each subsequent image every `sampleWindow`. The function saves these images in a new directory that it creates within the same directory where the videos are saved.
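To make the sampling scheme concrete, the following sketch computes the timestamps (in seconds) at which frames would be grabbed from a hypothetical 10-minute video with `sampleWindow = 60`:
```{r, eval=TRUE}
sampleWindow = 60
videoDuration = 600 # a hypothetical 10-minute video, in seconds

# First frame at sampleWindow/2, then one frame every sampleWindow seconds
frameTimes = seq(sampleWindow/2, videoDuration, by = sampleWindow)
frameTimes
```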
In addition to the images that it saves, `batchGrabVideoStills` will return a data frame with information about what it did for each meeting:
| Variable | Description |
|:---------------|:-----------------------------------------|
| batchMeetingId | The meeting identifier |
| videoExists | TRUE or FALSE indicating whether there was a video for this meeting |
| imageDir | Path to the directory where video images are saved |
| sampleWindow | The window requested for sampling frames |
| numFramesExtracted | The total number of image files that were saved for this meeting |
## Analyzing attributes of detected faces
With the still frames sampled from the video, you can now progress to analyzing attributes of the video. The current version of `zoomGroupStats` includes one function--`batchVideoFaceAnalysis()`--for analyzing the attributes of faces detected within the video. This function relies on the Rekognition service from Amazon Web Services. To use this function, [you must have appropriately configured your AWS credentials](https://github.com/paws-r/paws/blob/main/docs/credentials.md).
```{r, eval=FALSE}
vidOut = batchVideoFaceAnalysis(batchInfo=batchOut$batchInfo, imageDir="~/Documents/meetingImages", sampleWindow=60, facesCollectionID="group-r")
```
This function will iterate through a set of meetings. For each meeting, it will iterate through the extracted images from the video of the meeting. Within each image, it will detect human faces and measure attributes of those faces using Rekognition. If you create an identified collection of users in advance, you can use this collection to detect the identity of any faces in the video.
The function returns a data.frame that is at the face-image level of analysis. That is, each record in the data.frame corresponds to a face detected within an image, which is itself nested within a given meeting. As an example, imagine that you have a recording of a 60 minute meeting for which you have extracted 60 still frames (e.g., one every minute). Imagine that there were 5 participants in this meeting and that the participants kept their camera on throughout the meeting. This should yield approximately 300 records of data. Note that there will likely be fewer records simply because participants might have stepped away from their camera or been unrecognizable at a given point in the video.
For each detected face, there is an abundance of information from AWS Rekognition. There are estimates of participants' age and gender, facial attributes (e.g., glasses, facial hair), and estimates of emotional expressions. If you provided an AWS collection of identified images, the identity of the participant will also be included, along with a confidence level.
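Once you have this face-image level data.frame, you will typically aggregate it to the level of analysis you care about. The sketch below illustrates the idea with `dplyr`; note that `indivId` and `confidence` are hypothetical column names standing in for the identity and confidence fields that Rekognition returns:
```{r, eval=FALSE}
library(dplyr)

# Hypothetical aggregation: one row per identified participant per meeting
vidOut %>%
  group_by(batchMeetingId, indivId) %>%
  summarise(
    numFramesDetected = n(),
    meanConfidence = mean(confidence),
    .groups = "drop"
  )
```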
## Next Steps
At this point in the guide, the next steps are on my end. You should expect the functionality of `zoomGroupStats` to continue to extend, as I build out new functions and make the existing functions more efficient. You should also expect this guide to extend. A subsequent topic that I will include, for example, is how to best analyze Zoom recordings captured locally (i.e., not with the Zoom Cloud). If you have ideas or suggestions for functionality--or, if you have used the functions and encountered bugs or problems--[please reach out](mailto:knightap@wustl.edu). With many eyes, all bugs are shallow.
#' Donors from DIME Database
#'
#' A set of donor names from the Database on Ideology, Money in Politics, and
#' Elections (DIME). This dataset was used as a benchmark in the 2021 APSR
#' paper Adaptive Fuzzy String Matching: How to Merge Datasets with Only One
#' (Messy) Identifying Field by Aaron R. Kaufman and Aja Klevs, the dataset in
#' this package is a subset of the data from the replication archive of that
#' paper. The full dataset can be found in the paper's replication materials
#' here: \doi{10.7910/DVN/4031UL}.
#'
#' @author Adam Bonica
#' @references \doi{10.7910/DVN/4031UL}
#' @name dime_data
#' @docType data
#' @format ## `dime_data`
#' A data frame with 10,000 rows and 2 columns:
#' \describe{
#' \item{id}{Numeric ID / Row Number}
#' \item{x}{Donor Name}
#' }
#' @source \doi{10.7910/DVN/4031UL}
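#'
#' @examples
#' # A minimal usage sketch: load the bundled subset and inspect it
#' data(dime_data)
#' head(dime_data)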
"dime_data"
#' Fit a Probabilistic Matching Model using Naive Bayes + E.M.
#'
#' A Rust implementation of the Naive Bayes / Fellegi-Sunter model of record
#' linkage as detailed in the article "Using a Probabilistic Model to Assist
#' Merging of Large-Scale Administrative Records" by Enamorado, Fifield and
#' Imai (2019). Takes an integer matrix describing the similarities between
#' each possible pair of observations, and a vector of initial guesses of the
#' probability each pair is a match (these can either be set from domain
#' knowledge, or one can hand-label a subset of the data and leave the rest as
#' p=.5). Iteratively refines these guesses using the Expectation Maximization
#' algorithm until an optima is reached. for more details, see
#' \doi{10.1017/S0003055418000783}.
#'
#'
#' @param X an integer matrix of similarities. Must go from 0 (the most
#' disagreement) to the maximum without any "gaps" or unused levels. As an
#' example, a column with values 0,1,2,3 is a valid column, but 0,1,2,4 is not
#' as three is omitted
#'
#' @param g a vector of initial guesses that are iteratively improved using the
#' EM algorithm (my personal approach is to guess at logistic regression
#' coefficients and use them to create the intitial probability guesses). This
#' is chosen to avoid the model getting stuck in a local optimum, and to avoid
#' the problem of label-switching, where the labels for matches and non-matches
#' are reversed.
#'
#' @param tol tolerance in the sense of the infinity norm. i.e. how close the
#' parameters have to be between iterations before the EM algorithm terminates.
#'
#' @param max_iter iterations after which the algorithm will error out if it
#' has not converged.
#'
#' @return a vector of probabilities representing the posterior probability
#' each record pair is a match.
#'
#' @examples
#'
#' inv_logit <- function(x) {
#'   exp(x) / (1 + exp(x))
#' }
#' n <- 10^6
#' d <- 1:n %% 5 == 0
#' X <- cbind(
#'   as.integer(ifelse(d, runif(n) < .8, runif(n) < .2)),
#'   as.integer(ifelse(d, runif(n) < .9, runif(n) < .2)),
#'   as.integer(ifelse(d, runif(n) < .7, runif(n) < .2)),
#'   as.integer(ifelse(d, runif(n) < .6, runif(n) < .2)),
#'   as.integer(ifelse(d, runif(n) < .5, runif(n) < .2)),
#'   as.integer(ifelse(d, runif(n) < .1, runif(n) < .9)),
#'   as.integer(ifelse(d, runif(n) < .1, runif(n) < .9)),
#'   as.integer(ifelse(d, runif(n) < .8, runif(n) < .01))
#' )
#'
#' # initial guess at class assignments based on a hypothetical logistic
#' # regression. Should be based on domain knowledge, or a handful of
#' # hand-coded observations.
#'
#' x_sum <- rowSums(X)
#' g <- inv_logit((x_sum - mean(x_sum)) / sd(x_sum))
#'
#' out <- em_link(X, g, tol = .0001, max_iter = 100)
#'
#' @export
em_link <- function(X, g, tol = 10^-6, max_iter = 10^3) {
  stopifnot(
    "There can be no NA's in X (but you can add NA as its own agreement level)" = !any(is.na(X))
  )
  stopifnot(
    "initial guesses must be valid probabilities (greater than 0 and less than 1)" = all(g < 1 & g > 0)
  )
  rust_em_link(X, g, tol, max_iter)
}
multi_by_validate <- function(a,b, by) {
# first pass to handle dplyr::join_by() call
if (inherits(by, "dplyr_join_by")) {
if (any(by$condition != "==")) {
stop("Inequality joins are not supported.")
}
new_by <- by$y
names(new_by) <- by$x
by <- new_by
}
if (is.null(by)) {
by_a <- intersect(names(a), names(b))
by_b <- intersect(names(a), names(b))
} else {
if (!is.null(names(by))) {
by_a <- names(by)
by_b <- by
} else {
by_a <- by
by_b <- by
}
stopifnot(by_a %in% names(a))
stopifnot(by_b %in% names(b))
}
return(list(
by_a,
by_b
))
}
#' @importFrom stats pnorm
euclidean_join_core <- function (a, b, by = NULL, n_bands = 30, band_width = 10, threshold=1.0, r=.5, progress = FALSE, mode="inner") {
stopifnot("'radius' must be greater than 0" = threshold > 0)
by <- multi_by_validate(a,b,by)
by_a <- by[[1]]
by_b <- by[[2]]
stopifnot("There should be no NA's in by_a[1]"=!any(is.na(dplyr::pull(a,by_a[1]))))
stopifnot("There should be no NA's in by_a[2]"=!any(is.na(dplyr::pull(a,by_a[2]))))
stopifnot("There should be no NA's in by_b[1]"=!any(is.na(dplyr::pull(b,by_b[1]))))
stopifnot("There should be no NA's in by_b[2]"=!any(is.na(dplyr::pull(b,by_b[2]))))
thresh_prob <- euclidean_probability(threshold, n_bands, band_width, r)
if (thresh_prob < .95) {
str <- paste0("A pair of records at the threshold (", threshold,
") have only a ", round(thresh_prob*100), "% chance of being compared.\n",
"Please consider changing `n_bands` and `band_width`, and `r`.")
warning(str)
}
match_table <- rust_p_norm_join(
a_mat = as.matrix(dplyr::select(a, dplyr::all_of(by_a))),
b_mat = as.matrix(dplyr::select(b, dplyr::all_of(by_b))),
radius = threshold,
band_width = band_width,
n_bands = n_bands,
r = r,
progress = progress,
seed = round(runif(1,0,2^32))
)
names_in_both <- intersect(names(a), names(b))
names(a)[names(a) %in% names_in_both] <-
paste0(names(a)[names(a) %in% names_in_both], ".x")
names(b)[names(b) %in% names_in_both] <-
paste0(names(b)[names(b) %in% names_in_both], ".y")
matches <- dplyr::bind_cols(a[match_table[, 1], ], b[match_table[, 2], ])
not_matched_a <- ! seq(nrow(a)) %in% match_table[,1]
not_matched_b <- ! seq(nrow(b)) %in% match_table[,2]
if (mode == "left") {
matches <- dplyr::bind_rows(matches,a[not_matched_a,])
} else if (mode == "right") {
matches <- dplyr::bind_rows(matches,b[not_matched_b,])
} else if (mode == "full") {
matches <- dplyr::bind_rows(matches,a[not_matched_a,],b[not_matched_b,])
} else if (mode == "inner"){
matches <- matches
} else if (mode == "anti") {
matches <- dplyr::bind_rows(a[not_matched_a,], b[not_matched_b,])
} else {
stop("Invalid Mode Selected!")
}
return(matches)
}
#' Spatial Anti Join Using LSH
#'
#' @param a the first dataframe you wish to join.
#' @param b the second dataframe you wish to join.
#'
#' @param by a named vector indicating which columns to join on. Format should
#' be the same as dplyr: \code{by = c("column_name_in_df_a" = "column_name_in_df_b")}, but
#' two columns must be specified in each dataset (x column and y column). Specification
#' made with `dplyr::join_by()` are also accepted.
#'
#' @param n_bands the number of bands used in the LSH algorithm (default
#' is 30). Use this in conjunction with the \code{band_width} to determine the
#' performance of the hashing.
#'
#' @param band_width the length of each band used in the LSH algorithm
#' (default is 5). Use this in conjunction with the \code{n_bands} to determine
#' the performance of the hashing.
#'
#' @param threshold the distance threshold below which units should be considered a match
#'
#' @param r the r hyperparameter used to govern the sensitivity of the locality sensitive hash, as described in Datar et al. (2004).
#'
#' @param progress set to `TRUE` to print progress
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by.` Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. an anti-join keeps only the observations in each dataset that find no match in the other).
#'
#' @references Datar, Mayur, Nicole Immorlica, Pitor Indyk, and Vahab Mirrokni.
#' "Locality-Sensitive Hashing Scheme Based on p-Stable Distributions" SCG '04:
#' Proceedings of the twentieth annual symposium on Computational geometry
#' (2004): 253-262
#'
#' @examples
#'n <- 10
#'
#'X_1 <- matrix(c(seq(0,1,1/(n-1)), seq(0,1,1/(n-1))), nrow=n)
#'X_2 <- X_1 + .0000001
#'
#'X_1 <- as.data.frame(X_1)
#'X_2 <- as.data.frame(X_2)
#'
#'X_1$id_1 <- 1:n
#'X_2$id_2 <- 1:n
#'
#'
#'euclidean_anti_join(X_1, X_2, by = c("V1", "V2"), threshold =.00005)
#'
#'
#' @export
euclidean_anti_join <- function(a, b, by = NULL, threshold = 1, n_bands = 30, band_width = 5, r=.5, progress = FALSE) {
euclidean_join_core(a, b, mode = "anti", by = by, threshold = threshold, n_bands = n_bands, progress = progress, band_width = band_width, r = r)
}
#' Spatial Inner Join Using LSH
#'
#' @param a the first dataframe you wish to join.
#' @param b the second dataframe
#' you wish to join.
#'
#' @param by a named vector indicating which columns to join on. Format should
#' be the same as dplyr: \code{by = c("column_name_in_df_a" = "column_name_in_df_b")}, but
#' two columns must be specified in each dataset (x column and y column).
#'
#' @param n_bands the number of bands used in the LSH algorithm (default
#' is 30). Use this in conjunction with the \code{band_width} to determine the
#' performance of the hashing.
#'
#' @param band_width the length of each band used in the LSH algorithm
#' (default is 5). Use this in conjunction with the \code{n_bands} to determine
#' the performance of the hashing.
#'
#' @param threshold the distance threshold below which units should be considered a match
#'
#' @param r the r hyperparameter used to govern the sensitivity of the locality sensitive hash, as described in Datar et al. (2004).
#'
#' @param progress set to `TRUE` to print progress
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by.` Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. an inner-join keeps only the observations that match in both datasets).
#'
#' @references Datar, Mayur, Nicole Immorlica, Pitor Indyk, and Vahab Mirrokni.
#' "Locality-Sensitive Hashing Scheme Based on p-Stable Distributions" SCG '04:
#' Proceedings of the twentieth annual symposium on Computational geometry
#' (2004): 253-262
#'
#' @examples
#'n <- 10
#'
#'X_1 <- matrix(c(seq(0,1,1/(n-1)), seq(0,1,1/(n-1))), nrow=n)
#'X_2 <- X_1 + .0000001
#'
#'X_1 <- as.data.frame(X_1)
#'X_2 <- as.data.frame(X_2)
#'
#'X_1$id_1 <- 1:n
#'X_2$id_2 <- 1:n
#'
#'euclidean_inner_join(X_1, X_2, by = c("V1", "V2"), threshold =.00005)
#'
#'
#' @export
euclidean_inner_join <- function(a, b, by = NULL, threshold = 1, n_bands = 30, band_width = 5, r=.5, progress = FALSE) {
euclidean_join_core(a, b, mode = "inner", by = by, threshold = threshold, n_bands = n_bands,progress = progress, band_width = band_width, r = r)
}
#' Spatial Left Join Using LSH
#'
#' @inheritParams euclidean_anti_join
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by.` Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. a left-join keeps all observations in the first dataset, matched or not).
#'
#' @references Datar, Mayur, Nicole Immorlica, Pitor Indyk, and Vahab Mirrokni.
#' "Locality-Sensitive Hashing Scheme Based on p-Stable Distributions" SCG '04:
#' Proceedings of the twentieth annual symposium on Computational geometry
#' (2004): 253-262
#'
#' @examples
#'n <- 10
#'
#'X_1 <- matrix(c(seq(0,1,1/(n-1)), seq(0,1,1/(n-1))), nrow=n)
#'X_2 <- X_1 + .0000001
#'
#'X_1 <- as.data.frame(X_1)
#'X_2 <- as.data.frame(X_2)
#'
#'X_1$id_1 <- 1:n
#'X_2$id_2 <- 1:n
#'
#'euclidean_left_join(X_1, X_2, by = c("V1", "V2"), threshold =.00005)
#'
#'
#' @export
euclidean_left_join <- function(a, b, by = NULL, threshold = 1, n_bands = 30, band_width = 5, r=.5, progress = FALSE) {
euclidean_join_core(a, b, mode = "left", by = by, threshold = threshold, n_bands = n_bands,progress = progress, band_width = band_width, r = r)
}
#' Spatial Right Join Using LSH
#'
#' @inheritParams euclidean_anti_join
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by.` Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. a right-join keeps all observations in the second dataset, matched or not).
#'
#' @references Datar, Mayur, Nicole Immorlica, Pitor Indyk, and Vahab Mirrokni.
#' "Locality-Sensitive Hashing Scheme Based on p-Stable Distributions" SCG '04:
#' Proceedings of the twentieth annual symposium on Computational geometry
#' (2004): 253-262
#'
#' @examples
#'n <- 10
#'
#'X_1 <- matrix(c(seq(0,1,1/(n-1)), seq(0,1,1/(n-1))), nrow=n)
#'X_2 <- X_1 + .0000001
#'X_1 <- as.data.frame(X_1)
#'X_2 <- as.data.frame(X_2)
#'
#'X_1$id_1 <- 1:n
#'X_2$id_2 <- 1:n
#'
#'euclidean_right_join(X_1, X_2, by = c("V1", "V2"), threshold =.00005)
#'
#'
#' @export
euclidean_right_join <- function(a, b, by = NULL, threshold = 1, n_bands = 30, band_width = 5, r=.5, progress = FALSE) {
euclidean_join_core(a, b, mode = "right", by = by, threshold = threshold, n_bands = n_bands,progress = progress, band_width = band_width, r = r)
}
#' Spatial Full Join Using LSH
#'
#' @inheritParams euclidean_anti_join
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by.` Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. a full-join keeps all observations in both datasets, matched or not).
#'
#' @references Datar, Mayur, Nicole Immorlica, Pitor Indyk, and Vahab Mirrokni.
#' "Locality-Sensitive Hashing Scheme Based on p-Stable Distributions" SCG '04:
#' Proceedings of the twentieth annual symposium on Computational geometry
#' (2004): 253-262
#'
#' @examples
#'n <- 10
#'
#'X_1 <- matrix(c(seq(0,1,1/(n-1)), seq(0,1,1/(n-1))), nrow=n)
#'X_2 <- X_1 + .0000001
#'
#'X_1 <- as.data.frame(X_1)
#'X_2 <- as.data.frame(X_2)
#'
#'X_1$id_1 <- 1:n
#'X_2$id_2 <- 1:n
#'
#'euclidean_full_join(X_1, X_2, by = c("V1", "V2"), threshold =.00005)
#'
#' @export
euclidean_full_join <- function(a, b, by = NULL, threshold = 1, n_bands = 30, band_width = 5, r=.5, progress = FALSE) {
euclidean_join_core(a, b, mode = "full", by = by, threshold = threshold, n_bands = n_bands, progress = progress, band_width = band_width, r = r)
}
# Generated by extendr: Do not edit by hand
# nolint start
#
# This file was created with the following call:
# .Call("wrap__make_zoomerjoin_wrappers", use_symbols = TRUE, package_name = "zoomerjoin")
#' @docType package
#' @usage NULL
#' @useDynLib zoomerjoin, .registration = TRUE
NULL
rust_jaccard_join <- function(left_string_r, right_string_r, ngram_width, n_bands, band_size, threshold, progress, seed) .Call(wrap__rust_jaccard_join, left_string_r, right_string_r, ngram_width, n_bands, band_size, threshold, progress, seed)
rust_salted_jaccard_join <- function(left_string_r, right_string_r, left_salt_r, right_salt_r, ngram_width, n_bands, band_size, threshold, progress, seed) .Call(wrap__rust_salted_jaccard_join, left_string_r, right_string_r, left_salt_r, right_salt_r, ngram_width, n_bands, band_size, threshold, progress, seed)
rust_jaccard_similarity <- function(left_string_r, right_string_r, ngram_width) .Call(wrap__rust_jaccard_similarity, left_string_r, right_string_r, ngram_width)
rust_em_link <- function(x_robj, probs, tol, max_iter) .Call(wrap__rust_em_link, x_robj, probs, tol, max_iter)
rust_p_norm_join <- function(a_mat, b_mat, radius, band_width, n_bands, r, progress, seed) .Call(wrap__rust_p_norm_join, a_mat, b_mat, radius, band_width, n_bands, r, progress, seed)
# nolint end
simple_by_validate <- function(a,b, by) {
# first pass to handle dplyr::join_by() call
if (inherits(by, "dplyr_join_by")) {
if (any(by$condition != "==")) {
stop("Inequality joins are not supported.")
}
new_by <- by$y
names(new_by) <- by$x
by <- new_by
}
if (is.null(by)) {
by_a <- intersect(names(a), names(b))
by_b <- intersect(names(a), names(b))
stopifnot("Can't Determine Column to Match on" = length(by_a)==1)
message(paste0("Joining by '", by_a, "'\n"))
} else {
if (!is.null(names(by))) {
by_a <- names(by)
by_b <- unname(by)
} else {
by_a <- by
by_b <- by
}
stopifnot(by_a %in% names(a))
stopifnot(by_b %in% names(b))
}
return(list(
by_a,
by_b
))
}
#' @importFrom dplyr pull %>%
jaccard_join <- function (a, b, mode, by, salt_by, n_gram_width, n_bands,
band_width, threshold, progress = FALSE, a_salt = NULL, b_salt = NULL,
clean=FALSE, similarity_column = NULL) {
a <- tibble::as_tibble(a)
b <- tibble::as_tibble(b)
stopifnot("'threshold' must be of length 1" = length(threshold) == 1)
stopifnot("'threshold' must be between 0 and 1" = threshold <= 1 & threshold>=0)
stopifnot("'n_bands' must be greater than 0" = n_bands > 0)
stopifnot("'n_bands' must be length than 1" = length(n_bands) == 1)
stopifnot("'band_width' must be greater than 0" = band_width > 0)
stopifnot("'band_width' must be length than 1" = length(band_width) == 1)
stopifnot("'n_gram_width' must be greater than 0" = n_gram_width > 0)
stopifnot("'n_gram_width' must be length than 1" = length(n_gram_width) == 1)
thresh_prob <- jaccard_probability(threshold, n_bands, band_width)
if (thresh_prob < .95) {
str <- paste0("A pair of records at the threshold (", threshold,
") have only a ", round(thresh_prob*100), "% chance of being compared.\n",
"Please consider changing `n_bands` and `band_width`.")
warning(str)
}
by <- simple_by_validate(a,b,by)
by_a <- by[[1]]
by_b <- by[[2]]
stopifnot("'by' vectors must have length 1" = length(by_a)==1)
stopifnot("'by' vectors must have length 1" = length(by_b)==1)
stopifnot("There should be no NA's in by_a"=!any(is.na(dplyr::pull(a,by_a))))
stopifnot("There should be no NA's in by_b"=!any(is.na(dplyr::pull(b,by_b))))
salt_by_a <- NULL
salt_by_b <- NULL
# don't impute salt_by
if (!is.null(salt_by)) {
salt_by <- simple_by_validate(a,b,salt_by)
salt_by_a <- salt_by[[1]]
salt_by_b <- salt_by[[2]]
stopifnot("There should be no NA's in the blocking variables"=!
any(is.na(dplyr::select(a,dplyr::all_of(salt_by_a)))))
stopifnot("There should be no NA's in the blocking variables"=!
any(is.na(dplyr::select(b,dplyr::all_of(salt_by_b)))))
}
# Clean strings that are matched on
if (clean){
a_col <- gsub("[[:punct:] ]", "", dplyr::pull(a,by_a))
b_col <- gsub("[[:punct:] ]", "", dplyr::pull(b,by_b))
if (!is.null(salt_by_a) && !is.null(salt_by_b)) {
a_salt_col <- tidyr::unite(a,"salt_by_a", dplyr::all_of(salt_by_a)) %>%
dplyr::pull("salt_by_a")
b_salt_col <- tidyr::unite(b,"salt_by_b", dplyr::all_of(salt_by_b)) %>%
dplyr::pull("salt_by_b")
a_salt_col <- gsub("[[:punct:] ]", "", a_salt_col)
b_salt_col <- gsub("[[:punct:] ]", "", b_salt_col)
}
} else{
a_col <- dplyr::pull(a,by_a)
b_col <- dplyr::pull(b,by_b)
if (!is.null(salt_by_a) && !is.null(salt_by_b)) {
a_salt_col <- tidyr::unite(a,"salt_by_a", dplyr::all_of(salt_by_a)) %>%
dplyr::pull("salt_by_a")
b_salt_col <- tidyr::unite(b,"salt_by_b", dplyr::all_of(salt_by_b)) %>%
dplyr::pull("salt_by_b")
}
}
  if (is.null(salt_by_a) || is.null(salt_by_b)) {
match_table <- rust_jaccard_join(
a_col, b_col,
n_gram_width, n_bands, band_width, threshold,
progress, seed = 1)
} else {
match_table <- rust_salted_jaccard_join(
a_col, b_col,
a_salt_col, b_salt_col,
n_gram_width, n_bands, band_width, threshold,
progress, seed = round(runif(1,0,2^64))
)
}
if (!is.null(similarity_column)) {
similarities <- jaccard_similarity(
pull(a[match_table[, 1],], by_a),
pull(b[match_table[, 2],],by_b),
n_gram_width
)
}
# Rename Columns in Both Tables
names_in_both <- intersect(names(a), names(b))
names(a)[names(a) %in% names_in_both] <- paste0(names(a)[names(a) %in% names_in_both], ".x")
names(b)[names(b) %in% names_in_both] <- paste0(names(b)[names(b) %in% names_in_both], ".y")
matches <- dplyr::bind_cols(a[match_table[, 1], ], b[match_table[, 2], ])
not_matched_a <- ! seq(nrow(a)) %in% match_table[,1]
not_matched_b <- ! seq(nrow(b)) %in% match_table[,2]
if(!is.null(similarity_column)) {
matches[,similarity_column] <- similarities
}
if (mode == "left") {
matches <- dplyr::bind_rows(matches,a[not_matched_a,])
} else if (mode == "right") {
matches <- dplyr::bind_rows(matches,b[not_matched_b,])
} else if (mode == "full") {
matches <- dplyr::bind_rows(matches,a[not_matched_a,],b[not_matched_b,])
} else if (mode == "inner"){
matches <- matches
} else if (mode == "anti") {
matches <- dplyr::bind_rows(a[not_matched_a,], b[not_matched_b,])
} else {
stop("Invalid Mode Selected!")
}
return(matches)
}
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/R/jaccard_join_core.R |
#' Fuzzy inner-join using minhashing
#'
#' @param a the first dataframe you wish to join.
#'
#' @param b the second dataframe you wish to join.
#'
#' @param by a named vector indicating which columns to join on. Format should
#' be the same as dplyr: \code{by = c("column_name_in_df_a" = "column_name_in_df_b")};
#' a single column must be specified for each dataset. Specifications
#' made with `dplyr::join_by()` are also accepted.
#'
#' @param block_by a named vector indicating which column to block on, such that
#' rows that disagree on this field cannot be considered a match. Format should
#' be the same as dplyr: \code{by = c("column_name_in_df_a" =
#' "column_name_in_df_b")}
#'
#' @param n_gram_width the length of the n_grams used in calculating the
#' jaccard similarity. For best performance, I set this large enough that the
#' chance any string has a specific n_gram is low (e.g. \code{n_gram_width} = 2
#' or 3 when matching on first names, 5 or 6 when matching on entire
#' sentences).
#'
#' @param n_bands the number of bands used in the minhash algorithm (default
#' is 50). Use this in conjunction with the \code{band_width} to determine the
#' performance of the hashing. The default settings are for a
#' (.2,.8,.001,.999)-sensitive hash, i.e. pairs with a similarity of less
#' than .2 have a <.1% chance of being compared, while pairs with a similarity
#' of greater than .8 have a >99.9% chance of being compared.
#'
#' @param band_width the length of each band used in the minhashing algorithm
#' (default is 8). Use this in conjunction with the \code{n_bands} to determine
#' the performance of the hashing. The default settings are for a
#' (.2,.8,.001,.999)-sensitive hash, i.e. pairs with a similarity of less
#' than .2 have a <.1% chance of being compared, while pairs with a similarity
#' of greater than .8 have a >99.9% chance of being compared.
#'
#' @param threshold the jaccard similarity threshold above which two strings
#' should be considered a match (default is .7). The similarity is equal to 1
#' - the jaccard distance between the two strings, so 1 implies the strings are
#' identical, while a similarity of zero implies the strings are completely
#' dissimilar.
#'
#' @param clean should the strings that you fuzzy join on be cleaned (stripped
#' of punctuation and spaces)? Default is FALSE
#'
#' @param progress set to `TRUE` to print progress
#'
#' @param similarity_column an optional character vector. If provided, the data
#' frame will contain a column with this name giving the jaccard similarity
#' between the two fields. The extra column will not be present when anti-joining.
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by`. Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. an inner-join keeps only observations present in both datasets).
#'
#' @examples
#'# load baby names data
#'#install.packages("babynames")
#'library(babynames)
#'
#'baby_names <- data.frame(name = tolower(unique(babynames$name))[1:500])
#'baby_names_sans_vowels <- data.frame(
#' name_wo_vowels =gsub("[aeiouy]","", baby_names$name)
#' )
#'# Check the probability two pairs of strings with
#'# similarity .8 will be matched with 30 bands
#'# and a band width of 8 using the `jaccard_probability()` function:
#'jaccard_probability(.8,30,8)
#'# Run the join:
#'joined_names <- jaccard_inner_join(
#' baby_names,
#' baby_names_sans_vowels,
#' by = c("name"= "name_wo_vowels"),
#' threshold = .8,
#' n_bands = 20,
#' band_width = 6,
#' n_gram_width = 1,
#' clean = FALSE # default
#' )
#'joined_names
#' @export
jaccard_inner_join <- function(a, b,
by = NULL,
block_by = NULL,
n_gram_width = 2,
n_bands = 50,
band_width = 8,
threshold = .7,
progress = FALSE,
clean = FALSE,
similarity_column=NULL) {
jaccard_join(a, b, mode = "inner",
by = by, salt_by = block_by,
n_gram_width = n_gram_width,
n_bands = n_bands,
band_width = band_width,
threshold = threshold,
progress = progress,
similarity_column = similarity_column,
clean=clean)
}
#' Fuzzy anti-join using minhashing
#'
#' @inheritParams jaccard_inner_join
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by`. Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. an anti-join keeps only observations without a fuzzy match).
#'
#' @examples
#'# load baby names data
#'#install.packages("babynames")
#'library(babynames)
#'
#'baby_names <- data.frame(name = tolower(unique(babynames$name))[1:500])
#'baby_names_sans_vowels <- data.frame(
#' name_wo_vowels =gsub("[aeiouy]","", baby_names$name)
#' )
#'# Check the probability two pairs of strings with
#'# similarity .8 will be matched with 30 bands
#'# and a band width of 8 using the `jaccard_probability()` function:
#'jaccard_probability(.8,30,8)
#'# Run the join:
#'joined_names <- jaccard_anti_join(
#' baby_names,
#' baby_names_sans_vowels,
#' by = c("name"= "name_wo_vowels"),
#' threshold = .8,
#' n_bands = 20,
#' band_width = 6,
#' n_gram_width = 1,
#' clean = FALSE # default
#' )
#'joined_names
#' @export
jaccard_anti_join <- function(a, b,
by = NULL,
block_by = NULL,
n_gram_width = 2,
n_bands = 50,
band_width = 8,
threshold = .7,
progress = FALSE,
clean =FALSE, similarity_column =NULL) {
jaccard_join(a, b, mode = "anti", by = by,
salt_by = block_by,
n_gram_width = n_gram_width,
n_bands = n_bands, band_width = band_width,
similarity_column = similarity_column,
threshold = threshold, progress = progress, clean = clean)
}
#' Fuzzy left-join using minhashing
#'
#' @inheritParams jaccard_inner_join
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by`. Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. a left-join keeps all observations in the first dataset).
#'
#' @examples
#'# load baby names data
#'#install.packages("babynames")
#'library(babynames)
#'
#'baby_names <- data.frame(name = tolower(unique(babynames$name))[1:500])
#'baby_names_sans_vowels <- data.frame(
#' name_wo_vowels =gsub("[aeiouy]","", baby_names$name)
#' )
#'# Check the probability two pairs of strings with
#'# similarity .8 will be matched with 30 bands
#'# and a band width of 8 using the `jaccard_probability()` function:
#'jaccard_probability(.8,30,8)
#'# Run the join:
#'joined_names <- jaccard_left_join(
#' baby_names,
#' baby_names_sans_vowels,
#' by = c("name"= "name_wo_vowels"),
#' threshold = .8,
#' n_bands = 20,
#' band_width = 6,
#' n_gram_width = 1,
#' clean = FALSE # default
#' )
#'joined_names
#' @export
jaccard_left_join <- function(a, b,
by = NULL,
block_by = NULL,
n_gram_width = 2,
n_bands = 50,
band_width = 8,
threshold = .7,
progress = FALSE,
clean = FALSE,
similarity_column = NULL
) {
jaccard_join(a, b, mode = "left", by = by,
salt_by = block_by,
n_gram_width = n_gram_width,
n_bands = n_bands, band_width = band_width,
threshold = threshold,
progress = progress,
similarity_column = similarity_column,
clean = clean)
}
#' Fuzzy right-join using minhashing
#'
#' @inheritParams jaccard_inner_join
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by`. Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. a right-join keeps all observations in the second dataset).
#'
#' @examples
#'# load baby names data
#'#install.packages("babynames")
#'library(babynames)
#'
#'baby_names <- data.frame(name = tolower(unique(babynames$name))[1:500])
#'baby_names_sans_vowels <- data.frame(
#' name_wo_vowels =gsub("[aeiouy]","", baby_names$name)
#' )
#'# Check the probability two pairs of strings with
#'# similarity .8 will be matched with 30 bands
#'# and a band width of 8 using the `jaccard_probability()` function:
#'jaccard_probability(.8,30,8)
#'# Run the join:
#'joined_names <- jaccard_right_join(
#' baby_names,
#' baby_names_sans_vowels,
#' by = c("name"= "name_wo_vowels"),
#' threshold = .8,
#' n_bands = 20,
#' band_width = 6,
#' n_gram_width = 1,
#' clean = FALSE # default
#' )
#'joined_names
#' @export
jaccard_right_join <- function(a, b,
by = NULL,
block_by = NULL,
n_gram_width = 2,
n_bands = 50,
band_width = 8,
threshold = .7,
progress = FALSE,
clean = FALSE,
similarity_column = NULL
){
jaccard_join(a, b, mode = "right", by = by,
salt_by = block_by,
n_gram_width = n_gram_width,
n_bands = n_bands, band_width = band_width,
threshold = threshold,
progress = progress,
similarity_column = similarity_column,
clean = clean)
}
#' Fuzzy full-join using minhashing
#'
#' @inheritParams jaccard_inner_join
#'
#' @return a tibble fuzzily-joined on the basis of the variables in `by`. Tries
#' to adhere to the same standards as the dplyr-joins, and uses the same
#' logical joining patterns (i.e. a full-join keeps all observations from both datasets).
#'
#' @examples
#'# load baby names data
#'#install.packages("babynames")
#'library(babynames)
#'
#'baby_names <- data.frame(name = tolower(unique(babynames$name))[1:500])
#'baby_names_sans_vowels <- data.frame(
#' name_wo_vowels =gsub("[aeiouy]","", baby_names$name)
#' )
#'# Check the probability two pairs of strings with
#'# similarity .8 will be matched with 30 bands
#'# and a band width of 8 using the `jaccard_probability()` function:
#'jaccard_probability(.8,30,8)
#'# Run the join:
#'joined_names <- jaccard_full_join(
#' baby_names,
#' baby_names_sans_vowels,
#' by = c("name"= "name_wo_vowels"),
#' threshold = .8,
#' n_bands = 20,
#' band_width = 6,
#' n_gram_width = 1,
#' clean = FALSE # default
#' )
#'joined_names
#' @export
jaccard_full_join <- function(a, b,
by = NULL,
block_by = NULL,
n_gram_width = 2,
n_bands = 50,
band_width = 8,
threshold = .7,
progress = FALSE,
clean = FALSE,
similarity_column = NULL
){
jaccard_join(a, b, mode = "full", by = by,
salt_by = block_by,
n_gram_width = n_gram_width,
n_bands = n_bands, band_width = band_width,
threshold = threshold,
progress = progress,
similarity_column = similarity_column,
clean = clean)
}
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/R/jaccard_logical_joins.R |
#' Calculate the Jaccard similarity of two character vectors
#'
#' @param a the first character vector
#' @param b the second character vector
#'
#' @param ngram_width the length of the shingles / ngrams used in the
#' similarity calculation
#'
#' @return a vector of jaccard similarities of the strings
#'
#' @examples
#' jaccard_similarity(c("the quick brown fox","jumped over the lazy dog"),
#' c("the quck bron fx","jumped over hte lazy dog"))
#'
#' @export
jaccard_similarity <- function(a, b, ngram_width=2) {
stopifnot(length(a) == length(b))
rust_jaccard_similarity(a, b, ngram_width)
}
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/R/jaccard_similarity.R |
#' Plot S-Curve for a LSH with given hyperparameters
#'
#' @param n_bands The number of LSH bands calculated
#'
#' @param band_width The number of hashes in each band
#'
#' @return A plot showing the probability a pair is proposed as a match, given
#' the Jaccard similarity of the two items.
#'
#' @examples
#' # Plot the probability two pairs will be matched as a function of their
#' # jaccard similarity, given the hyperparameters n_bands and band_width.
#' jaccard_curve(40,6)
#'
#' @export
jaccard_curve <- function(n_bands, band_width) {
stopifnot("number of bands must be a single integer" = length(n_bands)==1)
stopifnot("band width must be a single integer" = length(band_width)==1)
stopifnot(n_bands > 0)
stopifnot(band_width > 0)
similarity <- seq(0,1,.005)
probs <- 1-(1-similarity^band_width)^n_bands
plot(similarity, probs,
xlab = "Jaccard Similarity of Two Strings",
ylab = "Probability that Strings are Proposed as a Match",
type="l",
col = "blue"
)
}
#' Find Probability of Match Based on Similarity
#'
#' This is a port of the
#' [lsh_probability](https://docs.ropensci.org/textreuse/reference/lsh_probability.html)
#' function from the
#' [textreuse](https://cran.r-project.org/package=textreuse)
#' package, with arguments changed to reflect the hyperparameters in this
#' package. It gives the probability that two strings of jaccard similarity
#' `similarity` will be matched, given the chosen bandwidth and number of
#' bands.
#'
#' @param similarity the similarity of the two strings you want to compare
#'
#' @param n_bands The number of LSH bands used in hashing.
#'
#' @param band_width The number of hashes in each band.
#'
#' @return a decimal number giving the probability that the two items will be
#' returned as a candidate pair from the minhash algorithm.
#'
#' @examples
#' # Find the probability two pairs will be matched given they have a
#' # jaccard_similarity of .8, with 5 bands
#' # and a band width of 50:
#' jaccard_probability(.8,5,50)
#' @export
jaccard_probability <- function(similarity, n_bands, band_width){
1-(1-similarity^band_width)^n_bands
}
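# A commented hand-check of the S-curve formula above (a sketch; the values
# come straight from the formula, not from stored package output):
# jaccard_probability(.8, n_bands = 20, band_width = 6)
# # equivalent to computing 1 - (1 - .8^6)^20 directly, roughly 0.998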
#' Plot S-Curve for a LSH with given hyperparameters
#'
#' @param n_bands The number of LSH bands calculated
#'
#' @param band_width The number of hashes in each band
#'
#' @param r the "r" hyperparameter used to govern the sensitivity of the hash.
#'
#' @param up_to the right extent of the x axis.
#'
#' @return A plot showing the probability a pair is proposed as a match, given
#' the Jaccard similarity of the two items.
#'
euclidean_curve <- function(n_bands, band_width, r, up_to = 100) {
x <- seq(0, up_to, length.out=1500)
y <- euclidean_probability(x, n_bands, band_width,r)
plot(x, y,
xlab = "Euclidian Distance Between Two Vectors",
ylab = "Probability that Vectors are Proposed as a Match",
type="l",
col = "blue"
)
}
#' Find Probability of Match Based on Similarity
#'
#' @param distance the Euclidean distance between the two vectors you want to
#' compare.
#'
#' @param n_bands The number of LSH bands used in hashing.
#'
#' @param band_width The number of hashes in each band.
#'
#' @param r the "r" hyperparameter used to govern the sensitivity of the hash.
#'
#' @return a decimal number giving the probability that the two items will be
#' returned as a candidate pair from the minhash algorithm.
#'
#' @importFrom stats pnorm
#' @export
euclidean_probability <- function(distance, n_bands, band_width, r) {
p <- 1 - 2*pnorm(-r/distance) - 2/(sqrt(2*pi)*r/distance)*(1-exp(-(r^2/(2*distance^2))))
1 - (1-p^band_width)^n_bands
}
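# A commented sanity check (a sketch, not stored package output): holding the
# bands fixed, the proposal probability should fall as the distance grows.
# euclidean_probability(.01, n_bands = 40, band_width = 8, r = .15)  # near 1
# euclidean_probability(.50, n_bands = 40, band_width = 8, r = .15)  # near 0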
#' Help Choose the Appropriate LSH Hyperparameters
#'
#' Runs a grid search to find the hyperparameters that will achieve an
#' (s1,s2,p1,p2)-sensitive locality sensitive hash. A locality sensitive hash
#' can be called (s1,s2,p1,p2)-sensitive if two strings with a similarity less
#' than s1 have a less than p1 chance of being compared, while two strings with
#' similarity s2 have a greater than p2 chance of being compared. As an
#' example, a (.1,.7,.001,.999)-sensitive LSH means that strings with
#' similarity less than .1 will have less than a .1% chance of being compared,
#' while strings with .7 similarity have at least a 99.9% chance of being compared.
#'
#' @param s1 the s1 parameter (the first similarity).
#' @param s2 the s2 parameter (the second similarity, must be greater than s1).
#' @param p1 the p1 parameter (the first probability).
#' @param p2 the p2 parameter (the second probability, must be greater than p1).
#'
#' @return a named vector with the hyperparameters that will meet the LSH
#' criteria, while reducing runtime.
#'
#' @examples
#' # Help me find the parameters that will minimize runtime while ensuring that
#' # two strings with similarity .1 will be compared less than .1% of the time,
#' # while strings with .9 similarity will have at least a 99.5% chance of being compared:
#' jaccard_hyper_grid_search(.1,.9,.001,.995)
#'
#' @export
jaccard_hyper_grid_search <- function(s1=.1,s2=.7,p1=.001,p2=.999) {
stopifnot("s1 must be a single number"=length(s1)==1)
stopifnot("s2 must be a single number"=length(s2)==1)
stopifnot("p1 must be a single number"=length(p1)==1)
stopifnot("p2 must be a single number"=length(p2)==1)
stopifnot("similarity 1 must be less than similarity 2" = s1 < s2)
stopifnot("proability 1 must be less than similarity 2" = p1 < p2)
df <- expand.grid(
band_width = seq(1,75,1),
n_bands = seq(1,50000,1)
)
df$p1 <-jaccard_probability(s1, n_bands = df$n_bands, band_width = df$band_width)
df$p2 <-jaccard_probability(s2, n_bands = df$n_bands, band_width = df$band_width)
df$feasible <- (df$p1 < p1) & (df$p2 > p2)
df$prod <- df$band_width * df$n_bands
df <- df[df$feasible,]
selected <- which.min(df$prod)
return(c(
"band_width" = df$band_width[selected] ,
"n_bands" = df$n_bands[selected]
))
}
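# A commented end-to-end sketch (assuming hypothetical data frames `a` and `b`
# with a shared "name" column): feed the selected hyperparameters into a join.
# params <- jaccard_hyper_grid_search(.2, .8, .001, .999)
# jaccard_inner_join(a, b, by = "name",
#   n_bands = params[["n_bands"]],
#   band_width = params[["band_width"]],
#   threshold = .8)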
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/R/lsh_properties.R |
.onAttach <- function(libname, pkgname) {
if(Sys.getenv("_R_CHECK_LIMIT_CORES_")!= "") {
if(as.logical(Sys.getenv("_R_CHECK_LIMIT_CORES_"))) {
packageStartupMessage("_R_CHECK_LIMIT_CORES_ is set to TRUE. Running on 2 cores")
Sys.setenv("RAYON_NUM_THREADS" = 2)
}
}
}
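# A usage note (this relies on standard Rayon behavior, not a documented API of
# this package): users can cap the Rust thread pool the same way, provided the
# variable is set before the first join spins the pool up, e.g.
# Sys.setenv("RAYON_NUM_THREADS" = 4)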
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/R/on_load.R |
#' Fuzzy String Grouping Using Minhashing
#'
#' Performs fuzzy string grouping in which similar strings are assigned to the
#' same group. Uses the `fastgreedy.community` community detection algorithm
#' from the `igraph` package to create the groups. Must have igraph installed
#' in order to use this function.
#'
#'
#' @param string a character vector you wish to perform entity resolution on.
#'
#' @param n_gram_width the length of the n_grams used in calculating the
#' jaccard similarity. For best performance, I set this large enough that the
#' chance any string has a specific n_gram is low (e.g. \code{n_gram_width} = 2
#' or 3 when matching on first names, 5 or 6 when matching on entire
#' sentences).
#'
#' @param n_bands the number of bands used in the minhash algorithm (default
#' is 45). Use this in conjunction with the \code{band_width} to determine the
#' performance of the hashing. The default settings are for a
#' (.2,.8,.001,.999)-sensitive hash, i.e. pairs with a similarity of less
#' than .2 have a <.1% chance of being compared, while pairs with a similarity
#' of greater than .8 have a >99.9% chance of being compared.
#'
#' @param band_width the length of each band used in the minhashing algorithm
#' (default is 8). Use this in conjunction with the \code{n_bands} to determine
#' the performance of the hashing. The default settings are for a
#' (.2,.8,.001,.999)-sensitive hash, i.e. pairs with a similarity of less
#' than .2 have a <.1% chance of being compared, while pairs with a similarity
#' of greater than .8 have a >99.9% chance of being compared.
#'
#' @param threshold the jaccard similarity threshold above which two strings
#' should be considered a match (default is .7). The similarity is equal to 1
#' - the jaccard distance between the two strings, so 1 implies the strings are
#' identical, while a similarity of zero implies the strings are completely
#' dissimilar.
#'
#' @param progress set to `TRUE` to report progress of the algorithm
#'
#' @return a character vector storing the group of each element in the original
#' input strings. The input vector is grouped so that similar strings belong to
#' the same group, which is given a standardized name.
#'
#' @examples
#'
#' string <- c("beniamino", "jack", "benjamin", "beniamin",
#' "jacky", "giacomo", "gaicomo")
#' jaccard_string_group(string, threshold = .2, n_bands=90, n_gram_width=1)
#'
#' @export
#' @importFrom stats runif
#' @importFrom utils installed.packages
jaccard_string_group <- function(string, n_gram_width = 2, n_bands = 45, band_width = 8, threshold = .7, progress = FALSE) {
if (system.file(package = "igraph")=="") {
stop("library 'igraph' must be installed to run this function")
}
pairs <- rust_jaccard_join(string,
string,
ngram_width = n_gram_width,
n_bands,
band_size = band_width,
threshold = threshold,
progress = progress,
seed = round(stats::runif(1,0,2^64))
)
graph <- igraph::graph_from_edgelist(pairs)
fc <- igraph::fastgreedy.community(igraph::as.undirected(graph))
groups <- igraph::groups(fc)
lookup_table <- vapply(groups, "[[", integer(1), 1)
membership <- igraph::membership(fc)
return(string[lookup_table[membership]])
}
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/R/string_group.R |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup, include=F---------------------------------------------------------
library(tidyverse)
## ---- echo=F------------------------------------------------------------------
sim_data <- read_csv("sim_data.csv")
sim_data %>%
mutate(
name = ifelse(name == "time", "Time Usage (s)", "Memory Usage (MB)"),
join_type = ifelse(join_type == "Jaccard Distance",
"Jaccard Distance Join",
"Euclidean Distance Joins"),
) %>%
ggplot(aes(x=as.numeric(n), y=value, col = package, linetype = package)) +
geom_point() +
geom_line() +
facet_wrap(~ join_type + name, scales = 'free') +
scale_y_continuous("Time (s) / memory (MB)")
## ---- echo=T, eval=F----------------------------------------------------------
# library(zoomerjoin)
# library(fuzzyjoin)
# library(tidyverse)
# library(microbenchmark)
# library(profmem)
#
#
# # Sample million rows from DIME dataset
# data_1 <- as.data.frame(sample_n(dime_data, 10^6))
# names(data_1) <- c("id_1", "name")
# data_2 <- as.data.frame(sample_n(dime_data, 10^6))
# names(data_2) <- c("id_2", "name")
#
# # Generate datasets for euclidean join benchmarking
# n <- 10^5
# p <- 50
# X <- matrix(rnorm(n * p), n, p)
# X_1 <- as.data.frame(X)
# X_2 <- as.data.frame(X + .000000001)
#
# # Get time and memory use statistics for fuzzyjoin when performing jaccard join
# fuzzy_jaccard_bench <- function(n){
# time <- microbenchmark(
# stringdist_inner_join(data_1[1:n, ],
# data_2[1:n, ],
# method = "jaccard",
# max_dist = .6,
# q = 4
# ),
# times = 10
# )$time %>%
# median()
#
# mem <- profmem(stringdist_inner_join(data_1[1:n, ],
# data_2[1:n, ],
# method = "jaccard",
# max_dist = .6,
# q = 4
# )) %>%
# total()
#
# return(c(time = time, memory = mem))
# }
#
#
# # Get time and memory use statistics for zoomerjoin when performing jaccard join
# zoomer_jaccard_bench <- function(n) {
# time <- microbenchmark(
# jaccard_inner_join(data_1[1:n, ], data_2[1:n, ],
# by = "name", band_width = 11,
# n_bands = 350, threshold = .7,
# n_gram_width = 4
# ),
# times = 50
# )$time %>%
# median()
#
# mem <- profmem(
# jaccard_inner_join(data_1[1:n, ], data_2[1:n, ],
# by = "name", band_width = 11,
# n_bands = 350, threshold = .7,
# n_gram_width = 4
# )
# ) %>%
# total()
#
# return(c(time = time, memory = mem))
# }
#
# # Get time and memory use statistics for fuzzyjoin when performing Euclidean join
# fuzzy_euclid_bench <- function(n) {
# time <- microbenchmark(
# distance_join(X_1[1:n, ], X_2[1:n, ], max_dist = .1, method = "euclidean"),
# times = 10
# )$time %>%
# median()
#
# mem <- total(profmem(
# distance_join(X_1[1:n, ], X_2[1:n, ], max_dist = .1, method = "euclidean")
# ))
#
# return(c(time = time, memory = mem))
# }
#
# # Get time and memory use statistics for zoomerjoin when performing Euclidean join
# zoomer_euclid_bench <- function(n) {
# time <- microbenchmark(
# euclidean_inner_join(X_1[1:n, ], X_2[1:n, ],
# threshold = .1, n_bands = 90,
# band_width = 2, r = .1
# ),
# times = 50
# )$time %>%
# median()
#
# mem <- profmem(euclidean_inner_join(X_1[1:n, ], X_2[1:n, ],
# threshold = .1, n_bands = 90,
# band_width = 2, r = .1
# )) %>%
# total()
#
# return(c(time = time, memory = mem))
# }
#
#
# # Run Grid of Jaccard Benchmarks, Collect results into DF
# n <- seq(500, 4000, 250)
# names(n) <- n
# fuzzy_jacard_benches <- map_df(n, fuzzy_jaccard_bench, .id="n")
# zoomer_jacard_benches <- map_df(n, zoomer_jaccard_bench, .id="n")
# fuzzy_jacard_benches$package <- "fuzzyjoin"
# zoomer_jacard_benches$package <- "zoomerjoin"
# jaccard_benches <- bind_rows(fuzzy_jacard_benches, zoomer_jacard_benches)
# jaccard_benches$join_type <- "Jaccard Distance"
#
# # Run Grid of Euclidean Benchmarks, Collect results into DF
# n <- seq(250, 4000, 250)
# names(n) <- n
# fuzzy_euclid_benches <- map_df(n, fuzzy_euclid_bench, .id="n")
# zoomer_euclid_benches <- map_df(n, zoomer_euclid_bench, .id="n")
# fuzzy_euclid_benches$package <- "fuzzyjoin"
# zoomer_euclid_benches$package <- "zoomerjoin"
# euclid_benches <- bind_rows(fuzzy_euclid_benches, zoomer_euclid_benches)
# euclid_benches$join_type <- "Euclidean Distance"
#
# sim_data <- bind_rows(euclid_benches, jaccard_benches) %>%
# pivot_longer(c(time, memory)) %>%
#   mutate(value = ifelse(name =="time", value / 10^9, value / 10^6)) # convert ns to s and bytes to MB.
#
# write_csv(sim_data, "sim_data.csv")
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/inst/doc/benchmarks.R |
---
title: "Benchmarks"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{benchmarks}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
In this short vignette, I show off benchmarks of the zoomerjoin package,
comparing it to the excellent [fuzzyjoin](https://github.com/dgrtwo/fuzzyjoin)
package. The two packages are designed to do different things - the fuzzyjoin
package is *very fast,* and provides more distance functions (as well as other
joining modes) - but it's a useful comparison as it shows off the time that can
be saved using LSH relative to all pairwise comparisons, as long as you are
okay with using Jaccard similarity.
In the future, I am hoping to expand the package to implement [this LSH method
for the edit
distance](https://academic.oup.com/bioinformatics/article/35/14/i127/5529166),
and will add it to the benchmarks when / if this feature is completed.
```{r setup, include=F}
library(tidyverse)
```
## Benchmarks
Here, I show the time it takes fuzzyjoin and zoomerjoin to fuzzily join two
datasets as the size of each dataset increases. Fuzzyjoin is initially quick,
but the runtime scales with the square of the input size. Zoomerjoin is slower
for small datasets but is less memory-intensive, and scales with the sum of
the rows in each dataset, so it becomes quicker for larger datasets.
```{r, echo=F}
sim_data <- read_csv("sim_data.csv")
sim_data %>%
mutate(
name = ifelse(name == "time", "Time Usage (s)", "Memory Usage (MB)"),
join_type = ifelse(join_type == "Jaccard Distance",
"Jaccard Distance Join",
"Euclidean Distance Joins"),
) %>%
ggplot(aes(x=as.numeric(n), y=value, col = package, linetype = package)) +
geom_point() +
geom_line() +
facet_wrap(~ join_type + name, scales = 'free') +
scale_y_continuous("Time (s) / memory (MB)")
```
## Benchmarking Code:
Below, I include the code used to generate the benchmarks:
```{r, echo=T, eval=F}
library(zoomerjoin)
library(fuzzyjoin)
library(tidyverse)
library(microbenchmark)
library(profmem)
# Sample million rows from DIME dataset
data_1 <- as.data.frame(sample_n(dime_data, 10^6))
names(data_1) <- c("id_1", "name")
data_2 <- as.data.frame(sample_n(dime_data, 10^6))
names(data_2) <- c("id_2", "name")
# Generate datasets for euclidean join benchmarking
n <- 10^5
p <- 50
X <- matrix(rnorm(n * p), n, p)
X_1 <- as.data.frame(X)
X_2 <- as.data.frame(X + .000000001)
# Get time and memory use statistics for fuzzyjoin when performing jaccard join
fuzzy_jaccard_bench <- function(n){
time <- microbenchmark(
stringdist_inner_join(data_1[1:n, ],
data_2[1:n, ],
method = "jaccard",
max_dist = .6,
q = 4
),
times = 10
)$time %>%
median()
mem <- profmem(stringdist_inner_join(data_1[1:n, ],
data_2[1:n, ],
method = "jaccard",
max_dist = .6,
q = 4
)) %>%
total()
return(c(time = time, memory = mem))
}
# Get time and memory use statistics for zoomerjoin when performing jaccard join
zoomer_jaccard_bench <- function(n) {
time <- microbenchmark(
jaccard_inner_join(data_1[1:n, ], data_2[1:n, ],
by = "name", band_width = 11,
n_bands = 350, threshold = .7,
n_gram_width = 4
),
times = 50
)$time %>%
median()
mem <- profmem(
jaccard_inner_join(data_1[1:n, ], data_2[1:n, ],
by = "name", band_width = 11,
n_bands = 350, threshold = .7,
n_gram_width = 4
)
) %>%
total()
return(c(time = time, memory = mem))
}
# Get time and memory use statistics for fuzzyjoin when performing Euclidean join
fuzzy_euclid_bench <- function(n) {
time <- microbenchmark(
distance_join(X_1[1:n, ], X_2[1:n, ], max_dist = .1, method = "euclidean"),
times = 10
)$time %>%
median()
mem <- total(profmem(
distance_join(X_1[1:n, ], X_2[1:n, ], max_dist = .1, method = "euclidean")
))
return(c(time = time, memory = mem))
}
# Get time and memory use statistics for zoomerjoin when performing Euclidean join
zoomer_euclid_bench <- function(n) {
time <- microbenchmark(
euclidean_inner_join(X_1[1:n, ], X_2[1:n, ],
threshold = .1, n_bands = 90,
band_width = 2, r = .1
),
times = 50
)$time %>%
median()
mem <- profmem(euclidean_inner_join(X_1[1:n, ], X_2[1:n, ],
threshold = .1, n_bands = 90,
band_width = 2, r = .1
)) %>%
total()
return(c(time = time, memory = mem))
}
# Run Grid of Jaccard Benchmarks, Collect results into DF
n <- seq(500, 4000, 250)
names(n) <- n
fuzzy_jacard_benches <- map_df(n, fuzzy_jaccard_bench, .id="n")
zoomer_jacard_benches <- map_df(n, zoomer_jaccard_bench, .id="n")
fuzzy_jacard_benches$package <- "fuzzyjoin"
zoomer_jacard_benches$package <- "zoomerjoin"
jaccard_benches <- bind_rows(fuzzy_jacard_benches, zoomer_jacard_benches)
jaccard_benches$join_type <- "Jaccard Distance"
# Run Grid of Euclidean Benchmarks, Collect results into DF
n <- seq(250, 4000, 250)
names(n) <- n
fuzzy_euclid_benches <- map_df(n, fuzzy_euclid_bench, .id="n")
zoomer_euclid_benches <- map_df(n, zoomer_euclid_bench, .id="n")
fuzzy_euclid_benches$package <- "fuzzyjoin"
zoomer_euclid_benches$package <- "zoomerjoin"
euclid_benches <- bind_rows(fuzzy_euclid_benches, zoomer_euclid_benches)
euclid_benches$join_type <- "Euclidean Distance"
sim_data <- bind_rows(euclid_benches, jaccard_benches) %>%
pivot_longer(c(time, memory)) %>%
  mutate(value = ifelse(name =="time", value / 10^9, value / 10^6)) # convert ns to s and bytes to MB.
write_csv(sim_data, "sim_data.csv")
```
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/inst/doc/benchmarks.Rmd |
## -----------------------------------------------------------------------------
library(tidyverse)
library(microbenchmark)
library(fuzzyjoin)
library(zoomerjoin)
corpus_1 <- dime_data %>% # dime data is packaged with zoomerjoin
head(500)
names(corpus_1) <- c("a", "field")
corpus_1
## -----------------------------------------------------------------------------
corpus_2 <- dime_data %>% # dime data is packaged with zoomerjoin
tail(500)
names(corpus_2) <- c("b", "field")
corpus_2
## -----------------------------------------------------------------------------
set.seed(1)
start_time <- Sys.time()
join_out <- jaccard_inner_join(corpus_1, corpus_2,
by = "field", n_gram_width=6,
n_bands=20, band_width=6, threshold = .8)
print(Sys.time() - start_time)
print(join_out)
## -----------------------------------------------------------------------------
jaccard_curve(20,6)
## -----------------------------------------------------------------------------
organization_names <- c(
"American Civil Liberties Union",
"American Civil Liberties Union (ACLU)",
"NRA National Rifle Association",
"National Rifle Association NRA",
"National Rifle Association",
"Planned Parenthood",
"Blue Cross"
)
standardized_organization_names <- jaccard_string_group(organization_names, threshold=.5, band_width = 3)
print(standardized_organization_names)
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/inst/doc/guided_tour.R |
---
title: "A Zoomerjoin Guided Tour"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{A Zoomerjoin Guided Tour}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
### Introduction:
This vignette gives a basic overview of the core functionality of the
zoomerjoin package. Zoomerjoin empowers you to fuzzily-match datasets with
millions of rows in seconds, while staying light on memory usage. This makes it
feasible to perform fuzzy-joins on datasets in the hundreds of millions of
observations in a matter of minutes.
## How Does it Work?
Zoomerjoin's blazingly fast string joins are made possible by
an optimized, performant implementation of the
[MinHash](https://en.wikipedia.org/wiki/MinHash) algorithm written in Rust.
While most conventional joining packages compare all pairs of records in
the two datasets you wish to join, the MinHash algorithm manages to compare
only similar records to each other. This results in matches that are orders of
magnitude faster than other matching software packages: `zoomerjoin` takes
minutes or hours to join datasets that would have taken centuries to join using
other matching methods.
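To build intuition, here is a toy base-R sketch of the banding idea
(illustrative only; zoomerjoin's actual MinHash implementation lives in Rust,
and the simple modular hash functions here are stand-ins):
```{r}
set.seed(1)
shingles <- function(x, k = 3) {
  unique(substring(x, 1:(nchar(x) - k + 1), k:nchar(x)))
}
s1 <- shingles("jumped over the lazy dog")
s2 <- shingles("jumped over hte lazy dog")
vocab <- union(s1, s2)
# 12 cheap hash functions over shingle indices in a shared vocabulary
n_hashes <- 12
hash_a <- sample(1:10000, n_hashes)
hash_b <- sample(1:10000, n_hashes)
p <- 10007 # a prime larger than the vocabulary
signature <- function(s) {
  idx <- match(s, vocab)
  sapply(1:n_hashes, function(i) min((hash_a[i] * idx + hash_b[i]) %% p))
}
# Split each 12-hash signature into 4 bands of width 3; a pair becomes a
# candidate match if any band agrees exactly.
bands <- function(v) split(v, rep(1:4, each = 3))
any(mapply(identical, bands(signature(s1)), bands(signature(s2))))
```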
## Basic Syntax:
If you're familiar with the logical-join syntax from `dplyr`, then you already
know how to use zoomerjoin to fuzzily join two datasets. Zoomerjoin provides
`jaccard_inner_join()` and `jaccard_full_join()` (among others), which are the
fuzzy-joining analogues of the corresponding dplyr functions.
I demonstrate the syntax by using the package to join two corpora, formed
from entries from the [Database on Ideology, Money in Politics, and Elections
(DIME)](https://data.stanford.edu/dime) (Bonica 2016).
The first corpus looks as follows:
```{r}
library(tidyverse)
library(microbenchmark)
library(fuzzyjoin)
library(zoomerjoin)
corpus_1 <- dime_data %>% # dime data is packaged with zoomerjoin
head(500)
names(corpus_1) <- c("a", "field")
corpus_1
```
And the second looks as follows:
```{r}
corpus_2 <- dime_data %>% # dime data is packaged with zoomerjoin
tail(500)
names(corpus_2) <- c("b", "field")
corpus_2
```
The two corpora can't be directly joined because of misspellings. This means
we must use the fuzzy-matching capabilities of zoomerjoin:
```{r}
set.seed(1)
start_time <- Sys.time()
join_out <- jaccard_inner_join(corpus_1, corpus_2,
by = "field", n_gram_width=6,
n_bands=20, band_width=6, threshold = .8)
print(Sys.time() - start_time)
print(join_out)
```
The first two arguments, `a` and `b`, are direct analogues of the `dplyr`
arguments, and are the two data frames you want to join. The `by` field also
acts the same as it does in `dplyr` (it tells the function which columns you
want to match on).
The `n_gram_width` parameter determines how wide the n-grams that are used in
the similarity evaluation should be, while the `threshold` argument determines
how similar a pair of strings has to be (in Jaccard similarity) to be
considered a match. Users of the `stringdist` or `fuzzyjoin` package will be
familiar with both of these arguments, but should bear in mind that those
packages measure *string distance* (where a distance of 0 indicates complete
similarity), while this package operates on *string similarity,* so a threshold
of .8 will keep matches above 80% Jaccard similarity.
The `n_bands` and `band_width` parameters govern the performance of the LSH.
The default parameters should perform well for medium-size (n < 10^7) datasets
where matches are somewhat similar (similarity > .8), but may require tuning in
other settings. The `jaccard_hyper_grid_search()` and `jaccard_curve()` functions
can help select these parameters for you given the properties of the LSH you
desire.
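As a quick sketch, you could ask for a (.2, .8, .001, .999)-sensitive hash and
let the grid search pick the hyperparameters (the exact numbers returned may
vary across package versions):
```{r}
jaccard_hyper_grid_search(.2, .8, .001, .999)
```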
As an example, you can use the `jaccard_curve()` function to plot the probability
that a pair of records is compared at each possible Jaccard similarity, $s$,
between zero and one:
```{r}
jaccard_curve(20,6)
```
By looking at the plot produced, we can see that using these hyperparameters,
comparisons will almost never be made between pairs of records that have a
Jaccard similarity of less than .2 (saving time), while pairs of records that have a
Jaccard similarity of greater than .8 are almost always compared (giving a low
false-negative rate).
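Both claims can be checked numerically with `jaccard_probability()`, which
evaluates the same S-curve at a single similarity (a short illustrative chunk):
```{r}
jaccard_probability(.2, n_bands = 20, band_width = 6)
jaccard_probability(.8, n_bands = 20, band_width = 6)
```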
For more details about the hyperparameters, the `textreuse` package has an
excellent vignette, and zoomerjoin provides a re-implementation of its
profiling tools, `jaccard_probability` and `jaccard_bandwidth` (although the
implementations differ slightly as the hyperparameters in each package are
different).
## Standardizing String Names After A Merge
Often after merging, it can help to standardize the names or fields that have
been joined on. This way, you can assign a unique label or identifying key to
all observations that have a similar value of the merging variable. The
`jaccard_string_group()` function makes this possible. It first performs locality
sensitive hashing to identify similar pairs of observations within the dataset,
and then runs a community detection algorithm to identify clusters of similar
observations, which are each assigned a label. The community-detection
algorithm, `fastgreedy.community()` from the `igraph` package runs in log-linear
time, so the entire algorithm completes in linearithmic time.
Here's a short snippet showing how you can use `jaccard_string_group()` to
standardize a set of organization names.
```{r}
organization_names <- c(
"American Civil Liberties Union",
"American Civil Liberties Union (ACLU)",
"NRA National Rifle Association",
"National Rifle Association NRA",
"National Rifle Association",
"Planned Parenthood",
"Blue Cross"
)
standardized_organization_names <- jaccard_string_group(organization_names, threshold=.5, band_width = 3)
print(standardized_organization_names)
```
### References:
Bonica, Adam. 2016. Database on Ideology, Money in Politics, and Elections: Public version 2.0 [Computer file]. Stanford, CA: Stanford University Libraries.
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/inst/doc/guided_tour.Rmd |
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup--------------------------------------------------------------------
library(zoomerjoin)
## -----------------------------------------------------------------------------
n <- 10^5 # number of data points
d <- 10^2 # dimension
# Create a matrix of 10^5 observations in R^100
X <- matrix(runif(n*d),n,d)
# Second Dataset is a copy of the first with points shifted an infinitesimal
# amount
X_2 <- as.data.frame(X + matrix(rnorm(n*d, 0,.0001), n,d))
X <- as.data.frame(X)
## -----------------------------------------------------------------------------
euclidean_probability(.01, n_bands = 5,band_width = 8,r = .25)
euclidean_probability(.1, n_bands = 5,band_width = 8,r = .25)
euclidean_probability(.01, n_bands = 10,band_width = 4,r = .15)
euclidean_probability(.1, n_bands = 10,band_width = 4,r = .15)
euclidean_probability(.01, n_bands = 40,band_width = 8,r = .15)
euclidean_probability(.1, n_bands = 40,band_width = 8,r = .15)
## -----------------------------------------------------------------------------
set.seed(1)
start <- Sys.time()
joined_out <- euclidean_inner_join(
X,
X_2,
threshold = .01,
n_bands = 40,
band_width = 8,
r=.15)
n_matches <- nrow(joined_out)
time_taken <- Sys.time() - start
print(paste("found", n_matches, "matches in", round(time_taken), "seconds"))
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/inst/doc/matching_vectors.R |
---
title: "Matching Vectors Based on Euclidean Distance"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{matching_vectors}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(zoomerjoin)
```
# Introduction
The flagship feature of zoomerjoin is its tidy joins for strings using the
Jaccard distance, but zoomerjoin also allows you to join vectors using
Euclidean distance. This can be useful for joining addresses or coordinates in
space.
Unlike other nearest-neighbor methods such as KD-trees, the joins do not slow
down as the dimension of the coordinates increases, so zoomerjoin can be used
to find close points in a high-dimensional space (such as word
embeddings).
# Demonstration
For this demonstration, I create a simulated dataset of 10^5 points distributed
uniformly within a 100-dimensional hypercube. I join this to another dataset
which is a copy of the first with each point shifted a tiny random amount.
```{r}
n <- 10^5 # number of data points
d <- 10^2 # dimension
# Create a matrix of 10^5 observations in R^100
X <- matrix(runif(n*d),n,d)
# Second Dataset is a copy of the first with points shifted an infinitesimal
# amount
X_2 <- as.data.frame(X + matrix(rnorm(n*d, 0,.0001), n,d))
X <- as.data.frame(X)
```
I now want to join these two datasets together. The Euclidean joins take 3
hyperparameters: `n_bands`, `band_width`, and `r`, which all have to be chosen
for the problem domain (although the defaults are generally sensible).
I use the `euclidean_probability` function in the package to understand the
probability that two observations at a distance of .01 from each other are
identified as a match under a variety of hyperparameter configurations.
```{r}
euclidean_probability(.01, n_bands = 5,band_width = 8,r = .25)
euclidean_probability(.1, n_bands = 5,band_width = 8,r = .25)
euclidean_probability(.01, n_bands = 10,band_width = 4,r = .15)
euclidean_probability(.1, n_bands = 10,band_width = 4,r = .15)
euclidean_probability(.01, n_bands = 40,band_width = 8,r = .15)
euclidean_probability(.1, n_bands = 40,band_width = 8,r = .15)
```
Using `n_bands=40`, `band_width=8`, and `r=.15` seems to provide a good balance
between identifying all true matches (pairs less than .01 apart are all but
guaranteed to be found) and reducing the number of un-promising comparisons
(pairs greater than .1 apart are unlikely to be compared). I then use
`euclidean_inner_join()` to find all matching pairs across the two datasets:
```{r}
set.seed(1)
start <- Sys.time()
joined_out <- euclidean_inner_join(
X,
X_2,
threshold = .01,
n_bands = 40,
band_width = 8,
r=.15)
n_matches <- nrow(joined_out)
time_taken <- Sys.time() - start
print(paste("found", n_matches, "matches in", round(time_taken), "seconds"))
```
Zoomerjoin easily finds all pairs in just under 30s (perhaps longer
on the runner that renders the website), even though the points lie in
high-dimensional (d=100) space. This makes zoomerjoin a useful tool when trying
to join or find matches between datasets of word or document embeddings.
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/inst/doc/matching_vectors.Rmd |
---
title: "Benchmarks"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{benchmarks}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
## Introduction
In this short vignette, I show off benchmarks of the zoomerjoin package,
comparing it to the excellent [fuzzyjoin](https://github.com/dgrtwo/fuzzyjoin)
package. The two packages are designed to do different things - the fuzzyjoin
package is *very fast,* and provides more distance functions (as well as other
joining modes) - but it's a useful comparison as it shows off the time that can
be saved using LSH relative to all pairwise comparisons, as long as you are
okay with using Jaccard similarity.
In the future, I am hoping to expand the package to implement [this LSH method
for the edit
distance](https://academic.oup.com/bioinformatics/article/35/14/i127/5529166),
and will add it to the benchmarks when / if this feature is completed.
```{r setup, include=F}
library(tidyverse)
```
## Benchmarks
Here, I show the time it takes fuzzyjoin and zoomerjoin to fuzzily join two
datasets as the size of each dataset increases. Fuzzyjoin is initially quick,
but the runtime scales with the square of the input size. Zoomerjoin is slower
for small datasets but is less memory-intensive, and scales with the sum of
the rows in each dataset, so it becomes quicker for larger datasets.
```{r, echo=F}
sim_data <- read_csv("sim_data.csv")
sim_data %>%
mutate(
name = ifelse(name == "time", "Time Usage (s)", "Memory Usage (MB)"),
join_type = ifelse(join_type == "Jaccard Distance",
"Jaccard Distance Join",
"Euclidean Distance Joins"),
) %>%
ggplot(aes(x=as.numeric(n), y=value, col = package, linetype = package)) +
geom_point() +
geom_line() +
facet_wrap(~ join_type + name, scales = 'free') +
scale_y_continuous("Time (s) / memory (MB)")
```
# Benchmarking Code:
Below, I include the code used to generate the benchmarks:
```{r, echo=T, eval=F}
library(zoomerjoin)
library(fuzzyjoin)
library(tidyverse)
library(microbenchmark)
library(profmem)
# Sample million rows from DIME dataset
data_1 <- as.data.frame(sample_n(dime_data, 10^6))
names(data_1) <- c("id_1", "name")
data_2 <- as.data.frame(sample_n(dime_data, 10^6))
names(data_2) <- c("id_2", "name")
# Generate datasets for euclidean join benchmarking
n <- 10^5
p <- 50
X <- matrix(rnorm(n * p), n, p)
X_1 <- as.data.frame(X)
X_2 <- as.data.frame(X + .000000001)
# Get time and memory use statistics for fuzzyjoin when performing jaccard join
fuzzy_jaccard_bench <- function(n){
time <- microbenchmark(
stringdist_inner_join(data_1[1:n, ],
data_2[1:n, ],
method = "jaccard",
max_dist = .6,
q = 4
),
times = 10
)$time %>%
median()
mem <- profmem(stringdist_inner_join(data_1[1:n, ],
data_2[1:n, ],
method = "jaccard",
max_dist = .6,
q = 4
)) %>%
total()
return(c(time = time, memory = mem))
}
# Get time and memory use statistics for zoomerjoin when performing jaccard join
zoomer_jaccard_bench <- function(n) {
time <- microbenchmark(
jaccard_inner_join(data_1[1:n, ], data_2[1:n, ],
by = "name", band_width = 11,
n_bands = 350, threshold = .7,
n_gram_width = 4
),
times = 50
)$time %>%
median()
mem <- profmem(
jaccard_inner_join(data_1[1:n, ], data_2[1:n, ],
by = "name", band_width = 11,
n_bands = 350, threshold = .7,
n_gram_width = 4
)
) %>%
total()
return(c(time = time, memory = mem))
}
# Get time and memory use statistics for fuzzyjoin when performing Euclidean join
fuzzy_euclid_bench <- function(n) {
time <- microbenchmark(
distance_join(X_1[1:n, ], X_2[1:n, ], max_dist = .1, method = "euclidean"),
times = 10
)$time %>%
median()
mem <- total(profmem(
distance_join(X_1[1:n, ], X_2[1:n, ], max_dist = .1, method = "euclidean")
))
return(c(time = time, memory = mem))
}
# Get time and memory use statistics for zoomerjoin when performing Euclidean join
zoomer_euclid_bench <- function(n) {
time <- microbenchmark(
euclidean_inner_join(X_1[1:n, ], X_2[1:n, ],
threshold = .1, n_bands = 90,
band_width = 2, r = .1
),
times = 50
)$time %>%
median()
mem <- profmem(euclidean_inner_join(X_1[1:n, ], X_2[1:n, ],
threshold = .1, n_bands = 90,
band_width = 2, r = .1
)) %>%
total()
return(c(time = time, memory = mem))
}
# Run Grid of Jaccard Benchmarks, Collect results into DF
n <- seq(500, 4000, 250)
names(n) <- n
fuzzy_jacard_benches <- map_df(n, fuzzy_jaccard_bench, .id="n")
zoomer_jacard_benches <- map_df(n, zoomer_jaccard_bench, .id="n")
fuzzy_jacard_benches$package <- "fuzzyjoin"
zoomer_jacard_benches$package <- "zoomerjoin"
jaccard_benches <- bind_rows(fuzzy_jacard_benches, zoomer_jacard_benches)
jaccard_benches$join_type <- "Jaccard Distance"
# Run Grid of Euclidean Benchmarks, Collect results into DF
n <- seq(250, 4000, 250)
names(n) <- n
fuzzy_euclid_benches <- map_df(n, fuzzy_euclid_bench, .id="n")
zoomer_euclid_benches <- map_df(n, zoomer_euclid_bench, .id="n")
fuzzy_euclid_benches$package <- "fuzzyjoin"
zoomer_euclid_benches$package <- "zoomerjoin"
euclid_benches <- bind_rows(fuzzy_euclid_benches, zoomer_euclid_benches)
euclid_benches$join_type <- "Euclidean Distance"
sim_data <- bind_rows(euclid_benches, jaccard_benches) %>%
pivot_longer(c(time, memory)) %>%
mutate(value = ifelse(name =="time", value / 10^9, value / 10^6)) # convert ns to s and bytes to Gb.
write_csv(sim_data, "sim_data.csv")
```
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/vignettes/benchmarks.Rmd |
---
title: "A Zoomerjoin Guided Tour"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{A Zoomerjoin Guided Tour}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
### Introduction:
This vignette gives a basic overview of the core functionality of the
zoomerjoin package. Zoomerjoin empowers you to fuzzily-match datasets with
millions of rows in seconds, while staying light on memory usage. This makes it
feasible to perform fuzzy-joins on datasets in the hundreds of millions of
observations in a matter of minutes.
## How Does it Work?
Zoomerjoin's blazingly fast joins for the string distance are made possible by
an optimized, performant implementation of the
[MinHash](https://en.wikipedia.org/wiki/MinHash) algorithm written in Rust.
While most conventional joining packages compare the all pairs of records in
the two datasets you wish to join, the MinHash algorithm manages to compare
only similar records to each other. This results in matches that are orders of
magnitudes faster than other matching software packages: `zoomerjoin` takes
hours or minutes to join datasets that would have taken centuries to join using
other matching methods.
## Basic Syntax:
If you're familiar with the logical-join syntax from `dplyr`, then you already
know how to use fuzzy join to join two datasets. Zoomerjoin provides
`jaccard_inner_join()` and `jaccard_full_join()` (among others), which are the
fuzzy-joining analogues of the corresponding dplyr functions.
I demonstrate the syntax by using the package to join to corpuses, which formed
from entries from the [Database on Ideology, Money in Politics, and Elections
(DIME)](https://data.stanford.edu/dime) (Bonica 2016).
The first corpus looks as follows:
```{r}
library(tidyverse)
library(microbenchmark)
library(fuzzyjoin)
library(zoomerjoin)
corpus_1 <- dime_data %>% # dime data is packaged with zoomerjoin
head(500)
names(corpus_1) <- c("a", "field")
corpus_1
```
And the second looks as follows:
```{r}
corpus_2 <- dime_data %>% # dime data is packaged with zoomerjoin
tail(500)
names(corpus_2) <- c("b", "field")
corpus_2
```
The two Corpuses can't be directly joined because of misspellings. This means
we must use the fuzzy-matching capabilities of zoomerjoin:
```{r}
set.seed(1)
start_time <- Sys.time()
join_out <- jaccard_inner_join(corpus_1, corpus_2,
by = "field", n_gram_width=6,
n_bands=20, band_width=6, threshold = .8)
print(Sys.time() - start_time)
print(join_out)
```
The first two arguments, `a`, and `b`, are direct analogues of the `dplyr`
arguments, and are the two data frames you want to join. The `by` field also
acts the same as it does in 'dplyr' (it provides the function the columns you
want to match on).
The `n_gram_width` parameter determines how wide the n-grams that are used in
the similarity evaluation should be, while the `threshold` argument determines
how similar a pair of strings has to be (in Jaccard similarity) to be
considered a match. Users of the `stringdist` or `fuzzyjoin` package will be
familiar with both of these arguments, but should bear in mind that those
packages measure *string distance* (where a distance of 0 indicates complete
similarity), while this package operates on *string similarity,* so a threshold
of .8 will keep matches above 80% Jaccard similarity.
The `n_bands` and `band_width` parameters govern the performance of the LSH.
The default parameters should perform well for medium-size (n < 10^7) datasets
where matches are somewhat similar (similarity > .8), but may require tuning in
other settings. the `jaccard_hyper_grid_search()`, and `jaccard_curve()` functions
can help select these parameters for you given the properties of the LSH you
desire.
As an example, you can use the `jaccard_curve()` function to plot the probability
that a pair of records are compared at each possible Jaccard distance, $d$
between zero and one:
```{r}
jaccard_curve(20,6)
```
By looking at the plot produced, we can see that using these hyperparameters,
comparisons will almost never be made between pairs of records that have a
Jaccard similarity of less than .2 (saving time), pairs of records that have a
Jaccard similarity of greater than .8 are almost always compared (giving a low
false-negative rate).
For more details about the hyperparameters, the `textreuse` package has an
excellent vignette, and zoomerjoin provides a re-implementation of its
profiling tools, `jaccard_probability,` and `jaccard_bandwidth` (although the
implementations differ slightly as the hyperparameters in each package are
different).
## Standardizing String Names After A Merge
Often after merging, it can help to standardize the names or fields that have
been joined on. This way, you can assign a unique label or identifying key to
all observations that have a similar value of the merging variable. The
`jaccard_string_group()` function makes this possible. It first performs locality
sensitive hashing to identify similar pairs of observations within the dataset,
and then runs a community detection algorithm to identify clusters of similar
observations, which are each assigned a label. The community-detection
algorithm, `fastgreedy.community()` from the `igraph` package runs in log-linear
time, so the entire algorithm completes in linearithmic time.
Here's a short snippet showing how you can use `jaccard_string_group()` to
standardize a set of organization names.
```{r}
organization_names <- c(
"American Civil Liberties Union",
"American Civil Liberties Union (ACLU)",
"NRA National Rifle Association",
"National Rifle Association NRA",
"National Rifle Association",
"Planned Parenthood",
"Blue Cross"
)
standardized_organization_names <- jaccard_string_group(organization_names, threshold=.5, band_width = 3)
print(standardized_organization_names)
```
### References:
Bonica, Adam. 2016. Database on Ideology, Money in Politics, and Elections: Public version 2.0 [Computer file]. Stanford, CA: Stanford University Libraries.
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/vignettes/guided_tour.Rmd |
---
title: "Matching Vectors Based on Euclidean Distance"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{matching_vectors}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
```{r setup}
library(zoomerjoin)
```
# Introduction
The flagship feature of zoomerjoin is its tidy joins for strings using the
Jaccard distance, but zoomerjoin also allows you to join vectors using
Euclidean distance. This can be useful for joining addresses or coordinates in
space.
Unlike other nearest-neighbor methods such as KD-trees, the joins do not slow
down as the dimension of the coordinates increases, so zoomerjoin can be used
to find close points in a high-dimensional space (such as word
embeddings).
# Demonstration
For this demonstration, I create a simulated dataset of 10^5 points distributed
uniformly within a 100-dimensional hypercube. I join this to another dataset
which is a copy of the first, with each point shifted by a tiny random amount.
```{r}
n <- 10^5 # number of data points
d <- 10^2 # dimension
# Create a matrix of 10^5 observations in R^100
X <- matrix(runif(n * d), n, d)
# The second dataset is a copy of the first with each point shifted by an
# infinitesimal amount
X_2 <- as.data.frame(X + matrix(rnorm(n * d, 0, .0001), n, d))
X <- as.data.frame(X)
```
I now want to join these two datasets together. The Euclidean joins take 3
hyperparameters: `n_bands`, `band_width`, and `r`, which all have to be chosen
for the problem domain (although the defaults are generally sensible).
I use the `euclidean_probability()` function in the package to understand the
probability that two observations at a distance of .01 from each other are
identified as a match under a variety of hyperparameter configurations.
```{r}
euclidean_probability(.01, n_bands = 5, band_width = 8, r = .25)
euclidean_probability(.1, n_bands = 5, band_width = 8, r = .25)
euclidean_probability(.01, n_bands = 10, band_width = 4, r = .15)
euclidean_probability(.1, n_bands = 10, band_width = 4, r = .15)
euclidean_probability(.01, n_bands = 40, band_width = 8, r = .15)
euclidean_probability(.1, n_bands = 40, band_width = 8, r = .15)
```
Using `n_bands = 40`, `band_width = 8`, and `r = .15` seems to provide a good
balance between identifying all true matches (as pairs less than .01 apart are
guaranteed to be found) and reducing the number of un-promising comparisons
(as pairs greater than .1 apart are unlikely to be compared). I then use
`euclidean_inner_join()` to find all matching pairs across the two datasets:
```{r}
set.seed(1)
start <- Sys.time()
joined_out <- euclidean_inner_join(
  X,
  X_2,
  threshold = .01,
  n_bands = 40,
  band_width = 8,
  r = .15
)
n_matches <- nrow(joined_out)
time_taken <- Sys.time() - start
print(paste("found", n_matches, "matches in", round(time_taken), "seconds"))
```
Zoomerjoin easily finds all pairs in just under 30 seconds (perhaps longer
on the runner that renders the website), even though the points lie in a
high-dimensional (d = 100) space. This makes zoomerjoin a useful tool when
trying to join or find matches between datasets of word or document embeddings.
| /scratch/gouwar.j/cran-all/cranData/zoomerjoin/vignettes/matching_vectors.Rmd |
#' Get All Users in an Account
#'
#' Get all users in a zoom account. The user id is required to get a list of
#' webinars hosted by a particular user.
#'
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#'
#'
#' @importFrom magrittr "%>%"
#' @importFrom tidyselect "everything"
#' @import dplyr
#' @importFrom janitor "clean_names"
#' @importFrom purrr "map_dfr"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with the list of all users in the account.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_account_users(your_account_id,
#' your_client_id,
#' your_client_secret)
#' }
get_account_users <- function(account_id,
client_id,
client_secret)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "getusers")
# api_query_params <- generate_query_params(query = "getwebinarregistrants")
# message(api_query_params)
elements <- list()
next_token <- ""
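  # Page through the results: Zoom returns at most 300 records per page and
  # supplies a next_page_token until the final page, where the token is empty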
while (next_token != "STOP") {
resp <- zoom_api_request(verb = "GET",
url = api_url,
token = access_token,
query_params = list(page_size = 300,
next_page_token = next_token)#api_query_params
)
resp2 <- jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)
next_token <- dplyr::if_else(resp2$next_page_token == "",
"STOP",
resp2$next_page_token)
elements <- append(elements, httr::content(resp, "text"))
}
list_to_df <- function(.x) {
df <- as.data.frame(jsonlite::fromJSON(.x, flatten = TRUE)) %>%
dplyr::mutate(dplyr::across(.cols = tidyselect::everything(), as.character))
}
df <- purrr::map_dfr(elements, list_to_df) %>%
janitor::clean_names() %>%
dplyr::select(-c(
.data$page_count,
.data$page_number,
.data$page_size,
.data$next_page_token,
.data$total_records
))
return(df)
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_account_users.R |
#' Get Webinar Panelists
#'
#' Get the list of attendees who were also panelists from a single webinar.
#'
#' @param webinar_id Zoom Webinar Id, typically an 11 digit number.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#'
#' @importFrom magrittr "%>%"
#' @importFrom tidyr "unnest"
#' @importFrom janitor "clean_names"
#' @import dplyr
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with the list of panelists from that webinar.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_panelists(webinar_id = "99911112222",
#'   your_account_id,
#'   your_client_id,
#'   your_client_secret)
#' }
get_panelists <- function(webinar_id,
account_id,
client_id,
client_secret)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "getpanelists",
webinar_id = webinar_id)
  # Send GET request to the webinar panelists endpoint
resp <- zoom_api_request(verb = "GET", url = api_url, token = access_token, query_params = "")
  df <- as.data.frame(jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)) %>%
    # tidyr::unnest("questions.question_details") %>%
    janitor::clean_names() %>%
    dplyr::select(-c(.data$total_records))
  return(df)
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_panelists.R |
#' Get Webinar Registration Question Responses
#'
#' Get registration question answers from all registrants for a single webinar.
#'
#' @param webinar_id Zoom Webinar Id, typically an 11 digit number.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#' @param registrant_status One or more of "approved", "pending", or
#' "denied". Default is "approved" only.
#'
#' @importFrom magrittr "%>%"
#' @import dplyr
#' @importFrom tidyr "unnest"
#' @importFrom janitor "clean_names"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with the registration question answers.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_registration_questions(webinar_id = "99911112222",
#' your_account_id,
#' your_client_id,
#' your_client_secret,
#' c("approved", "denied", "pending"))
#' }
get_registration_questions <- function(webinar_id,
account_id,
client_id,
client_secret,
registrant_status =
c("approved")
)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "getwebinarregistrants",
webinar_id = webinar_id)
elements <- list()
next_token <- ""
skip <- ""
status_options <- registrant_status
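  # For each requested registrant status, page through the registrant list,
  # collecting the raw JSON pages before flattening them into a data frame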
get_data_for_each_status <- function(.x){
while (next_token != "STOP") {
resp <- zoom_api_request(verb = "GET",
url = api_url,
token = access_token,
query_params = list(page_size = 300,
next_page_token = next_token,
status = .x)
)
if(jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)$total_records == 0) {
message(paste0("Webinar Id is found but there are not any registrants",
" with status '", .x, "'."))
next_token <- "STOP"
skip <- "YES"
} else {
resp2 <- jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)
next_token <- dplyr::if_else(resp2$next_page_token == "", "STOP", resp2$next_page_token)
elements <- append(elements, httr::content(resp, "text"))
skip <- "NO"
}
}
if(skip != "YES"){
list_to_df <- function(.x) {
df <- as.data.frame(jsonlite::fromJSON(.x, flatten = TRUE)) %>%
tidyr::unnest(.data$registrants.custom_questions) %>%
dplyr::mutate(dplyr::across(.cols = tidyselect::everything(), as.character))
if("title" %in% colnames(df)) {
df2 <- df %>%
dplyr::select(.data$registrants.id, .data$registrants.email, .data$title, .data$value) %>%
dplyr::rename(question = .data$title, response = .data$value) %>%
janitor::clean_names()
return(df2)
} else {
message(paste0("Zoom API did not return any registration question ",
"responses for selected Webinar Id"))
}
}
reg_q <- purrr::map_dfr(elements, list_to_df)
return(reg_q)
}
}
final_df <- purrr::map_dfr(status_options, get_data_for_each_status)
return(final_df)
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_registration_questions.R |
#' Get Webinar Tracking Sources
#'
#' Get a summary of registrations and visitors by tracking source for a
#' specific webinar.
#'
#' @param webinar_id Zoom Webinar Id, typically an 11 digit number.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#'
#' @importFrom magrittr "%>%"
#' @importFrom janitor "clean_names"
#' @import dplyr
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with counts of visitors and registrations by tracking
#'   source for that webinar.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_tracking_sources(webinar_id = "99911112222",
#' your_account_id,
#' your_client_id,
#' your_client_secret)
#' }
get_tracking_sources <- function(webinar_id,
account_id,
client_id,
client_secret)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "gettrackingsources",
webinar_id = webinar_id)
  # Send GET request to the tracking sources endpoint
resp <- zoom_api_request(verb = "GET", url = api_url, token = access_token, query_params = "")
df <- as.data.frame(jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)) %>%
janitor::clean_names() %>%
dplyr::select(-c("tracking_sources_tracking_url", "total_records")) %>%
dplyr::rename(visitor_count = "tracking_sources_visitor_count",
registration_count = "tracking_sources_registrationr_count",
tracking_source_name = "tracking_sources_source_name",
tracking_source_id = "tracking_sources_id") %>%
    dplyr::mutate(webinar_id = webinar_id) %>%
    dplyr::relocate(webinar_id)
  return(df)
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_tracking_sources.R |
#' Get Webinar Details
#'
#' Get metadata about a single webinar.
#'
#' @param webinar_id Zoom Webinar Id, typically an 11 digit number.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#'
#' @importFrom magrittr "%>%"
#' @importFrom janitor "clean_names"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with the metadata about a webinar.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_webinar_details(webinar_id = "99911112222",
#' your_account_id,
#' your_client_id,
#' your_client_secret)
#' }
get_webinar_details <- function(webinar_id,
account_id,
client_id,
client_secret)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "getwebinardetails",
webinar_id = webinar_id)
  # Send GET request to the webinar details endpoint
  resp <- zoom_api_request(verb = "GET", url = api_url, token = access_token, query_params = "")
  # Parse the response, dropping the 13th element of the content (a nested
  # component that would otherwise break the as.data.frame() coercion)
  cnt <- httr::content(resp)
  cnt2 <- cnt[-13]
  df <- as.data.frame(cnt2) %>%
    janitor::clean_names()
  return(df)
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_webinar_details.R |
#' Get Webinar Participants
#'
#' Get participant info about a single webinar.
#'
#' @param webinar_id Zoom Webinar Id, typically an 11 digit number.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#'
#' @importFrom magrittr "%>%"
#' @importFrom tidyselect "everything"
#' @import dplyr
#' @importFrom janitor "clean_names"
#' @importFrom purrr "map_dfr"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with data on each participant at a webinar.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_webinar_participants(webinar_id = "99911112222",
#' your_account_id,
#' your_client_id,
#' your_client_secret)
#' }
get_webinar_participants <- function(webinar_id,
account_id,
client_id,
client_secret)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "getwebinarparticipants",
webinar_id = webinar_id)
# api_query_params <- generate_query_params(query = "getwebinarparticipants")
# message(api_query_params)
elements <- list()
next_token <- ""
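  # Page through the participants: Zoom returns at most 300 records per page
  # and supplies a next_page_token until the final page, where the token is empty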
while (next_token != "STOP") {
resp <- zoom_api_request(verb = "GET",
url = api_url,
token = access_token,
query_params = list(page_size = 300,
next_page_token = next_token)#api_query_params
)
resp2 <- jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)
next_token <- dplyr::if_else(resp2$next_page_token == "", "STOP", resp2$next_page_token)
elements <- append(elements, httr::content(resp, "text"))
}
list_to_df <- function(.x) {
df <- as.data.frame(jsonlite::fromJSON(.x, flatten = TRUE)) %>%
dplyr::mutate(dplyr::across(.cols = tidyselect::everything(), as.character))
}
df <- purrr::map_dfr(elements, list_to_df) %>%
janitor::clean_names() %>%
dplyr::select(-c(
.data$page_size,
.data$next_page_token,
.data$page_count,
.data$total_records
)
)
return(df)
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_webinar_participants.R |
#' Get Webinar Polls
#'
#' Get the polls summary from a single webinar.
#'
#' @param webinar_id Zoom Webinar Id, typically an 11 digit number.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#'
#' @importFrom magrittr "%>%"
#' @importFrom tidyr "unnest"
#' @importFrom janitor "clean_names"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with poll results from a webinar.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_webinar_polls(webinar_id = "99911112222",
#' your_account_id,
#' your_client_id,
#' your_client_secret)
#' }
get_webinar_polls <- function(webinar_id,
account_id,
client_id,
client_secret)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "getwebinarpolls",
webinar_id = webinar_id)
  # Send GET request to the webinar polls endpoint
resp <- zoom_api_request(verb = "GET", url = api_url, token = access_token, query_params = "")
# check if it is empty (applicable data not available for this webinar)
if(length(httr::content(resp)$questions) == 0){
message("Zoom API returned no poll results for this webinar")
return(list())
} else {
df <- as.data.frame(jsonlite::fromJSON(
httr::content(resp, "text"),
flatten = TRUE
)
) %>%
tidyr::unnest("questions.question_details") %>%
janitor::clean_names()
return(df)
}
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_webinar_polls.R |
#' Get Webinar Q & A
#'
#' Get the Q&A summary from a single webinar.
#'
#' @param webinar_id Zoom Webinar Id, typically an 11 digit number.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#'
#' @importFrom magrittr "%>%"
#' @importFrom tidyr "unnest"
#' @importFrom janitor "clean_names"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with all of the Q&A data from a webinar.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_webinar_qanda(webinar_id = "99911112222",
#' your_account_id,
#' your_client_id,
#' your_client_secret)
#' }
get_webinar_qanda <- function(webinar_id,
account_id,
client_id,
client_secret)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "getwebinarqanda",
webinar_id = webinar_id)
  # Send GET request to the webinar Q&A endpoint
resp <- zoom_api_request(verb = "GET",
url = api_url,
token = access_token,
query_params = "")
# check if it is empty (applicable data not available for this webinar)
if(length(httr::content(resp)$questions) == 0){
message("Zoom API returned no Q&A results for this webinar")
return(list())
} else {
df <- as.data.frame(jsonlite::fromJSON(
httr::content(resp, "text"),
flatten = TRUE
)
) %>%
tidyr::unnest("questions.question_details") %>%
janitor::clean_names()
return(df)
}
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_webinar_qanda.R |
#' Get Webinar Registrants
#'
#' Get registrant info about a single webinar.
#'
#' @param webinar_id Zoom Webinar Id, typically an 11 digit number.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#' @param registrant_status One or more of "approved", "pending", or
#' "denied". Default is "approved" only.
#'
#' @importFrom magrittr "%>%"
#' @importFrom tidyselect "everything"
#' @import dplyr
#' @importFrom janitor "clean_names"
#' @importFrom purrr "map_dfr"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with data on all the registrants for a webinar.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- get_webinar_registrants(webinar_id = "99911112222",
#' your_account_id,
#' your_client_id,
#' your_client_secret,
#' c("approved", "denied", "pending"))
#' }
get_webinar_registrants <- function(webinar_id,
account_id,
client_id,
client_secret,
registrant_status =
c("approved")
)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "getwebinarregistrants",
webinar_id = webinar_id)
elements <- list()
next_token <- ""
skip <- ""
status_options <- registrant_status
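  # For each requested registrant status, page through the registrant list,
  # collecting the raw JSON pages before flattening them into a data frame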
get_data_for_each_status <- function(.x){
while (next_token != "STOP") {
resp <- zoom_api_request(verb = "GET",
url = api_url,
token = access_token,
query_params = list(page_size = 300,
next_page_token = next_token,
status = .x)
)
if(jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)$total_records == 0) {
message(paste0("Webinar Id is found but there are not any registrants",
" with status '", .x, "'."))
next_token <- "STOP"
skip <- "YES"
} else {
resp2 <- jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)
next_token <- dplyr::if_else(resp2$next_page_token == "", "STOP", resp2$next_page_token)
elements <- append(elements, httr::content(resp, "text"))
skip <- "NO"
}
}
if(skip != "YES"){
list_to_df <- function(.x) {
df <- as.data.frame(jsonlite::fromJSON(.x, flatten = TRUE)) %>%
dplyr::mutate(dplyr::across(.cols = tidyselect::everything(), as.character))
}
df <- purrr::map_dfr(elements, list_to_df) %>%
janitor::clean_names() %>%
dplyr::select(-c(
.data$registrants_custom_questions,
.data$page_size,
.data$next_page_token,
.data$total_records
))
return(df)
}
}
final_df <- purrr::map_dfr(status_options, get_data_for_each_status)
return(final_df)
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/get_webinar_registrants.R |
#' Get List of Webinars for a User.
#'
#' Get list of webinars for a User. This is used to get the webinar Id's to
#' pass into other functions.
#'
#' @param user_id Zoom User Id.
#' @param account_id Account Id granted by the Zoom developer app.
#' @param client_id Client Id granted by the Zoom developer app.
#' @param client_secret Client secret granted by the Zoom developer app.
#'
#' @importFrom dplyr "select"
#' @importFrom magrittr "%>%"
#' @importFrom janitor "clean_names"
#' @importFrom jsonlite "fromJSON"
#' @importFrom httr "content"
#'
#' @return A data frame with all of the webinars hosted by a specific user.
#'
#' @seealso See <https://marketplace.zoom.us/docs/api-reference/zoom-api/> for
#' documentation on the Zoom API.
#' @export
#' @examples
#' \dontrun{
#' dat <- list_webinars(user_id = "user_id_string",
#'   your_account_id,
#'   your_client_id,
#'   your_client_secret)
#' }
list_webinars <- function(user_id,
account_id,
client_id,
client_secret)
{
. <- NA # prevent variable binding note for the dot
# Get new access token
access_token <- get_access_token(account_id, client_id, client_secret)
# Function-specific API stuff
api_url <- generate_url(query = "listwebinars",
user_id = user_id)
  # Send GET request to the list webinars endpoint
  resp <- zoom_api_request(verb = "GET", url = api_url, token = access_token, query_params = list(page_size = 300))
  # Parse the response into a data frame
  dat <- as.data.frame(jsonlite::fromJSON(httr::content(resp, "text"), flatten = TRUE)) %>%
    janitor::clean_names() %>%
    dplyr::select(-c(
      .data$page_size,
      .data$next_page_token,
      .data$total_records
    ))
  return(dat)
}
| /scratch/gouwar.j/cran-all/cranData/zoomr/R/list_webinars.R |
#' Checks responses against Zoom response codes and returns error message.
#'
#' @param res results object from httr
#' @keywords internal
zoom_response_codes <-
function(res){
# Exit if fine:
if(res$status_code == 200){
return()
}
# Get error message
error_message <-
switch(
as.character(res$status_code),
`4700` =
c("Zoom API reported an authentication error (4700):",
"Your access token does not contain permission to access the",
"requested API endpoint scopes."),
`300` =
c("Zoom API reported an invalid request (300):",
"The next page token is invalid or expired."),
`400` =
c("Zoom API reported an invalid request:",
"Bad Request."),
`3001` =
c("Zoom API reported an invalid request (3001):",
"Meeting/Webinar ID does not exist"),
`401` =
c("Zoom API reported an invalid access token (401)"),
`404` =
c("Zoom API reported webinar Id is not found or has expired."),
# Default response for unknown status code:
c(glue::glue("Zoom API reported an atypical status code {res$status_code}"),
glue::glue("Full response: {res}"),
"A dictionary of status codes can be found here: https://developer.mozilla.org/en-US/docs/Web/HTTP/Status",
"Please check your request, and report at https://github.com/chrisumphlett/zoomr/issues if reoccurring:")
)
# Report the error message:
rlang::abort(error_message)
}
#' Get Access token
#'
#' Request a new token each time user executes a package function.
#'
#' @param account_id Zoom API app Account Id.
#' @param client_id Zoom API app Client Id.
#' @param client_secret Zoom API app Client Secret.
#'
#' @import httr
#'
#' @keywords internal
get_access_token <-
function(account_id, client_id, client_secret) {
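    # Server-to-server OAuth: POST to the Zoom token endpoint with the
    # account_credentials grant, authenticating with the client id and secret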
url <- paste0("https://zoom.us/oauth/token?grant_type=account_credentials&account_id=", account_id)
request <- httr::POST(
url = url,
httr::authenticate(client_id, client_secret))
# Check if response type is OK
zoom_response_codes(request)
token <- httr::content(request)$access_token
return(token)
}
#' Generate URL for specific API query by type and (if appropriate) ID
#'
#' This is based on the function of the same name in the qualtRics package.
#'
#' @param query string. The specific API query desired. Generally named the
#' same as associated functions but without underscores, so the request for
#'   `getwebinardetails()` would be "getwebinardetails".
#' @param ... Named elements of URL for specific query desired, such as
#' `webinar_id`.
#'
#' @importFrom glue glue
#'
#' @return Endpoint URL to be passed to querying tools
#' @keywords internal
generate_url <-
function(query, ...){
args <- list(...)
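    # Make the named arguments (e.g. webinar_id) available in this
    # environment so they can be interpolated into the endpoint template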
list2env(args, envir = environment())
    # Zoom API base URL:
    base_url <- "api.zoom.us"
    # Construct URL root for the v2 API endpoint:
    root_url <-
      glue::glue("https://{base_url}/v2")
# List of templates for how to build URLs
# (add to this when new functions made):
endpoint_template <-
switch(
query,
getwebinardetails = "{rooturl}/report/webinars/{webinar_id}",
getwebinarparticipants = "{rooturl}/past_webinars/{webinar_id}/participants",
getwebinarqanda = "{rooturl}/report/webinars/{webinar_id}/qa",
getwebinarpolls = "{rooturl}/report/webinars/{webinar_id}/polls",
listwebinars = "{rooturl}/users/{user_id}/webinars",
getwebinarregistrants = "{rooturl}/webinars/{webinar_id}/registrants",
getpanelists = "{rooturl}/webinars/{webinar_id}/panelists",
getusers = "{rooturl}/users",
gettrackingsources = "{rooturl}/webinars/{webinar_id}/tracking_sources",
rlang::abort("Internal error: invalid URL generation query")
)
# Construct the actual URL:
api_url <- glue::glue(endpoint_template, rooturl = root_url, ...)
}
#' Generate Query Parameters for specific API query
#'
#' This is based on the `generate_url()` function.
#'
#' @param query string. The specific API query desired. Generally named the
#' same as associated functions but without underscores, so the request for
#'   `getwebinardetails()` would be "getwebinardetails".
#' @param ... Named query parameter elements for desired api endpoint, such as
#' `next_page_token`.
#'
#' @importFrom glue glue
#'
#' @return Query parameters to be passed to querying tools
#' @keywords internal
generate_query_params <-
function(query, ...){
args <- list(...)
list2env(args, envir = environment())
# List of templates for how to build URLs
# (add to this when new functions made):
params_template <-
switch(
query,
getwebinarparticipants = "list(page_size = 300)",
rlang::abort("Internal error: invalid query parameters generation")
)
# Construct the actual URL:
api_query_params <- glue::glue(params_template, ...)
}
# query = list(page_size = 2,
# next_page_token = "AsM0AJYGmwMWdMc5YoOLX5CvMUts8D1l1A2"))
#' Send httr requests to Zoom API
#'
#' This is based on the function of the same name in the qualtRics package.
#'
#' @param verb Type of request to be sent (@seealso [httr::VERB()])
#' @param url Zoom endpoint URL created by [generate_url()] functions
#' @param ... arguments passed to httr::content when parsing
#'
#' @import httr
#'
#' @keywords internal
zoom_api_request <-
function(verb = c("GET", "POST"),
url = url,
body = NULL,
as = c("parsed", "raw"),
token,
query_params = query_params,
...
) {
# Match args
verb <- rlang::arg_match(verb)
# Send request to Zoom API
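    # Retry up to 3 times on transient failures, but terminate immediately on
    # 4xx client errors (terminate_on = 400:451), since retrying cannot help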
res <- httr::RETRY(
verb,
url = url,
httr::add_headers(
"Authorization" = paste0("Bearer ", token)
),
# body = body,
times = 3,
terminate_on = 400:451,
quiet = TRUE,
query = query_params
)
# Check if response type is OK
zoom_response_codes(res)
# Get content out:
cnt <-
httr::content(
x = res,
"text",
...
)
# return(cnt)
return(res)
} | /scratch/gouwar.j/cran-all/cranData/zoomr/R/utils.R |
################################################################################
#
#'
#' Add the WHO Growth Reference z-scores to a data frame of anthropometric data
#' for weight, height or length, MUAC, head circumference, sub-scapular
#' skinfold and triceps skinfold.
#'
#' @param data A survey dataset as a data.frame object
#' @param sex Name of variable specifying the sex of the subject. This must be
#' coded as \code{1 = male} and \code{2 = female}. Give a quoted variable
#' name as in (e.g.) \code{"sex"}.
#'
#' @param firstPart Name of variable specifying:
#' \itemize{
#' \item Weight (kg) for BMI/A, W/A, W/H, or W/L
#' \item Head circumference (cm) for HC/A
#'   \item Height (cm) for H/A
#' \item Length (cm) for L/A
#' \item MUAC (cm) for MUAC/A
#' \item Sub-scapular skinfold (mm) for SSF/A
#' \item Triceps skinfold (mm) for TSF/A
#' }
#'
#' Give a quoted variable name as in (e.g.) \code{"weight"}. Be careful with
#' units (weight in kg; height, length, head circumference, and MUAC in cm,
#' skinfolds in mm).
#'
#' @param secondPart Name of variable specifying:
#' \itemize{
#' \item Age (days) for H/A, HC/A, L/A, MUAC/A, SSF/A, or TSF/A
#' \item Height (cm) BMI/A or W/H
#' \item Length (cm) for W/L
#' }
#'
#' Give a quoted variable name as in (e.g.) \code{"age"}. Be careful with
#' units (age in days; height and length in cm).
#'
#' @param thirdPart Name of variable specifying age (in days) for BMI/A. Give a
#' quoted variable name as in (e.g.) \code{"age"}. Be careful with units
#' (age in days).
#'
#' @param index The index to be calculated and added to \code{data}. One of:
#' \describe{
#' \item{\code{bfa}}{BMI for age}
#' \item{\code{hca}}{Head circumference for age}
#' \item{\code{hfa}}{Height for age}
#' \item{\code{lfa}}{Length for age}
#' \item{\code{mfa}}{MUAC for age}
#' \item{\code{ssa}}{Sub-scapular skinfold for age}
#' \item{\code{tsa}}{Triceps skinfold for age}
#' \item{\code{wfa}}{Weight for age}
#' \item{\code{wfh}}{Weight for height}
#' \item{\code{wfl}}{Weight for length}
#' }
#'
#' Give a quoted index name as in (e.g.) \code{"wfh"}.
#'
#' @param standing Variable specifying how stature was measured. If NULL then age
#' (for \code{"hfa"} or \code{"lfa"}) or height rules (for \code{"wfh"} or
#' \code{"wfl"}) will be applied. This must be coded as \code{1 = Standing};
#' \code{2 = Supine}; \code{3 = Unknown}. All other values will be recoded to
#' \code{3 = Unknown}. Give a quoted variable name as in (e.g.) \code{"measured"}
#' or a single value (e.g.\code{"measured = 1"}). If no value (or NULL) is
#' specified then height and age rules will be applied.
#'
#' @param output The name of the column containing the specified index to be
#' added to the dataset. This is an optional parameter. If you do not specify
#' a value for output then the added column will take the name of the specified
#' index with a \code{"z"} appended.
#'
#' @param digits The number of decimal places for \code{output}. Defaults to 2 d.p.
#'
#' @return A data.frame of the survey dataset with the calculated z-scores added.
#'
#' @examples
#' # Calculate weight-for-height (wfh) for the anthro3 dataset
#' addWGSR(data = anthro3,
#' sex = "sex",
#' firstPart = "weight",
#' secondPart = "height",
#' index = "wfh")
#'
#' # Calculate weight-for-age (wfa) for the anthro3 dataset
#' addWGSR(data = anthro3,
#' sex = "sex",
#' firstPart = "weight",
#' secondPart = "age",
#' index = "wfa")
#'
#' # Calculate height-for-age (hfa) for the anthro3 dataset
#' addWGSR(data = anthro3,
#' sex = "sex",
#' firstPart = "height",
#' secondPart = "age",
#' index = "hfa")
#'
#' # Calculate MUAC-for-age (mfa) for the anthro4 dataset
#'
#' ## Convert age in anthro4 from months to days
#' testData <- anthro4
#' testData$age <- testData$agemons * (365.25 / 12)
#'
#' addWGSR(data = testData,
#' sex = "sex",
#' firstPart = "muac",
#' secondPart = "age",
#' index = "mfa")
#'
#' @export
#'
#'
#
################################################################################
addWGSR <- function(data, sex, firstPart, secondPart, thirdPart = NA,
index = NA, standing = NULL,
output = paste(index, "z", sep = ""), digits = 2) {
## If 'standing' is not specified then create a column in 'data' holding 3 (unknown) for all rows
addedStanding <- FALSE
if(is.null(standing)) {
## Random column name for 'standing'
standing <- paste(sample(c(letters, LETTERS), size = 16, replace = TRUE),
collapse = "")
data[[standing]] <- 3
addedStanding <- TRUE
}
## Calculate specified index
z <- vector(mode = "numeric", length = nrow(data))
pb <- txtProgressBar(min = 0, max = nrow(data), style = 1)
for(i in 1:nrow(data)) {
z[i] <- ifelse(!is.na(thirdPart),
getWGSR(sex = data[[sex]][i], firstPart = data[[firstPart]][i],
secondPart = data[[secondPart]][i],
index = index, standing = data[[standing]][i],
thirdPart = data[[thirdPart]][i]),
getWGSR(sex = data[[sex]][i], firstPart = data[[firstPart]][i],
secondPart = data[[secondPart]][i],
index = index, standing = data[[standing]][i]))
setTxtProgressBar(pb, i)
}
cat("\n", sep = "")
## Remove added 'standing' column from 'data'
if(addedStanding) {
data[[standing]] <- NULL
}
## Add index to 'data' and return 'data'
data[[output]] <- round(z, digits = digits)
return(data)
}
################################################################################
#
#'
#' Calculate WHO Growth Reference z-score for a given anthropometric
#' measurement.
#'
#' This function is usually called by the \code{addWGSR()} function
#' but could be used as a stand-alone calculator for getting z-score for a given
#' anthropometric measurement.
#'
#' @param sex Sex of the subject. This must be coded as \code{1 = male} and
#' \code{2 = female}.
#' @param firstPart Name of variable specifying:
#' \itemize{
#' \item Weight (kg) for BMI/A, W/A, W/H, or W/L
#' \item Head circumference (cm) for HC/A
#'   \item Height (cm) for H/A
#' \item Length (cm) for L/A
#' \item MUAC (cm) for MUAC/A
#' \item Sub-scapular skinfold (mm) for SSF/A
#' \item Triceps skinfold (mm) for TSF/A
#' }
#'
#' Give a quoted variable name as in (e.g.) \code{"weight"}. Be careful with
#' units (weight in kg; height, length, head circumference, and MUAC in cm,
#' skinfolds in mm).
#'
#' @param secondPart Name of variable specifying:
#' \itemize{
#' \item Age (days) for H/A, HC/A, L/A, MUAC/A, SSF/A, or TSF/A
#' \item Height (cm) BMI/A or W/H
#' \item Length (cm) for W/L
#' }
#'
#' Give a quoted variable name as in (e.g.) \code{"age"}. Be careful with
#' units (age in days; height and length in cm).
#'
#' @param thirdPart Name of variable specifying age (in days) for BMI/A. Give a
#' quoted variable name as in (e.g.) \code{"age"}. Be careful with units
#' (age in days).
#'
#' @param index The index to be calculated and added to \code{data}. One of:
#' \describe{
#' \item{\code{bfa}}{BMI for age}
#' \item{\code{hca}}{Head circumference for age}
#' \item{\code{hfa}}{Height for age}
#' \item{\code{lfa}}{Length for age}
#' \item{\code{mfa}}{MUAC for age}
#' \item{\code{ssa}}{Sub-scapular skinfold for age}
#' \item{\code{tsa}}{Triceps skinfold for age}
#' \item{\code{wfa}}{Weight for age}
#' \item{\code{wfh}}{Weight for height}
#' \item{\code{wfl}}{Weight for length}
#' }
#'
#' Give a quoted index name as in (e.g.) \code{"wfh"}.
#'
#' @param standing Variable specifying how stature was measured. If NULL then age
#' (for \code{"hfa"} or \code{"lfa"}) or height rules (for \code{"wfh"} or
#' \code{"wfl"}) will be applied. This must be coded as \code{1 = Standing};
#' \code{2 = Supine}; \code{3 = Unknown}. All other values will be recoded to
#' \code{3 = Unknown}. Give a quoted variable name as in (e.g.) \code{"measured"}
#' or a single value (e.g.\code{"measured = 1"}). If no value (or NULL) is
#' specified then height and age rules will be applied.
#'
#' @return A numeric value or vector of z-scores for the specified \code{index}.
#'
#' @examples
#' # Given a male child 10 months old with a weight of 5.7 kgs, height of 64.2
#' # cms, and MUAC of 125 mm:
#' #
#' # Calculate weight-for-height
#' getWGSR(sex = 1,
#' firstPart = 5.7,
#' secondPart = 64.2,
#' index = "wfh",
#' standing = 3)
#'
#' # calculate weight-for-age
#' getWGSR(sex = 1,
#' firstPart = 5.7,
#' secondPart = 10,
#' index = "wfa",
#' standing = 3)
#'
#' # calculate height-for-age
#' getWGSR(sex = 1,
#' firstPart = 64.2,
#' secondPart = 10,
#' index = "hfa",
#' standing = 3)
#'
#' # Calculate MUAC-for-age z-score for a 62 month old boy with a MUAC of 20 cm
#' getWGSR(sex = 1,
#'         firstPart = 20,
#'         secondPart = 62 * (365.25 / 12),
#'         index = "mfa")
#'
#' @export
#'
#
################################################################################
getWGSR <- function(sex, firstPart, secondPart,
index = NA, standing = NA, thirdPart = NA) {
## Avoid missing and impossible values in 'standing' by coding NA and other values to '3'
if(is.na(standing) | !(standing %in% c(1, 2, 3))) {
standing = 3
}
## Unknown index specified - return NA
if(!(index %in% c("bfa", "hca", "hfa", "lfa", "mfa",
"ssa", "tsa", "wfa", "wfh", "wfl"))) {
return(NA)
}
## Missing data for 'sex', 'firstPart', or 'secondPart' - return NA
if(is.na(sex) | is.na(firstPart) | is.na(secondPart)) {
return(NA)
}
## 'sex' must be male (1) or female (2)
if(!(sex %in% c(1, 2))) {
return(NA)
}
## 'firstPart' or 'secondPart' are not numeric - return NA
if(!is.numeric(firstPart) | !is.numeric(secondPart)) {
return(NA)
}
## Missing 'thirdPart' (age) is missing for BMI-for-age - return NA
if(index == "bfa" & is.na(thirdPart)) {
return(NA)
}
## 'thirdPart' (age) is not numeric for BMI-for-age - return NA
if(index == "bfa" & !is.numeric(thirdPart)) {
return(NA)
}
## 'secondPart' is zero then BMI cannot be calculated
if(index == "bfa" & secondPart == 0) {
return(NA)
}
## Round lengths to nearest 0.1 cm
if(index %in% c("wfh", "wfl")) {
secondPart <- round(secondPart, 1)
}
## Round ages to the nearest day
if(index %in% c("hca", "hfa", "lfa", "mfa", "ssa", "tsa", "wfa")) {
secondPart <- round(secondPart, digits = 0)
}
if(index == "bfa") {
thirdPart <- round(thirdPart, 0)
}
## Rules for length-for-age and height-for-age indices
if(standing == 1 & (index == "lfa" | index == "hfa") & secondPart < 731) {
index <- "lfa"
firstPart <- firstPart + 0.7
}
if(standing == 2 & (index == "lfa" | index == "hfa") & secondPart < 731) {
index <- "lfa"
}
if(standing == 3 & (index == "lfa" | index == "hfa") & secondPart < 731) {
index <- "lfa"
}
if(standing == 1 & (index == "lfa" | index == "hfa") & secondPart >= 731) {
index <- "hfa"
}
if(standing == 2 & (index == "lfa" | index == "hfa") & secondPart >= 731) {
index <- "hfa"
firstPart <- firstPart - 0.7
}
if(standing == 3 & (index == "lfa" | index == "hfa") & secondPart >= 731) {
index <- "hfa"
}
## Rules for weight-for-length and weight-for-height indices
if(standing == 1 & (index == "wfl" | index == "wfh") & secondPart < 65) {
index = "wfl"
secondPart <- secondPart + 0.7
}
if(standing == 1 & (index == "wfl" | index == "wfh") & secondPart >= 65) {
index = "wfh"
}
if(standing == 2 & (index == "wfl" | index == "wfh") & secondPart <= 110) {
index = "wfl"
}
if(standing == 2 & (index == "wfl" | index == "wfh") & secondPart > 110) {
index = "wfh"
secondPart <- secondPart - 0.7
}
if(standing == 3 & (index == "wfl" | index == "wfh") & secondPart < 87) {
index = "wfl"
}
if(standing == 3 & (index == "wfl" | index == "wfh") & secondPart >= 87) {
index = "wfh"
}
## Rules for BMI-for-age index
if(standing == 1 & index == "bfa" & thirdPart < 731) {
secondPart <- secondPart + 0.7
}
if(standing == 2 & index == "bfa" & thirdPart >= 731) {
secondPart <- secondPart - 0.7
}
## Calculate BMI (as 'firstPart') and place age in 'secondPart'
if(index == "bfa") {
firstPart <- firstPart / (secondPart / 100)^2
secondPart <- thirdPart
}
## 'secondPart' is out of range for specified 'index' - return NA
rangeSecondPart <- range(wgsrData$given[wgsrData$index == index])
if(secondPart < rangeSecondPart[1] | secondPart > rangeSecondPart[2]) {
return(NA)
}
## Lookup reference values and calculate z-score
lkpIndexSex <- wgsrData[wgsrData$index == index & wgsrData$sex == sex, ]
L <- approx(lkpIndexSex$given, lkpIndexSex$l, xout = secondPart, ties = "ordered")$y
M <- approx(lkpIndexSex$given, lkpIndexSex$m, xout = secondPart, ties = "ordered")$y
S <- approx(lkpIndexSex$given, lkpIndexSex$s, xout = secondPart, ties = "ordered")$y
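  ## LMS method: z = ((X / M)^L - 1) / (L * S), where X is the measurement
  ## and L (power), M (median), S (coefficient of variation) are the
  ## interpolated reference values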
z <- (((firstPart / M) ^ L) - 1) / (L * S)
SD3pos <- M * (1 + L * S * (+3))^(1 / L)
SD2pos <- M * (1 + L * S * (+2))^(1 / L)
SD23pos <- SD3pos - SD2pos
SD3neg <- M * (1 + L * S * (-3))^(1 / L)
SD2neg <- M * (1 + L * S * (-2))^(1 / L)
SD23neg <- SD2neg - SD3neg
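  ## Beyond +/-3, express the z-score by linear extrapolation based on the
  ## distance between the 2SD and 3SD reference curves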
if(z > 3) z <- 3 + ((firstPart - SD3pos) / SD23pos)
if(z < -3) z <- -3 + ((firstPart - SD3neg) / SD23neg)
return(z)
}
| /scratch/gouwar.j/cran-all/cranData/zscorer/R/addWGSR.R |
################################################################################
#
#'
#' Calculate z-scores for WHZ, HAZ, WAZ using the WHO Growth Reference (2006)
#' for a single child data.
#'
#' @param data Data frame containing corresponding data on \code{sex},
#' \code{weight}, \code{height}, and \code{age} of children. Default is NULL.
#' If specified, parameters for \code{sex}, \code{weight}, \code{height} and
#' \code{age} should be provided as character values of the names of
#' variables in \code{data} corresponding to the parameters required.
#' @param sex Either numeric values (1 = male; 2 = female) indicating sex of
#' child (default) or character value (if \code{data} is specified) indicating
#' variable name in \code{data} containing information on sex of
#' child/children (1 = male; 2 = female).
#' @param weight Either numeric values for weight in kg with at least 1 decimal
#' place (default) or character value (if \code{data} is specified) indicating
#' variable name in \code{data} containing information on weight of
#' child/children.
#' @param height Either numeric values for height in cm with at least 1 decimal
#' place (default) or character value (if \code{data} is specified) indicating
#' variable name in \code{data} containing information on height of
#' child/children.
#' @param age Either numeric values for age in whole months (default) or character
#' value (if \code{data} is specified) indicating variable name in \code{data}
#' containing information on age of child/children.
#' @param index One of "wfh", "hfa", "wfa" (specifies the required index) or "all"
#' to calculate all three indices
#' @return Either a single numeric value for z-score of the anthropometric index
#' selected if data is for single child or a data frame of numeric values for
#' z-scores of each anthropometric index if data is for multiple children and
#' more than one anthropometric index selected.
#'
#' The function fails messily when the age or height given is outside of the
#' range of the WGS reference (i.e. 45 to 120 cm for height and 0 to 60 months
#' for age). It is up to you to check the ranges of your data.
#'
#' The reference data for W/H assumes supine length is used for children with a
#' standing height below 85cm
#'
#' Heights should be specified in cm to the nearest mm (i.e. to 1 d.p.)
#'
#' Ages should be specified in whole months
#'
#' Weights should be specified in kg to available precision
#'
#' The function requires reference data \code{wgsData} included in this package
#'
#' @examples
#' # apply \code{getAllWGS()} to a make believe 52 month old male child with weight of
#' # 14.6 kg and height of 98.0 cm
#' waz <- getAllWGS(sex = 1, # 1 = Male / 2 = Female
#' weight = 14.6, # Weight in kilograms
#' height = 98, # Height in centimetres
#' age = 52, # Age in whole months
#' index = "wfa") # Anthropometric index (weight-for-age)
#' waz
#'
#' haz <- getAllWGS(sex = 1,
#' weight = 14.6,
#' height = 98, # Height in centimetres
#' age = 52,
#' index = "hfa") # Anthropometric index (height-for-age)
#' haz
#'
#' whz <- getAllWGS(sex = 1,
#' weight = 14.6,
#' height = 98,
#' age = 52,
#' index = "wfh") # Anthropometric index (weight-for-height)
#' whz
#'
#' # apply \code{getAllWGS()} to \code{anthro1} dataset
#' waz <- getAllWGS(data = anthro1,
#' sex = "sex",
#' weight = "weight",
#' height = "height",
#' age = "age",
#' index = "wfa")
#' waz
#'
#' haz <- getAllWGS(sex = anthro1$sex,
#' weight = anthro1$weight,
#' height = anthro1$height,
#' age = anthro1$age,
#' index = "hfa")
#' haz
#'
#' all <- getAllWGS(data = anthro1,
#' sex = "sex",
#' weight = "weight",
#' height = "height",
#' age = "age",
#' index = "all")
#' all
#'
#' @export
#'
#
################################################################################
getAllWGS <- function(data = NULL, sex, weight, height, age, index) {
## If user selects index of "wfa" or "all"...
if(index == "wfa" | index == "all"){
## If user does not provide data (is.null(data))...
if(is.null(data)){
      ## If user provides non-numeric arguments for parameters...
      if(!is.numeric(weight) | !is.numeric(sex) | !is.numeric(age)){
        ## Stop operation
        stop("If data is not provided, sex, weight and age must be numeric. Try again.", call. = TRUE)
      }
      ## Calculate weight-for-age z-scores
waz <- mapply(FUN = getWGS,
sexObserved = sex,
firstPart = weight,
secondPart = age,
index = "wfa")
}
    ## If user provides data...
    if(!is.null(data)){
      ## If user provides non-character arguments for parameters...
      if(!is.character(sex) | !is.character(weight) | !is.character(age)){
        ## Stop operation
        stop("If data is provided, sex, weight and age must be character names of variables in data. Try again.", call. = TRUE)
      }
      ## Calculate weight-for-age z-scores
waz <- mapply(FUN = getWGS,
sexObserved = data [, sex],
firstPart = data[ , weight],
secondPart = data[ , age],
index = "wfa")
}
}
## If user selects index of haz or all...
if(index == "hfa" | index == "all"){
## If user does not provide data (is.null(data))...
if(is.null(data)){
      ## If user provides non-numeric arguments for parameters...
      if(!is.numeric(sex) | !is.numeric(height) | !is.numeric(age)){
        ## Stop operation
        stop("If data is not provided, sex, height and age must be numeric. Try again.", call. = TRUE)
      }
      ## Calculate height-for-age z-scores
haz <- mapply(FUN = getWGS,
sexObserved = sex,
firstPart = height,
secondPart = age,
index = "hfa")
}
    ## If user provides data...
    if(!is.null(data)){
      ## If user provides non-character arguments for parameters...
      if(!is.character(sex) | !is.character(height) | !is.character(age)){
        ## Stop operation
        stop("If data is provided, sex, height and age must be character names of variables in data. Try again.", call. = TRUE)
      }
      ## Calculate height-for-age z-scores
haz <- mapply(FUN = getWGS,
sexObserved = data[ , sex],
firstPart = data[ , height],
secondPart = data[ , age],
index = "hfa")
}
}
## If user selects index of "whz" or "all"...
if(index == "wfh" | index == "all"){
    ## If user does not provide data...
    if(is.null(data)){
      ## If user provides non-numeric arguments for parameters...
      if(!is.numeric(sex) | !is.numeric(weight) | !is.numeric(height)){
        ## Stop operation
        stop("If data is not provided, sex, weight and height must be numeric. Try again.", call. = TRUE)
      }
      ## Calculate weight-for-height z-scores
whz <- mapply(FUN = getWGS,
sexObserved = sex,
firstPart = weight,
secondPart = height,
index = "wfh")
}
    ## If user provides data...
    if(!is.null(data)){
      ## If user provides non-character arguments for parameters...
      if(!is.character(sex) | !is.character(weight) | !is.character(height)){
        ## Stop operation
        stop("If data is provided, sex, weight and height must be character names of variables in data. Try again.", call. = TRUE)
      }
      ## Calculate weight-for-height z-scores
whz <- mapply(FUN = getWGS,
sexObserved = data[ , sex],
firstPart = data[ , weight],
secondPart = data[ , height],
index = "wfh")
}
}
  ## Assemble the requested z-scores into a data frame
if(index == "wfa") { z <- data.frame(waz) }
if(index == "hfa") { z <- data.frame(haz) }
if(index == "wfh") { z <- data.frame(whz) }
if(index == "all") {
z <- data.frame(cbind(waz, haz, whz))
names(z) <- c("waz", "haz", "whz")
}
return(z)
}
| /scratch/gouwar.j/cran-all/cranData/zscorer/R/getAllWGS.R |
################################################################################
#
#'
#' Calculate z-scores for WHZ, HAZ, WAZ using the WHO Growth Reference (2006)
#' for a cohort or sample of children.
#'
#' @param data Data frame containing the variables needed for calculation
#' @param FUN Function to apply; default to \code{getWGS()}
#' @param sexObserved Sex of child (1 = Male; 2 = Female)
#' @param firstPart Weight (kg; for WHZ and WAZ) or height (cm; for HAZ)
#' @param secondPart Age (months; for HAZ and WAZ) or height (cm; for WHZ)
#' @param index One of "wfh", "hfa", "wfa" (specifies the required index)
#' @return Numeric vector of \code{z-scores} of the anthropometric index selected
#'
#' The function fails messily when \code{secondPart} is outside of the range
#' given in the WGS reference (i.e. 45 to 120 cm for height and 0 to 60 months
#' for age). It is up to you to check the ranges of your data.
#'
#' The reference data for W/H assumes supine length is used for children with a
#' standing height below 85cm
#'
#' Heights should be specified in cm to the nearest mm (i.e. to 1 d.p.)
#'
#' Ages should be specified in whole months
#'
#' Weights should be specified in kg to available precision
#'
#' The function requires reference data \code{wgsData} included in this package
#'
#' @examples
#' # apply getWGS to first child in sample data anthro1
#' wazAll <- getCohortWGS(data = anthro1,
#' sexObserved = "sex",
#' firstPart = "weight",
#' secondPart = "age",
#' index = "wfa")
#' wazAll
#'
#' hazAll <- getCohortWGS(data = anthro1,
#' sexObserved = "sex",
#' firstPart = "height",
#' secondPart = "age",
#' index = "hfa")
#' hazAll
#'
#' whzAll <- getCohortWGS(data = anthro1,
#' sexObserved = "sex",
#' firstPart = "weight",
#' secondPart = "height",
#' index = "wfh")
#' whzAll
#' @export
#'
#
################################################################################
getCohortWGS <- function(data, FUN = getWGS, sexObserved, firstPart, secondPart, index) {
  z <- mapply(FUN = FUN,
              data[ , sexObserved],
              data[ , firstPart],
              data[ , secondPart],
              index)
return(z)
}
| /scratch/gouwar.j/cran-all/cranData/zscorer/R/getCohortWGS.R |
################################################################################
#
#'
#' Calculate z-scores for WHZ, HAZ, WAZ using the WHO Growth Reference (2006)
#' for a single child data.
#'
#' @param sexObserved Sex of child (1 = Male; 2 = Female)
#' @param firstPart Weight (in kg for WHZ and WAZ) or height (in cm for HAZ)
#' @param secondPart Age (in months for HAZ and WAZ) or height (in cm for WHZ)
#' @param index One of "wfh", "hfa", "wfa" (specifies the required index)
#' @return z-score of the anthropometric index selected
#'
#' @section Warning:
#' The function fails messily when \code{secondPart} is outside of the range
#' given in the WGS reference (i.e. 45 to 120 cm for height and 0 to 60 months
#' for age). It is up to you to check the ranges of your data.
#'
#' @section Reminders:
#' The reference data for W/H assumes supine length is used for children with a
#' standing height below 85cm
#'
#' Heights should be specified in cm to the nearest mm (i.e. to 1 d.p.)
#'
#' Ages should be specified in whole months
#'
#' Weights should be specified in kg to available precision
#'
#' The function requires reference data \code{wgsData} included in this package
#'
#' @section Note:
#' This is a legacy function from the first CRAN release of \code{zscorer}
#' which focused mainly on the calculation of z-scores for weight-for-age,
#' weight-for-height and height-for-age. This function has been kept in the package
#' to ensure that existing analysis workflows implemented using the function
#' continue to work.
#'
#' @examples
#' # apply \code{getWGS()} to a make believe 52 month old male child with weight of
#' # 14.6 kg and height of 98.0 cm
#' waz <- getWGS(sexObserved = 1, # 1 = Male / 2 = Female
#' firstPart = 14.6, # Weight in kilograms
#' secondPart = 52, # Age in whole months
#' index = "wfa") # Anthropometric index (weight-for-age)
#' waz
#'
#' haz <- getWGS(sexObserved = 1,
#' firstPart = 98, # Height in centimetres
#' secondPart = 52,
#' index = "hfa") # Anthropometric index (height-for-age)
#' haz
#'
#' whz <- getWGS(sexObserved = 1,
#' firstPart = 14.6,
#' secondPart = 98,
#' index = "wfh") # Anthropometric index (weight-for-height)
#' whz
#'
#' @export
#'
#
################################################################################
getWGS <- function(sexObserved, firstPart, secondPart, index) {
if(is.na(sexObserved) | is.na(firstPart) | is.na(secondPart)) { return(NA) }
lookupRow <- subset(wgsData, indicator == index & sex == sexObserved & given == secondPart)
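  ## LMS method: z = ((X / M)^L - 1) / (L * S), where X is the measurement
  ## and l (power), m (median), s (coefficient of variation) are the
  ## reference values at the given age or height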
z <- (((firstPart / lookupRow$m) ^ lookupRow$l) - 1) / (lookupRow$l * lookupRow$s)
SD3pos <- lookupRow$m * (1 + lookupRow$l * lookupRow$s * (+3))^(1 / lookupRow$l)
SD2pos <- lookupRow$m * (1 + lookupRow$l * lookupRow$s * (+2))^(1 / lookupRow$l)
SD23pos <- SD3pos - SD2pos
SD3neg <- lookupRow$m * (1 + lookupRow$l * lookupRow$s * (-3))^(1 / lookupRow$l)
SD2neg <- lookupRow$m * (1 + lookupRow$l * lookupRow$s * (-2))^(1 / lookupRow$l)
SD23neg <- SD2neg - SD3neg
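  ## Beyond +/-3, express the z-score by linear extrapolation based on the
  ## distance between the 2SD and 3SD reference curves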
if(z > 3) { z <- 3 + ((firstPart - SD3pos) / SD23pos) }
if(z < -3) { z <- -3 + ((firstPart - SD3neg) / SD23neg) }
return(z)
}
| /scratch/gouwar.j/cran-all/cranData/zscorer/R/getWGS.R |
################################################################################
#
#'
#' Initialise built-in Shiny application
#'
#' @return NULL
#'
#' @examples
#' if(interactive()) run_zscorer()
#'
#' @export
#
run_zscorer <- function() {
appDir <- system.file("zscorer", package = "zscorer")
if (appDir == "") {
stop("Could not find Shiny directory. Try re-installing `zscorer`.",
call. = FALSE)
}
shiny::runApp(appDir, display.mode = "normal")
}
| /scratch/gouwar.j/cran-all/cranData/zscorer/R/runApp.R |
################################################################################
#
#'
#' Tool for calculating z-scores for weight-for-age, height-for-age,
#' weight-for-height, BMI-for-age, head circumference-for-age,
#' arm circumference-for-age, subscapular skinfold-for-age and
#' triceps skinfold-for-age z-score using the WHO Growth
#' Reference (2006)
#'
#' @references World Health Organization. WHO Child Growth Standards: Length/Height-for-age,
#' Weight-for-age, Weight-for-length, Weight-for-height, and Body Mass Index-for age:
#' Methods and Development. 1st ed. World Health Organization; 2006.
#' ISBN ISBN 92 4 154693 X
#'
#' @docType package
#' @name zscorer
#' @keywords internal
#' @importFrom utils read.table setTxtProgressBar txtProgressBar
#' @importFrom stats approx
#' @importFrom shiny runApp
#'
#
################################################################################
"_PACKAGE"
## quiets concerns of R CMD check re: the variable bindings that appear in zscorer
if(getRversion() >= "2.15.1") utils::globalVariables(c("wgsData","indicator",
"sex", "given"))
################################################################################
#
#' World Health Organization (WHO) Growth Reference (2006) data
#'
#' @format A data frame with 6 columns and 2338 rows.
#' \describe{
#'   \item{\code{indicator}}{One of weight-for-age (\code{wfa}),
#'     height-for-age (\code{hfa}), or weight-for-height (\code{wfh})
#'     anthropometric indicators}
#' \item{\code{sex}}{Sex of child (1 = Male; 2 = Female)}
#' \item{\code{given}}{Variable to which standardisation is to be made. For
#' \code{waz} and \code{haz}, \code{given} is age in months. For \code{whz},
#' \code{given} is height in cm}
#' \item{\code{l}}{\code{L} component of the LMS method for normalising growth
#' centile standards. \code{L} is the trend in the optimal power to obtain
#' normality}
#' \item{\code{m}}{\code{M} component of the LMS method for normalising growth
#' centile standards. \code{M} is the trend in the mean}
#' \item{\code{s}}{\code{S} component of the LMS method for normalising growth
#' centile standards. \code{S} is the trend in the coefficient of variation}
#' }
#'
#' @source \cite{World Health Organization. WHO Child Growth Standards:
#' Length/Height-for-age, Weight-for-age, Weight-for-length, Weight-for-height,
#' and Body Mass Index-for-age: Methods and Development. 1st ed.
#' World Health Organization; 2006.}
#'
#
################################################################################
"wgsData"
################################################################################
#
#'
#' Anthropometric data from a SMART survey in Kabul, Afghanistan.
#'
#' @format A data frame with 873 observations and 11 variables
#' \describe{
#' \item{\code{psu}}{Primary sampling unit}
#' \item{\code{age}}{Age of child (months)}
#' \item{\code{sex}}{Gender of child}
#' \item{\code{weight}}{Weight of child (kgs)}
#' \item{\code{height}}{Height of child (cm)}
#' \item{\code{muac}}{Mid-upper arm circumference (mm)}
#' \item{\code{oedema}}{Presence or absence of oedema}
#' \item{\code{haz}}{Height-for-age z-score}
#' \item{\code{waz}}{Weight-for-age z-score}
#' \item{\code{whz}}{Weight-for-height z-score}
#' \item{\code{flag}}{Data quality flag}
#' }
#
################################################################################
"anthro1"
################################################################################
#
#'
#' Anthropometric data from a single state from a Demographic and Health Survey
#' (DHS) of a West African country.
#'
#' @format A data frame with 796 observations and 6 variables
#' \describe{
#' \item{\code{psu}}{Primary sampling unit}
#' \item{\code{age}}{Age (months)}
#' \item{\code{sex}}{Gender}
#' \item{\code{wt}}{Weight (kg)}
#' \item{\code{ht}}{Height (cm)}
#' \item{\code{oedema}}{Presence or absence of oedema}
#' }
#
################################################################################
"anthro2"
################################################################################
#
#'
#' Anthropometric data from a Rapid Assessment Method (RAM) survey from Burundi.
#'
#' @format A data frame with 221 observations and 7 variables
#' \describe{
#' \item{\code{psu}}{Primary sampling unit}
#' \item{\code{age}}{Age (months)}
#' \item{\code{sex}}{Gender}
#' \item{\code{weight}}{Weight (kg)}
#' \item{\code{height}}{Height (cm)}
#' \item{\code{muac}}{Mid-upper arm circumference (cm)}
#' \item{\code{oedema}}{Presence or absence of oedema}
#' }
#
################################################################################
"anthro3"
################################################################################
#
#' A subset of mid-upper arm circumference data from a study conducted to
#' create MUAC-for-age z-scores
#'
#' @format A data.frame with 257 observations and 4 variables
#' \describe{
#' \item{\code{pk_serial}}{Unique identifier}
#' \item{\code{muac}}{Mid-upper arm circumference in centimetres}
#' \item{\code{agemons}}{Age in months}
#' \item{\code{sex}}{Sex; 1 = Male; 2 = Female}
#' }
#'
#' @source Mramba Lazarus, Ngari Moses, Mwangome Martha, Muchai Lilian, Bauni
#' Evasius, Walker A Sarah et al. A growth reference for mid upper arm
#' circumference for age among school age children and adolescents, and
#' validation for mortality: growth curve construction and longitudinal
#' cohort study. BMJ 2017;358:j3423 \url{https://doi.org/10.1136/bmj.j3423}
#'
#
################################################################################
"anthro4"
| /scratch/gouwar.j/cran-all/cranData/zscorer/R/zscorer.R |
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/doc/anthropometry.R |
---
title: "WHO Child Growth Standards: Methods and Development"
author: "World Health Organisation"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{WHO Child Growth Standards: Methods and Development}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
In 1993 the World Health Organization (WHO) undertook a comprehensive review of the uses and interpretation of anthropometric references. The review concluded that the NCHS/WHO growth reference, which had been recommended for international use since the late 1970s, did not adequately represent early childhood growth and that new growth curves were necessary. The World Health Assembly endorsed this recommendation in 1994. In response WHO undertook the Multicentre Growth Reference Study (MGRS) between 1997 and 2003 to generate new curves for assessing the growth and development of children the world over.
The MGRS combined a longitudinal follow-up from birth to 24 months and a cross-sectional survey of children aged 18 to 71 months. Primary growth data and related information were gathered from 8440 healthy breastfed infants and young children from widely diverse ethnic backgrounds and cultural settings (Brazil, Ghana, India, Norway, Oman and USA). The MGRS is unique in that it was purposely designed to produce a standard by selecting healthy children living under conditions likely to favour the achievement of their full genetic growth potential. Furthermore, the mothers of the children selected for the construction of the standards engaged in fundamental health-promoting practices, namely breastfeeding and not smoking.
This report presents the first set of WHO Child Growth Standards (i.e. length/height-for-age, weight- for-age, weight-for-length, weight-for-height and body mass index (BMI)-for-age) and describes the methodical process followed in their development. The first step in this process was a consultative expert review of some 30 growth curve construction methods, including types of distributions and smoothing techniques to identify the best approach to constructing the standards. Next was the selection of a software package flexible enough to allow the comparative testing of the alternative methods used to generate the growth curves. Then the selected approach was applied systematically to search for the best models to fit the data for each indicator.
The Box-Cox-power-exponential (BCPE) method, with curve smoothing by cubic splines, was selected for constructing the WHO child growth curves. The BCPE accommodates various kinds of distributions, from normal to skewed or kurtotic. The age-based indicators originating at birth required a power-transformation to stretch the age scale (x-axis) as a preliminary step to fitting the curves. For each set of curves, the search for the best model specification began by examining various combinations of degrees of freedom to fit the median and variance estimator curves. When data had a non-normal distribution, degrees of freedom for parameters to model skewness and kurtosis were added to the initial model and adequacy of fit evaluated. Apart from length/height-for-age, which followed a normal distribution, the other standards required the modelling of skewness, but not kurtosis. The diagnostic tools used iteratively to detect possible model misfits and biases in the fitted curves included various tests of local and global goodness of fit, worm plots and residual plots. Patterns of differences between empirical and fitted percentiles were also examined, as were proportions of observed versus expected percentages of children with measurements below selected percentiles.
The methodology described above was followed to generate for boys and girls aged 0 to 60 months percentile and z-score curves for length/height-for-age, weight-for-age, weight-for-length, weight-for-height and BMI-for-age. The last standard is an addition to the set of indicators previously available as part of the NCHS/WHO reference. In-depth descriptions are presented of how each sex-specific standard was constructed. Also presented are comparisons of the new WHO standards with the NCHS/WHO growth reference and the CDC 2000 growth charts.
To interpret differences between the WHO standards and the NCHS/WHO reference it is important to understand that they reflect differences not only in the populations used, but also in the methodologies applied to construct the two sets of growth curves. To address the significant skewness of the NCHS/WHO sample's weight-for-age and weight-for-height, separate standard deviations were calculated for distributions below and above the median for each of the two indicators. This approach is limited in fitting skewed data, especially at the extreme tails of the distribution, since it only partially adjusts for the skewness inherent in the weight-based indicators. The WHO standards, on the other hand, employed LMS-based methods that fit skewed data adequately and generate fitted curves that follow closely the empirical data. Like the WHO standards, construction of the CDC 2000 growth charts was also based on the LMS method and, therefore, differences between this reference and the WHO standards are largely a reflection of differences in the populations on which the two sets of curves were based.
*Length/height-for-age.* The standard for linear growth has a part based on length (length-for-age, 0 to 24 months) and another on height (height-for-age, 2 to 5 years). The two parts were constructed using the same model but the final curves reflect the average difference between recumbent length and standing height. By design, children between 18 and 30 months in the cross-sectional component of the MGRS had both length and height measurements taken. The average difference between the two measurements in this set of 1625 children was 0.73 cm. To fit a single model for the whole age range, 0.7 cm was therefore added to the cross-sectional height values before merging them with the longitudinal sample's length data. After the model was fitted, the median curve was shifted back downwards by 0.7 cm for ages above two years, and the coefficient of variation curve adjusted to the new median values to construct the height-for-age growth curves. The same power transformation of age was applied to stretch the age scale for each of the sexes before fitting cubic splines to generate their respective growth curves. The boys' curves required a model with higher degrees of freedom to fit both the median and coefficient of variation curves. The data for both sexes followed the normal distribution.
*Weight-for-age.* The weights of the longitudinal and cross-sectional samples were merged without any adjustments and a single model was fitted to generate one continuous set of curves constituting each sex-specific weight-for-age standard. The same power transformation was applied to both boys' and girls' age before fitting the curve construction model. The weight data for both sexes were skewed, so in specifying the model, the parameter related to skewness was fitted in addition to the median and the approximate coefficient of variation. In modelling skewness the girls' curves required more degrees of freedom to fit a curve for this parameter.
*Weight-for-length/height.* The construction of the weight-for-length (45 to 110 cm) and weight-for-height (65 to 120 cm) standards followed a procedure similar to that applied to construct the length/height-for-age standards. That is, to fit a single model, 0.7 cm was added to the cross-sectional height values, and after the model was fitted, the weight-for-length centile curves in the length interval 65.7 to 120.7 cm were shifted back by 0.7 cm to derive the weight-for-height standards corresponding to the height range 65 cm to 120 cm. The lower limit of the weight-for-length standards (45 cm) was chosen to cover up to approximately -2 SD girls' length at birth. The upper limit for the weight-for-height standards was influenced by the need to accommodate the tallest children at age 60 months, that is, 120 cm is approximately +2 SD boys' height-for-age at 60 months. The overlap between the upper end of the weight-for-length standards and the lower end of the weight-for-height standards is intended to facilitate their application in severely undernourished populations and emergency settings.
There was no evidence that a length/height transformation similar to that described for age was required for constructing the weight-for-length/height standards. The modelling of the median and variance curves followed the procedure described for the first two standards. Results from the final model for girls' weight-for-length/height suggested the need to investigate potential improvements in the curves by modelling kurtosis. Adjustment for kurtosis, however, had a negligible impact on the final centiles. Therefore, considering that modelling the fourth parameter would increase complexity in application of the standards and create inconsistency between the sexes, the final curves were generated without adjusting for kurtosis. The degrees of freedom for the median and variance curves varied between the boys' and girls' standards. The fact that the weight-for-length/height indicator combines different velocities for the two measurements involved (weight and length/height) at overlapping ages likely explains the slight wiggle in the final WHO standards (for both boys and girls) as also observed in other references.
*Body mass index-for-age.* Body mass index is the ratio weight (in kg)/recumbent length or standing height (in m^2^). To address the difference between length and height, the approach used for constructing the BMI-for-age standards was different from that described for length/height-for-age. Because BMI is a ratio with squared length or height in the denominator, adding 0.7 cm to the height values and back-transforming them after fitting was not feasible. The solution adopted was to construct the standards for the younger and the older children separately based on two sets of data with an overlapping range of ages below and above 24 months. To construct the BMI-for-age standard based on length (0 to 2 years), the longitudinal sample's length data and the cross-sectional sample's height data (18 to 30 months) were combined after adding 0.7 cm to the height values. Analogously, to construct the standard from 2 to 5 years, the cross-sectional sample's height plus the longitudinal sample's length data (18 to 24 months) were combined after subtracting 0.7 cm from the length values. Thus, a common set of data from 18 to 30 months was used to generate the BMI standards for the younger and the older children. The resulting disjunction between the two standards thus in essence reflects the 0.7 cm difference between length and height. This does not mean, however, that a child at a specific age will have the same length- and height-based BMI-for-age z-score as this is mathematically impossible given the nature of the BMI ratio.
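As a simple added illustration of the BMI calculation itself (this example is not part of the original WHO text):
```{r bmiExample, echo = TRUE, eval = FALSE}
# BMI of a child weighing 14.6 kg with a height of 98 cm
weight <- 14.6      # kg
height <- 98 / 100  # convert cm to metres
weight / height^2   # approximately 15.2 kg/m^2
```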
An age power transformation as described for the other age-based standards was required before constructing the length-based BMI-for-age curves. No such transformation was necessary for the height-based BMI-for-age. The WHO length- and height-based BMI-for-age standards do not overlap, i.e. the length-based interval ends at 730 days and the height-based interval starts at 731 days. Cubic spline fitting was achieved with variable degrees of freedom for the length- versus height-based standards, and also for the boys' versus girls' final curves.
*Technical aspects of the standards.* The method used to construct the WHO standards generally relied on the Box-Cox power exponential distribution and the final selected models simplified to the LMS model. As a result, the computation of percentiles and z-scores for these standards uses formulae based on the LMS method. However, a restriction was imposed on all indicators to enable the derivation of percentiles only within the interval corresponding to z-scores between -3 and 3. The underlying reasoning is that percentiles beyond ±3 SD are invariant to changes in equivalent z-scores. The loss accruing to this restriction is small since the inclusion range corresponds to the 0.135th to 99.865th percentiles.
The weight-based indicators presented right-skewed distributions. When modelled correctly, right skewness has the effect of making distances between positive z-scores increase progressively the farther away they are from the median, while distances between negative z-scores decrease progressively. The LMS method fits skewed data adequately by using a Box-Cox normal distribution, which follows the empirical data closely. The drawback, however, is that the outer tails of the distribution are highly affected by extreme data points even if only very few. A restricted application of the LMS method was thus used for the construction of the WHO weight-based indicators, limiting the Box-Cox normal distribution to the interval corresponding to z-scores where empirical data were available (i.e. between -3 SD and 3 SD). Beyond these limits, the standard deviation at each age (or length/height) was fixed to the distance between ±2 SD and ±3 SD, respectively. This approach avoids making assumptions about the distribution of data beyond the limits of the observed values.
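The LMS computation described above can be sketched in R as follows (an added illustration mirroring the formulae used in this package, not part of the original WHO text; `y` is the observed measurement and `l`, `m` and `s` are the LMS parameters at the child's age or length/height):
```{r lmsSketch, echo = TRUE, eval = FALSE}
lmsZ <- function(y, l, m, s) {
  ## LMS z-score
  z <- (((y / m) ^ l) - 1) / (l * s)
  ## Measurement values at the +/-2 SD and +/-3 SD boundaries
  sd3pos <- m * (1 + l * s * 3) ^ (1 / l)
  sd2pos <- m * (1 + l * s * 2) ^ (1 / l)
  sd3neg <- m * (1 + l * s * -3) ^ (1 / l)
  sd2neg <- m * (1 + l * s * -2) ^ (1 / l)
  ## Beyond +/-3, fix the SD to the distance between the 2 SD and 3 SD boundaries
  if (z > 3) z <- 3 + (y - sd3pos) / (sd3pos - sd2pos)
  if (z < -3) z <- -3 + (y - sd3neg) / (sd2neg - sd3neg)
  z
}
```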
*Epidemiological aspects of the standards.* As expected, there are notable differences with the NCHS/WHO reference that vary by age, sex, anthropometric measure and specific percentile or z-score curve. Differences are particularly important in infancy. Stunting will be greater throughout childhood when assessed using the new WHO standards compared to the NCHS/WHO reference. The growth pattern of breastfed infants will result in a substantial increase in rates of underweight during the first half of infancy and a decrease thereafter. For wasting, the main difference is during infancy when wasting rates will be substantially higher using the new WHO standards. With respect to overweight, use of the new WHO standards will result in a greater prevalence that will vary by age, sex and nutritional status of the index population.
The growth standards presented in this report provide a technically robust tool that represents the best description of physiological growth for children under five years of age. The standards depict normal early childhood growth under optimal environmental conditions and can be used to assess children everywhere, regardless of ethnicity, socioeconomic status and type of feeding.
*Taken from:*
World Health Organization. WHO Child Growth Standards: Length/Height-for-age, Weight-for-age, Weight-for-length, Weight-for-height, and Body Mass Index-for-age: Methods and Development. 1st ed. World Health Organization; 2006. ISBN 92 4 154693 X
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/doc/anthropometry.Rmd |
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
if(!require(zscorer)) install.packages("zscorer")
## ----usage1, echo = TRUE, eval = FALSE-----------------------------------
# anthro3
## ----usage1a, echo = FALSE, eval = TRUE----------------------------------
head(anthro3)
## ----usage2, echo = TRUE, eval = TRUE------------------------------------
svy <- addWGSR(data = anthro3, sex = "sex", firstPart = "weight",
secondPart = "height", index = "wfh")
## ----usage2a, echo = FALSE, eval = TRUE----------------------------------
head(svy)
## ----usage2b, echo = TRUE, eval = FALSE----------------------------------
# ?addWGSR
## ----usage3, echo = TRUE, eval = TRUE------------------------------------
table(is.na(svy$wfhz))
## ----usage4, echo = TRUE, eval = TRUE------------------------------------
svy[is.na(svy$wfhz), ]
## ----usage5, echo = TRUE, eval = TRUE------------------------------------
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "age", index = "wfa")
## ----usage5a, echo = TRUE, eval = TRUE-----------------------------------
summary(svy$wfaz)
## ----usage5b, echo = TRUE, eval = TRUE-----------------------------------
svy$age <- svy$age * (365.25 / 12)
head(svy)
## ----usage5c, echo = TRUE, eval = TRUE-----------------------------------
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "age", index = "wfa")
head(svy)
summary(svy$wfaz)
## ----usage6, echo = TRUE, eval = TRUE------------------------------------
svy$muac <- svy$muac / 10
head(svy)
## ----usage6a, echo = TRUE, eval = TRUE-----------------------------------
svy <- addWGSR(svy, sex = "sex", firstPart = "muac",
secondPart = "age", index = "mfa")
head(svy)
## ----usage7, echo = TRUE, eval = TRUE------------------------------------
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "height", thirdPart = "age", index = "bfa",
output = "bmiAgeZ", digits = 4)
head(svy)
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/doc/calculate_zscore.R |
---
title: "Calculating anthropometric z-scores using zscorer"
author: "Mark Myatt"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Calculating anthropometric z-scores using zscorer}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
if(!require(zscorer)) install.packages("zscorer")
```
# Calculating anthropometric z-scores using `zscorer`
The main function in the `zscorer` package is `addWGSR()`.
To demonstrate its usage, we will use the accompanying dataset in `zscorer` called `anthro3`. We inspect the dataset as follows:
```{r usage1, echo = TRUE, eval = FALSE}
anthro3
```
which returns:
```{r usage1a, echo = FALSE, eval = TRUE}
head(anthro3)
```
`anthro3` contains anthropometric data from a Rapid Assessment Method (RAM) survey from Burundi.
Anthropometric indices (e.g. weight-for-height z-scores) have not been calculated and added to the data.
We will use the `addWGSR()` function to add weight-for-height (wfh) z-scores to the example data:
```{r usage2, echo = TRUE, eval = TRUE}
svy <- addWGSR(data = anthro3, sex = "sex", firstPart = "weight",
secondPart = "height", index = "wfh")
```
A new column named **wfhz** has been added to the dataset:
```{r usage2a, echo = FALSE, eval = TRUE}
head(svy)
```
The `wfhz` column contains the weight-for-height (wfh) z-scores calculated from the `sex`, `weight`, and `height` columns in the `anthro3` dataset. The calculated z-scores are rounded to two decimal places unless the `digits` option is used to specify a different precision (run `?addWGSR` to see a description of the various parameters that can be specified in the `addWGSR()` function).
The `addWGSR()` function takes up to nine parameters to calculate each index separately, depending on the index required. These are described in the *Help* files of the `zscorer` package which can be accessed as follows:
```{r usage2b, echo = TRUE, eval = FALSE}
?addWGSR
```
The **standing** parameter specifies how “stature” (i.e. length or height) was measured. If this is not specified, and in some special circumstances, height and age rules will be applied when calculating z-scores. These rules are described in the table below; a minimal sketch of how the height-for-age rules can be applied follows the table.
+---------------+---------------+---------------+---------------+----------------------------------------+
| **index** | **standing** | **age** | **height** | **Action** |
+===============+===============+===============+===============+========================================+
| hfa or lfa | standing | < 731 days | | index = lfa |
| | | | | height = height + 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | supine | < 731 days | | index = lfa |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | unknown | < 731 days | | index = lfa |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | standing | ≥ 731 days | | index = hfa |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | supine | ≥ 731 days | | index = hfa |
| | | | | height = height - 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | unknown | ≥ 731 days | | index = hfa |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | standing | | < 65 cm | index = wfl |
| | | | | height = height + 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | standing | | ≥ 65 cm | index = wfh |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | supine | | ≤ 110 cm | index = wfl |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl    | supine        |               | > 110 cm      | index = wfh                            |
|               |               |               |               | height = height - 0.7 cm               |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | unknown | | < 87 cm | index = wfl |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | unknown | | ≥ 87 cm | index = wfh |
+---------------+---------------+---------------+---------------+----------------------------------------+
| bfa | standing | < 731 days | | height = height + 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| bfa | standing | ≥ 731 days | | height = height - 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
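To make the height-for-age rules concrete, here is a minimal sketch (an illustration only, not the package's internal code) of how the first six rows of the table could be applied to a single record, with `standing` coded 1 = standing, 2 = supine, 3 = unknown and `age` in days:
```{r standingRules, echo = TRUE, eval = FALSE}
applyHfaRule <- function(standing, age, height) {
  if (age < 731) {
    index <- "lfa"
    if (standing == 1) height <- height + 0.7  # measured standing before 731 days
  } else {
    index <- "hfa"
    if (standing == 2) height <- height - 0.7  # measured supine at/after 731 days
  }
  list(index = index, height = height)
}
```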
The `addWGSR()` function will not produce error messages unless there is something very wrong with the data or the specified parameters. If an error is encountered in a record then the value **NA** is returned. Error conditions are listed in the table below.
+--------------------------------------------------+----------------------------------------+
| **Error condition** | **Action** |
+==================================================+========================================+
| Missing or nonsense value in `standing` parameter| Set `standing` to `3` (unknown) and |
| | apply appropriate height or age rules. |
+--------------------------------------------------+----------------------------------------+
| Unknown `index` specified | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| Missing `sex` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| Missing `firstPart` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| Missing `secondPart` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `sex` is not male (`1`) or female (`2`) | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `firstPart` is not numeric | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `secondPart` is not numeric | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| Missing `thirdPart` when `index = "bfa"` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `thirdPart` is not numeric when `index = "bfa"` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `secondPart` is out of range for specified index | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
We can see this error behaviour using the example data:
```{r usage3, echo = TRUE, eval = TRUE}
table(is.na(svy$wfhz))
```
We can display the problem record:
```{r usage4, echo = TRUE, eval = TRUE}
svy[is.na(svy$wfhz), ]
```
The problem is due to the value **9** in the `sex` column, which should be coded **1** (for male) or **2** (for female). Z-scores are only calculated for records with sex specified as either **1** (male) or **2** (female). All other values, including **NA**, will return **NA**.
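If needed, such values can be recoded to missing before calculating z-scores (a simple illustration, not part of the original survey workflow):
```{r usage4a, echo = TRUE, eval = FALSE}
## Recode any sex value other than 1 or 2 to NA
svy$sex[!svy$sex %in% c(1, 2)] <- NA
```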
The `addWGSR()` function requires that data are recorded using the required units or required codes (see `?addWGSR` to check units required by the different function parameters).
The `addWGSR()` function will return incorrect values if the data are not recorded using the required units. For example, this attempt to add weight-for-age z-scores to the example data:
```{r usage5, echo = TRUE, eval = TRUE}
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "age", index = "wfa")
```
will give incorrect results:
```{r usage5a, echo = TRUE, eval = TRUE}
summary(svy$wfaz)
```
The odd range of values is due to age being recorded in months rather than days.
It is simple to convert all ages from months to days:
```{r usage5b, echo = TRUE, eval = TRUE}
svy$age <- svy$age * (365.25 / 12)
head(svy)
```
before calculating and adding weight-for-age z-scores:
```{r usage5c, echo = TRUE, eval = TRUE}
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "age", index = "wfa")
head(svy)
summary(svy$wfaz)
```
The `muac` column in the example dataset is recorded in millimetres (mm). We need to convert this to centimetres (cm):
```{r usage6, echo = TRUE, eval = TRUE}
svy$muac <- svy$muac / 10
head(svy)
```
before using the `addWGSR()` function to calculate MUAC-for-age z-scores:
```{r usage6a, echo = TRUE, eval = TRUE}
svy <- addWGSR(svy, sex = "sex", firstPart = "muac",
secondPart = "age", index = "mfa")
head(svy)
```
As a last example, we will use the `addWGSR()` function to add body mass index-for-age (`bfa`) z-scores to the data, creating a new variable called `bmiAgeZ` with a precision of 4 decimal places:
```{r usage7, echo = TRUE, eval = TRUE}
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "height", thirdPart = "age", index = "bfa",
output = "bmiAgeZ", digits = 4)
head(svy)
```
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/doc/calculate_zscore.Rmd |
## ----setup, include = FALSE----------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
if(!require(zscorer)) install.packages("zscorer")
## ----example1, eval = TRUE-----------------------------------------------
# weight-for-age z-score
waz <- getWGS(sexObserved = 1, # 1 = Male / 2 = Female
firstPart = 14.6, # Weight in kilograms up to 1 decimal place
secondPart = 52, # Age in whole months
index = "wfa") # Anthropometric index (weight-for-age)
waz
# height-for-age z-score
haz <- getWGS(sexObserved = 1,
firstPart = 98, # Height in centimetres
secondPart = 52,
index = "hfa") # Anthropometric index (height-for-age)
haz
# weight-for-height z-score
whz <- getWGS(sexObserved = 1,
firstPart = 14.6,
secondPart = 98,
index = "wfh") # Anthropometric index (weight-for-height)
whz
## ----sample-data1, eval = FALSE------------------------------------------
# # Make a call for the anthro1 dataset
# anthro1
## ---- echo = FALSE, eval = TRUE------------------------------------------
library(zscorer)
## ----sample-data2, eval = TRUE-------------------------------------------
head(anthro1)
## ----example2, eval = TRUE-----------------------------------------------
# weight-for-age z-score
waz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "weight",
secondPart = "age",
index = "wfa")
head(waz, 50)
# height-for-age z-score
haz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "height",
secondPart = "age",
index = "hfa")
head(haz, 50)
# weight-for-height z-score
whz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "weight",
secondPart = "height",
index = "wfh")
head(whz, 50)
## ----example3, eval = TRUE-----------------------------------------------
# weight-for-age z-score
zScores <- getAllWGS(data = anthro1,
sex = "sex",
weight = "weight",
height = "height",
age = "age",
index = "all")
head(zScores, 20)
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/doc/legacy_functions.R |
---
title: "Legacy functions"
author: "Ernest Guevarra"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Legacy functions}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
if(!require(zscorer)) install.packages("zscorer")
```
# Legacy functions
Earlier versions (pre-release and v0.1.0) of `zscorer` used different functions that calculated only three anthropometric indices: **weight-for-age**, **height-for-age** and **weight-for-height**. Also, these functions used a simplified construct of the WHO Growth Reference in which children's ages were recorded in months rather than in days as in the standard WHO Growth Reference.
With the developers' recent work on anthropometric data quality processes implemented in R (see [nipnTK](https://nutriverse.io/nipnTK)), a more consistent and standard set of functions was deemed necessary to calculate not just three but all anthropometric indices used in the WHO Growth Standards, and to make the children's age in the reference data consistent with the standard by using days. This work has culminated in the current `zscorer` functions.
For the purposes of backwards compatibility, and to keep a record of the codebase of previous versions of the functions, the legacy functions have been kept in `zscorer`. This vignette describes those functions and shows examples of how to use them.
For new users of `zscorer`, the developers recommend learning and using the new functions instead of these legacy functions. For previous `zscorer` users, the developers recommend reviewing past code that uses the legacy functions and, if feasible, adapting it to the new functions.
## Calculating z-scores using the legacy functions
The `zscorer` package comes with the original legacy functions included in its `version 0.1.0`. These functions allow for the calculation of **weight-for-age**, **height-for-age** and **weight-for-height** z-scores for individual children and for a cohort of children.
### Calculating z-score for each of the three anthropometric indices for a single child
For this example, we will use the `getWGS()` function and apply it to dummy data of a **52 month** old male child with a weight of **14.6 kg** and a height of **98.0 cm**.
```{r example1, eval = TRUE}
# weight-for-age z-score
waz <- getWGS(sexObserved = 1, # 1 = Male / 2 = Female
firstPart = 14.6, # Weight in kilograms up to 1 decimal place
secondPart = 52, # Age in whole months
index = "wfa") # Anthropometric index (weight-for-age)
waz
# height-for-age z-score
haz <- getWGS(sexObserved = 1,
firstPart = 98, # Height in centimetres
secondPart = 52,
index = "hfa") # Anthropometric index (height-for-age)
haz
# weight-for-height z-score
whz <- getWGS(sexObserved = 1,
firstPart = 14.6,
secondPart = 98,
index = "wfh") # Anthropometric index (weight-for-height)
whz
```
Applying the `getWGS()` function results in a calculated `z-score` for one child.
### Calculating z-score for each of the three anthropometric indices for a cohort or sample of children
For this example, we will use the `getCohortWGS()` function and apply it to sample data `anthro1` that came with `zscorer`.
```{r sample-data1, eval = FALSE}
# Make a call for the anthro1 dataset
anthro1
```
As you will see, this dataset has the 4 variables you will need to use with `getCohortWGS()` to calculate the `z-score` for the corresponding anthropometric index. These are `age`, `sex`, `weight` and `height`.
```{r, echo = FALSE, eval = TRUE}
library(zscorer)
```
```{r sample-data2, eval = TRUE}
head(anthro1)
```
To calculate the three anthropometric indices for all the children in the sample, we execute the following commands in R:
```{r example2, eval = TRUE}
# weight-for-age z-score
waz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "weight",
secondPart = "age",
index = "wfa")
head(waz, 50)
# height-for-age z-score
haz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "height",
secondPart = "age",
index = "hfa")
head(haz, 50)
# weight-for-height z-score
whz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "weight",
secondPart = "height",
index = "wfh")
head(whz, 50)
```
Applying the `getCohortWGS()` function results in a vector of calculated `z-scores` for all children in the cohort or sample.
### Calculating z-scores for all of the three anthropometric indices in one function
For this example, we will use the `getAllWGS()` function and apply it to sample data `anthro1` that came with `zscorer`.
```{r example3, eval = TRUE}
# weight-for-age z-score
zScores <- getAllWGS(data = anthro1,
sex = "sex",
weight = "weight",
height = "height",
age = "age",
index = "all")
head(zScores, 20)
```
Applying the `getAllWGS()` function results in a data frame of calculated `z-scores` for all children in the cohort or sample for all the anthropometric indices.
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/doc/legacy_functions.Rmd |
## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(zscorer)
## ----runApp, echo = FALSE, eval = FALSE----------------------------------
# run_zscorer()
## ----zscorerFrontpage, echo = FALSE, eval = TRUE, out.width = "90%"------
knitr::include_graphics("../man/figures/zscorerApp.png")
## ----single, echo = FALSE, eval = TRUE, out.width = "30%"----------------
knitr::include_graphics("../man/figures/single.png")
## ----singleMenu, echo = FALSE, eval = TRUE, out.width = "30%"------------
knitr::include_graphics("../man/figures/singleSidebar.png")
## ----sidebarSingleInput, echo = FALSE, eval = TRUE, out.width = "30%"----
knitr::include_graphics("../man/figures/sidebarSingleInput.png")
## ----singleResults, echo = FALSE, eval = TRUE, out.width = "90%"---------
knitr::include_graphics("../man/figures/singleResults.png")
## ----cohort, echo = FALSE, eval = TRUE, out.width = "30%"----------------
knitr::include_graphics("../man/figures/cohort.png")
## ----cohortMenu, echo = FALSE, eval = TRUE, out.width = "30%"------------
knitr::include_graphics("../man/figures/cohortSidebar.png")
## ----cohortOptions, echo = FALSE, eval = TRUE, out.width = "30%"---------
knitr::include_graphics("../man/figures/sidebarCohortOptions.png")
## ----cohortResults, echo = FALSE, eval = TRUE, out.width = "90%"---------
knitr::include_graphics("../man/figures/cohortResults.png")
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/doc/using-shiny-app.R |
---
title: "Using zscorer Shiny application"
author: "Ernest Guevarra"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using zscorer Shiny application}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(zscorer)
```
`zscorer` comes packaged with a built-in Shiny application. The application is initiated by issuing the following command:
```{r runApp, echo = TRUE, eval = FALSE}
run_zscorer()
```
This opens a web browser on the user's device showing the following:
```{r zscorerFrontpage, echo = FALSE, eval = TRUE, out.width = "90%"}
knitr::include_graphics("../man/figures/zscorerApp.png")
```
# Getting the z-score of a single child
The first (and default) functionality of the built-in Shiny application is for calculating the z-scores for a single child. This functionality is accessed by selecting the `single` option on the tabbed menu available in the top edge of the application interface as shown below:
```{r single, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/single.png")
```
When this functionality is selected, the sidebar menu provides the following options:
```{r singleMenu, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/singleSidebar.png")
```
In this sidebar menu, the user should enter all the available anthropometric measurements for the child whose z-scores are to be calculated. For the calculation to work, the minimum data to be supplied by the user are:
* **Age**;
* **Sex**; and,
* At least one anthropometric measurement among **weight (kgs)**, **height (cms)**, **mid-upper arm circumference (cms)**, **head circumference (cms)**, **subscapular skinfold (cms)** or **triceps skinfold (cms)**.
Please ensure that any anthropometric measurement is supplied in the appropriate units as indicated above.
To illustrate, let's use the dummy data of a **52 month** old **male** child with a weight of **14.6 kg** and a height of
**98.0 cm**. These data are typed into the sidebar menu, after which the user clicks on the `Calculate` button, as shown below:
```{r sidebarSingleInput, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/sidebarSingleInput.png")
```
This produces the following output in the main panel of the application:
```{r singleResults, echo = FALSE, eval = TRUE, out.width = "90%"}
knitr::include_graphics("../man/figures/singleResults.png")
```
Depending on the anthropometric measurements supplied, the corresponding applicable z-score indices will be provided. No z-score result will be provided if the required anthropometric measurement is not supplied. In the example above, MUAC-for-age, head circumference-for-age, subscapular skinfold-for-age and triceps skinfold-for-age z-scores are not provided, as no MUAC, head circumference, subscapular skinfold or triceps skinfold measurements were supplied.
# Getting the z-score of a cohort of children
The second functionality of the built-in Shiny application is for calculating the z-scores for a cohort of children. This functionality is accessed by selecting the `cohort` option on the tabbed menu available in the top edge of the application interface as shown below:
```{r cohort, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/cohort.png")
```
When this functionality is selected, the sidebar menu provides the following options:
```{r cohortMenu, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/cohortSidebar.png")
```
In this sidebar menu, the user is expected to upload a dataset of children with their anthropometric measurements and to select which anthropometric indices to calculate. If no anthropometric indices are selected, no calculations will be performed. Cohort data can be uploaded by clicking on the `Browse` button and then selecting the file with the cohort data. Once uploaded, the sidebar menu will provide the following additional options:
```{r cohortOptions, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/sidebarCohortOptions.png")
```
For the `zscorer` app to be able to calculate z-scores from the cohort data, the variables corresponding to the required information need to be supplied. By default, the application will search the variable names in the data and pick out typical variable names for the measurements (e.g., `ht`, `height`, `HT`, `Height` or `HEIGHT` for height measurements). If the variables are named unconventionally, the user will have to select the variable name for the particular measurement from the choices. If the measurements are not in the data, then the variable field should be left empty.
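The matching of typical variable names can be sketched as follows (a simplified illustration of this approach, with `anthroData` standing for the uploaded dataset):
```{r varMatch, echo = TRUE, eval = FALSE}
## Pre-select the height variable by matching against typical names
names(anthroData)[names(anthroData) %in% c("ht", "HT", "Ht", "height", "Height", "HEIGHT")]
```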
Once the various parameters have been provided, the user should click on the `Calculate` button, which results in the following:
```{r cohortResults, echo = FALSE, eval = TRUE, out.width = "90%"}
knitr::include_graphics("../man/figures/cohortResults.png")
```
The dataset provided is presented as a table in the main panel, now with additional fields for the calculated anthropometric indices. The resulting dataset with the calculated anthropometric indices can then be downloaded by clicking on the `Download` button.
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/doc/using-shiny-app.Rmd |
################################################################################
#
# Server function
#
################################################################################
##
function(input, output, session) {
#
##############################################################################
## INPUTS
##############################################################################
## Header text for single child input
output$header1 <- renderText({
## If dataType == 1
if(input$dataType == 1){
"Enter child's age, sex and anthropometric measurements"
}
})
## Input for sex
output$sex1 <- renderUI({
## If user selects "wfa" or "wfh" and "Single child" options...
if(input$dataType == 1){
selectInput(inputId = "sex1",
label = "Sex",
choices = list("Select" = "", "Male" = 1, "Female" = 2),
selected = "")
}
})
## Input for weight
output$weight1 <- renderUI({
## If user selects "wfa" or "wfh" and "Single child" options...
if(input$dataType == 1){
numericInput(inputId = "weight1",
label = "Weight (kg)",
value = NULL,
min = 1,
max = 30,
step = 0.1)
}
})
## Input for height
output$height1 <- renderUI({
## If user selects "hfa" or "wfh" and "Single child" options...
if(input$dataType == 1){
numericInput(inputId = "height1",
label = "Height (cm)",
value = NULL,
min = 45,
max = 120,
step = 0.1)
}
})
## Input for head circumference
output$hc1 <- renderUI({
## If user selects "hcfa"
if(input$dataType == 1){
numericInput(inputId = "hc1",
label = "Head circumference (cm)",
value = NULL,
min = 30,
max = 60,
step = 0.1)
}
})
## Input for arm circumference
output$muac1 <- renderUI({
## If user selects "acfa"
if(input$dataType == 1){
numericInput(inputId = "muac1",
label = "Mid-upper arm circumference (cm)",
value = NULL,
min = 10,
max = 25,
step = 0.1)
}
})
## Input for subscapular skinfold
output$ss1 <- renderUI({
## If user selects "ssfa"
if(input$dataType == 1){
numericInput(inputId = "ss1",
label = "Subscapular skinfold (mm)",
value = NULL,
min = 3,
max = 20,
step = 0.1)
}
})
## Input for triceps skinfold
output$ts1 <- renderUI({
## If user selects "tsfa"
if(input$dataType == 1){
numericInput(inputId = "ts1",
label = "Triceps skinfold (cm)",
value = NULL,
min = 3,
max = 23,
step = 0.1)
}
})
## Input for age
output$age1 <- renderUI({
## If user selects "hfa" or "wfa" and "Single child" options...
if(input$dataType == 1){
numericInput(inputId = "age1",
label = "Age (months)",
value = NULL,
min = 0,
max = 60,
step = 1)
}
})
## Build the list of anthropometric indices that can be calculated from the supplied inputs
index.list <- reactive({
index.list <- NULL
if(!is.null(input$weight1) & input$dataType == 1) { index.list <- c(index.list, "wfa") }
if(!is.null(input$height1) & input$dataType == 1) { index.list <- c(index.list, "hfa") }
if(input$dataType == 2) { index.list <- c(index.list, "wfh", "wfa", "hfa", "bfa", "hcfa", "acfa", "ssfa", "tsfa") }
if(!is.null(input$weight1) & !is.null(input$height1) & input$dataType == 1) {
index.list <- c(index.list, "wfh")
}
##
full.index.list <- c("wfa", "hfa", "wfh", "bfa", "hcfa", "acfa", "ssfa", "tsfa")
names(full.index.list) <- c("Weight-for-age",
"Height-for-age",
"Weight-for-height",
"BMI-for-age",
"Head circumference-for-age",
"MUAC-for-age",
"Subscapular skinfold-for-age",
"Triceps skinfold-for-age")
##
sub.index.list <- full.index.list[full.index.list %in% index.list]
##
return(sub.index.list)
})
## Input for index type
output$index1 <- renderUI({
if(input$dataType == 2) {
selectInput(inputId = "index1",
label = "Select anthropometric index",
choices = index.list(),
multiple = TRUE,
size = 8,
selectize = FALSE)
}
})
##
output$header2 <- renderText({
## If dataType == 2...
if(input$dataType == 2){
"Upload anthropometric data from multiple children"
}
})
## File input
output$file1 <- renderUI({
## If user selects "Cohort/sample of children" option...
if(input$dataType == 2){
fileInput(inputId = "file1",
label = "Upload children cohort/sample data",
accept = c("text/csv", "text/comma-separated-values,text/plain", ".csv"),
placeholder = "Select anthro data file")
}
})
## Input for sex variable
output$sex2 <- renderUI({
## If file1 is present...
if(!is.null(input$file1) & input$dataType == 2){
## Select UI
selectInput(inputId = "sex2",
label = "Select sex variable",
choices = c(" ", names(anthroDF())),
selected = names(anthroDF())[names(anthroDF()) %in% c("sex", "SEX", "Sex", "Gender", "gender", "GENDER")])
}
})
## Input for weight variable
output$weight2 <- renderUI({
## If user selects "wfa" or "wfh" and "Cohort/sample data" and file1 is present...
if(input$dataType == 2 & !is.null(input$file1)){
## Select UI
selectInput(inputId = "weight2",
label = "Select weight variable",
choices = c(" ", names(anthroDF())),
selected = names(anthroDF())[names(anthroDF()) %in% c("wt", "WT", "Wt", "weight", "Weight", "WEIGHT")])
}
})
## Input for height variable
output$height2 <- renderUI({
## If user selects "hfa" or "wfh" and "Cohort/sample data" and file1 is present...
if(input$dataType == 2 & !is.null(input$file1)){
## Select UI
selectInput(inputId = "height2",
label = "Select height variable",
choices = c(" ", names(anthroDF())),
selected = names(anthroDF())[names(anthroDF()) %in% c("ht", "HT", "Ht", "height", "Height", "HEIGHT")])
}
})
## Input for age variable
output$age2 <- renderUI({
## If user selects "hfa" or "wfa" and "Cohort/sample data" and file1 is present...
if(input$dataType == 2 & !is.null(input$file1)){
## Select UI
selectInput(inputId = "age2",
label = "Select age variable",
choices = c(" ", names(anthroDF())),
selected = names(anthroDF())[names(anthroDF()) %in% c("age", "AGE")])
}
})
## Input for head circumference variable
output$hc2 <- renderUI({
## If user selects "hcfa" and "Cohort/sample data" and file1 is present...
if(input$dataType == 2 & !is.null(input$file1)){
## Select UI
selectInput(inputId = "hc2",
label = "Select head circumference variable",
choices = c(" ", names(anthroDF())),
selected = names(anthroDF())[names(anthroDF()) %in% c("hc", "headCircumference", "hcircumference", "headCirc", "hcirc")])
}
})
## Input for muac variable
output$muac2 <- renderUI({
## If user selects "acfa" and "Cohort/sample data" and file1 is present...
if(input$dataType == 2 & !is.null(input$file1)){
## Select UI
selectInput(inputId = "muac2",
label = "Select MUAC variable",
choices = c(" ", names(anthroDF())),
selected = names(anthroDF())[names(anthroDF()) %in% c("muac", "MUAC")])
}
})
## Input for sub-scapular skinfold variable
output$ss2 <- renderUI({
## If user selects "ssfa" and "Cohort/sample data" and file1 is present...
if(input$dataType == 2 & !is.null(input$file1)){
## Select UI
selectInput(inputId = "ss2",
label = "Select subscapular skinfold variable",
choices = c(" ", names(anthroDF())),
selected = names(anthroDF())[names(anthroDF()) %in% c("subscapularSkinfold", "ss")]
)
}
})
## Input for triceps skinfold variable
output$ts2 <- renderUI({
## If user selects "tsfa" and "Cohort/sample data" and file1 is present...
if(input$dataType == 2 & !is.null(input$file1)){
## Select UI
selectInput(inputId = "ts2",
label = "Select triceps skinfold variable",
choices = c(" ", names(anthroDF())),
selected = names(anthroDF())[names(anthroDF()) %in% c("tricepsSkinfold", "ts")])
}
})
## Calculate action button - cohort/sample
output$calculate2 <- renderUI({
if(input$dataType == 2 & !is.null(input$file1)){
##
div(style="display:inline-block; float:left;",
actionButton(inputId = "calculate2",
label = "Calculate",
class = "btn-primary",
icon = icon(name = "calculator", class = "fa-lg"),
width = "100%")
)
}
})
## Calculate action button - cohort/sample - download
output$download <- renderUI({
if(input$dataType == 2 & !is.null(input$file1)){
##
div(style="display:inline-block; float:right",
downloadButton(outputId = "downloadResults",
label = "Download",
class = "btn-primary",
icon = icon(name = "download", class = "fa-lg"))
)
}
})
## Calculate action button - single child
output$calculate1 <- renderUI({
if(input$dataType == 1){
##
actionButton(inputId = "calculate1",
label = "Calculate",
class = "btn-primary",
icon = icon(name = "calculator", class = "fa-lg"))
}
})
##############################################################################
#
# OUTPUTS
#
##############################################################################
## Read file1 data
anthroDF <- reactive({
## If user selects "Cohort/sample children"...
if(input$dataType == 2 & !is.null(input$file1)){
## Read anthro data
read.csv(input$file1$datapath, header = TRUE, sep = ",")
}
})
##############################################################################
#
# CALCULATIONS
#
##############################################################################
##
observeEvent(input$calculate1, {
## For single calculations
if(input$dataType == 1 & !is.null(input$weight1) & !is.null(input$height1)) {
##
req(input$sex1, input$weight1, input$age1, input$height1)
age <- input$age1 * (365.25 / 12)
waz <- getWGSR(sex = input$sex1, firstPart = input$weight1, secondPart = age, index = "wfa")
haz <- getWGSR(sex = input$sex1, firstPart = input$height1, secondPart = age, index = "hfa")
whz <- getWGSR(sex = input$sex1, firstPart = input$weight1, secondPart = input$height1, index = "wfh")
bfaz <- getWGSR(sex = input$sex1, firstPart = input$weight1, secondPart = input$height1, thirdPart = age, index = "bfa")
output$waz <- renderText({ waz })
output$haz <- renderText({ haz })
output$whz <- renderText({ whz })
output$bfaz <- renderText({ bfaz })
}
##
if(input$dataType == 1 & !is.null(input$muac1)) {
##
req(input$sex1, input$muac1, input$age1)
age <- input$age1 * (365.25 / 12)
mfaz <- getWGSR(sex = input$sex1, firstPart = input$muac1, secondPart = age, index = "mfa")
output$mfaz <- renderText({ mfaz })
}
##
if(input$dataType == 1 & !is.null(input$hc1)) {
##
req(input$sex1, input$hc1, input$age1)
age <- input$age1 * (365.25 / 12)
hcz <- getWGSR(sex = input$sex1, firstPart = input$hc1, secondPart = age, index = "hca")
output$hcz <- renderText({ hcz })
}
##
if(input$dataType == 1 & !is.null(input$ss1)) {
##
req(input$sex1, input$ss1, input$age1)
age <- input$age1 * (365.25 / 12)
ssaz <- getWGSR(sex = input$sex1, firstPart = input$ss1, secondPart = age, index = "ssa")
output$ssaz <- renderText({ ssaz })
}
##
if(input$dataType == 1 & !is.null(input$ts1)) {
##
req(input$sex1, input$ts1, input$age1)
age <- input$age1 * (365.25 / 12)
tsaz <- getWGSR(sex = input$sex1, firstPart = input$ts1, secondPart = age, index = "tsa")
output$tsaz <- renderText({ tsaz })
}
})
##
observeEvent(input$calculate2, {
##
zScoreDF <- anthroDF()
## convert age into days
zScoreDF[[input$age2]] <- zScoreDF[[input$age2]] * (365.25 / 12)
## for cohort calculations
##if(input$dataType == 2) {
## BMI-for-age
if("bfa" %in% input$index1) {
zScoreDF <- addWGSR(data = zScoreDF,
sex = input$sex2,
firstPart = input$weight2,
secondPart = input$height2,
thirdPart = input$age2,
index = "bfa",
output = "bfa")
}
##
if("wfa" %in% input$index1) {
zScoreDF <- addWGSR(data = zScoreDF,
sex = input$sex2,
firstPart = input$weight2,
secondPart = input$age2,
index = "wfa",
output = "wfa")
}
##
if("hfa" %in% input$index1) {
zScoreDF <- addWGSR(data = zScoreDF,
sex = input$sex2,
firstPart = input$height2,
secondPart = input$age2,
index = "hfa",
output = "hfa")
}
##
if("hcfa" %in% input$index1) {
zScoreDF <- addWGSR(data = zScoreDF,
sex = input$sex2,
firstPart = input$hc2,
secondPart = input$age2,
index = "hca",
output = "hca")
}
##
if("acfa" %in% input$index1) {
zScoreDF <- addWGSR(data = zScoreDF,
sex = input$sex2,
firstPart = input$muac2,
secondPart = input$age2,
index = "mfa",
output = "mfa")
}
##
if("ssfa" %in% input$index1) {
zScoreDF <- addWGSR(data = zScoreDF,
sex = input$sex2,
firstPart = input$ss2,
secondPart = input$age2,
index = "ssa",
output = "ssa")
}
##
if("tsfa" %in% input$index1) {
zScoreDF <- addWGSR(data = zScoreDF,
sex = input$sex2,
firstPart = input$ts2,
secondPart = input$age2,
index = "tsa",
output = "tsa")
}
##
if("wfh" %in% input$index1) {
zScoreDF <- addWGSR(data = zScoreDF,
sex = input$sex2,
firstPart = input$weight2,
secondPart = input$height2,
index = "wfh",
output = "wfh")
}
##
output$zScoreTable <- DT::renderDataTable(zScoreDF,
options = list(pageLength = 15))
#}
##
output$downloadResults <- downloadHandler(
filename = function() {
"zscoreResults.csv"
},
content = function(file) {
write.csv(zScoreDF, file)
}
)
})
}
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/zscorer/server.R |
################################################################################
#
# UI
#
################################################################################
## Load dependencies
if(!require(shiny)) install.packages("shiny")
if(!require(shinythemes)) install.packages("shinythemes")
if(!require(zscorer)) install.packages("zscorer")
##
navbarPage(title = "zscorer", id = "chosenTab", theme = shinytheme("sandstone"),
tabPanel(title = "", value = 1, icon = icon(name = "home", class = "fa-lg"),
div(class = "outer",
tags$head(includeCSS("styles.css"))
),
sidebarLayout(
sidebarPanel(width = 3,
h5(textOutput("header1")),
## Age input
uiOutput(outputId = "age1"),
## sex input
uiOutput(outputId = "sex1"),
## Weight input
uiOutput(outputId = "weight1"),
## Height input
uiOutput(outputId = "height1"),
## MUAC input
uiOutput(outputId = "muac1"),
## Head circumference input
uiOutput(outputId = "hc1"),
## Subscapular skinfold input
uiOutput(outputId = "ss1"),
## Triceps skinfold
uiOutput(outputId = "ts1"),
## Header 2 - input file with anthropometric data (dataType == 2)
h5(textOutput("header2")),
## File input - anthro data
uiOutput(outputId = "file1"),
## Anthropometric index input
uiOutput(outputId = "index1"),
## Sex variable for cohort/sample
uiOutput(outputId = "sex2"),
## Age variable for cohort/sample
uiOutput(outputId = "age2"),
## Weight variable for cohort/sample
uiOutput(outputId = "weight2"),
## Height variable for cohort/sample
uiOutput(outputId = "height2"),
## MUAC variable for cohort/sample
uiOutput(outputId = "muac2"),
## head circumference variable for cohort/sample
uiOutput(outputId = "hc2"),
## subscapular skinfold variable for cohort/sample
uiOutput(outputId = "ss2"),
## Triceps skinfold variable for cohort/sample
uiOutput(outputId = "ts2"),
## Action button to calculate single child z-scores
uiOutput(outputId = "calculate1"),
## Action button to calculate cohort/sample z-scores
uiOutput(outputId = "calculate2"),
## Action button to download cohort/sample z-scores
uiOutput(outputId = "download")
),
## Main panel
mainPanel(width = 9,
tabsetPanel(id = "dataType", selected = 1,
tabPanel(title = "Single", value = 1,
conditionalPanel("input.calculate1",
column(width = 3,
wellPanel(h5("Weight-for-age z-score"),
hr(),
uiOutput(outputId = "waz")
)
),
column(width = 3,
wellPanel(h5("Height-for-age z-score"),
hr(),
uiOutput(outputId = "haz")
)
),
column(width = 3,
wellPanel(h5("Weight-for-height z-score"),
hr(),
uiOutput(outputId = "whz")
)
),
column(width = 3,
wellPanel(h5("BMI-for-age z-score"),
hr(),
uiOutput(outputId = "bfaz")
)
),
column(width = 3,
wellPanel(h5("MUAC-for-age z-score"),
hr(),
uiOutput(outputId = "mfaz")
)
),
column(width = 3,
wellPanel(h5("Head circumference-for-age z-score"),
hr(),
uiOutput(outputId = "hcz")
)
),
column(width = 3,
wellPanel(h5("Subscapular skinfold-for-age z-score"),
hr(),
uiOutput(outputId = "ssaz")
)
),
column(width = 3,
wellPanel(h5("Triceps skinfold-for-age z-score"),
hr(),
uiOutput(outputId = "tsaz")
)
)
)
),
tabPanel(title = "Cohort", value = 2,
conditionalPanel("input.calculate2",
## z-scores table
DT::dataTableOutput("zScoreTable")
)
)
)
)
)
),
tabPanel(title = "About", value = 2,
sidebarPanel(width = 3,
HTML("
<h4>Contents</h4>
<h5><a href='#HEAD1'>Introduction</a></h5>
<h5><a href='#HEAD2'>Installation</a></h5>
<h5><a href='#HEAD3'>Usage</a></h5>
<h5><a href='#HEAD4'>Authors</a></h5>
<h5><a href='#HEAD5'>License</a></h5>
")
),
mainPanel(width = 9,
HTML("
<a id='HEAD1'></a><h3>zscorer: Weight-for-age, height-for-age, weight-for-height,
BMI-for-age, head circumference-for-age, arm circumference-for-age, subscapular
skinfold-for-age and triceps skinfold-for-age z-score calculator</h3>
<br/>
<p><code>zscorer</code> facilitates the calculation of <strong>z-scores</strong>
(i.e. the number of standard deviations from the mean) and adds them to survey data:</p>
<ul>
<li><b>Weight-for-length (wfl)</b> z-scores for children with lengths between 45 and 110 cm
<li><b>Weight-for-height (wfh)</b> z-scores for children with heights between 65 and 120 cm
<li><b>Length-for-age (lfa)</b> z-scores for children aged less than 24 months
<li><b>Height-for-age (hfa)</b> z-scores for children aged between 24 and 228 months
<li><b>Weight-for-age (wfa)</b> z-scores for children aged between zero and 120 months
<li><b>Body mass index-for-age (bfa)</b> z-scores for children aged between zero and 228 months
<li><b>MUAC-for-age (mfa)</b> z-scores for children aged between 3 and 228 months
<li><b>Triceps skinfold-for-age (tsa)</b> z-scores for children aged between 3 and 60 months
<li><b>Sub-scapular skinfold-for-age (ssa)</b> z-scores for children aged between 3 and 60 months
<li><b>Head circumference-for-age (hca)</b> z-scores for children aged between zero and 60 months
</ul>
<p>The <code>z-scores</code> are calculated using the <b>WHO Child Growth Standards</b> for
children aged between zero and 60 months or the <b>WHO Growth References</b> for school-aged
children and adolescents. MUAC-for-age (mfa) z-scores for children aged between 60 and 228
months are calculated using the MUAC-for-age growth reference developed by Mramba et al. (2017)
using data from the USA and Africa. This reference has been validated with African school-age
children and adolescents. The <code>zscorer</code> package comes with the WHO Growth References
data and the MUAC-for-age reference data.</p>
<p><code>zscorer</code> can be used to calculate the appropriate <strong>z-score</strong>
for the corresponding anthropometric index for a single child to assess growth and
nutritional status against the standard. It can also be used to calculate
the <strong>z-scores</strong> for an entire cohort or sample of children
(such as in nutrition surveys) to allow for assessing the nutritional status
of the entire child population.</p>
<br/>
<a id='HEAD2'></a><h3>Installation</h3>
<p>You can install <code>zscorer</code> from GitHub with:</p>
<blockquote>
<code># install.packages('remotes')<br/>
remotes::install_github('nutriverse/zscorer')<br/>
# load package<br/>
library(zscorer)</code>
</blockquote>
<p>or from CRAN with:</p>
<blockquote>
<code>install.packages('zscorer')<br/>
# load package<br/>
library(zscorer)</code>
</blockquote>
<br/>
<a id='HEAD3'></a><h3>Usage</h3>
<p>To run <code>zscorer</code> Shiny app, use the following command in R:</p><br/>
<blockquote>
<code>> run_zscorer()</code>
</blockquote>
<br/>
<a id='HEAD4'></a><h3>Authors</h3>
<p>The R scripts on which this package was based were written by Mark
Myatt and Ernest Guevarra on the 20th of December 2012.</p>
<p>Additional scripts that expand on the previously written scripts to
enhance utility and functionality have been written by Mark Myatt and
Ernest Guevarra.</p>
<p>Ernest Guevarra has packaged these scripts into a standard R package
format and is the maintainer of this package.</p>
<p>The <code>zscorer</code> Shiny app built into the package was created
and is maintained by Ernest Guevarra.</p>
<br/>
<a id='HEAD5'></a><h3>License</h3>
<p>This package and the built-in Shiny app are licensed under the AGPL-3
License.</p>
<br/>
")
)
)
)
| /scratch/gouwar.j/cran-all/cranData/zscorer/inst/zscorer/ui.R |
---
title: "WHO Child Growth Standards: Methods and Development"
author: "World Health Organisation"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{WHO Child Growth Standards: Methods and Development}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
```
In 1993 the World Health Organization (WHO) undertook a comprehensive review of the uses and interpretation of anthropometric references. The review concluded that the NCHS/WHO growth reference, which had been recommended for international use since the late 1970s, did not adequately represent early childhood growth and that new growth curves were necessary. The World Health Assembly endorsed this recommendation in 1994. In response WHO undertook the Multicentre Growth Reference Study (MGRS) between 1997 and 2003 to generate new curves for assessing the growth and development of children the world over.
The MGRS combined a longitudinal follow-up from birth to 24 months and a cross-sectional survey of children aged 18 to 71 months. Primary growth data and related information were gathered from 8440 healthy breastfed infants and young children from widely diverse ethnic backgrounds and cultural settings (Brazil, Ghana, India, Norway, Oman and USA). The MGRS is unique in that it was purposely designed to produce a standard by selecting healthy children living under conditions likely to favour the achievement of their full genetic growth potential. Furthermore, the mothers of the children selected for the construction of the standards engaged in fundamental health-promoting practices, namely breastfeeding and not smoking.
This report presents the first set of WHO Child Growth Standards (i.e. length/height-for-age, weight-for-age, weight-for-length, weight-for-height and body mass index (BMI)-for-age) and describes the methodical process followed in their development. The first step in this process was a consultative expert review of some 30 growth curve construction methods, including types of distributions and smoothing techniques to identify the best approach to constructing the standards. Next was the selection of a software package flexible enough to allow the comparative testing of the alternative methods used to generate the growth curves. Then the selected approach was applied systematically to search for the best models to fit the data for each indicator.
The Box-Cox-power-exponential (BCPE) method, with curve smoothing by cubic splines was selected for constructing the WHO child growth curves. The BCPE accommodates various kinds of distributions, from normal to skewed or kurtotic. The age-based indicators originating at birth required a power-transformation to stretch the age scale (x-axis) as a preliminary step to fitting the curves. For each set of curves, the search for the best model specification began by examining various combinations of degrees of freedom to fit the median and variance estimator curves. When data had a non-normal distribution, degrees of freedom for parameters to model skewness and kurtosis were added to the initial model and adequacy of fit evaluated. Apart from length/height-for-age, which followed a normal distribution, the other standards required the modelling of skewness, but not kurtosis. The diagnostic tools used iteratively to detect possible model misfits and biases in the fitted curves included various tests of local and global goodness of fit, worm plots and residual plots. Patterns of differences between empirical and fitted percentiles were also examined, as were proportions of observed versus expected percentages of children with measurements below selected percentiles.
The methodology described above was followed to generate for boys and girls aged 0 to 60 months percentile and z-score curves for length/height-for-age, weight-for-age, weight-for-length, weight-for-height and BMI-for-age. The last standard is an addition to the set of indicators previously available as part of the NCHS/WHO reference. In-depth descriptions are presented of how each sex-specific standard was constructed. Also presented are comparisons of the new WHO standards with the NCHS/WHO growth reference and the CDC 2000 growth charts.
To interpret differences between the WHO standards and the NCHS/WHO reference it is important to understand that they reflect differences not only in the populations used, but also in the methodologies applied to construct the two sets of growth curves. To address the significant skewness of the NCHS/WHO sample's weight-for-age and weight-for-height, separate standard deviations were calculated for distributions below and above the median for each of the two indicators. This approach is limited in fitting skewed data, especially at the extreme tails of the distribution, since it only partially adjusts for the skewness inherent in the weight-based indicators. The WHO standards, on the other hand, employed LMS-based methods that fit skewed data adequately and generate fitted curves that follow closely the empirical data. Like the WHO standards, construction of the CDC 2000 growth charts was also based on the LMS method and, therefore, differences between this reference and the WHO standards are largely a reflection of differences in the populations on which the two sets of curves were based.
*Length/height-for-age.* The standard for linear growth has a part based on length (length-for-age, 0 to 24 months) and another on height (height-for-age, 2 to 5 years). The two parts were constructed using the same model but the final curves reflect the average difference between recumbent length and standing height. By design, children between 18 and 30 months in the cross-sectional component of the MGRS had both length and height measurements taken. The average difference between the two measurements in this set of 1625 children was 0.73 cm. To fit a single model for the whole age range, 0.7 cm was therefore added to the cross-sectional height values before merging them with the longitudinal sample's length data. After the model was fitted, the median curve was shifted back downwards by 0.7 cm for ages above two years, and the coefficient of variation curve adjusted to the new median values to construct the height-for-age growth curves. The same power transformation of age was applied to stretch the age scale for each of the sexes before fitting cubic splines to generate their respective growth curves. The boys' curves required a model with higher degrees of freedom to fit both the median and coefficient of variation curves. The data for both sexes followed the normal distribution.
*Weight-for-age.* The weights of the longitudinal and cross-sectional samples were merged without any adjustments and a single model was fitted to generate one continuous set of curves constituting each sex-specific weight-for-age standard. The same power transformation was applied to both boys' and girls' age before fitting the curve construction model. The weight data for both sexes were skewed, so in specifying the model, the parameter related to skewness was fitted in addition to the median and the approximate coefficient of variation. In modelling skewness the girls' curves required more degrees of freedom to fit a curve for this parameter.
*Weight-for-length/height.* The construction of the weight-for-length (45 to 110 cm) and weight-for-height (65 to 120 cm) standards followed a procedure similar to that applied to construct the length/height-for-age standards. That is, to fit a single model, 0.7 cm was added to the cross-sectional height values, and after the model was fitted, the weight-for-length centile curves in the length interval 65.7 to 120.7 cm were shifted back by 0.7 cm to derive the weight-for-height standards corresponding to the height range 65 cm to 120 cm. The lower limit of the weight-for-length standards (45 cm) was chosen to cover up to approximately -2 SD girls' length at birth. The upper limit for the weight-for-height standards was influenced by the need to accommodate the tallest children at age 60 months, that is, 120 cm is approximately +2 SD boys' height-for-age at 60 months. The overlap between the upper end of the weight-for-length standards and the lower end of the weight-for-height standards is intended to facilitate their application in severely undernourished populations and emergency settings.
There was no evidence that a length/height transformation similar to that described for age was required for constructing the weight-for-length/height standards. The modelling of the median and variance curves followed the procedure described for the first two standards. Results from the final model for girls' weight-for-length/height suggested the need to investigate potential improvements in the curves by modelling kurtosis. Adjustment for kurtosis, however had a negligible impact on the final centiles. Therefore, considering that modelling the fourth parameter would increase complexity in application of the standards and create inconsistency between the sexes, the final curves were generated without adjusting for kurtosis. The degrees of freedom for the median and variance curves varied between the boys' and girls' standards. The fact that the weight-for-length/height indicator combines different velocities for the two measurements involved (weight and length/height) at overlapping ages likely explains the slight wiggle in the final WHO standards (for both boys and girls) as also observed in other references.
*Body mass index-for-age.* Body mass index is the ratio weight (in kg)/recumbent length or standing height (in m^2^). To address the difference between length and height, the approach used for constructing the BMI-for-age standards was different from that described for length/height-for-age. Because BMI is a ratio with squared length or height in the denominator, adding 0.7 cm to the height values and back-transforming them after fitting was not feasible. The solution adopted was to construct the standards for the younger and the older children separately based on two sets of data with an overlapping range of ages below and above 24 months. To construct the BMI-for-age standard based on length (0 to 2 years), the longitudinal sample's length data and the cross-sectional sample's height data (18 to 30 months) were combined after adding 0.7 cm to the height values. Analogously, to construct the standard from 2 to 5 years, the cross-sectional sample's height plus the longitudinal sample's length data (18 to 24 months) were combined after subtracting 0.7 cm from the length values. Thus, a common set of data from 18 to 30 months was used to generate the BMI standards for the younger and the older children. The resulting disjunction between the two standards thus in essence reflects the 0.7 cm difference between length and height. This does not mean, however, that a child at a specific age will have the same length- and height-based BMI-for-age z-score as this is mathematically impossible given the nature of the BMI ratio.
An age power transformation as described for the other age-based standards was required before constructing the length-based BMI-for-age curves. No such transformation was necessary for the height-based BMI-for-age. The WHO length- and height-based BMI-for-age standards do not overlap, i.e. the length-based interval ends at 730 days and the height-based interval starts at 731 days. Cubic spline fitting was achieved with variable degrees of freedom for the length- versus height-based standards, and also for the boys' versus girls' final curves.
*Technical aspects of the standards.* The method used to construct the WHO standards generally relied on the Box-Cox power exponential distribution and the final selected models simplified to the LMS model. As a result, the computation of percentiles and z-scores for these standards uses formulae based on the LMS method. However, a restriction was imposed on all indicators to enable the derivation of percentiles only within the interval corresponding to z-scores between -3 and 3. The underlying reasoning is that percentiles beyond ±3 SD are invariant to changes in equivalent z-scores. The loss accruing to this restriction is small since the inclusion range corresponds to the 0.135th to 99.865th percentiles.
The weight-based indicators presented right-skewed distributions. When modelled correctly, right skewness has the effect of making distances between positive z-scores increase progressively the farther away they are from the median, while distances between negative z-scores decrease progressively. The LMS method fits skewed data adequately by using a Box-Cox normal distribution, which follows the empirical data closely. The drawback, however, is that the outer tails of the distribution are highly affected by extreme data points even if only very few. A restricted application of the LMS method was thus used for the construction of the WHO weight-based indicators, limiting the Box-Cox normal distribution to the interval corresponding to z-scores where empirical data were available (i.e. between -3 SD and 3 SD). Beyond these limits, the standard deviation at each age (or length/height) was fixed to the distance between ±2 SD and ±3 SD, respectively. This approach avoids making assumptions about the distribution of data beyond the limits of the observed values.
*Epidemiological aspects of the standards.* As expected, there are notable differences with the NCHS/WHO reference that vary by age, sex, anthropometric measure and specific percentile or z-score curve. Differences are particularly important in infancy. Stunting will be greater throughout childhood when assessed using the new WHO standards compared to the NCHS/WHO reference. The growth pattern of breastfed infants will result in a substantial increase in rates of underweight during the first half of infancy and a decrease thereafter. For wasting, the main difference is during infancy when wasting rates will be substantially higher using the new WHO standards. With respect to overweight, use of the new WHO standards will result in a greater prevalence that will vary by age, sex and nutritional status of the index population.
The growth standards presented in this report provide a technically robust tool that represents the best description of physiological growth for children under five years of age. The standards depict normal early childhood growth under optimal environmental conditions and can be used to assess children everywhere, regardless of ethnicity, socioeconomic status and type of feeding.
*Taken from:*
World Health Organization. WHO Child Growth Standards: Length/Height-for-age, Weight-for-age, Weight-for-length, Weight-for-height, and Body Mass Index-for age: Methods and Development. 1st ed. World Health Organization; 2006. ISBN 92 4 154693 X
| /scratch/gouwar.j/cran-all/cranData/zscorer/vignettes/anthropometry.Rmd |
---
title: "Calculating anthropometric z-scores using zscorer"
author: "Mark Myatt"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Calculating anthropometric z-scores using zscorer}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
if(!require(zscorer)) install.packages("zscorer")
```
# Calculating anthropometric z-scores using `zscorer`
The main function in the `zscorer` package is `addWGSR()`.
To demonstrate its usage, we will use the accompanying dataset in `zscorer` called `anthro3`. We inspect the dataset as follows:
```{r usage1, echo = TRUE, eval = FALSE}
anthro3
```
which returns:
```{r usage1a, echo = FALSE, eval = TRUE}
head(anthro3)
```
`anthro3` contains anthropometric data from a Rapid Assessment Method (RAM) survey from Burundi.
Anthropometric indices (e.g. weight-for-height z-scores) have not been calculated and added to the data.
We will use the `addWGSR()` function to add weight-for-height (wfh) z-scores to the example data:
```{r usage2, echo = TRUE, eval = TRUE}
svy <- addWGSR(data = anthro3, sex = "sex", firstPart = "weight",
secondPart = "height", index = "wfh")
```
A new column named **wfhz** has been added to the dataset:
```{r usage2a, echo = FALSE, eval = TRUE}
head(svy)
```
The `wfhz` column contains the weight-for-height (wfh) z-scores calculated from the `sex`, `weight`, and `height` columns in the `anthro3` dataset. The calculated z-scores are rounded to two decimal places unless the `digits` option is used to specify a different precision (run `?addWGSR` to see a description of the various parameters that can be specified in the `addWGSR()` function).
The `addWGSR()` function takes up to nine parameters to calculate each index separately, depending on the index required. These are described in the *Help* files of the `zscorer` package which can be accessed as follows:
```{r usage2b, echo = TRUE, eval = FALSE}
?addWGSR
```
The **standing** parameter specifies how “stature” (i.e. length or height) was measured. If this is not specified, and in some special circumstances, height and age rules will be applied when calculating z-scores. These rules are described in the table below, and a short sketch of specifying **standing** explicitly follows the table.
+---------------+---------------+---------------+---------------+----------------------------------------+
| **index** | **standing** | **age** | **height** | **Action** |
+===============+===============+===============+===============+========================================+
| hfa or lfa | standing | < 731 days | | index = lfa |
| | | | | height = height + 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | supine | < 731 days | | index = lfa |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | unknown | < 731 days | | index = lfa |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | standing | ≥ 731 days | | index = hfa |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | supine | ≥ 731 days | | index = hfa |
| | | | | height = height - 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| hfa or lfa | unknown | ≥ 731 days | | index = hfa |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | standing | | < 65 cm | index = wfl |
| | | | | height = height + 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | standing | | ≥ 65 cm | index = wfh |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | supine | | ≤ 110 cm | index = wfl |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | supine | | more than | index = wfh |
| | | | 110 cm | height = height - 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | unknown | | < 87 cm | index = wfl |
+---------------+---------------+---------------+---------------+----------------------------------------+
| wfh or wfl | unknown | | ≥ 87 cm | index = wfh |
+---------------+---------------+---------------+---------------+----------------------------------------+
| bfa | standing | < 731 days | | height = height + 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
| bfa | standing | ≥ 731 days | | height = height - 0.7 cm |
+---------------+---------------+---------------+---------------+----------------------------------------+
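
If your dataset records how stature was measured, the variable name can be passed through the `standing` parameter so that these rules are applied from the data rather than inferred. The sketch below is illustrative only: the `standing` column and its coding of **1** (standing), **2** (supine) and **3** (unknown) are assumptions for illustration and are not part of the `anthro3` dataset.

```{r usageStanding, echo = TRUE, eval = FALSE}
## Hypothetical: assumes a "standing" column coded 1 = standing, 2 = supine, 3 = unknown
svy <- addWGSR(data = anthro3, sex = "sex", firstPart = "weight",
               secondPart = "height", index = "wfh", standing = "standing")
```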
The `addWGSR()` function will not produce error messages unless there is something very wrong with the data or the specified parameters. If an error is encountered in a record then the value **NA** is returned. Error conditions are listed in the table below.
+--------------------------------------------------+----------------------------------------+
| **Error condition** | **Action** |
+==================================================+========================================+
| Missing or nonsense value in `standing` parameter| Set `standing` to `3` (unknown) and |
| | apply appropriate height or age rules. |
+--------------------------------------------------+----------------------------------------+
| Unknown `index` specified | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| Missing `sex` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| Missing `firstPart` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| Missing `secondPart` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `sex` is not male (`1`) or female (`2`) | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `firstPart` is not numeric | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `secondPart` is not numeric | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| Missing `thirdPart` when `index = "bfa"` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `thirdPart` is not numeric when `index = "bfa"` | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
| `secondPart` is out of range for specified index | Return **NA** for z-score. |
+--------------------------------------------------+----------------------------------------+
We can see this error behaviour using the example data:
```{r usage3, echo = TRUE, eval = TRUE}
table(is.na(svy$wfhz))
```
We can display the problem record:
```{r usage4, echo = TRUE, eval = TRUE}
svy[is.na(svy$wfhz), ]
```
The problem is due to the value **9** in the `sex` column, which should be coded **1** (for male) or **2** (for female). Z-scores are only calculated for records with sex specified as either **1** (male) or **2** (female). All other values, including **NA**, will return **NA**.
The `addWGSR()` function requires that data are recorded using the required units or required codes (see `?addWGSR` to check units required by the different function parameters).
The `addWGSR()` function will return incorrect values if the data are not recorded using the required units. For example, this attempt to add weight-for-age z-scores to the example data:
```{r usage5, echo = TRUE, eval = TRUE}
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "age", index = "wfa")
```
will give incorrect results:
```{r usage5a, echo = TRUE, eval = TRUE}
summary(svy$wfaz)
```
The odd range of values is due to age being recorded in months rather than days.
It is simple to convert all ages from months to days:
```{r usage5b, echo = TRUE, eval = TRUE}
svy$age <- svy$age * (365.25 / 12)
head(svy)
```
before calculating and adding weight-for-age z-scores:
```{r usage5c, echo = TRUE, eval = TRUE}
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "age", index = "wfa")
head(svy)
summary(svy$wfaz)
```
The `muac` column in the example dataset is recorded in millimetres (mm). We need to convert this to centimetres (cm):
```{r usage6, echo = TRUE, eval = TRUE}
svy$muac <- svy$muac / 10
head(svy)
```
before using the `addWGSR()` function to calculate MUAC-for-age z-scores:
```{r usage6a, echo = TRUE, eval = TRUE}
svy <- addWGSR(svy, sex = "sex", firstPart = "muac",
secondPart = "age", index = "mfa")
head(svy)
```
As a last example, we will use the `addWGSR()` function to add body mass index-for-age (bfa) z-scores to the data, creating a new variable called `bmiAgeZ` with a precision of 4 decimal places:
```{r usage7, echo = TRUE, eval = TRUE}
svy <- addWGSR(data = svy, sex = "sex", firstPart = "weight",
secondPart = "height", thirdPart = "age", index = "bfa",
output = "bmiAgeZ", digits = 4)
head(svy)
```
| /scratch/gouwar.j/cran-all/cranData/zscorer/vignettes/calculate_zscore.Rmd |
---
title: "Legacy functions"
author: "Ernest Guevarra"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Legacy functions}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
if(!require(zscorer)) install.packages("zscorer")
```
# Legacy functions
Earlier versions (pre-release and v0.1.0) of `zscorer` used different functions that calculated only three anthropometric indices: **weight-for-age**, **height-for-age** and **weight-for-height**. Also, these functions used a simplified construct of the WHO Growth Reference in which children's ages were recorded in months compared to days in the standard WHO Growth Reference.
With the developers' recent work on anthropometric data quality processes implemented in R (see [nipnTK](https://nutriverse.io/nipnTK)), a more consistent and standard set of functions was deemed necessary to calculate not just three but all anthropometric indices used in the WHO Growth Standards, and to make the children's age in the reference data consistent with the standard by using days. This work has culminated in the current set of `zscorer` functions.
For the purposes of backwards compatibility and to keep a record of past codebase for previous versions of the functions, the legacy functions have been kept in `zscorer`. This vignette describes those functions and shows examples of how to use them.
For new users of `zscorer`, the developers recommend learning and using the new functions rather than these legacy functions. For previous `zscorer` users, the developers recommend reviewing past code that uses the legacy functions and, where feasible, adapting it to the new functions.
## Calculating z-scores using the legacy functions
The `zscorer` package comes with the original legacy functions included in its `version 0.1.0`. These functions allow for the calculation of `weight-for-age`, `height-for-age` and `weight-for-height z-scores` for individual children and for a cohort of children.
### Calculating z-score for each of the three anthropometric indices for a single child
For this example, we will use the `getWGS()` function and apply it to dummy data of a **52 month** old male child with a weight of **14.6 kg** and a height of **98.0 cm**.
```{r example1, eval = TRUE}
# weight-for-age z-score
waz <- getWGS(sexObserved = 1, # 1 = Male / 2 = Female
firstPart = 14.6, # Weight in kilograms up to 1 decimal place
secondPart = 52, # Age in whole months
index = "wfa") # Anthropometric index (weight-for-age)
waz
# height-for-age z-score
haz <- getWGS(sexObserved = 1,
firstPart = 98, # Height in centimetres
secondPart = 52,
index = "hfa") # Anthropometric index (height-for-age)
haz
# weight-for-height z-score
whz <- getWGS(sexObserved = 1,
firstPart = 14.6,
secondPart = 98,
index = "wfh") # Anthropometric index (weight-for-height)
whz
```
Applying the `getWGS()` function results in a calculated `z-score` for one child.
### Calculating z-score for each of the three anthropometric indices for a cohort or sample of children
For this example, we will use the `getCohortWGS()` function and apply it to sample data `anthro1` that came with `zscorer`.
```{r sample-data1, eval = FALSE}
# Make a call for the anthro1 dataset
anthro1
```
As you will see, this dataset has the 4 variables you will need to use with `getCohortWGS()` to calculate the `z-score` for the corresponding anthropometric index. These are `age`, `sex`, `weight` and `height`.
```{r, echo = FALSE, eval = TRUE}
library(zscorer)
```
```{r sample-data2, eval = TRUE}
head(anthro1)
```
To calculate the three anthropometric indices for all the children in the sample, we execute the following commands in R:
```{r example2, eval = TRUE}
# weight-for-age z-score
waz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "weight",
secondPart = "age",
index = "wfa")
head(waz, 50)
# height-for-age z-score
haz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "height",
secondPart = "age",
index = "hfa")
head(haz, 50)
# weight-for-height z-score
whz <- getCohortWGS(data = anthro1,
sexObserved = "sex",
firstPart = "weight",
secondPart = "height",
index = "wfh")
head(whz, 50)
```
Applying the `getCohortWGS()` function results in a vector of calculated `z-scores` for all children in the cohort or sample.
### Calculating z-scores for all of the three anthropometric indices in one function
For this example, we will use the `getAllWGS()` function and apply it to sample data `anthro1` that came with `zscorer`.
```{r example3, eval = TRUE}
# weight-for-age z-score
zScores <- getAllWGS(data = anthro1,
sex = "sex",
weight = "weight",
height = "height",
age = "age",
index = "all")
head(zScores, 20)
```
Applying the `getAllWGS()` function results in a data frame of calculated `z-scores` for all children in the cohort or sample for all the anthropometric indices.
| /scratch/gouwar.j/cran-all/cranData/zscorer/vignettes/legacy_functions.Rmd |
---
title: "Using zscorer Shiny application"
author: "Ernest Guevarra"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Using zscorer Shiny application}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(zscorer)
```
`zscorer` comes packaged with a built-in Shiny application. The application is initiated by issuing the following command:
```{r runApp, echo = TRUE, eval = FALSE}
run_zscorer()
```
This opens the default web browser on the user's device, showing the following:
```{r zscorerFrontpage, echo = FALSE, eval = TRUE, out.width = "90%"}
knitr::include_graphics("../man/figures/zscorerApp.png")
```
# Getting the z-score of a single child
The first (and default) functionality of the built-in Shiny application is for calculating the z-scores for a single child. This functionality is accessed by selecting the `single` option on the tabbed menu available in the top edge of the application interface as shown below:
```{r single, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/single.png")
```
When this functionality is selected, the sidebar menu provides the following options:
```{r singleMenu, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/singleSidebar.png")
```
In this sidebar menu, the user should enter all the anthropometric measurements available for the child for whom the user wants to calculate z-scores. For the calculation to work, the minimum data to be supplied by the user are:
* **Age**;
* **Sex**; and,
* At least one anthropometric measurement among **weight (kgs)**, **height (cms)**, **mid-upper arm circumference (cms)**, **head circumference (cms)**, **subscapular skinfold (cms)** or **triceps skinfold (cms)**.
Please ensure that any anthropometric measurement is supplied in the appropriate units as indicated above.
To illustrate, let's use the dummy data of a **52 month** old **male** child with a weight of **14.6 kg** and a height of
**98.0 cm**. This data is typed into the sidebar menu and then the `Calculate` button is clicked, as shown below:
```{r sidebarSingleInput, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/sidebarSingleInput.png")
```
This produces the following output in the main panel of the application:
```{r singleResults, echo = FALSE, eval = TRUE, out.width = "90%"}
knitr::include_graphics("../man/figures/singleResults.png")
```
Depending on the anthropometric measurements supplied, the corresponding applicable z-score indices will be provided. No z-score result will be provided if the required anthropometric measurement is not supplied. In the example above, MUAC-for-age, head circumference-for-age, subscapular skinfold-for-age and triceps skinfold-for-age are not provided, as no MUAC, head circumference, subscapular skinfold or triceps skinfold measurements were supplied.
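
For comparison, the same single-child z-scores can be reproduced at the R console with `getWGSR()`. The following is a minimal sketch for the same dummy child; note that `getWGSR()` expects age in days, so the age in months is converted first:

```{r consoleEquivalent, echo = TRUE, eval = FALSE}
age <- 52 * (365.25 / 12)                                            ## age in days
getWGSR(sex = 1, firstPart = 14.6, secondPart = age, index = "wfa")  ## weight-for-age
getWGSR(sex = 1, firstPart = 98, secondPart = age, index = "hfa")    ## height-for-age
getWGSR(sex = 1, firstPart = 14.6, secondPart = 98, index = "wfh")   ## weight-for-height
```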
# Getting the z-score of a cohort of children
The second functionality of the built-in Shiny application is for calculating the z-scores for a cohort of children. This functionality is accessed by selecting the `cohort` option on the tabbed menu available in the top edge of the application interface as shown below:
```{r cohort, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/cohort.png")
```
When this functionality is selected, the sidebar menu provides the following options:
```{r cohortMenu, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/cohortSidebar.png")
```
In this sidebar menu, the user is expected to upload a dataset of children with their anthropometric measurements. The user is also expected to select which anthropometric indices to calculate. If no anthropometric indices are selected, no calculations will be performed. Cohort data can be uploaded by clicking on the `Browse` button and then selecting the file with the cohort data to upload. Once uploaded, the sidebar menu will provide the following additional options:
```{r cohortOptions, echo = FALSE, eval = TRUE, out.width = "30%"}
knitr::include_graphics("../man/figures/sidebarCohortOptions.png")
```
For the `zscorer` app to be able to calculate z-scores from the cohort data, the variables corresponding to the required information need to be supplied. By default, the application will search the variable names in the data and pick out typical variable names for the measurements (e.g., `ht` or `height` or `HT` or `Height` or `HEIGHT` for height measurements). If the variables are named unconventionally, the user will have to select the variable name for the particular measurement from the choices. If the measurements are not in the data, then the variable field should be left empty.
Once the various parameters have been provided, the user should click on the `Calculate` button, which results in the following:
```{r cohortResults, echo = FALSE, eval = TRUE, out.width = "90%"}
knitr::include_graphics("../man/figures/cohortResults.png")
```
The dataset provided is presented as a table in the main panel, now with additional fields for the calculated anthropometric indices. The resulting dataset with the calculated anthropometric indices can then be downloaded by clicking on the `Download` button.
| /scratch/gouwar.j/cran-all/cranData/zscorer/vignettes/using-shiny-app.Rmd |
#' Make Sequential colour gradient palette
#'@importFrom scales seq_gradient_pal show_col
#'@param high colour for high end of gradient.
#'@param low colour for low end of gradient.
#'@param mid colour for middle of gradient.
#'@param n the number of colors in palette
#'@param plot Logical. Whether or not to draw the plot
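#'@examples
#'## a 10-step white-to-red gradient
#'gradientColor(high = "red", low = "white", n = 10)
#'## a diverging gradient through a middle colour
#'gradientColor(low = "yellow", mid = "orange", high = "red", n = 20)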
#'@export
gradientColor=function(high="red",low="white",mid=NULL,n=20,plot=FALSE){
if(is.null(mid)){
x <- seq(0, 1, length.out = n)
vals=seq_gradient_pal(low, high)(x)
} else{
x <- seq(0, 1, length.out = n/2)
val1=seq_gradient_pal(low, mid)(x)
x2 <- seq(0, 1, length.out = n/2+1)
val2=seq_gradient_pal(mid, high)(x2)
vals=c(val1,val2[-1])
}
if(plot) show_col(vals)
vals
}
#' Rescale a numeric vector so that its minimum maps to 1 and its maximum to maxvalue
#' @param x A vector
#' @param maxvalue maximal value
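#' @examples
#' ## rescale onto the integer range 1..maxvalue (the minimum maps to 1)
#' normalize2(c(0, 25, 50, 75, 100), maxvalue = 10)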
#' @export
normalize2=function(x,maxvalue=10){
if(is.numeric(x)) {
min=min(x,na.rm=TRUE)
max=max(x,na.rm=TRUE)
x=round((x-min)*(maxvalue-1)/(max-min))+1
x[is.na(x)]=1
}
x
}
#'Add gradient background color to ztable
#'@param z An object of class ztable
#'@param palette Name of color palette
#'@param mycolor user defined color vectors
#'@param rows rows to include in the heatmap
#'@param cols columns to include in the heatmap
#'@param changeColor Logical. Whether or not to change the font color automatically
#'@param reverse If true, reverse the font color
#'@param margin An integer: one of 0, 1 or 2. 0 (default): heatmap over all numeric data;
#' 1: row-wise heatmap; 2: column-wise heatmap.
#'@export
#'@examples
#' require(magrittr)
#' ztable(head(mtcars)) %>% makeHeatmap()
#' \dontrun{
#' ztable(head(mtcars)) %>% makeHeatmap(palette="YlOrRd",cols=c(1,4,6),margin=2)
#' ztable(head(mtcars)) %>% makeHeatmap(rows=c(1,3,5),margin=1)
#' require(moonBook)
#' x=table(acs$smoking,acs$Dx)
#' ztable(x) %>% makeHeatmap
#' ztable(x) %>% makeHeatmap(palette="Blues")
#' ztable(x) %>% makeHeatmap(mycolor=gradientColor(low="yellow",mid="orange",high="red"))
#' }
makeHeatmap=function(z,
palette="Reds",mycolor=NULL,
rows=NULL,cols=NULL,
changeColor=TRUE,reverse=FALSE,
margin=0){
if(is.null(mycolor)) mycolor=palette2colors(palette)
ncolor=length(mycolor)
df=z$x
if(is.null(rows)) rows=1:nrow(df)
if(is.null(cols)) cols=1:ncol(df)
df1=df[rows,cols]
select=sapply(df1,is.numeric)
selected=cols[which(select)]
if(margin==0){
res=as.matrix(df1[select])
max=res[which.max(res)]
min=res[which.min(res)]
res=round((res-min)*(ncolor-1)/(max-min))+1
res[is.na(res)]=1
} else if(margin==1){
res=df1[select]
result=apply(res,1,normalize2,ncolor)
res[]=t(result)
} else{
res=df1[select]
res[]=apply(res,2,normalize2,ncolor)
}
for(i in 1:nrow(res)){
for(j in 1:ncol(res)){
color=NULL
if(changeColor) {
if(reverse) {
color=ifelse(res[i,j]>ncolor/2,"black","white")
} else{
color=ifelse(res[i,j]>ncolor/2,"white","black")
}
}
z<-addCellColor(z,rows=rows[i]+1,cols=selected[j]+1,bg=mycolor[res[i,j]],color=color)
}
}
z
}
#'@describeIn ztable Makes a ztable for class table
#'@export
ztable.table=function(x,digits=NULL,...){
m=matrix(x,ncol=ncol(x))
colnames(m)=colnames(x)
rownames(m)=rownames(x)
ztable(m)
}
| /scratch/gouwar.j/cran-all/cranData/ztable/R/makeHeatmap.R |
#' Place two or more ztables or figures side by side in Latex or HTML format
#'
#' Place two or more ztables or figures side by side in Latex or HTML format.
#' Requires Latex "boxedminipage" package in preamble.
#' The ztable for this purpose should be made by function ztable with tabular="TRUE".
#' @param width a numeric vector specifies the width to which the tables or
#' figures should be scaled
#' @param listTables a list consists of object of "ztable" or valid figure name
#' @param type Type of table to produce. Possible values for type are "latex" or
#' "html". Default value is "latex".
#' @export
#' @examples
#' require(ztable)
#' z=ztable(head(mtcars[1:3]),tabular=TRUE)
#' parallelTables(c(0.4,0.3),list(z,z))
#' parallelTables(c(0.5,0.5),list(z,z))
#' parallelTables(c(0.5,0.5),list(z,z),type="html")
#' z1=ztable(head(iris[1:3]),turn=TRUE,angle=10,zebra=1)
#' z2=ztable(head(iris[1:3]),turn=TRUE,angle=-10,zebra=2)
#' parallelTables(c(0.5,0.5),list(z1,z2))
parallelTables=function(width,listTables,type="latex"){
a=length(width)
if(!inherits(listTables,"list")){
cat("\nThe 2nd parameter listTables should be a list\n")
return(invisible())
}
b=length(listTables)
if(a!=b) {
cat("\nLengths of width and tables are different\n")
cat(paste("length of width=",a,",length of tables=",b,sep=""))
return(invisible())
}
if(type=="html") parallelTablesHTML(width,listTables)
else parallelTablesLatex(width,listTables)
}
#' Place two or more ztables or figures side by side in Latex format
#'
#' Place two or more ztables or figures side by side in HTML format.
#' The ztable for this purpose should be made by function ztable with tabular="TRUE".
#' @param width a numeric vector specifies the width to which the tables or
#' figures should be scaled
#' @param listTables a list consists of object of "ztable" or valid figure name
parallelTablesLatex=function(width,listTables){
a=length(width)
cat("\\begin{table}[!htb]\n")
for(i in 1:a){
cat(paste("\\begin{minipage}{",width[i],"\\linewidth}\n\\centering\n",
sep=""))
if(inherits(listTables[[i]],"ztable")) print(listTables[[i]],type="latex")
else if(is.character(listTables[[i]])) {
cat(paste("\\includegraphics[width=1\\linewidth]{",
listTables[[i]],"}\n",sep=""))
}
cat("\\end{minipage}\n")
}
cat("\\end{table}")
}
#' Place two or more ztables or figures side by side in HTML format
#'
#' Place two or more ztables or figures side by side in HTML format.
#' The ztable for this purpose should be made by function ztable with tabular="TRUE".
#' @param width a numeric vector specifies the width to which the tables or
#' figures should be scaled
#' @param listTables a list consists of object of "ztable" or valid figure name
parallelTablesHTML=function(width,listTables){
a=length(width)
cat("<table width=\"100%\" cellspacing=\"5px\" cellpadding=\"5px\" border=\"0\">
\n<colgroup>\n")
for(i in 1:a) cat(paste("<col width=",
ifelse(width[i]<=1,width[i]*100,width[i]),
"%>\n",sep=""))
cat("</colgroup>\n<tr>")
for(i in 1 :a){
cat("<td>")
if(inherits(listTables[[i]],"ztable")) print(listTables[[i]],type="html")
else if(is.character(listTables[[i]]))
cat(paste("<img src=\"",listTables[[i]],"\">",sep=""))
cat("</td>\n")
}
cat("</tr>\n</table>\n")
}
| /scratch/gouwar.j/cran-all/cranData/ztable/R/parallelTables.R |
#' Find rgb value from color name
#'
#'@param name a valid color name
#'@return rgb value
name2rgb=function(name){
if(substr(name,1,1)=="#") {
result=name
} else{
number=grep(paste("^",name,sep=""),ztable::zcolors$name)
if(length(number)<1) result="white"
else{
rgb=ztable::zcolors[number[1],2]
result=paste("#",rgb,sep="")
}
}
result
}
#' Delete the first column specifier of an align string
#'
#' @param align A character string defining the column alignment in Latex format
align2nd=function(align){
if(substr(align,1,1)=="|") {
result=substr(align,2,nchar(align))
result=align2nd(result)
} else result=substr(align,2,nchar(align))
result
}
#' Count the number of columns in an align string
#'
#' @param align A character string defining the column alignment in Latex format
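#' @examples
#' ## counts column specifiers, ignoring vertical-rule markers: returns 3
#' alignCount("|c|rl|")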
#' @export
alignCount=function(align){
result=unlist(strsplit(align,"|",fixed=TRUE))
temp=c()
for(i in 1:length(result)) temp=paste(temp,result[i],sep="")
nchar(temp)
}
#' Check the validity of align
#'
#' @param align A character string defining the column alignment in Latex format
#' @param ncount An integer equal to the number of data columns (as returned by ncol)
#' @param addrow An integer; 1 if row names are included, 0 otherwise
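#' @examples
#' ## pad "cc" until it covers 3 data columns plus a row-name column: "cccc"
#' alignCheck("cc", 3, 1)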
#' @export
alignCheck=function(align,ncount,addrow){
count=alignCount(align)
#cat("align=",align,"count=",count,"\n")
while(count != (ncount+addrow)){
if(count< (ncount+addrow)) align=paste(align,"c",sep="")
else if(count > (ncount+addrow)) align=align2nd(align)
count=alignCount(align)
#cat("align=",align,"count=",count,"\n")
}
result=align
result
}
#' Convert the align in Latex format to html format
#'
#' @param align A character string of align in Latex format
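#' @examples
#' ## "|" markers are skipped: returns c("left", "center", "right")
#' align2html("l|cr")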
#' @export
align2html=function(align){
result=c()
for(i in 1:nchar(align)){
temp=substr(align,i,i)
if(temp=="|") next
temp=ifelse(temp=="l","left",ifelse(temp=="r","right","center"))
result=c(result,temp)
}
result
}
#' Add or delete vertical lines in a ztable
#'
#' @param z An object of ztable
#' @param type An integer or one of c("none","all")
#' @param add An integer vector indicating positions where vertical lines are added
#' @param del An integer vector indicating positions where vertical lines are deleted
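#' @examples
#' ## a minimal sketch: remove all vertical lines, then add one before column 2
#' z <- ztable(head(iris))
#' z <- vlines(z, type = "none")
#' z <- vlines(z, add = 2)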
#' @importFrom stringr str_remove_all fixed
#' @export
vlines=function(z,type=NULL,add=NULL,del=NULL){
if(is.null(type) & is.null(add) & is.null(del)) {
cat("\nvlines : add or delete vertical lines to a ztable\n
Usage: type must be one of these or NULL: 0-1 or \"none\",\"all\"\n
add and del: An integer vector indicating position to add or delete vertical line(s)\n")
return(z)
}
align=str_remove_all(z$align,fixed("|"))
vlines=align2lines(z$align)
colcount=colGroupCount(z)
addrow=ifelse(z$include.rownames,1,0)
#align=alignCheck(align,ncol(z$x),addrow)
count=nchar(align)
if(!is.null(type)) {
vltype=NULL
if(!is.numeric(type)) {
if(toupper(type) == "NONE") vltype=0
else if(toupper(type) == "ALL") vltype=1
else return(z)
}
if((type>=0) & (type<=1)) vltype=type
if(vltype==0) vlines=rep(0,count+1)
else vlines=rep(1,count+1) #vltype=1
}
if(!is.null(add)){
if(is.numeric(add)){
for(i in 1:length(add)) {
if(add[i]<1 | add[i]>(count+1)) next
vlines[add[i]]=vlines[add[i]]+1
}
}
}
if(!is.null(del)){
if(is.numeric(del)){
for(i in 1:length(del)){
if(del[i]<1 | del[i]>(count+1)) next
if(vlines[del[i]]>0) vlines[del[i]]=vlines[del[i]]-1
}
}
}
newalign=vline2align(align,vlines)
z$align=newalign
z
}
#' Add or delete horizontal lines in a ztable
#'
#' @param z An object of ztable
#' @param type An integer or one of c("none","all")
#' @param add An integer vector indicating rows where horizontal lines are added
#' @param del An integer vector indicating rows where horizontal lines are deleted
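#' @examples
#' ## a minimal sketch: draw a horizontal line under every row of the table body
#' z <- ztable(head(mtcars))
#' z <- hlines(z, type = "all")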
#' @export
hlines=function(z,type=NULL,add=NULL,del=NULL){
if(is.null(type) & is.null(add) & is.null(del)) {
cat("\nhlines : add or delete horizontal lines to a ztable\n
Usage: type must be one of these or NULL: 0-1 or \"none\",\"all\"\n
add and del: An integer vector indicating position to add or delete horizontal line(s)\n")
return(z)
}
count=nrow(z$x)
if(!is.null(z$hline.after)) result=z$hline.after
else result=c(-1,0,count)
if(!is.null(type)) {
if(!is.numeric(type)) {
if(toupper(type) == "NONE") hltype=0
else if(toupper(type) == "ALL") hltype=1
else return(z)
}
if((type>=0) & (type<=1)) hltype=type
if(hltype==0) result=c(-1,0,count)
else result=c(-1,0,1:count)
}
if(!is.null(add)){
if(is.numeric(add)){
for(i in 1:length(add)) {
result=c(result,add)
}
}
}
if(!is.null(del)){
if(is.numeric(del)){
result1=c()
for(i in 1:length(result)){
if(!(result[i] %in% del)) result1=c(result1,result[i])
}
result=result1
}
}
z$hline.after=result
z
}
#' Make a latex "align" from a string and vertical line specifier
#'
#' @param align A character string indicating align of latex table
#' @param vlines An integer vector indicating vertical line position
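#' @examples
#' ## single rules at the outer edges of a three-column table: returns "|ccc|"
#' vline2align("ccc", c(1, 0, 0, 1))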
#' @export
vline2align=function(align,vlines){
newalign=c()
for(i in 1:nchar(align)) {
if(vlines[i]>0) for(j in 1:vlines[i]) newalign=c(newalign,"|")
newalign=c(newalign,substr(align,i,i))
}
last=vlines[length(vlines)]
if(last>0) for(j in 1:last) newalign=c(newalign,"|")
temp=newalign[1]
if(length(newalign)>1)
for(i in 2:length(newalign)) {
temp=paste(temp,newalign[i],sep="")
}
temp
}
#' Count the vertical column lines from an align string in Latex format
#'
#' @param align A string of align in Latex format
#' @return A numeric vector giving the number of vertical lines at each column boundary
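#' @examples
#' ## one rule before column 1, two before column 2, none before column 3,
#' ## and one after the last column: returns c(1, 2, 0, 1)
#' align2lines("|c||cc|")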
#' @export
align2lines=function(align){
result=c()
length=nchar(align)
count=0
number=alignCount(align)
for(i in 1:length){
temp=substr(align,1,1)
if(temp=="|") {
count=count+1
if(i==length) result=c(result,count)
}
else{
result=c(result,count)
count=0
}
align=substr(align,2,nchar(align))
}
if(length(result)==number) result=c(result,0)
result
}
#' Make a character string indicating the alignment of components of table.
#'
#' @param z An object of ztable
#' @export
getNewAlign=function(z){
#cat("z$align=",z$align,"\n")
if(is.null(z$cgroup)) return(z$align)
lines=align2lines(z$align)
exAlign=str_remove_all(z$align,fixed("|"))
ncount=ncol(z$x)
addrow=ifelse(z$include.rownames,1,0)
colCount=colGroupCount(z)
result=c()
start=1+addrow
# Add column group align "c" if lines
for(i in 1:length(colCount)){
#cat("start=",start,"stop=",colCount[i]+addrow,",")
result=paste(result,substr(exAlign,start=start,stop=(colCount[i]+addrow)),sep="")
#cat("i=",i,",start=",start,"stop=",(colCount[i]+addrow),",result=",result)
start=colCount[i]+1+addrow
#cat(",line[start]=",start,"\n")
if(lines[start]==0) result=paste(result,"c",sep="")
#cat("result=",result,"\n")
}
if(colCount[length(colCount)]<ncount)
result=paste(result,substr(exAlign,start=start,stop=nchar(z$align)),sep="")
newlines=c()
for(i in 1:length(lines)){
if(i==1) newlines=lines[1]
else newlines=c(newlines,lines[i])
if((i-1) %in% colCount[-length(colCount)])
if(lines[i+1]==0) newlines=c(newlines,0)
}
temp=c()
for(i in 1:length(newlines)){
if(newlines[i]>0) for(j in 1:newlines[i]) temp=paste(temp,"|",sep="")
if(i>nchar(result)) break
temp=paste(temp,substr(result,start=i,stop=i),sep="")
}
#temp=paste(temp,"c",sep="")
temp
}
#' Print html style
#' @param z An object of ztable
#' @export
myhtmlStyle=function(z){
if(is.null(z$family)) family="times"
else family=z$family
cat("<head>")
cat("<style>
table {
font-family:",family,";\n")
cat("color: ",z$color,";\n")
#cat("border: ",z$color," 1px solid;\n")
cat("text-align: right;}
th {
padding: 1px 1px 5px 5px;
}
td {
padding: 1px 1px 5px 5px; }
</style>")
cat("</head>")
}
#' Print HTML head if ztable object a has a colgroup
#'
#' @param z An object of ztable
#' @export
printHTMLHead=function(z){
if(is.null(z$cgroup)) return()
if(is.null(z$n.cgroup)) return()
#colCount=colGroupCount(z)
ncount=ncol(z$x)
addrow=ifelse(z$include.rownames,1,0)
cGroupSpan=cGroupSpan(z)
cGroupSpan
totalCol=totalCol(z)
totalCol
vlines=align2lines(z$align)
for(i in 1:length(z$cgroup)){
cat("<tr>\n")
if(z$include.rownames) {
cat("<td style=\"")
if(i==1) cat("border-top: 2px solid gray; border-bottom: hidden;")
cat(paste(" border-left: ",vlines[1],"px solid black;",sep=""))
if(z$cgroupbg[[i]][1]!="white")
cat(paste("background-color: ",name2rgb(z$cgroupbg[[i]][1]),sep=""))
if(z$cgroupcolor[[i]][1]!=z$color)
cat(paste("color: ",name2rgb(z$cgroupcolor[[i]][1]),";",sep=""))
cat("\"> </td>\n")
}
colSum=1
for(j in 1:length(z$cgroup[[i]])) {
if(is.na(z$cgroup[[i]][j])) {
cat("<td colspan=\"",cGroupSpan[[i]][j],"\" align=\"center\" ")
cat("style=\"")
if(i==1) cat("border-top: 2px solid gray;")
cat("border-bottom: hidden;")
cat(paste(" border-left: ",vlines[colSum+1],"px solid black;",sep=""))
colSum=colSum+cGroupSpan[[i]][j]
#if(colSum==ncol(z$x)+1)
cat(paste("border-right:",vlines[colSum+1],"px solid black;",sep=""))
if(z$cgroupbg[[i]][j+1]!="white")
cat(paste("background-color: ",name2rgb(z$cgroupbg[[i]][j+1]),";",sep=""))
if(z$cgroupcolor[[i]][j+1]!=z$color)
cat(paste("color: ",name2rgb(z$cgroupcolor[[i]][j+1]),";",sep=""))
cat(paste("\"></td>\n",sep=""))
} else {
cat("<td colspan=\"",cGroupSpan[[i]][j],"\" align=\"center\" ")
if(z$colnames.bold) cat("style=\"font-weight: bold;")
else cat("style=\"font-weight: normal;")
if(i==1) cat("border-top: 2px solid gray;")
if(z$cgroup[[i]][j]!="") cat(" border-bottom: 1px solid gray;")
else cat(" border-bottom: hidden;")
cat(paste(" border-left: ",vlines[colSum+1],"px solid black;",sep=""))
colSum=colSum+cGroupSpan[[i]][j]
if(colSum==ncol(z$x)+1)
cat(paste("border-right:",vlines[colSum+1],"px solid black;",sep=""))
if(z$cgroupbg[[i]][j+1]!="white")
cat(paste("background-color: ",name2rgb(z$cgroupbg[[i]][j+1]),";",sep=""))
if(z$cgroupcolor[[i]][j+1]!=z$color)
cat(paste("color: ",name2rgb(z$cgroupcolor[[i]][j+1]),";",sep=""))
cat(paste("\">",z$cgroup[[i]][j],"</td>\n",sep=""))
}
#if((j < ncol(z$cgroup)) & ((colSum+j-1)<totalCol)) {
if(j < length(z$cgroup[[i]])) {
result=colSum+1
if(result<=length(vlines)) {
if(vlines[result]==0){
cat("<td style=\"")
if(i==1) cat("border-top: 2px solid gray;")
cat("border-bottom: hidden\"> </td>\n")
}
}
}
}
cat("</tr>\n")
}
}
#' Print an object of class "ztable" as an HTML table
#'
#' @param z An object of class "ztable"
#' @param xdata A formatted data.frame
ztable2html=function(z,xdata){
ncount=ncol(z$x)
addrow=ifelse(z$include.rownames,1,0)
# caption position
if(z$caption.position=="r") cposition="right"
else if(z$caption.position=="l") cposition="left"
else cposition="center"
fontsize=ifelse(z$size>=5,11+(z$size-5)*2,10-(4-z$size))
headingsize=fontsize-2
rgroupcount=0
printrgroup=1
if(!is.null(z$n.rgroup)){
if(length(z$n.rgroup)>1) {
for(i in 2:length(z$n.rgroup)) {
printrgroup=c(printrgroup,printrgroup[length(printrgroup)]+z$n.rgroup[i-1])
}
}
rgroupcount=1
}
NewAlign=getNewAlign(z)
totalCol=totalCol(z)
colCount=colGroupCount(z)
# table position
if(z$position=="flushleft") tposition="left"
else if(z$position=="flushright") tposition="right"
else tposition="center"
#cat("<table class='gmisc_table'")
myhtmlStyle(z)
cat("<table ")
cat(paste("align=\"",tposition,"\" style=\"border-collapse: collapse; caption-side:",
z$caption.placement,"; font-size:",as.integer(fontsize),"pt;\">",sep=""))
cat(paste("<caption style=\"text-align:",cposition,";",sep=""))
if(z$caption.bold) cat("font-weight: bold")
cat(paste("\">",z$caption,"</caption>",sep=""))
if((z$show.heading==TRUE) & (!is.null(attr(z$x,"heading")))) {
head=attr(z$x,"heading")
for(i in 1:length(head)) {
if(nchar(head[i])<1) next
cat(paste("<tr>\n<td style=\"border-top: hidden; font-size: ",
as.integer(headingsize),"pt; padding: 0px 0px;\" colspan=\"",ncount+addrow,
"\" align=\"left\" >",head[i],sep=""))
cat("</td>\n</tr>\n")
}
}
vlines=align2lines(z$align)
printtop=1
if(!is.null(z$cgroup)) {
printHTMLHead(z)
printtop=0
}
if(z$include.colnames) {
cat("<tr>\n")
subcolnames=ifelse(is.null(z$subcolnames),0,1)
if(z$include.rownames) {
result=1
if(!is.null(isspanCol(z,1,1)))
cat(paste("<th colspan=\"",isspanCol(z,1,1),"\"",sep=""))
else if(!is.null(isspanRow(z,1,1))){
result=isspanRow(z,1,1)
if(result>0) cat(paste("<th rowspan=\"",result,"\"",sep=""))
} else cat("<th ")
cat(paste("style=\"border-left: ",vlines[1],
"px solid black;",
"background-color: ",name2rgb(z$cellcolor[1,1]),";",sep=""))
if(printtop) cat("border-top: 2px solid gray;")
if(subcolnames==0) cat("border-bottom: 1px solid gray;")
else cat("border-bottom: hidden;")
cat(paste("\"> </th>\n",sep=""))
}
colpos=align2html(z$align)
for(i in 1:ncol(z$x)) {
result=1
if(!is.null(isspanCol(z,1,(i+1)))){
result=isspanCol(z,1,(i+1))
if(result>0) cat(paste("<th colspan=\"",result,"\" ",sep=""))
else if(result==0) next
else cat("<th ")
} else if(!is.null(isspanRow(z,1,(i+1)))){
result=isspanRow(z,1,(i+1))
if(result>0) cat(paste("<th rowspan=\"",result,"\" ",sep=""))
else cat("<th ")
} else cat("<th ")
# the opening <th is emitted above exactly once, so only the attributes follow
if(result!=0){
drawbottom=0
if((subcolnames==1)) {
if(is.na(z$subcolnames[i])){
cat("rowspan=\"2\" ")
drawbottom=1
}
}
cat(paste("align=\"center\" ",sep=""))
if(z$colnames.bold) cat("style=\"font-weight: bold;")
else cat("style=\"font-weight: normal;")
cat(paste("border-left: ",vlines[i+1],"px solid black;",sep=""))
if((i==ncol(z$x)) & (length(vlines)>ncol(z$x)+1))
cat(paste("border-right:",vlines[i+2],"px solid black;",sep=""))
if((subcolnames==0) | (subcolnames+drawbottom==2))
cat("border-bottom: 1px solid gray;")
else cat("border-bottom: hidden;")
if(printtop) cat("border-top: 2px solid gray;")
if(z$cellcolor[1,i+1]!="white")
cat(paste("background-color: ",name2rgb(z$cellcolor[1,i+1]),";",sep=""))
if(z$frontcolor[1,i+1]!=z$color)
cat(paste("color: ",name2rgb(z$frontcolor[1,i+1]),";",sep=""))
cat(paste("\">",colnames(z$x)[i],"</th>\n",sep=""))
if(i %in% colCount[-length(colCount)]) {
if(vlines[i+2]==0){
if(subcolnames==0) cat("<th style=\"border-bottom: 1px solid gray;")
else cat("<th style=\"border-bottom: hidden;")
if(printtop) cat("border-top: 2px solid gray; ")
if((z$cellcolor[1,i+1]!="white") & (z$cellcolor[1,i+1]==z$cellcolor[1,i+2]))
cat("background-color: ",name2rgb(z$cellcolor[1,i+1]),";")
cat("\"> </th>\n")
}
}
}
}
cat("</tr>\n")
printtop=0
if(subcolnames){
cat("<tr>\n")
if(addrow) {
cat(paste("<th style=\"border-left: ",vlines[1],
"px solid black;","border-bottom: 1px solid gray;",
"background-color: ",name2rgb(z$cellcolor[1,1]),";",sep=""))
cat(paste("\"> </th>\n",sep=""))
}
for(i in 1:length(z$subcolnames)){
if(is.na(z$subcolnames[i])) {
if(vlines[i+2]==0){
if(i!=length(z$subcolnames)){
cat("<th style=\"border-bottom: 1px solid gray;")
#if(printtop) cat("border-top: 2px solid gray;")
if((z$cellcolor[1,i+1]!="white") & (z$cellcolor[1,i+1]==z$cellcolor[1,i+2]))
cat("background-color: ",name2rgb(z$cellcolor[1,i+1]),";")
cat("\"> </th>\n")
}
}
next
}
cat("<th align=\"center\" ")
if(z$colnames.bold) cat("style=\"font-weight: bold;")
else cat("style=\"font-weight: normal;")
cat(paste("border-left: ",vlines[i+1],"px solid black;",sep=""))
if((i==ncol(z$x)) & (length(vlines)>ncol(z$x)+1))
cat(paste("border-right:",vlines[i+2],"px solid black;",sep=""))
cat("border-bottom: 1px solid gray;")
if(z$cellcolor[1,i+1]!="white")
cat(paste("background-color: ",name2rgb(z$cellcolor[1,i+1]),";",sep=""))
if(z$frontcolor[1,i+1]!=z$color)
cat(paste("color: ",name2rgb(z$frontcolor[1,i+1]),";",sep=""))
cat(paste("\">",z$subcolnames[i],"</th>\n",sep=""))
if(i %in% colCount[-length(colCount)]) {
if(vlines[i+2]==0){
cat("<th style=\"border-bottom: 1px solid gray;")
#if(printtop) cat("border-top: 2px solid gray;")
if((z$cellcolor[1,i+1]!="white") & (z$cellcolor[1,i+1]==z$cellcolor[1,i+2]))
cat("background-color: ",name2rgb(z$cellcolor[1,i+1]),";")
cat("\"> </th>\n")
}
}
}
cat("</tr>\n")
}
}
colpos=align2html(z$align)
addrow=ifelse(z$include.rownames,1,0)
rgroupprinted=0
for(i in 1:nrow(z$x)){
if(rgroupcount>0) {
if(i %in% printrgroup) {
rgroupprinted=1
if(is.null(z$cspan.rgroup)){
temp=paste("<tr>\n<td colspan=\"",totalCol,
"\" align=\"left\""," style=\"font-weight: bold;",sep="")
if(z$rgroupbg[rgroupcount]!="white")
temp=paste(temp,"background-color:",name2rgb(z$rgroupbg[rgroupcount]),";",sep="")
if(z$rgroupcolor[rgroupcount]!="black")
temp=paste(temp,"color:",name2rgb(z$rgroupcolor[rgroupcount]),";",sep="")
temp=paste(temp," border-left: ",vlines[1],"px solid black; ",sep="")
temp=paste(temp,"border-right:",vlines[ncol(z$x)+2],"px solid black;",sep="")
temp=paste(temp,"border-bottom: 1px solid black;",sep="")
temp=paste(temp,"border-top: 1px solid black;",sep="")
temp=paste(temp,"\">",z$rgroup[rgroupcount],"</td>\n",sep="")
}
else {
if(z$cspan.rgroup==1) {
temp=paste("<tr>\n<td align=\"left\""," style=\"font-weight: bold;",sep="")
# if(z$colcolor[1]!="white")
# temp=paste(temp,"background-color:",name2rgb(z$colcolor[1]),";",sep="")
if(z$rgroupbg[rgroupcount]!="white")
temp=paste(temp,"background-color:",name2rgb(z$rgroupbg[rgroupcount]),";",sep="")
if(z$rgroupcolor[rgroupcount]!="black")
temp=paste(temp,"color:",name2rgb(z$rgroupcolor[rgroupcount]),";",sep="")
temp=paste(temp," border-left: ",vlines[1],"px solid black; ",sep="")
#temp=paste(temp,"border-bottom: 1px solid black;",sep="")
if(i!=1) temp=paste(temp,"border-top: hidden; ",sep="")
if(!is.null(z$hline.after)){
if((i-1) %in% z$hline.after)
temp=paste(temp,"border-top: 1px solid black;")
}
temp=paste(temp,"\">",z$rgroup[rgroupcount],"</td>\n",sep="")
for(j in 1:(ncount+addrow-1)){
temp1=paste("<td style=\"border-left: ",
vlines[j+1],"px solid black; ",sep="")
if(!is.null(z$hline.after)){
if((i-1) %in% z$hline.after)
temp1=paste(temp1,"border-top: 1px solid black;")
}
else if(i!=1) temp1=paste(temp1,"border-top: hidden; ",sep="")
if((j==ncol(z$x)) & (length(vlines)>ncol(z$x)+1))
temp1=paste(temp1,"border-right:",vlines[j+2],"px solid black;",sep="")
if(!is.null(z$colcolor)) {
if(z$colcolor[j+1]!="white")
temp1=paste(temp1,"background-color:",
name2rgb(z$colcolor[j+1])," ",sep="")
}
temp1=paste(temp1,"\"></td>\n",sep="")
if(is.null(isspanRow(z,i+1,j+1))) temp=paste(temp,temp1,sep="")
else if(isspanRow(z,i+1,j+1)>0) temp=paste(temp,temp1,sep="")
if(!is.null(colCount)){
if(j %in% colCount[-length(colCount)]) {
if(vlines[j+2]==0){
#if((z$cellcolor[i+1,j+1]!="white")&(z$cellcolor[i+1,j+1]==z$cellcolor[i+1,j+2]))
# temp=paste(temp,"<td style=\"background-color: ",
# name2rgb(z$cellcolor[i+1,j+1]),"\"></td>\n",
# sep="")
#else temp=paste(temp,"<td></td>\n",sep="")
temp=paste(temp,"<td",sep="")
if(i!=1) temp=paste(temp,"style=\"border-top: hidden;\"")
temp=paste(temp,"></td>\n",sep="")
}
}
}
}
} else {
if(z$cspan.rgroup<1 | z$cspan.rgroup>(ncount+addrow))
z$cspan.rgroup=ncount+addrow
temp=paste("<tr>\n<td colspan=\"",z$cspan.rgroup,
"\" align=\"left\""," style=\"font-weight: bold;",sep="")
# if(z$colcolor[1]!="white")
# temp=paste(temp,"background-color:",name2rgb(z$colcolor[1]),";",sep="")
if(z$rgroupbg[rgroupcount]!="white")
temp=paste(temp,"background-color:",name2rgb(z$rgroupbg[rgroupcount]),";",sep="")
if(z$rgroupcolor[rgroupcount]!="black")
temp=paste(temp,"color:",name2rgb(z$rgroupcolor[rgroupcount]),";",sep="")
temp=paste(temp," border-left: ",vlines[1],"px solid black; ",sep="")
temp=paste(temp,"border-bottom: 1px solid black;",sep="")
temp=paste(temp,"border-top: 1px solid black;",sep="")
if(!is.null(z$hline.after)){
if((i-1) %in% z$hline.after)
temp=paste(temp,"border-top: 1px solid black;")
}
temp=paste(temp,"\">",z$rgroup[rgroupcount],"</td>\n",sep="")
if(z$cspan.rgroup<(ncount+addrow)) {
for(j in (z$cspan.rgroup):(ncount+addrow-1)) {
temp1=paste("<td style=\"border-left: ",
vlines[j+1],"px solid black; ",sep="")
if((j==ncol(z$x)) & (length(vlines)>ncol(z$x)+1))
temp1=paste(temp1,"border-right:",vlines[j+2],"px solid black;",sep="")
#temp1=paste(temp1,"border-bottom: 1px solid black;",sep="")
#temp1=paste(temp1,"border-top: 1px solid black;",sep="")
if(!is.null(z$hline.after)){
if((i-1) %in% z$hline.after)
temp1=paste(temp1,"border-top: 1px solid black;")
}
else if(i!=1) temp1=paste(temp1,"border-top: hidden; ",sep="")
if(!is.null(z$colcolor)) {
if(z$colcolor[j+1]!="white")
temp1=paste(temp1,"background-color:",
name2rgb(z$colcolor[j+1])," ",sep="")
}
temp1=paste(temp1,"\"></td>\n",sep="")
if(is.null(isspanRow(z,i+1,j+1))) temp=paste(temp,temp1,sep="")
else if(isspanRow(z,i+1,j+1)>0) temp=paste(temp,temp1,sep="")
if(!is.null(colCount)){
if(j %in% colCount[-length(colCount)]) {
if(vlines[j+2]==0) {
#if((z$cellcolor[i+1,j+1]!="white")&(z$cellcolor[i+1,j+1]==z$cellcolor[i+1,j+2]))
# temp=paste(temp,"<td style=\"background-color: ",
# name2rgb(z$cellcolor[i+1,j+1]),"\"></td>\n",
# sep="")
#else temp=paste(temp,"<td></td>\n",sep="")
if(i!=1) temp=paste(temp,"<td style=\"border-top: hidden;\"",sep="")
else temp=paste(temp,"<td",sep="")
temp=paste(temp,"></td>\n")
}
}
}
}
}
}
}
cat(temp,"</tr>\n")
rgroupcount=rgroupcount+1
}
}
bcolor="white"
#if(i %in% z$prefix.rows)
# if(is.numeric(z$zebra)) bcolor=z$zebra.color[i]
# cat("<tr style=\"background-color:",name2rgb(bcolor),"\">")
cat("<tr>\n")
if(z$include.rownames) {
result=1
if(!is.null(isspanCol(z,(i+1),1)))
cat(paste("<td colspan=\"",isspanCol(z,i+1,1),"\"",sep=""))
else if(!is.null(isspanRow(z,(i+1),1))){
result=isspanRow(z,(i+1),1)
if(result>0) cat(paste("<td rowspan=\"",result,"\"",sep=""))
} else cat("<td ")
if(result>0){
#cat("result=",result,"\n")
cat(paste(" style=\"border-left: ",vlines[1],"px solid black; ",sep=""))
if(i==1 & printtop) cat("border-top: 2px solid gray;")
else if(i!=1 | rgroupprinted) cat("border-top: hidden;")
if(!is.null(z$hline.after)){
if((i-1) %in% z$hline.after)
if(!(i %in% printrgroup)) cat("border-top: 1px solid black;")
}
if(z$cellcolor[i+1,1]!="white")
cat(paste("background-color: ",name2rgb(z$cellcolor[i+1,1]),"; ",sep=""))
if(z$frontcolor[i+1,1]!=z$color)
cat(paste("color: ",name2rgb(z$frontcolor[i+1,1]),"; ",sep=""))
cat(paste("\">",rownames(z$x)[i],"</td>\n",sep=""))
}
}
for(j in 1:ncount) {
backcolor=NULL # reset per cell; only the non-span branch below sets it
if(is.null(isspanCol(z,(i+1),(j+1)))){
if(is.null(isspanRow(z,(i+1),(j+1)))){
result=-1
cat("<td ")
} else {
result=isspanRow(z,(i+1),(j+1))
if(result > 0) {
cat("<td rowspan=\"",result,"\" ")
}
}
if((result==-1)|(result>1)){
cat(paste("align=\"",colpos[j+addrow],"\" style=\"border-left: ",
vlines[j+1],"px solid black;",sep=""))
if((j==ncol(z$x)) & (length(vlines)>ncol(z$x)+1))
cat(paste("border-right:",vlines[j+2],"px solid black;",sep=""))
if(i==1 & printtop) cat("border-top: 2px solid gray;")
else if(i!=1 | rgroupprinted) cat("border-top: hidden;")
if(!is.null(z$hline.after)){
if((i-1) %in% z$hline.after)
if(!(i %in% printrgroup)) cat("border-top: 1px solid black;")
}
if(z$cellcolor[i+1,j+1]!="white")
cat(paste("background-color: ",name2rgb(z$cellcolor[i+1,j+1]),";",sep=""))
if(z$frontcolor[i+1,j+1]!=z$color)
cat(paste("color: ",name2rgb(z$frontcolor[i+1,j+1]),";",sep=""))
cat("\">")
cat(paste(xdata[i,j],"</td>\n",sep=""))
}
if(j %in% colCount[-length(colCount)]) {
if(vlines[j+2]==0) {
backcolor=NULL
if(!is.null(z$rowcolor)){
if(z$rowcolor[i+1]!="white") backcolor=z$rowcolor[i+1]
}
if(is.null(backcolor)){
if((z$cellcolor[i+1,j+1]!="white")&(z$cellcolor[i+1,j+1]==z$cellcolor[i+1,j+2]))
backcolor=z$cellcolor[i+1,j+1]
}
cat("<td style=\"")
if(i==1 & printtop) cat("border-top: 2px solid gray;")
else if(i!=1 | rgroupprinted) cat("border-top: hidden;")
if(!is.null(backcolor)) cat(" background-color: ",name2rgb(backcolor),";")
cat("\"></td>\n")
}
}
} else {
result=isspanCol(z,(i+1),(j+1))
if(result>0) {
width=spanColWidth(z,(i+1),(j+1))
cat(paste("<td colspan=\"",result,"\" align=\"",colpos[j+addrow],"\" style=\"border-left: ",
vlines[j+1],"px solid black;",sep=""))
#if((j==ncol(z$x)) & (length(vlines)>ncol(z$x)+1))
cat(paste("border-right:",vlines[j+width+1],"px solid black;",sep=""))
if(i==1 & printtop) cat("border-top: 2px solid gray;")
else if(i!=1 | rgroupprinted) cat("border-top: hidden;")
if(!is.null(z$hline.after)){
if((i-1) %in% z$hline.after)
if(!(i %in% printrgroup)) cat("border-top: 1px solid black;")
}
if(z$cellcolor[i+1,j+1]!="white")
cat(paste("background-color: ",name2rgb(z$cellcolor[i+1,j+1]),";",sep=""))
if(z$frontcolor[i+1,j+1]!=z$color)
cat(paste("color: ",name2rgb(z$frontcolor[i+1,j+1]),";",sep=""))
cat("\">")
cat(paste(xdata[i,j],"</td>\n",sep=""))
if(isGroupCol(j,result,colCount)) {
if(vlines[j+width+1]==0) {
cat("<td style=\"")
if(i==1 & printtop) cat("border-top: 2px solid gray;")
else if(i!=1 | rgroupprinted) cat("border-top: hidden;")
if(!is.null(backcolor)) cat(" background-color: ",name2rgb(backcolor),";")
cat("\"></td>\n")
}
}
}
}
}
cat("</tr>\n")
}
if((z$show.footer!=TRUE) | (is.null(attr(z$x,"footer")))) footer=""
else footer=attr(z$x,"footer")
cat("<tr>\n")
cat(paste("<td colspan=\"",totalCol,
"\" align=\"left\" style=\"font-size:",as.integer(headingsize),
"pt ;border-top: 1px solid black; border-bottom: hidden;\">",footer,"</td>\n",sep=""))
cat("</tr>\n")
cat("</table>\n")
}
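# ztable2html() is not meant to be called directly; print(z, type="html")
# dispatches here via print_ztable(), e.g.
#   print(ztable(head(iris)), type="html")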
#' Print an object of class "ztable" via rstudioapi::viewer
#'
#' @param z An object of ztable
#' @importFrom rstudioapi viewer
#' @importFrom utils browseURL
ztable2viewer=function(z){
temp.f=tempfile(fileext=".html")
sink(temp.f)
cat(paste("<html>",
"<head>",
"<meta http-equiv=\"Content-type\" content=\"text/html;charset=UTF-8\">",
"</head>",
"<body>",
"<div style=\"margin: 0 auto; display: table; margin-top: 1em;\">",
sep="\n"))
print(z,type="html")
cat(paste("</div>","</body>","</html>",sep="\n"))
sink()
viewer <- getOption("viewer")
if (!is.null(viewer)){
rstudioapi::viewer(temp.f)
} else{
if(is.character(temp.f)) utils::browseURL(temp.f)
}
}
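# e.g. print(ztable(head(iris)), type="viewer") renders the table in the
# RStudio viewer pane, falling back to the default browser when no viewer
# is available.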
| /scratch/gouwar.j/cran-all/cranData/ztable/R/print.ztable.html.R |
#' Definition of LaTeX Colors
#'
#' A dataset containing color names, hex triplets, and LaTeX color definitions
#'
#' To use these color definitions, the LaTeX package "color" should be included
#' in your preamble.
#'
#'@format A data frame with 749 rows and 3 variables:
#'\describe{
#' \item{name}{Color name}
#' \item{rgb}{Hex triplet of color}
#' \item{definition}{Latex command of color definition}
#' }
#'
"zcolors"
| /scratch/gouwar.j/cran-all/cranData/ztable/R/zcolors.R |
#' Exporting an R object to an object of class "ztable"
#'
#' Exporting an R object to an object of class "ztable"
#' @param x An R object, mainly a data.frame
#'@param digits Numeric vector of length equal to one (in which case it will be
#' replicated as necessary) or to the number of columns of the resulting table
#' @param ... arguments to be passed to \code{\link{ztable_sub}}
#' @export
ztable=function(x,digits=NULL,...) UseMethod("ztable")
#'@describeIn ztable Default method of ztable
#'@export
ztable.default=function(x,digits=NULL,...){
cat(paste("\n Sorry, Currently function ztable() cannot handle",
" the object of class ",class(x),"!\n",sep=""))
invisible()
}
#'@describeIn ztable Makes a ztable for class 'data.frame'
#'@export
ztable.data.frame=function(x,digits=NULL,...){
z=ztable_sub(x,digits=digits,...)
class(z) <-c("ztable")
z
}
#' Exporting "data.frame" to an object of class "ztable"
#'
#' Exporting "data.frame" to an object of class "ztable"
#'@param x A data.frame
#'@param family Font family. Default value is NULL. Possible values include "serif", "times" and "tt".
#'@param size An integer from 1 to 10 indicating font size= c("tiny","scriptsize",
#' "footnotesize","small","normalsize","large","Large","LARGE","huge","Huge")
#' respectively. Default value is 5 (= "normalsize").
#'@param color A character indicating color of ztable
#'@param tablewidth A numeric value indicating desired table width as a ratio to linewidth.
#' This value is only useful when caption is longer than table length.
#' Default value is 0.3.
#'@param type character indicating the output format of ztable: "latex", "html" or "viewer".
#' Default value is "latex"
#'@param include.rownames A logical value whether or not include rownames in the table
#' Default value is TRUE.
#'@param placement The table will have placement given by placement where placement
#' must be NULL or contain only elements of {"h","t","b","p","!","H"}.
#' Default value is "!hbtp".
#'@param position The table will be placed at the center of the paper
#' if position is "center" or "c", and at the left side of the paper
#' if it equals "left" or "l", and at the right side of the paper
#' if it equals "right" or "r". The position is translated to specific
#' latex environments such as "flushright" or "flushleft" or "center"
#' (provided as a character vector) will enclose the tabular environment.
#' Default value is "center".
#'@param show.heading A logical value whether or not to include headings in the table.
#' Default value is TRUE.
#'@param show.footer A logical value whether or not to include the footer in the table.
#' Default value is TRUE.
#'@param caption A character
#'@param caption.placement The caption will be placed at the top of the table
#' if caption.placement is "top" and at the bottom of the table
#' if it equals "bottom". Default value is "top".
#'@param caption.position The caption will be placed at the center of the table
#' if caption.position is "center" or "c", and at the left side of the table
#' if it equals "left" or "l", and at the right side of the table
#' if it equals "right" or "r". Default value is "center".
#'@param caption.bold whether or not use bold font for caption
#'@param align Character vector : nchar equal to the number of columns of the
#' resulting table indicating the alignment of the corresponding columns.
#'@param digits Numeric vector of length equal to one (in which case it will be
#' replicated as necessary) or to the number of columns of the resulting table
#'@param display Character vector of length equal to the number of columns of the
#' resulting table indicating the format for the corresponding columns.
#' Since the row names are printed in the first column, the length of display
#' is one greater than ncol(x) if x is a data.frame. These values are passed
#' to the formatC function. Use "d" (for integers), "f", "e", "E", "g", "G",
#' "fg" (for reals), or "s" (for strings). "f" gives numbers in the usual
#' xxx.xxx format; "e" and "E" give n.ddde+nn or n.dddE+nn (scientific format);
#' "g" and "G" put x[i] into scientific format only if it saves space to do so.
#' "fg" uses fixed format as "f", but digits as number of significant digits.
#' Note that this can lead to quite long result strings. Default value is NULL.
#'@param sidewaystable Logical value whether or not set the tabular environment=
#' "sidewaystable". Requires Latex "rotating" package in preamble.
#' Default value is FALSE.
#'@param longtable Logical value whether or not set the tabular environment=
#' "longtable". Requires Latex "longtable" package in preamble.
#' Default value is FALSE.
#'@param wraptable Logical value whether or not set the tabular environment=
#' "wraptable". Requires Latex "wrapfig" package in preamble.
#' Default value is FALSE.
#'@param rotate Logical value whether or not set the tabular environment=
#' "rotate". No special arrangement is made to find space for the result.
#' Requires Latex "rotating" package in preamble.
#' If TRUE, requires the rotate angle(counterclockwise).
#' Default value is FALSE.
#'@param turn Logical value whether or not set the tabular environment=
#' "turn". In this environment, Latex leaves space for the rotated table.
#' Requires Latex "rotating" package in preamble.
#' If TRUE, requires the rotate angle.
#' Default value is FALSE.
#'@param angle An integer indicating the rotation angle in degrees; range -180 to 180.
#' Default value is 0.
#'@param wraptablewidth An integer indicating the wraptable width in centimeters. Default is 12.
#'@param tabular Logical value whether or not to print only the tabular environment.
#' If TRUE, the tabular environment is printed without a surrounding floating table environment. Default value is FALSE.
#'@param label Character vector of length 1 containing the LaTeX label or HTML anchor.
#' Set to NULL to suppress the label. Default value is NULL.
#'@param hline.after A vector of numbers between -1 and "nrow(x)", inclusive,
#' indicating the rows after which a horizontal line should appear.
#' If NULL is used no lines are produced. Default value is c(-1,0,nrow(x))
#' which means draw a line before and after the columns names and at the
#' end of the table. Repeated values are allowed.
#'@param booktabs Logical value. If TRUE, the toprule, midrule and bottomrule tags
#' from the LaTex "booktabs" package are used rather than hline for the
#' horizontal line tags. Requires Latex "booktabs" package in preamble.
#' Default value is TRUE.
#'@param prefix.rows A numeric vector contains the position of rows on which
#' extra Latex commands should be added as a prefix.
#'@param commands A character vector of the length 1 or same length of the nrow of
#' data.frame which contains the command that should be added as a prefix at
#' the specified rows. Default value is NULL, i.e. do not add commands.
#'@param top.command A character vector of the length 1 which contains the command
#' that should be added as a prefix at the colnames row.
#'@param zebra NULL or an integer of 0, 1, 2 or 3. The arguments zebra and zebra.color are
#' used to make a zebra striping table (a table with alternating background colors)
#' easily. A value of 1 sets the background color of all odd rows/columns to the color
#' specified by zebra.color. A value of 2 sets all even rows/columns. A value of 0 sets
#' the background colors of all rows/columns to the colors specified by zebra.color.
#' When zebra is 1 or 2, the parameters prefix.rows and commands are ignored.
#' When zebra=3, the background colors can be defined by the addRowColor, addColColor
#' and addCellColor functions.
#' Default is NULL.
#'@param zebra.color A color name or a numeric value indicating a pre-defined color.
#' When the parameter zebra is 0, 1 or 2 and zebra.color is NULL, zebra.color
#' is set to "platinum". Numeric values between 1 and 13 are converted to
#' predefined color names. The predefined color names are c("peach","peach-orange",
#' "peachpuff","peach-yellow","pear","pearl","peridot","periwinkle","pastelred",
#' "pastelgray"). Default is NULL.
#'@param zebra.colnames whether or not to use background colors in the column names row.
#' Default value is FALSE
#'@param zebra.rownames whether or not to use background colors in the row names column.
#' Default value is TRUE
#'@param zebra.type An integer of 0 or 1 or 2 or 3. A value of 1 sets background colors by row.
#' A value of 2 sets background colors by column. A value of 0 sets background colors of all cells.
#' A value of 3 sets background colors of cells specified with zebra.list.
#' Default value is 1.
#'@param zebra.list A list consists of y,x,color. zebra.list is used only when zebra.type=3.
#' zebra.list sets the cells specified with rows of vector "y" and columns of vector "x" with "color".
#' The y and x are integer vector indicating rows and columns. NA value of y or x indicating all columns or rows.
#' The color is an character vector consists of names of color.
#'@param colnames.bold whether or not to use a bold font for column names. Default value is FALSE.
#'@param include.colnames Logical. If TRUE the column names are printed. Default value is TRUE.
#'@param cgroup A character vector or matrix indicating names of column group. Default value is NULL
#'@param n.cgroup An integer vector or matrix indicating the numbers of columns included in each cgroup.
#' Default value is NULL
#'@param rgroup A character vector indicating names of row group. Default value is NULL
#'@param n.rgroup An integer vector indicating the numbers of rows included in each rgroup.
#' Default value is NULL
#'@param cspan.rgroup The number of columns that an rgroup should span. It spans by default all
#' columns but you may want to limit this if you have column colors that you want to retain.
#'@param pcol number of column displaying p value
#'@export
#'@examples
#' require(ztable)
#' x=head(iris)
#' ztable(x)
#' \dontrun{
#' ztable(x,size=3,caption="Table 1. ztable Test")
#' ztable(x,size=7,caption="Table 1. ztable Test",caption.position="l")
#' ztable(x,size=7,caption="Table 1. ztable Test",caption.placement="bottom",
#' caption.position="l")
#' fit=lm(mpg~.,data=mtcars)
#' ztable(fit)
#' data(USArrests)
#' pr1 <- prcomp(USArrests)
#' ztable(pr1)
#' ztable(summary(pr1))
#' require(survival)
#' data(colon)
#' attach(colon)
#' out <- glm(status ~ rx+obstruct+adhere+nodes+extent, data=colon, family=binomial)
#' ztable(out)
#' colon$TS = Surv(time,status==1)
#' out1=coxph(TS~rx+obstruct+adhere+differ+extent+surg+node4,data=colon)
#' ztable(out1)
#' ztable(head(mtcars),zebra=1)
#' ztable(head(mtcars),zebra=1,zebra.type=2)
#' }
ztable_sub=function(x,
family=NULL,
size=5, # normal size, range 1-10
color=getOption("ztable.color","black"),
tablewidth=0.3,
type=getOption("ztable.type","latex"),
include.rownames=getOption("ztable.include.rownames",TRUE),
placement="!hbtp",position="c",
show.heading=getOption("ztable.show.heading",TRUE),
show.footer=getOption("ztable.show.footer",TRUE),
caption=NULL,
caption.placement=getOption("ztable.caption.placement","top"),
caption.position=getOption("ztable.caption.position","c"),
caption.bold=getOption("ztable.caption.bold",FALSE),
align=NULL,digits=NULL,display=NULL,
sidewaystable=FALSE,
longtable=FALSE,
rotate=FALSE,
turn=FALSE,
angle=0,
wraptable=FALSE,wraptablewidth=12,
tabular=FALSE,
label=NULL,hline.after=NULL,
booktabs=getOption("ztable.booktabs",TRUE),
prefix.rows=NULL,commands=NULL,top.command=NULL,
zebra=getOption("ztable.zebra",NULL),
zebra.color=getOption("ztable.zebra.color",NULL),
zebra.type=getOption("ztable.zebra.type",1),
zebra.colnames=getOption("ztable.zebra.colnames",FALSE),
zebra.rownames=getOption("ztable.zebra.rownames",TRUE),
zebra.list=NULL,
colnames.bold=getOption("ztable.colnames.bold",FALSE),
include.colnames=getOption("ztable.include.colnames",TRUE),
cgroup=NULL,n.cgroup=NULL,
rgroup=NULL,n.rgroup=NULL,cspan.rgroup=NULL,
pcol=NULL){
ncount=ncol(x)
nrow=nrow(x)
cn=colnames(x)
if(identical(caption.placement,"bottom") | identical(caption.placement,"b"))
caption.placement="bottom"
else caption.placement="top"
if(identical(caption.position,"left")|identical(caption.position,"l"))
caption.position="l"
else if(identical(caption.position,"right")|identical(caption.position,"r"))
caption.position="r"
else caption.position="c"
if(identical(position,"left")|identical(position,"l"))
position="flushleft"
else if(identical(position,"right")|identical(position,"r"))
position="flushright"
else position="center"
addrow=ifelse(include.rownames,1,0)
logicals <- sapply(x, is.logical)
x[logicals] <- lapply(x[logicals], as.character)
characters <- sapply(x, is.character)
factors <- sapply(x, is.factor)
ints <- sapply(x, is.integer)
if(is.null(align)){
y <- c("r", c("r","l")[(characters | factors) + 1])
if(include.rownames) for(i in 1:length(y)) align=paste(align,y[i],sep="")
else for(i in 2:length(y)) align=paste(align,y[i],sep="")
}
if(is.null(digits)) digits=c(0,rep(2,ncol(x)))
if(length(digits)==1) digits=rep(digits,ncount+1)
if (is.null(display)) {
display <- rep("f", ncol(x))
display[ints] <- "d"
display[characters | factors] <- "s"
display <- c("s", display)
}
if(!is.null(zebra)) {
if(zebra==0){
prefix.rows=1:nrow(x)
if(is.null(zebra.color)) zebra.color=1:10
} else if(zebra==1) {
prefix.rows=seq(2,nrow(x),by=2)
if(is.null(zebra.color)) zebra.color=2 #peach-orange
} else {
zebra=2
prefix.rows=seq(1,nrow(x),by=2)
if(is.null(zebra.color)) zebra.color=2 #peach-orange
}
mycolor=c("peach","peach-orange","peachpuff","peach-yellow","pear",
"pearl","peridot","periwinkle","pastelred","pastelgray")
if(zebra!=0) {
zebra.color[1]=validColor(zebra.color[1],mycolor)
zebra.color=rep(zebra.color[1],nrow)
}
else { # zebra==0; all rows
result=c()
for(i in 1:length(zebra.color)){
result=c(result,validColor(zebra.color[i],mycolor))
}
zebra.color=result
if(length(zebra.color)<nrow)
zebra.color=rep(zebra.color,1+(nrow/length(zebra.color)))
}
}
cellcolor=make.cell.color(x,zebra,zebra.color,zebra.type,zebra.list,
zebra.colnames,zebra.rownames)
frontcolor=make.frontcolor(x,color)
if(!is.null(prefix.rows) & (length(commands)==1))
commands=rep(commands,nrow)
if((0 %in% prefix.rows) & is.null(top.command) &(length(commands)>0))
top.command=commands[1]
if(!is.numeric(size)) size=5
else if(size<0 | size>10) size=5
result=list(x=x,
family=family,
cellcolor=cellcolor,
frontcolor=frontcolor,
size=size,
color=color,
tablewidth=tablewidth,
type=type,
include.rownames=include.rownames,
placement=placement,
position=position,
show.heading=show.heading,
show.footer=show.footer,
caption=caption,
caption.placement=caption.placement,
caption.position=caption.position,
caption.bold=caption.bold,
align=align,
digits=digits,
display=display,
sidewaystable=sidewaystable,
longtable=longtable,
wraptable=wraptable,
wraptablewidth=wraptablewidth,
tabular=tabular,
rotate=rotate,
turn=turn,
angle=angle,
label=label,
hline.after=hline.after,
booktabs=booktabs,
prefix.rows=prefix.rows,
commands=commands,
top.command=top.command,
zebra=zebra,
zebra.color=zebra.color,
zebra.type=zebra.type,
zebra.list=zebra.list,
zebra.colnames=zebra.colnames,
zebra.rownames=zebra.rownames,
include.colnames=include.colnames,
colnames.bold=colnames.bold,
cgroup=cgroup,
n.cgroup=n.cgroup,
rgroup=rgroup,
n.rgroup=n.rgroup,
cspan.rgroup=cspan.rgroup,
pcol=pcol
)
class(result) <-c("ztable")
result
}
#' Make a color matrix named "cellcolor" from a ztable call
#'
#'@param x a data.frame
#'@param zebra NULL or an integer of 0, 1 or 2. The arguments zebra and zebra.color are
#' used to make a zebra striping table (a table with alternating background colors)
#' easily. A value of 1 sets the background color of all odd rows/columns to the color
#' specified by zebra.color. A value of 2 sets all even rows/columns. A value of 0 sets
#' the background colors of all rows/columns to the colors specified by zebra.color.
#' When zebra is 1 or 2, the parameters prefix.rows and commands are ignored.
#' Default is NULL.
#'@param zebra.color A color name or a numeric value indicating a pre-defined color.
#' When the parameter zebra is 0, 1 or 2 and zebra.color is NULL, zebra.color
#' is set to "platinum". Numeric values between 1 and 13 are converted to
#' predefined color names. The predefined color names are c("peach","peach-orange",
#' "peachpuff","peach-yellow","pear","pearl","peridot","periwinkle","pastelred",
#' "pastelgray"). Default is NULL.
#'@param zebra.type An integer of 0 or 1 or 2 or 3. A value of 1 sets background colors by row.
#' A value of 2 sets background colors by column. A value of 0 sets background colors of all cells.
#' A value of 3 sets background colors of cells specified with zebra.list.
#' Default value is 1.
#'@param zebra.list A list consists of y,x,color. zebra.list is used only when zebra.type=3.
#' zebra.list sets the cells specified with rows of vector "y" and columns of vector "x" with "color".
#' The y and x are integer vector indicating rows and columns. NA value of y or x indicating all columns or rows.
#' The color is an character vector consists of names of color.
#'@param zebra.colnames whether or not to use background colors in the column names row.
#' Default value is FALSE
#'@param zebra.rownames whether or not to use background colors in the row names column.
#' Default value is TRUE
make.cell.color=function(x,zebra,zebra.color,zebra.type,zebra.list,
zebra.colnames,zebra.rownames){
temp=rep("white",nrow(x)+1)
cellcolor=c()
for(i in 1:(ncol(x)+1)) cellcolor=cbind(cellcolor,temp)
colnames(cellcolor)=c(" ",colnames(x))
rownames(cellcolor)=c(" ",rownames(x))
if(!is.null(zebra)){
if(is.null(zebra.color)) {
if(zebra==0) color=1:10
else color=2
}
else color=zebra.color
mycolor=c("peach","peach-orange","peachpuff","peach-yellow","pear",
"pearl","peridot","periwinkle","pastelred","pastelgray")
result=c()
for(i in 1:length(color)){
result=c(result,validColor(color[i],mycolor))
}
color=result
if(zebra==0){
if(zebra.type==1){
cellcolor=apply(cellcolor,2,repColor,color=color)
}
else if(zebra.type==2){
cellcolor=apply(cellcolor,1,repColor,color=color)
cellcolor=t(cellcolor)
}
else if(zebra.type==0){
temp=rep(color,1+length(cellcolor)/length(color))
for(i in 1:nrow(cellcolor))
for(j in 1:ncol(cellcolor))
cellcolor[i,j]=temp[(i-1)*ncol(cellcolor)+j]
}
}
else if(zebra==1){
if(zebra.type==1){
select=seq(2,nrow(cellcolor),by=2)
for(i in select)
for(j in 1:ncol(cellcolor)) cellcolor[i,j]=color[1]
}
else if(zebra.type==2){
select=seq(1,ncol(cellcolor),by=2)
for(i in 1:nrow(cellcolor))
for(j in select)
cellcolor[i,j]=color[1]
}
}
else if(zebra==2){
if(zebra.type==1){
select=seq(1,nrow(cellcolor),by=2)
for(i in select)
for(j in 1:ncol(cellcolor)) cellcolor[i,j]=color[1]
}
else if(zebra.type==2){
select=seq(2,ncol(cellcolor),by=2)
for(i in 1:nrow(cellcolor))
for(j in select)
cellcolor[i,j]=color[1]
}
}
}
if(zebra.colnames==FALSE) {
cellcolor[1,1:ncol(cellcolor)]="white"
}
if(zebra.rownames==FALSE) {
cellcolor[1:nrow(cellcolor),1]="white"
}
if(zebra.type==3){
if(!is.null(zebra.list)){
ylength=length(zebra.list$y)
if(length(zebra.list$color)<ylength){
zebra.list$color=rep(zebra.list$color,1+ylength/length(zebra.list$color))
}
while(length(zebra.list$x)<ylength){
zebra.list$x=c(zebra.list$x,NA)
}
for(i in 1:ylength){
if(is.na(zebra.list$y[i])) {
if(is.na(zebra.list$x[i])) next
for(j in 1:nrow(cellcolor)) cellcolor[j,zebra.list$x[i]]=zebra.list$color[i]
}
else if(is.na(zebra.list$x[i])){
for(j in 1:ncol(cellcolor)) cellcolor[zebra.list$y[i],j]=zebra.list$color[i]
}
else cellcolor[zebra.list$y[i],zebra.list$x[i]]=zebra.list$color[i]
}
}
}
cellcolor
}
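# A minimal sketch of the matrix this produces (validColor() is defined
# elsewhere in this package):
#   cc <- make.cell.color(head(iris), zebra=1, zebra.color="pearl",
#                         zebra.type=1, zebra.list=NULL,
#                         zebra.colnames=FALSE, zebra.rownames=TRUE)
# cc is a 7 x 6 character matrix (one extra row/column for the column and
# row names); matrix rows 2, 4 and 6 (the odd data rows) are "pearl" and
# everything else stays "white".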
#' Make a color matrix named "frontcolor" from a ztable call
#'
#'@param x A data.frame
#'@param color A character string
make.frontcolor=function(x,color="black"){
temp=rep(color,nrow(x)+1)
frontcolor=c()
for(i in 1:(ncol(x)+1)) frontcolor=cbind(frontcolor,temp)
colnames(frontcolor)=c(" ",colnames(x))
rownames(frontcolor)=c(" ",rownames(x))
frontcolor
}
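# e.g. make.frontcolor(head(iris)) gives a 7 x 6 matrix filled with
# "black": one extra row and column relative to the data, covering the
# column names row and the row names column.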
#' Fill vector x by recycling the colors in vector color
#'
#' Internal function of make.cell.color
#' @param x A destination vector
#' @param color A character vector consisting of color names
repColor=function(x,color){
#cat("color=",color,"\n")
temp=rep(color,1+length(x)/length(color))
for(i in 1:length(x)) x[i]=temp[i]
x
}
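# e.g. repColor(rep("white", 5), c("gray", "white"))
#   #-> "gray" "white" "gray" "white" "gray"
# (the colors are recycled until the destination vector is filled)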
#'Update ztable before print
#'
#'Update options of ztable before print
#'@param z An object of class "ztable"
#'@param family Font family. Default value is NULL. Possible values include "serif", "times" and "tt".
#'@param size An integer from 1 to 10 indicating font size= c("tiny","scriptsize",
#' "footnotesize","small","normalsize","large","Large","LARGE","huge","Huge")
#' respectively.
#'@param color A character indicating color of ztable
#'@param tablewidth A numeric indicating desired table width as a ratio to linewidth.
#' Default value is 0.3.
#'@param type character indicating the output format of ztable: "latex", "html" or "viewer".
#'@param include.rownames A logical value whether or not include rownames in the table
#'@param placement The table will have placement given by placement where placement
#' must be NULL or contain only elements of {"h","t","b","p","!","H"}.
#'@param position The table will be placed at the center of the paper
#' if position is "center" or "c", and at the left side of the paper
#' if it equals "left" or "l", and at the right side of the paper
#' if it equals "right" or "r". The position is translated to specific
#' latex environments such as "flushright" or "flushleft" or "center"
#' (provided as a character vector) will enclose the tabular environment.
#'@param show.heading A logical value whether or not to include headings in the table.
#'@param show.footer A logical value whether or not to include the footer in the table.
#'@param caption A character
#'@param caption.placement The caption will be placed at the top of the table
#' if caption.placement is "top" and at the bottom of the table
#' if it equals "bottom".
#'@param caption.position The caption will be placed at the center of the table
#' if caption.position is "center" or "c", and at the left side of the table
#' if it equals "left" or "l", and at the right side of the table
#' if it equals "right" or "r".
#'@param caption.bold whether or not use bold font for caption
#'@param align Character vector : nchar equal to the number of columns of the
#' resulting table indicating the alignment of the corresponding columns.
#'@param digits Numeric vector of length equal to one (in which case it will be
#' replicated as necessary) or to the number of columns of the resulting table
#'@param display Character vector of length equal to the number of columns of the
#' resulting table indicating the format for the corresponding columns.
#' Since the row names are printed in the first column, the length of display
#' is one greater than ncol(x) if x is a data.frame. These values are passed
#' to the formatC function. Use "d" (for integers), "f", "e", "E", "g", "G",
#' "fg" (for reals), or "s" (for strings). "f" gives numbers in the usual
#' xxx.xxx format; "e" and "E" give n.ddde+nn or n.dddE+nn (scientific format);
#' "g" and "G" put x[i] into scientific format only if it saves space to do so.
#' "fg" uses fixed format as "f", but digits as number of significant digits.
#' Note that this can lead to quite long result strings.
#'@param sidewaystable Logical value whether or not set the tabular environment=
#' "sidewaystable". Requires Latex "rotating" package in preamble.
#'@param longtable Logical value whether or not set the tabular environment=
#' "longtable". Requires Latex "longtable" package in preamble.
#'@param wraptable Logical value whether or not set the tabular environment=
#' "wraptable". Requires Latex "wrapfig" package in preamble.
#'@param rotate Logical value whether or not set the tabular environment=
#' "rotate". No special arrangement is made to find space for the result.
#' Requires Latex "rotating" package in preamble.
#' If TRUE, requires the rotate angle(counterclockwise).
#'@param turn Logical value whether or not set the tabular environment=
#' "turn". In this environment, Latex leaves space for the rotated table.
#' Requires Latex "rotating" package in preamble.
#' If TRUE, requires the rotate angle.
#'@param angle An integer indicating the rotation angle in degrees; range -180 to 180.
#'@param wraptablewidth An integer indicating the wraptable width in centimeters.
#'@param tabular Logical value whether or not to print only the tabular environment.
#' If TRUE, the tabular environment is printed without a surrounding floating table environment.
#'@param label Character vector of length 1 containing the LaTeX label or HTML anchor.
#' Set to NULL to suppress the label.
#'@param hline.after A vector of numbers between -1 and "nrow(x)", inclusive,
#' indicating the rows after which a horizontal line should appear.
#' If NULL is used no lines are produced. Default value is c(-1,0,nrow(x))
#' which means draw a line before and after the columns names and at the
#' end of the table. Repeated values are allowed.
#'@param booktabs Logical value. If TRUE, the toprule, midrule and bottomrule tags
#' from the LaTex "booktabs" package are used rather than hline for the
#' horizontal line tags. Requires Latex "booktabs" package in preamble.
#'@param prefix.rows A numeric vector contains the position of rows on which
#' extra Latex commands should be added as a prefix.
#'@param commands A character vector of the length 1 or same length of the nrow of
#' data.frame which contains the command that should be added as a prefix at
#' the specified rows.
#'@param top.command A character vector of the length 1 which contains the command
#' that should be added as a prefix at the colnames row.
#'@param zebra NULL or an integer of 1 or 2. The arguments zebra and zebra.color are
#' used to make a zebra striping table (a table with alternating background colors)
#' easily. A value of 1 sets the background color of all odd rows to the color
#' specified by zebra.color. A value of 2 sets all even rows. When zebra is 1 or 2,
#' the parameters prefix.rows and commands are ignored.
#'@param zebra.color A color name or a numeric value indicating a pre-defined color.
#' When the parameter zebra is 0, 1 or 2 and zebra.color is NULL, zebra.color
#' is set to "platinum". Numeric values between 1 and 13 are converted to
#' predefined color names. The predefined color names are c("peach","peach-orange",
#' "peachpuff","peach-yellow","pear","pearl","peridot","periwinkle","pastelred",
#' "pastelgray").
#'@param zebra.type An integer of 0 or 1 or 2 or 3. A value of 1 sets background colors by row.
#' A value of 2 sets background colors by column. A value of 0 sets background colors of all cells.
#' A value of 3 sets background colors of cells specified with zebra.list.
#' Default value is 1.
#'@param zebra.list A list consists of y,x,color. zebra.list is used only when zebra.type=3.
#' zebra.list sets the cells specified with cells[y,x] with "color". The y and x are
#' integer indicating rows and columns. NA value of y or x indicating all columns or rows.
#'@param zebra.colnames whether or not to use background colors in the column names row.
#' Default value is FALSE
#'@param zebra.rownames whether or not to use background colors in the row names column.
#' Default value is TRUE
#'@param colnames.bold whether or not to use a bold font for column names.
#'@param include.colnames Logical. If TRUE the column names are printed.
#'@param cgroup A character vector or matrix indicating names of column group. Default value is NULL
#'@param n.cgroup An integer vector or matrix indicating the numbers of columns included in each cgroup.
#' Default value is NULL
#'@param rgroup A character vector indicating names of row group. Default value is NULL
#'@param n.rgroup An integer vector indicating the numbers of rows included in each rgroup.
#' Default value is NULL
#'@param cspan.rgroup The number of columns that an rgroup should span. It spans by default all
#' columns but you may want to limit this if you have column colors that you want to retain.
#'@param pcol number of column displaying p value
#'@export
update_ztable=function(z,
family=NULL,
size=NULL, # normal size, range 1-10
color=NULL,
tablewidth=NULL,
type=NULL,
include.rownames=NULL,
placement=NULL,position=NULL,
show.heading=NULL,
show.footer=NULL,
caption=NULL,
caption.placement=NULL,
caption.position=NULL,
caption.bold=NULL,
align=NULL,digits=NULL,display=NULL,
sidewaystable=NULL,
longtable=NULL,
rotate=NULL,
turn=NULL,
angle=NULL,
wraptable=NULL,wraptablewidth=NULL,
tabular=NULL,
label=NULL,hline.after=NULL,
booktabs=NULL,
prefix.rows=NULL,commands=NULL,top.command=NULL,
zebra=NULL,
zebra.color=NULL,
zebra.type=NULL,
zebra.list=NULL,
zebra.colnames=NULL,
zebra.rownames=NULL,
colnames.bold=NULL,
include.colnames=NULL,
cgroup=NULL,
n.cgroup=NULL,
rgroup=NULL,
n.rgroup=NULL,
cspan.rgroup=NULL,
pcol=NULL){
if(!is.null(family)) z$family=family
if(!is.null(size)) z$size=size
if(!is.null(color)) z$color=color
if(!is.null(tablewidth)) z$tablewidth=tablewidth
if(!is.null(type)) z$type=type
if(!is.null(include.rownames)) z$include.rownames=include.rownames
if(!is.null(placement)) z$placement=placement
if(!is.null(position)) z$position=position
if(!is.null(show.heading)) z$show.heading=show.heading
if(!is.null(show.footer)) z$show.footer=show.footer
if(!is.null(caption)) z$caption=caption
if(!is.null(caption.placement)) z$caption.placement=caption.placement
if(!is.null(caption.position)) z$caption.position=caption.position
if(!is.null(caption.bold)) z$caption.bold=caption.bold
if(!is.null(align)) z$align=align
if(!is.null(digits)) {
if(length(digits)==1) digits=rep(digits,ncol(z$x)+1)
z$digits=digits
}
if(!is.null(display)) z$display=display
if(!is.null(sidewaystable)) z$sidewaystable=sidewaystable
if(!is.null(longtable)) z$longtable=longtable
if(!is.null(rotate)) z$rotate=rotate
if(!is.null(turn)) z$turn=turn
if(!is.null(angle)) z$angle=angle
if(!is.null(wraptable)) z$wraptable=wraptable
if(!is.null(wraptablewidth)) z$wraptablewidth=wraptablewidth
if(!is.null(tabular)) z$tabular=tabular
if(!is.null(label)) z$label=label
if(!is.null(hline.after)) z$hline.after=hline.after
if(!is.null(booktabs)) z$booktabs=booktabs
if(!is.null(prefix.rows)) z$prefix.rows=prefix.rows
if(!is.null(commands)) z$commands=commands
if(!is.null(top.command)) z$top.command=top.command
if(!is.null(zebra)) z$zebra=zebra
if(!is.null(zebra.color)) z$zebra.color=zebra.color
if(!is.null(zebra.type)) z$zebra.type=zebra.type
if(!is.null(zebra.list)) z$zebra.list=zebra.list
if(!is.null(zebra.colnames)) z$zebra.colnames=zebra.colnames
if(!is.null(zebra.rownames)) z$zebra.rownames=zebra.rownames
if(!is.null(colnames.bold)) z$colnames.bold=colnames.bold
if(!is.null(include.colnames)) z$include.colnames=include.colnames
if(!is.null(cgroup)) z$cgroup=cgroup
if(!is.null(n.cgroup)) z$n.cgroup=n.cgroup
if(!is.null(rgroup)) z$rgroup=rgroup
if(!is.null(n.rgroup)) z$n.rgroup=n.rgroup
if(!is.null(cspan.rgroup)) z$cspan.rgroup=cspan.rgroup
if(!is.null(pcol)) z$pcol=pcol
if(!is.null(z$zebra)) { if(z$zebra!=3) z$cellcolor=make.cell.color(x=z$x,zebra=z$zebra,zebra.color=z$zebra.color,
zebra.type=z$zebra.type,
zebra.list=z$zebra.list,
zebra.colnames=z$zebra.colnames,
zebra.rownames=z$zebra.rownames)
}
z
}
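# Typical use is to tweak an existing table just before printing, e.g.
#   z <- ztable(head(iris))
#   z <- update_ztable(z, size=3, caption="Table 1. Iris data")
#   print_ztable(z)
# print.ztable() below forwards its ... arguments through this function.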
#' Print an object of class "ztable"
#'
#' @param x An object of class "ztable"
#' @param ... further argument passed to other function
#' @export
print.ztable=function(x,...){
z=update_ztable(z=x,...)
print_ztable(z)
}
#' Print an object of class "ztable"
#'
#' @param z An object of class "ztable"
print_ztable=function(z){
xdata=data2table(z)
if(z$type=="latex") ztable2latex(z,xdata)
else if(z$type=="viewer") ztable2viewer(z)
else ztable2html(z,xdata)
}
#'Subfunction used in ztable2latex
#'
#' @param string a character vector
tr=function(string) {
string=gsub("%","\\%",string,fixed=TRUE)
string=gsub(" -","\\hspace{0.5cm}",string,fixed=TRUE)
string
}
#'Subfunction used in ztable2html
#'
#' @param string a character vector
tr2=function(string) {
string=gsub(" -"," ",string,fixed=TRUE)
string=gsub(" ","",string,fixed=TRUE)
string
}
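# e.g. tr("50%") yields "50\\%" and tr(" -item") yields
# "\\hspace{0.5cm}item": "%" is escaped and a leading " -" marks an
# indented sub-item in LaTeX output, while tr2() drops the same " -"
# marker for HTML output.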
#' Compute the approximate printed width of a table, in characters
#'
#' @param z An object of class "ztable"
tableLength=function(z){
xdata=data2table(z)
a=apply(xdata,2,function(x) max(nchar(x)))
if(z$include.colnames){
b=nchar(colnames(xdata))
l=c()
for(i in 1:ncol(xdata)){
l=c(l,max(a[i],b[i]))
}
length=sum(l)
}
else length=sum(a)
result=length+ncol(xdata)-1
if(z$include.rownames) result=result+max(nchar(rownames(xdata)))+1
result
}
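# e.g. tableLength(ztable(head(iris))) returns the width in characters of
# the widest rendering of the table body (column separators and row names
# included); caption2minipage() below uses it to decide when a caption
# must be wrapped in a minipage.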
#' Convert data to formatted data for table
#'
#' @param z An object of class "ztable"
#' @export
data2table=function(z){
data<-z$x
ncount=ncol(data)
nrow=nrow(data)
select=sapply(data,is.factor)
data[select]=lapply(data[select],as.character)
#data
for(i in 1:nrow){
for(j in 1:ncount) {
if(z$display[j+1]=="s"){
temp=data[i,j]
if(z$type=="latex") temp<-tr(temp)
if(z$type=="html") temp<-tr2(temp)
}
else{
if(is.na(z$x[i,j])) {
temp<-""
} else{
temp<-formatC(z$x[i,j],digits=z$digits[j+1],
format=z$display[j+1])
}
}
data[i,j]<-temp
}
}
pcol=z$pcol
if(!is.null(pcol)) {
temp=data[[pcol]]
pos=which((as.numeric(temp)==0) & (temp!=""))
if(length(pos)>0){
tempvalue=temp[pos][1]
temp[pos]<- paste0("< ",substr(tempvalue,1,nchar(tempvalue)-1),"1")
}
data[[pcol]]=temp
}
data
}
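# e.g. data2table(ztable(head(iris))) returns head(iris) as character
# data, with numeric columns formatted through formatC() using the
# table's digits/display settings (two decimals by default).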
#' Convert long caption to minipage
#'
#' @param z An object of ztable
#' @param caption A character vector to convert
caption2minipage=function(z,caption){
tlength=tableLength(z)
if(nchar(caption)>tlength){
tablewidth=max(z$tablewidth,tlength/85)
mycaption=paste("\\begin{minipage}[c]{",tablewidth,"\\linewidth}",
caption,"\\end{minipage}",sep="")
}
else mycaption=caption
mycaption
}
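# e.g. if the caption is longer than the printed table,
#   caption2minipage(z, strrep("x", 200))
# returns "\\begin{minipage}[c]{W\\linewidth}xxx...\\end{minipage}"
# where W = max(z$tablewidth, tableLength(z)/85); otherwise the caption
# is returned unchanged.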
#' Print an object of class "ztable" to Latex table
#'
#' @param z An object of class "ztable"
#' @param xdata A formatted data.frame
ztable2latex=function(z,xdata){
ncount=ncol(z$x)
nrow=nrow(z$x)
cn=colnames(z$x)
addrow=ifelse(z$include.rownames,1,0)
NewAlign=getNewAlign(z)
#NewAlign=z$align
totalCol=totalCol(z)
colCount=colGroupCount(z)
vlines=align2lines(z$align)
rgroupcount=0
printrgroup=1
if(!is.null(z$n.rgroup)){
if(length(z$n.rgroup)>1) {
for(i in 2:length(z$n.rgroup)) {
printrgroup=c(printrgroup,printrgroup[length(printrgroup)]+z$n.rgroup[i-1])
}
}
rgroupcount=1
}
Fontsize=c("tiny","scriptsize","footnotesize","small","normalsize",
"large","Large","LARGE","huge","Huge")
if(z$tabular) sort="tabular"
else if(z$sidewaystable) sort="sidewaystable"
else if(z$wraptable) sort="wraptable"
else if(z$rotate) sort="rotate"
else if(z$turn) sort="turn"
else sort="table"
headingsize=ifelse(z$size>3,z$size-2,1)
z$cellcolor=define_colors(z$cellcolor)
start=attr(z$cellcolor,"no")
z$frontcolor=define_colors(z$frontcolor,no=start)
start=attr(z$frontcolor,"no")
if(!is.null(z$cgroupcolor)) {
for(i in 1:length(z$cgroupcolor)){
z$cgroupcolor[[i]]=define_colors(z$cgroupcolor[[i]],no=start)
start=attr(z$cgroupcolor[[i]],"no")
}
}
if(!is.null(z$cgroupbg)) {
for(i in 1:length(z$cgroupbg)){
z$cgroupbg[[i]]=define_colors(z$cgroupbg[[i]],no=start)
start=attr(z$cgroupbg[[i]],"no")
}
}
if(!is.null(z$rgroupcolor)) z$rgroupcolor=define_colors(z$rgroupcolor,no=start)
start=attr(z$rgroupcolor,"no")
if(!is.null(z$rgroupbg)) z$rgroupbg=define_colors(z$rgroupbg,no=start)
start=attr(z$rgroupbg,"no")
align=alignCheck(z$align,ncount,addrow)
if(z$longtable){
cat(paste("\\color{",z$color,"}\n",sep=""))
cat(paste("\\begin{",Fontsize[z$size],"}\n",sep=""))
cat(paste("\\begin{longtable}{",NewAlign,"}\n",sep=""))
} else {
if(z$wraptable) {
if(z$position=="flushright") wrapposition<-"r"
else wrapposition<-"l"
cat(paste("\\begin{wraptable}{",wrapposition,"}[10pt]{",
z$wraptablewidth,"cm}\n",sep=""))
} else if((sort=="rotate") | (sort=="turn")){
cat(paste("\\begin{",sort,"}{",z$angle,"}\n",sep=""))
} else if(sort!="tabular"){ # sidewaystable or table
cat(paste("\\begin{",sort,"}[",z$placement,"]\n",sep=""))
cat(paste("\\begin{",z$position,"}\n",sep=""))
}
if(!is.null(z$family)){
if(z$family=="serif") cat("\\sffamily\n")
else if(z$family=="times") cat("\\rmfamily\n")
else if(z$family=="tt") cat("\\ttfamily\n")
else {
temp=paste0("\\",z$family,"\n")
cat(temp)
}
}
cat(paste("\\begin{",Fontsize[z$size],"}\n",sep=""))
cat(paste("\\color{",z$color,"}\n",sep=""))
cat(paste("\\begin{tabular}{",NewAlign,"}\n",sep=""))
}
if(!is.null(z$caption) & z$caption.placement=="top"){
mycaption=caption2minipage(z,z$caption)
cat(paste("\\multicolumn{",totalCol,"}{",
z$caption.position,"}{",sep=""))
if(z$caption.bold) cat(paste("\\textbf{",mycaption,"}",sep=""))
else cat(mycaption)
cat("}\\\\ \n")
}
if((z$show.heading==TRUE) & (!is.null(attr(z$x,"heading")))) {
head=attr(z$x,"heading")
for(i in 1:length(head)) {
h1=gsub("~","$\\sim$",head[i],fixed=TRUE)
if(nchar(head[i])<1) next
cat(paste("\\multicolumn{",totalCol,"}{l}{\\",Fontsize[headingsize],
"{",h1,"}}\\\\ \n",sep=""))
}
}
if(is.null(z$hline.after)) cat(ifelse(z$booktabs,"\\toprule[1.2pt]\n","\\hline\n"))
else if(-1 %in% z$hline.after) cat(ifelse(z$booktabs,"\\toprule[1.2pt]\n","\\hline\n"))
if(!is.null(z$cgroup)) printLatexHead(z)
subcolnames=ifelse(is.null(z$subcolnames),0,1)
if(subcolnames) {
if(is.na(z$subcolnames[1])) firstcn=paste("\\multirow{2}{*}{}",sep="")
else firstcn=cn[1]
}
else firstcn=cn[1]
if(z$colnames.bold) firstcn=paste("\\textbf{",firstcn,"}",sep="")
if(z$frontcolor[1,2]!=z$color) firstcn=paste("\\color{",z$frontcolor[1,2],"}",firstcn,sep="")
if(z$cellcolor[1,2]!="white") firstcn=paste("\\cellcolor{",z$cellcolor[1,2],"}",firstcn,sep="")
if(z$include.rownames) {
result=1
if(!is.null(isspanCol(z,1,1)))
first=paste("\\multicolumn{",isspanCol(z,1,1),"}{c}{}",sep="")
else if(!is.null(isspanRow(z,1,1))){
result=isspanRow(z,1,1)
if(result>0) first=paste("\\multirow{",result,"}{*}{}",sep="")
} else first=""
if(z$cellcolor[1,1]!="white")
first=paste("\\cellcolor{",z$cellcolor[1,1],first,"}",sep="")
firstrow=paste(first,"&",firstcn,sep="")
}
else firstrow=firstcn
if(ncount>1) {
for(i in 2:ncount) {
firstrow=paste(firstrow,"&",sep="")
if((i==2)&(!is.null(colCount))){
if(1 %in% colCount[-length(colCount)]) {
if(vlines[1+2]==0) firstrow=paste(firstrow,"&",sep="")
}
}
if(z$cellcolor[1,i+1]!="white")
firstrow=paste(firstrow,"\\cellcolor{",z$cellcolor[1,i+1],"}",sep="")
if(z$frontcolor[1,i+1]!=z$color)
firstrow=paste(firstrow,"\\color{",z$frontcolor[1,i+1],"}",sep="")
if(z$colnames.bold) boldcn=paste("\\textbf{",cn[i],"}",sep="")
else boldcn=cn[i]
result=1
if(!is.null(isspanCol(z,1,(i+1)))){
result=isspanCol(z,1,(i+1))
if(result>0) boldcn=paste("\\multicolumn{",result,"}{c}{",boldcn,"}",sep="")
else if(result==0) next
} else if(!is.null(isspanRow(z,1,(i+1)))){
boldcn=paste("\\multirow{",isspanRow(z,1,(i+1)),"}{*}{",boldcn,"}",sep="")
}
if((subcolnames==1)) {
if(is.na(z$subcolnames[i])){
# boldcn=paste("\\multirow{2}{*}{",boldcn,"}",sep="")
boldcn=""
}
}
firstrow=paste(firstrow,boldcn,sep="")
if(!is.null(colCount)){
if(i %in% colCount[-length(colCount)]) {
if(vlines[i+2]==0) {
#if(z$cellcolor[1,i+1]!="white")
# firstrow=paste(firstrow,"&\\cellcolor{",z$cellcolor[1,i+1],"}",sep="")
#else firstrow=paste(firstrow,"&",sep="")
firstrow=paste(firstrow,"&",sep="")
}
}
}
}
}
if((0 %in% z$prefix.rows) & !is.null(z$top.command)) cat(z$top.command)
if(z$include.colnames) {
cat(paste(firstrow,"\\\\ \n",sep=""))
if(subcolnames){
if(z$include.rownames) {
if(z$cellcolor[1,1]!="white")
cat(paste("\\cellcolor{",z$cellcolor[1,1],"} &",sep=""))
else cat("&")
}
for(i in 1:length(z$subcolnames)){
if(is.na(z$subcolnames[i])) {
temp=paste("\\multirow{-2}{*}{",colnames(z$x)[i],"}",sep="")
if(!is.null(z$colcolor)){
if(z$frontcolor[1,i+1]!=z$color)
temp=paste("\\color{",z$frontcolor[1,i+1],"}",temp,sep="")
if(z$cellcolor[1,i+1]!="white")
temp=paste("\\cellcolor{",z$cellcolor[1,i+1],"}",temp,sep="")
}
cat(temp)
if(i!=length(z$subcolnames)) cat("&")
if(i %in% colCount[-length(colCount)]) {
if(vlines[i+2]==0){
if((z$cellcolor[1,i+1]!="white") & (z$cellcolor[1,i+1]==z$cellcolor[1,i+2]))
cat(paste("\\cellcolor{",z$cellcolor[1,i+1],"}&",sep=""))
else cat("&")
}
}
next
}
if(z$colnames.bold) boldcn=paste("\\textbf{",z$subcolnames[i],"}",sep="")
else boldcn=z$subcolnames[i]
if(z$cellcolor[1,i+1]!="white")
cat(paste("\\cellcolor{",z$cellcolor[1,i+1],"}",boldcn,"&",sep=""))
else cat(paste(boldcn,"&",sep=""))
if(i %in% colCount[-length(colCount)]) {
if(vlines[i+2]==0){
if((z$cellcolor[1,i+1]!="white") & (z$cellcolor[1,i+1]==z$cellcolor[1,i+2]))
cat(paste("\\cellcolor{",z$cellcolor[1,i+1],"}&",sep=""))
else cat("&")
}
}
}
cat("\\\\ \n")
}
if(is.null(z$hline.after)) cat(ifelse(z$booktabs,"\\midrule\n","\\hline\n"))
else if(0 %in% z$hline.after) cat(ifelse(z$booktabs,"\\midrule\n","\\hline\n"))
}
for(i in 1:nrow){
printcline=0
if(rgroupcount>0) {
if(i %in% printrgroup) {
for(k in 1:length(printrgroup)){
if(i == printrgroup[k]){
if(is.na(z$rgroup[k])) break
if(z$rgroup[k]=="") break
printRowGroup(z,i)
break
}
}
}
}
if(i %in% z$prefix.rows) {
#if(is.numeric(z$zebra))
# cat(paste("\\rowcolor{",z$zebra.color[i],"}",sep=""))
if(!is.null(z$commands[i])) cat(z$commands[i])
}
tempo=NULL
if(z$include.rownames) {
tempo=rownames(z$x)[i]
if(z$frontcolor[i+1,1]!=z$color) {
tempo=paste("\\color{",z$frontcolor[i+1,1],"}",
tempo,sep="")
}
if(z$cellcolor[i+1,1]!="white") {
tempo=paste("\\cellcolor{",z$cellcolor[i+1,1],"}",
tempo,sep="")
}
if(!is.null(isspanCol(z,(i+1),1)))
tempo=paste("\\multicolumn{",isspanCol(z,i+1,1),"}{c}{",tempo,"}",sep="")
else if(!is.null(isspanRow(z,(i+1),1))){
result=isspanRow(z,(i+1),1)
if(result<0) tempo=paste("\\multirow{",result,"}{*}{",tempo,"}",sep="")
}
cat(tempo)
}
for(j in 1:ncount) {
skip=0
if(z$frontcolor[i+1,j+1]==z$color) temp1=xdata[i,j]
else temp1=paste("\\color{",z$frontcolor[i+1,j+1],"}",
xdata[i,j],sep="")
if(z$cellcolor[i+1,j+1]!="white") {
temp1=paste("\\cellcolor{",z$cellcolor[i+1,j+1],"}",
temp1,sep="")
}
if(is.null(isspanCol(z,(i+1),(j+1)))){
if(is.null(isspanRow(z,(i+1),(j+1)))){
result=1
} else {
result=isspanRow(z,(i+1),(j+1))
if(result < 0) {
k=getspanRowData(z,i+1,j+1)
if(z$cellcolor[i+1,j+1]=="white") temp2=xdata[k+1,j]
else temp2=paste("\\cellcolor{",z$cellcolor[i+1,j+1],"}",
xdata[k-1,j],sep="")
temp1=paste("\\multirow{",result,"}{*}{",temp2,"}",sep="")
}
else {
skip=1
result=0 #
if(z$cellcolor[i+1,j+1]=="white") skipcolor=""
else skipcolor=paste("\\cellcolor{",z$cellcolor[i+1,j+1],"}",sep="")
}
}
if(j %in% colCount[-length(colCount)]) {
if(vlines[j+2]==0) {
backcolor=NULL
if(!is.null(z$rowcolor)){
if(z$rowcolor[i+1]!="white") backcolor=z$rowcolor[i+1]
}
if(is.null(backcolor)){
if((z$cellcolor[i+1,j+1]!="white")&(z$cellcolor[i+1,j+1]==z$cellcolor[i+1,j+2]))
backcolor=z$cellcolor[i+1,j+1]
}
if(is.null(backcolor)) temp1=paste(temp1,"&",sep="")
else temp1=paste(temp1,"&\\cellcolor{",backcolor,"}",sep="")
#temp1=paste(temp1,"&",sep="")
}
}
} else {
result=isspanCol(z,(i+1),(j+1))
if(result>0) {
width=spanColWidth(z,(i+1),(j+1))
mcalign="c"
mclinecount=vlines[j+width+1]
if(mclinecount > 0) {
for(k in 1:mclinecount)
mcalign=paste(mcalign,"|",sep="")
}
temp1=paste("\\multicolumn{",result,"}{",mcalign,"}{",temp1,"}",sep="")
if(isGroupCol(j,result,colCount))
if(vlines[j+width+1]==0)
#if((j+result)<ncol(z$x))
temp1=paste(temp1,"&",sep="")
#if((j+result-1) %in% colCount[-length(colCount)])
# if(vlines[j+result+1]==0) temp1=paste(temp1,"&",sep="")
}
else next
}
#browser()
if(is.null(tempo)) {
cat(temp1)
tempo=temp1
}
else {
if(result!=0) cat(paste("&",temp1,sep=""))
else if(skip) cat(paste("&",skipcolor,sep=""))
}
if(!is.null(colCount)){
count=j
if(!is.null(isspanCol(z,i+1,j+1))){
result=isspanCol(z,(i+1),(j+1))
if(result>0) count=count+result
}
#if(count %in% colCount[-length(colCount)]) {
#if(vlines[count+2]==0) cat("&")
#if(z$cellcolor[i+1,j+1]=="white") cat("&")
#else cat(paste("&\\cellcolor{",z$cellcolor[i+1,j+1],"}",sep=""))
#}
}
}
cat(paste("\\\\ \n",sep=""))
if(i %in% z$hline.after)
        cat(ifelse(z$booktabs,ifelse(i==nrow,"\\bottomrule[1.2pt]\n","\\midrule\n"),"\\hline\n"))
}
if(is.null(z$hline.after)) cat(ifelse(z$booktabs,"\\bottomrule[1.2pt]\n","\\hline\n"))
footer=attr(z$x,"footer")
if(!is.null(footer) & (z$show.footer)){
myfooter=caption2minipage(z,footer)
myfooter=gsub("~","$\\sim$",myfooter,fixed=TRUE)
cat(paste("\\multicolumn{",totalCol,"}{l}{\\",Fontsize[headingsize],
"{",myfooter,"}}\\\\ \n",sep=""))
}
if(!is.null(z$caption) & z$caption.placement=="bottom"){
mycaption=caption2minipage(z,z$caption)
if(z$caption.bold) cat(paste("\\multicolumn{",totalCol,"}{",
z$caption.position,"}{\\textbf{",mycaption,"}}\\\\ \n",sep=""))
else cat(paste("\\multicolumn{",totalCol,"}{",
z$caption.position,"}{",mycaption,"}\\\\ \n",sep=""))
}
if(z$longtable) {
if(!is.null(z$label)) cat(paste("\\label{",z$label,"}\n",sep=""))
cat("\\end{longtable}\n")
cat(paste("\\end{",Fontsize[z$size],"}\n",sep=""))
} else {
cat("\\end{tabular}\n")
cat(paste("\\end{",Fontsize[z$size],"}\n",sep=""))
if(!is.null(z$label)) cat(paste("\\label{",z$label,"}\n",sep=""))
if(sort!="tabular") {
if((sort=="table") | (sort=="sidewaystable"))
cat(paste("\\end{",z$position,"}\n",sep=""))
cat(paste("\\end{",sort,"}\n",sep=""))
}
}
cat("\\color{black}\n")
}
#' Print Row Groups in a latex table
#'
#' @param z An object of class ztable
#' @param i An integer indicating row
printRowGroup=function(z,i){
ncount=ncol(z$x)
nrow=nrow(z$x)
cn=colnames(z$x)
addrow=ifelse(z$include.rownames,1,0)
NewAlign=getNewAlign(z)
totalCol=totalCol(z)
colCount=colGroupCount(z)
vlines=align2lines(z$align)
printrgroup=1
if(!is.null(z$n.rgroup)){
if(length(z$n.rgroup)>1) {
for(j in 2:length(z$n.rgroup)) {
printrgroup=c(printrgroup,printrgroup[length(printrgroup)]+z$n.rgroup[j-1])
}
}
}
printcline=0
rgroupcount=0
for(k in 1:length(printrgroup)){
if(i == printrgroup[k]){
rgroupcount=k
break
}
}
if(i %in% printrgroup) {
if(is.null(z$cspan.rgroup)){
if(i>1) cat(paste("\\cline{1-",totalCol,"}\n",sep=""))
vlines=align2lines(NewAlign)
#mcalign=substr(extractAlign(NewAlign),start=1,stop=1)
mcalign="l"
if(vlines[1]>0)
for(k in 1:vlines[1]) mcalign=paste("|",mcalign,sep="")
if(vlines[totalCol+1]>0)
for(k in 1:vlines[totalCol+1]) mcalign=paste(mcalign,"|",sep="")
temp=paste("\\multicolumn{",totalCol,"}{",mcalign,"}{",sep="")
# if(z$colcolor[1]!="white")
# temp=paste(temp,"\\cellcolor{",z$colcolor[1],"}",sep="")
if(z$rgroupbg[rgroupcount]!="white")
temp=paste(temp,"\\cellcolor{",z$rgroupbg[rgroupcount],"}",sep="")
if(z$rgroupcolor[rgroupcount]!="black")
temp=paste(temp,"\\color{",z$rgroupcolor[rgroupcount],"}",sep="")
temp=paste(temp,"\\textbf{",z$rgroup[rgroupcount],"}}",sep="")
printcline=totalCol
}
else {
if(z$cspan.rgroup==1) {
# if(z$colcolor[1]!="white")
# temp=paste("\\cellcolor{",z$colcolor[1],"}",sep="")
# else temp=""
temp=""
if(z$rgroupbg[rgroupcount]!="white")
temp=paste(temp,"\\cellcolor{",z$rgroupbg[rgroupcount],"}",sep="")
if(z$rgroupcolor[rgroupcount]!="black")
temp=paste(temp,"\\color{",z$rgroupcolor[rgroupcount],"}",sep="")
temp=paste(temp,"\\textbf{",z$rgroup[rgroupcount],"}",sep="")
for(j in 1:(ncount+addrow-1)){
temp1=""
if(z$colcolor[j+1]!="white")
temp1=paste("\\cellcolor{",z$colcolor[j+1],"}",sep="")
else {
if(!is.null(isspanRow(z,i+1,j+1))){
#cat("i=",i,",j=",j,"isspanRow(z,i,j+1)=",isspanRow(z,i+1,j+1),"\n")
if(isspanRow(z,i+1,j+1)<=0) {
#for(k in 1:nrow(z$spanRow)) {
# if(z$spanRow[k,1]!=j+1) next
# if(z$spanRow[k,2]>=i+1) next
# if(z$spanRow[k,3]==i+1) break
#}
temp1=paste("\\cellcolor{",z$cellcolor[i+1,j+1],"}",sep="")
}
}
else temp1=""
}
temp=paste(temp,temp1,sep="&")
if(!is.null(colCount)){
if(j %in% colCount[-length(colCount)]) {
if(vlines[j+2]==0) {
#if(z$colcolor[j+1]!="white")
# temp=paste(temp,"&\\cellcolor{",z$colcolor[j+1],"}",sep="")
#else temp=paste(temp,"&",sep="")
temp=paste(temp,"&",sep="")
}
}
}
}
} else {
if(z$cspan.rgroup<1 | z$cspan.rgroup>(ncount+addrow))
z$cspan.rgroup=ncount+addrow
printcline=z$cspan.rgroup
nvlines=align2lines(NewAlign)
#mcalign=substr(extractAlign(NewAlign),start=1,stop=1)
mcalign="l"
if(nvlines[1]>0)
                 for(k in 1:nvlines[1]) mcalign=paste("|",mcalign,sep="")
if(nvlines[printcline+1]>0)
for(k in 1:nvlines[printcline+1]) mcalign=paste(mcalign,"|",sep="")
temp=paste("\\multicolumn{",z$cspan.rgroup,"}{",mcalign,"}{\\textbf{",
"\\cellcolor{",z$rgroupbg[rgroupcount],"}",
"\\color{",z$rgroupcolor[rgroupcount],"}",
z$rgroup[rgroupcount],"}}",sep="")
#temp=paste("\\cellcolor{",z$colcolor[1],"}",temp,sep="")
if(z$cspan.rgroup<(ncount+addrow)) {
for(j in (z$cspan.rgroup):(ncount+addrow-1)) {
if(z$colcolor[j+1]!="white")
temp=paste(temp,"&\\cellcolor{",z$colcolor[j+1],"}",sep="")
else {
if(!is.null(isspanRow(z,i+1,j+1))){
if(isspanRow(z,i+1,j+1)<=0) {
#for(k in 1:nrow(z$spanRow)) {
# if(z$spanRow[k,1]!=j+1) next
# if(z$spanRow[k,2]>=i+1) next
# if(z$spanRow[k,3]==i+1) break
#}
temp=paste(temp,"&\\cellcolor{",z$cellcolor[i+1,j+1],"}",sep="")
}
else temp=paste(temp,"&",sep="")
}
else temp=paste(temp,"&",sep="")
}
if(!is.null(colCount)){
if(j %in% colCount[-length(colCount)]) {
if(vlines[j+2]==0){
#if(z$colcolor[z$cspan.rgroup+j]!="white")
# temp=paste(temp,"&\\cellcolor{",z$colcolor[z$cspan.rgroup+j],"}",sep="")
#else temp=paste(temp,"&",sep="")
temp=paste(temp,"&",sep="")
}
}
}
}
}
}
}
cat(paste(temp,"\\\\ \n",sep=""))
if(printcline>0) cat(paste("\\cline{1-",printcline,"}\n",sep=""))
rgroupcount=rgroupcount+1
}
}
#' Find valid color name
#'
#' @param a An integer or a character
#' @param mycolor predefined color names
#' @return a valid Latex color name
#' @export
validColor=function(a,mycolor){
if(is.numeric(a)) {
if(a>0 && a <11)
a=mycolor[a]
else a="peach"
} else {
a=validColor2(a)
}
a
}
#' Find valid color name
#'
#' @param a An integer or a character
#' @return a valid Latex color name
#' @importFrom grDevices colors
#' @export
validColor2=function(a){
if(!is.character(a)) a="peach"
else if(substr(a,1,1)=="#"){
a=a
} else {
if(tolower(a) %in% colors()){
a=tolower(a)
} else {
result=grep(paste("^",a,sep=""),ztable::zcolors$name,ignore.case=TRUE)
if(length(result)>0) {
a=ztable::zcolors$name[result][which.min(nchar(ztable::zcolors$name[result]))]
} else a="peach"
}
}
a
}
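# Usage sketch for validColor2 (illustrative; wrapped in if(FALSE) so
# nothing runs at package load):
if(FALSE){
    validColor2("#C90000")  # hex colors pass through unchanged
    validColor2("RED")      # -> "red", matched in grDevices::colors()
    validColor2(3)          # non-character input falls back to "peach"
    # unmatched names are prefix-matched against ztable::zcolors$name,
    # taking the shortest match, with "peach" as the final fallback
}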
#' Define colors
#'
#' Emit LaTeX color definitions for the colors in mycolors
#' @param mycolors A character vector of color names
#' @param no An integer indicating the start number
#' @export
define_colors=function(mycolors,no=1) {
if(is.null(mycolors)) return()
    uniquecolors=unique(as.vector(mycolors))
count=no
for(i in 1:length(uniquecolors)) {
if(uniquecolors[i]=="white") next
if(substr(uniquecolors[i],1,1)=="#") {
definition=hex2rgbDef(uniquecolors[i],no=count)
cat(definition,"\n")
mycolors[mycolors==uniquecolors[i]]=paste0("tempcolor",count)
count=count+1
} else{
number=grep(paste("^",uniquecolors[i],sep=""),ztable::zcolors$name)
if(length(number)<1) next
else{
definition=ztable::zcolors[number[1],3]
cat(definition,"\n")
}
}
}
attr(mycolors,"no")=count
mycolors
}
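# Usage sketch for define_colors (illustrative):
if(FALSE){
    define_colors(c("#C90000","white","peach"))
    # prints a \definecolor{tempcolor1}{rgb}{...} line for the hex color,
    # skips "white", and prints the predefined definition for "peach"
    # from ztable::zcolors; the returned vector maps "#C90000" to
    # "tempcolor1" and carries the next free number in attr(.,"no")
}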
hex2rgbDef=function(hex="#C90000",no=1){
r=hex2decimal(substr(hex,2,3))
g=hex2decimal(substr(hex,4,5))
b=hex2decimal(substr(hex,6,7))
paste0("\\definecolor{tempcolor",no,"}{rgb}{",r,",",g,",",b,"}")
}
hex2decimal=function(hex="C9"){
    temp=paste0("0x",hex)
    # normalize the 0-255 channel value to [0,1]; dividing by 256 would
    # leave "FF" just short of 1
    round(strtoi(temp)/255,2)
}
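# Worked example for the hex helpers (illustrative):
if(FALSE){
    hex2decimal("C9")        # 201/255, rounded -> 0.79
    hex2rgbDef("#C90000",1)  # "\\definecolor{tempcolor1}{rgb}{0.79,0,0}"
}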
| /scratch/gouwar.j/cran-all/cranData/ztable/R/ztable.R |
#' Make ztable from object cbind.mytable
#'
#'@param x An object of cbind.mytable
#'@param digits Numeric vector of length equal to one (in which case it will be
#' replicated as necessary) or to the number of columns of the resulting table
#'@param ... arguments to be passed to \code{\link{ztable_sub}}
#'@export
#'@examples
#'require(moonBook)
#'res=mytable(sex+DM~.,data=acs)
#'z=ztable(res)
#'z
ztable.cbind.mytable=function(x,digits=NULL,...){
t=list()
myalign="ll"
for(i in 1:length(x)){
count=ncol(x[[i]]$res)
if(x[[i]]$show.all==FALSE) count=ncol(x[[i]]$res)-7
for(j in 2: count) myalign=paste(myalign,"c",sep="")
if(i==1) t[[i]]=x[[i]]$res[1:count]
else t[[i]]=x[[i]]$res[2:count]
sub1=paste("(N=",x[[i]]$count,")",sep="")
if(i==1) {
sub1=c(NA,sub1)
while(length(sub1)<count) sub1=c(sub1,NA)
} else {
while(length(sub1)<(count-1)) sub1=c(sub1,NA)
}
if(i==1) sub=sub1
else sub=c(sub,sub1)
}
mydf=t[[1]]
for(i in 2:length(x)) mydf=cbind(mydf,t[[i]])
caption=paste("Descriptive Statistics Stratified by \'",
toupper(attr(x,"group")[1]),"\' and \'",
toupper(attr(x,"group")[2]),"\'",sep="")
z=ztable(mydf,caption=caption,align=myalign)
z=addSubColNames(z,sub)
z$include.rownames=FALSE
#colnames(z$x)[1]=""
#cgroup=c(toupper(attr(x,"group")[1]),attr(x,"caption"))
cgroup=c("",attr(x,"caption"))
colnames(z$x)[1]=""
n.cgroup=c(1,rep(count-1,length(x)))
z=addcgroup(z,cgroup=cgroup,n.cgroup)
z=vlines(z,type=0)
class(z)=c("ztable","ztable.mytable","ztable.cbind.mytable")
z=trim.ztable(z)
z
}
| /scratch/gouwar.j/cran-all/cranData/ztable/R/ztable.cbind.mytable.R |
#' Make ztable from object mytable
#'
#'@param x An object of mytable
#'@param digits Numeric vector of length equal to one (in which case it will be
#' replicated as necessary) or to the number of columns of the resulting table
#' @param ... arguments to be passed to \code{\link{ztable_sub}}
#' @export
#'@examples
#'require(moonBook)
#'res=mytable(sex~.,data=acs)
#'z=ztable(res)
#'z
ztable.mytable=function(x,digits=NULL,...){
count=ncol(x$res)
if(x$show.all==FALSE) count=ncol(x$res)-7
#
# myalign="ll"
# for(i in 2: (count-1)) myalign=paste(myalign,"c",sep="")
# myalign=paste(myalign,"r",sep="")
z=ztable(x$res[1:count])
colnames(z$x)[1]=""
sub=paste("(N=",x$count,")",sep="")
sub=c("",sub)
while(length(sub)<count) sub=c(sub,NA)
z=addSubColNames(z,sub)
z$include.rownames=FALSE
z=vlines(z,type=0)
class(z)=c("ztable","ztable.mytable")
z=trim.ztable(z)
z
}
#'Make align for an object of class ztable.mytable
#'@param z An object of class ztable.mytable
make_align=function(z){
if(is.null(z$cgroup)){
myalign="ll"
count=ncol(z$x)
for(i in 2: (count-2)) myalign=paste(myalign,"c",sep="")
myalign=paste(myalign,"rr",sep="")
} else{
        count=length(z$cgroup[[1]])-1
        colpergroup=(ncol(z$x)-1)/count
        myalign="ll"
        for(i in 1:count){
            temp=c(rep("c",colpergroup-2),"rr")
            myalign=paste0(myalign,str_flatten(temp))
        }
    }
myalign
}
#'Make align and edit p value column for an object of class ztable.mytable
#'@param z An object of class ztable.mytable
trim.ztable=function(z){
if(is.null(z$cgroup)){
df=z$x
df$p[df$p=="0.000"]="< 0.001"
z$x=df
} else{
        count=length(z$cgroup[[1]])-1
        colpergroup=(ncol(z$x)-1)/count
        pcol=which(colnames(z$x)=="p")
for(i in 1:length(pcol)){
no=pcol[i]
z$x[[no]][z$x[[no]]=="0.000"]="< 0.001"
}
}
myalign=make_align(z)
#cat("myalign=",myalign,"\n")
z$align=myalign
z
}
#'Arrange total column to the left
#'@param z An object of class ztable.mytable or ztable.cbind.mytable
#'@examples
#'require(moonBook)
#'require(ztable)
#'require(magrittr)
#'mytable(sex~.,data=acs,show.total=TRUE) %>% ztable() %>% totalLeft()
#'\dontrun{
#'mytable(sex+Dx~.,data=acs,show.total=TRUE) %>% ztable %>% totalLeft
#'}
#'@export
totalLeft=function(z){
if("ztable.cbind.mytable" %in% class(z)){
if("Total" %in% colnames(z$x)){
            no=length(z$subcolnames)
groupno=length(z$n.cgroup[[1]])-1
colpergroup=z$n.cgroup[[1]][2]
newno=1
for(i in 1:groupno){
start=2+(i-1)*colpergroup
end=start+colpergroup-1
temp=c(end-1,setdiff(start:end,end-1))
newno=c(newno,temp)
}
tempcolname=colnames(z$x)
z$x=z$x[newno]
z$subcolnames=z$subcolnames[newno]
colnames(z$x)=tempcolname[newno]
}
} else if("ztable.mytable" %in% class(z)){
if("Total" %in% colnames(z$x)){
            no=length(z$subcolnames)
totalno=ifelse("ptest" %in% colnames(z$x),no-8,no-1)
newno=c(1,totalno,setdiff(1:no,c(1,totalno)))
z$x=z$x[newno]
z$subcolnames=z$subcolnames[newno]
}
}
z
}
| /scratch/gouwar.j/cran-all/cranData/ztable/R/ztable.mytable.R |
#'@describeIn ztable Makes a ztable for class matrix
#'@export
ztable.matrix=function(x,digits=NULL,...){
result=data.frame(x,stringsAsFactors=FALSE)
colnames(result)=colnames(x)
out=ztable(result,...)
out
}
#'@describeIn ztable Makes a ztable for class 'lm'
#'@export
ztable.lm=function(x,digits=NULL,...){
result=data.frame(summary(x)$coeff)
colnames(result)=c("Estimate","Std. Error","t value","Pr(>|t|)")
h=deparse(x$call)
#h=gsub("~","$\\sim$",h,fixed=TRUE)
h=paste("Call: ",h,sep="")
attr(result,"footer")=h
if (is.null(digits)) mydigits=c(1,4,4,2,4)
else mydigits=digits
out=ztable_sub(result,digits=mydigits,pcol=4,...)
out
}
#'@describeIn ztable Makes a ztable for class 'fitdistr'
#'@export
ztable.fitdistr=function(x,digits=NULL,...){
if(is.null(digits)) mydigits=3
else mydigits=digits
result=rbind(x$estimate,x$sd)
rownames(result)=c("estimate","sd")
result=data.frame(result)
string=paste("N=",x$n,", The log-likelihood=",round(x$loglik,2),sep="")
attr(result,"footer")=string
out=ztable_sub(result,digits=mydigits)
out
}
#'@describeIn ztable Makes a ztable for class 'nls'
#' @importFrom stats formula
#'@export
ztable.nls=function(x,digits=NULL,...){
result=data.frame(summary(x)$coeff)
colnames(result)=c("Estimate","Std. Error","t value","Pr(>|t|)")
s=deparse(formula(x))
h1=paste(" model: ", s,"\n",sep="")
h2=paste(" data: ", deparse(x$data),"\n", sep = "")
h=c("Nonlinear regression model\n",h1,h2)
attr(result,"heading")=h
if (is.null(digits)) mydigits=c(1,4,4,2,4)
else mydigits=digits
out=ztable_sub(result,digits=mydigits,pcol=4,...)
out
}
#'@describeIn ztable Makes a ztable for class 'aov'
#'@export
ztable.aov=function(x,digits=NULL,...){
result=summary(x)[[1]]
if(!is.null(x$call)){
h=deparse(x$call)
h=paste("Call: ",h,sep="")
attr(result,"footer")=h
}
if (is.null(digits)) mydigits=c(1,0,2,2,2,4)
else mydigits=digits
out=ztable_sub(result,digits=mydigits,pcol=5,...)
out
}
#'@describeIn ztable Makes a ztable for class 'anova'
#'@export
ztable.anova=function(x,digits=NULL,...){
result=data.frame(x)
colnames(result)=colnames(x)
if(is.null(digits)) {
if(ncol(x)==4) mydigits=c(1,0,2,0,2)
else if (ncol(x)==5) mydigits=c(1,0,2,2,2,4)
else mydigits=c(1,1,2,1,2,2,4)
}
else mydigits=digits
#attr(result,"heading")=attr(x,"heading")
h=c()
if(!is.null(attr(x,"heading"))) {
heading=attr(x,"heading")
for(i in 1:length(heading)) {
h=c(h,unlist(strsplit(heading[i],"\n")))
}
}
attr(result,"heading")=h
if(!is.null(x$call)){
h=deparse(x$call)
h=paste("Call: ",h,sep="")
attr(result,"footer")=h
}
out=ztable_sub(result,digits=mydigits,pcol=ncol(x),...)
out
}
#'@describeIn ztable Makes a ztable for class 'glm'
#'@importFrom stats confint.default coef
#'@export
ztable.glm=function(x,digits=NULL,...){
a=summary(x)$coeff
b=data.frame(a)
colnames(b)=colnames(a)
suppressMessages(d<-confint.default(x))
OR=data.frame(exp(coef(x)),exp(d))
OR=round(OR,4)
OR=cbind(OR,round(summary(x)$coefficient[,4],4))
#result=na.omit(result)
colnames(OR)=c("OR","lcl","ucl","p")
i=apply(OR,1,function(x) any(is.na(x)))
OR[i,c(1,2,3)]=NA
out=cbind(b,OR[c(1,2,3)])
h=deparse(x$call)
if(length(h)==1) h=paste("Call: ",h,sep="")
else if(length(h)==2) h=paste("Call: ",h[1],h[2],sep="")
attr(out,"footer")=h
if (is.null(digits)) mydigits=c(1,4,4,2,4,2,2,2)
else mydigits=digits
out=ztable_sub(out,digits=mydigits,pcol=4,...)
out
}
#'@describeIn ztable Makes a ztable for class 'coxph'
#'@export
ztable.coxph=function(x,digits=NULL,...){
a=summary(x)$coeff
b=summary(x)$conf.int
result=cbind(b[,c(1,3,4)],a[,c(3,4,5)])
result=data.frame(result)
h=deparse(x$call)
if(length(h)==1) h=paste("Call: ",h,sep="")
else if(length(h)==2) h=paste("Call: ",h[1],h[2],sep="")
attr(result,"footer")=h
colnames(result)=c("HR","lcl", "ucl", "se(coef)","z","Pr(>|z|)")
if (is.null(digits)) mydigits=c(0,3,3,3,3,3,4)
else mydigits=digits
out=ztable_sub(result,digits=mydigits,pcol=6,...)
out
}
#'@describeIn ztable Makes a ztable for class 'prcomp'
#'@export
ztable.prcomp=function(x,digits=NULL,...){
result=data.frame(x$rotation)
colnames(result)=colnames(x$rotation)
attr(result,"heading") <- "Rotation:"
if(!is.null(x$call)){
h=deparse(x$call)
h=paste("Call: ",h,sep="")
attr(result,"footer")=h
}
if (is.null(digits)) mydigits=c(1,4,4,4,4)
else mydigits=digits
out=ztable_sub(result,digits=mydigits,...)
out
}
#'@describeIn ztable Makes a ztable for class 'summary.prcomp'
#'@export
ztable.summary.prcomp=function(x,digits=NULL,...){
result=data.frame(x$importance)
colnames(result)=colnames(x$importance)
attr(result,"heading") <- "Importance of components:"
if(!is.null(x$call)){
h=deparse(x$call)
h=paste("Call: ",h,sep="")
attr(result,"footer")=h
}
if (is.null(digits)) mydigits=c(1,4,4,4,4)
else mydigits=digits
out=ztable_sub(result,digits=mydigits,...)
out
}
| /scratch/gouwar.j/cran-all/cranData/ztable/R/ztable1.R |
#' Add row colors of an object of ztable
#'
#' @param z An object of ztable
#' @param rows An integer vector indicating specific rows
#' @param bg A character vector indicating background color
#' @param color A character vector indicating color
#' @param condition Logical expression to select rows
#' @importFrom magrittr "%>%"
#' @export
#' @examples
#' z=ztable(head(iris))
#' z=addRowColor(z,c(1,3),color="platinum")
#' z
addRowColor=function(z,rows=NULL,bg=NULL,color=NULL,condition=NULL){
if(!is.null(bg)){
for(i in 1:length(bg)) bg[i]=validColor(bg[i])
selected=NULL
selected <- if (!missing(condition)) {
e <- substitute(condition)
r <- eval(e, z$x, parent.frame())
if (!is.logical(r))
stop("'subset' must be logical")
selected=which(r & !is.na(r) )+1
}
rows=c(rows,selected)
if(is.null(rows)) rows=1:(nrow(z$x)+1)
if(length(rows)>length(bg)) bg=rep(bg,1+length(rows)/length(bg))
for(i in 1:length(rows))
for(j in 1:ncol(z$cellcolor))
z$cellcolor[rows[i],j]=bg[i]
z$zebra.type=3
z$zebra=3
if(is.null(z$rowcolor)) z$rowcolor=rep("white",nrow(z$cellcolor))
for(j in 1:length(rows)) z$rowcolor[rows[j]]=bg[j]
}
if(!is.null(color)){
for(i in 1:length(color)) color[i]=validColor(color[i])
if(length(rows)>length(color)) color=rep(color,1+length(rows)/length(color))
for(i in 1:length(rows))
for(j in 1:ncol(z$frontcolor))
z$frontcolor[rows[i],j]=color[i]
}
z
}
#' Add column colors of an object of ztable
#'
#' @param z An object of ztable
#' @param cols An integer vector indicating specific columns
#' @param bg A character vector indicating background color
#' @param color A character vector indicating color
#'@export
#' @examples
#' z=ztable(head(iris))
#' z=addColColor(z,c(1,3),color="platinum")
#' z
addColColor=function(z,cols=NULL,bg=NULL,color=NULL){
cols <- if (missing(cols))
1:(ncol(z$x)+1)
else {
nl <- as.list(seq_along(z$x))
names(nl) <- names(z$x)
result=tryCatch(class(cols),error=function(e) "error")
add=0
if(result=="error") add=1
eval(substitute(cols), nl, parent.frame())+add
}
if(!is.null(bg)){
for(i in 1:length(bg)) bg[i]=validColor(bg[i])
if(length(cols)>length(bg)) bg=rep(bg,1+length(cols)/length(bg))
for(j in 1:length(cols))
for(i in 1:nrow(z$cellcolor))
z$cellcolor[i,cols[j]]=bg[j]
z$zebra.type=3
z$zebra=3
if(is.null(z$colcolor)) z$colcolor=rep("white",ncol(z$cellcolor))
for(j in 1:length(cols)) z$colcolor[cols[j]]=bg[j]
}
if(!is.null(color)){
for(i in 1:length(color)) color[i]=validColor(color[i])
if(length(cols)>length(color)) color=rep(color,1+length(cols)/length(color))
for(j in 1:length(cols))
for(i in 1:nrow(z$frontcolor))
z$frontcolor[i,cols[j]]=color[j]
}
z
}
#' Add cell colors of an object of ztable
#'
#' @param z An object of ztable
#' @param rows An integer vector indicating specific rows
#' @param cols An integer vector indicating specific columns
#' @param bg A character vector indicating background color
#' @param color A character vector indicating color
#' @param condition Logical expression to select rows
#' @export
#' @examples
#' \dontrun{
#' z=ztable(head(iris))
#' z=addRowColor(z,c(1,3),color="platinum")
#' z=addColColor(z,2,color="cyan")
#' z=addCellColor(z,cols=c(5,4),rows=5,color="red")
#' z
#' }
addCellColor=function(z,rows=NULL,cols=NULL,bg=NULL,color=NULL,condition=NULL){
selected=NULL
selected <- if (!missing(condition)) {
e <- substitute(condition)
r <- eval(e, z$x, parent.frame())
if (!is.logical(r))
stop("'subset' must be logical")
selected=which(r & !is.na(r) )+1
}
rows=c(rows,selected)
if(is.null(rows)) rows=1:(nrow(z$x)+1)
cols <- if (missing(cols))
1:(ncol(z$x)+1)
else {
nl <- as.list(seq_along(z$x))
names(nl) <- names(z$x)
result=tryCatch(class(cols),error=function(e) "error")
add=0
if(result=="error") add=1
eval(substitute(cols), nl, parent.frame())+add
}
# while(length(rows)!=length(cols)){
# if(length(rows)<length(cols)){
# rows=c(rows,rows)
# if(length(rows)>length(cols)) rows=rows[1:length(cols)]
# }
# if(length(rows)>length(cols)){
# cols=c(cols,cols)
# if(length(cols)>length(rows)) cols=cols[1:length(rows)]
# }
# }
if(!is.null(bg)){
for(i in 1:length(bg)) bg[i]=validColor(bg[i])
if(length(cols)>length(bg)) bg=rep(bg,1+length(cols)/length(bg))
for(i in 1:length(rows)) {
for(j in 1:length(cols)){
z$cellcolor[rows[i],cols[j]]=bg[j]
}
}
}
if(!is.null(color)){
for(i in 1:length(color)) color[i]=validColor(color[i])
if(length(cols)>length(color)) color=rep(color,1+length(cols)/length(color))
for(i in 1:length(rows)) {
for(j in 1:length(cols)){
z$frontcolor[rows[i],cols[j]]=color[j]
}
}
# for(i in 1:length(cols)) {
# z$frontcolor[rows[i],cols[i]]=color[i]
# result=getspanRowLength(z,rows[i],cols[i])
#
# if(!is.null(result)){
# if(result>1){
# for(j in 1:(result-1)) z$frontcolor[(rows[i]+j),cols[i]]=color[i]
# }
# }
# }
}
z$zebra.type=3
z$zebra=3
z
}
#' Add font colors to selected cells of an object of ztable
#'
#' @param z An object of ztable
#' @param rows An integer vector indicating specific rows
#' @param cols An integer vector indicating specific columns
#' @param color A character vector indicating color
#' @export
#' @examples
#' z=ztable(head(iris))
#' z=addFrontColor(z,rows=2:4,cols=c(2,4,6),color=c("red","green","blue"))
#' z
addFrontColor=function(z,rows,cols,color){
for(i in 1:length(color)) color[i]=validColor(color[i])
if(length(cols)>length(color)) color=rep(color,1+length(cols)/length(color))
for(i in 1:length(rows)) {
for(j in 1:length(cols)){
z$frontcolor[rows[i],cols[j]]=color[j]
result=getspanRowLength(z,rows[i],cols[j])
if(!is.null(result)){
if(result>1){
for(k in 1:(result-1)) z$frontcolor[(rows[i]+k),cols[j]]=color[j]
}
}
}
}
z
}
#' Gets spanRow length
#'
#'@param z An object of ztable
#'@param i An integer indicating the row of specific cell
#'@param j An integer indicating the column of specific cell
#'@export
#'@return The row count when a spanRow starts at this cell, 0 when the cell is
#' covered by a row span, NULL otherwise.
getspanRowLength=function(z,i,j){
if(is.null(z$spanRow)) return(NULL)
newspan=z$spanRow
for(k in 1:nrow(newspan)) {
if(newspan[k,1]!=j) next
if(newspan[k,2]>i) next
if(newspan[k,2]==i) return(newspan[k,3]-newspan[k,2]+1)
else if((newspan[k,2]<j) & (newspan[k,3]>=j)) return(0)
else next
}
return(NULL)
}
#' Add column groups of an object of ztable
#'
#'@param z An object of ztable
#'@param cgroup A character vector or matrix indicating names of column groups. Default value is NULL
#'@param n.cgroup An integer vector or matrix indicating the numbers of columns included in each cgroup.
#' Default value is NULL
#'@param color A character vector indicating the font color of each cells.
#'@param bg A character vector indicating the background color of each cells.
#'@param top Logical. Whether or not cgroup be placed at top.
#'@export
addcgroup=function(z,cgroup,n.cgroup,color="black",bg="white",top=FALSE){
if(length(color)==1){
color=rep(color,length(cgroup)+1)
} else{
color=c("black",color)
}
if(length(bg)==1){
bg=rep(bg,length(cgroup)+1)
} else{
bg=c("white",bg)
}
if(length(z$cgroup)==0) {
z$cgroup=list()
z$cgroup[[1]]=cgroup
z$cgroupcolor=list()
z$cgroupcolor[[1]]=color
z$cgroupbg=list()
z$cgroupbg[[1]]=bg
z$n.cgroup=list()
z$n.cgroup[[1]]=n.cgroup
} else{
if(top){
            no=length(z$cgroup)
            for(i in no:1){
                # shift each existing layer up by one to make room at the top
                z$cgroup[[i+1]]=z$cgroup[[i]]
                z$cgroupcolor[[i+1]]=z$cgroupcolor[[i]]
                z$cgroupbg[[i+1]]=z$cgroupbg[[i]]
                z$n.cgroup[[i+1]]=z$n.cgroup[[i]]
            }
z$cgroup[[1]]=cgroup
z$cgroupcolor[[1]]=color
z$cgroupbg[[1]]=bg
z$n.cgroup[[1]]=n.cgroup
} else{
no=length(z$cgroup)+1
z$cgroup[[no]]=cgroup
z$cgroupcolor[[no]]=color
z$cgroupbg[[no]]=bg
z$n.cgroup[[no]]=n.cgroup
}
}
z
}
#' Add row groups of an object of ztable
#'
#'@param z An object of ztable
#'@param rgroup A character vector indicating names of row groups. Default value is NULL
#'@param n.rgroup An integer vector indicating the numbers of rows included in each rgroup.
#' Default value is NULL
#'@param cspan.rgroup An integer indicating the column span of rgroup
#'@param color A character vector indicating the font color of rgroup.
#'@param bg A character vector indicating the background color of rgroup.
#'@export
addrgroup=function(z,rgroup,n.rgroup,cspan.rgroup=NULL,color="black",bg="white"){
if(is.null(rgroup)) return(z)
for(i in 1:length(rgroup)) {
if(is.na(rgroup[i])) rgroup[i]=""
}
z$rgroup=rgroup
z$n.rgroup=n.rgroup
z$cspan.rgroup=cspan.rgroup
if(length(bg)==1) bg=rep(bg,length(rgroup))
if(length(color)==1) color=rep(color,length(rgroup))
z$colcolor=rep(bg,ncol(z$cellcolor))
z$rgroupcolor=color
z$rgroupbg=bg
z
}
#' Count the colgroup of an object of ztable
#'
#' @param z An object of class ztable
#' @return A vector indicating the position of colgroup
#'@export
colGroupCount=function(z){
if(is.null(z$cgroup)) return(NULL)
if(is.null(z$n.cgroup)) return(NULL)
result=c()
for(i in 1:length(z$n.cgroup)){
count=0
for(j in 1:length(z$n.cgroup[[i]])) {
if(is.na(z$n.cgroup[[i]][j])) break
count=count+z$n.cgroup[[i]][j]
result=c(result,count)
}
}
a=unique(result)
a[order(a)]
}
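# Worked example (illustrative): with n.cgroup = list(c(2,2,1)) the
# cumulative sums are 2, 4 and 5, so colGroupCount() returns c(2,4,5) --
# the position of the last data column of each column group.
if(FALSE){
    z=addcgroup(ztable(head(iris)),cgroup=c("Sepal","Petal","Species"),
                n.cgroup=c(2,2,1))
    colGroupCount(z)   # c(2,4,5)
}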
#' Count the colspan of each colgroup
#'
#' @param z An object of ztable
#' @return A matrix indicating the column span occupied by each colgroup
#' @export
cGroupSpan=function(z){
    vlines=align2lines(z$align)
    colCount=colGroupCount(z)
    newCount=c()
    addrow=ifelse(z$include.rownames,1,0)
    for(i in 1:length(colCount)) {
        if(vlines[colCount[i]+1+addrow]==0) newCount=c(newCount,colCount[i])
    }
    if(is.null(newCount)) return(z$n.cgroup)
result=z$n.cgroup
for(i in 1:length(z$n.cgroup)){
start=0
for(j in 1:length(z$n.cgroup[[i]])) {
if(is.na(z$n.cgroup[[i]][j])) break
end=start+z$n.cgroup[[i]][j]
count=0
for(k in 1:length(newCount)){
if(newCount[k]>start & newCount[k]<end) count=count+1
}
result[[i]][j]=result[[i]][j]+count
#cat("start=",start,",end=",end,",result[",i,",",j,"]=",result[i,j],"\n")
start=end
}
}
result
}
#' Print the head of latex table if the object of ztable has a colgroup
#'
#' @param z An object of ztable
#' @export
printLatexHead=function(z){
if(is.null(z$cgroup)) return()
if(is.null(z$n.cgroup)) return()
#colCount=colGroupCount(z)
ncount=ncol(z$x)
addrow=ifelse(z$include.rownames,1,0)
cGroupSpan=cGroupSpan(z)
totalCol=totalCol(z)
vlines=align2lines(z$align)
#vlines=align2lines(getNewAlign(z))
#vlines
for(i in 1:length(z$cgroup)){
colSum=0
linecount=1
if(z$include.rownames) {
firstrow=cat(paste("\\cellcolor{",z$cgroupbg[[i]][1],"} &",sep=""))
colSum=1
linecount=1
}
for(j in 1:length(z$cgroup[[i]])) {
if(is.na(z$cgroup[[i]][j])) break
mcalign="c"
if((j==1) & (addrow==0) & (vlines[linecount+1]>0))
for(k in 1:vlines[linecount+1]) mcalign=paste("|",mcalign,sep="")
end=colSum+cGroupSpan[[i]][j]+1
linecount=linecount+z$n.cgroup[[i]][j]
if(vlines[linecount+1]>0)
for(k in 1:vlines[linecount+1]) mcalign=paste(mcalign,"|",sep="")
second=paste("\\multicolumn{",cGroupSpan[[i]][j],"}{",mcalign,"}{",sep="")
colSum=colSum+cGroupSpan[[i]][j]
if(z$cgroupbg[[i]][j+1]!="white")
second=paste(second,"\\cellcolor{",z$cgroupbg[[i]][j+1],"}",sep="")
if(z$cgroupcolor[[i]][j+1]!=z$color) {
second=paste(second,"\\color{",z$cgroupcolor[[i]][j+1],"}",sep="")
}
if(z$colnames.bold)
second=paste(second,"\\textbf{",z$cgroup[[i]][j],"}}",sep="")
else second=paste(second,z$cgroup[[i]][j],"}",sep="")
if(j!=1) second=paste("&",second,sep="")
cat(second)
if(linecount<(ncol(z$x)+1)) if(vlines[linecount+1]==0) cat("&")
}
cat("\\\\ \n")
colSum=addrow+1
start=1
for(j in 1:length(z$cgroup[[i]])) {
if(is.na(z$cgroup[[i]][j])) break
if(z$cgroup[[i]][j]!="")
cat(paste("\\cline{",colSum,"-",colSum+cGroupSpan[[i]][j]-1,"}",sep=""))
colSum=colSum+cGroupSpan[[i]][j]
start=start+z$n.cgroup[[i]][j]
if(j < length(z$cgroup[[i]])) if(vlines[start+1]==0) colSum=colSum+1
}
cat("\n")
}
}
#' Calculating total columns of ztable
#'
#' @param z An object of ztable
#' @export
totalCol=function(z){
ncount=ncol(z$x)
addrow=ifelse(z$include.rownames,1,0)
result=ncount+addrow
vlines=align2lines(z$align)
if(!is.null(z$cgroup)) {
colCount=colGroupCount(z)
if(length(colCount)>1){
for(i in 1:(length(colCount)-1)) {
if(vlines[colCount[i]+2]==0) result=result+1
}
}
}
result
}
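# Worked example (illustrative): continuing the colGroupCount() sketch
# above, and assuming the default align (no vertical lines), totalCol(z)
# is 5 data columns + 1 rowname column + 2 spacer columns (one after
# each of the first two groups) = 8.
if(FALSE){
    totalCol(z)   # 8, with z from the previous sketch
}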
#' Merging data cells of ztable object in columns
#'
#' @param z An object of ztable
#' @param row An integer indicating the row of merging data cell
#' @param from An integer indicating start column of merging data cell
#' @param to An integer indicating end column of merging data cell
#' @param bg An optional character indicating the background color of merging cell
#' @param color An optional character indicating the font color of merging cell
#' @export
spanCol=function(z,row,from,to,bg=NULL,color=NULL){
if(length(row)!=1) {
warning("Only one row is permitted")
return(z)
}
if(row<0 | (row > (nrow(z$x)+1))) {
warning("Out of range : row")
return(z)
}
if(from>to){
warning("\"to\" must be equal to or greater than \"from\"")
return(z)
}
if(is.null(z$spanCol)) z$spanCol=matrix(c(row,from,to),nrow=1)
else z$spanCol=rbind(z$spanCol,c(row,from,to))
#colnames(z$spanCol)=c("row","from","to")
z=addCellColor(z,cols=from,rows=row,bg=bg,color=color)
z
}
#' Merging data cells of ztable object in rows
#'
#' @param z An object of ztable
#' @param col An integer indicating the column of merging data cell
#' @param from An integer indicating start row of merging data cell
#' @param to An integer indicating end row of merging data cell
#' @param bg An optional character indicating the background color of merging cell
#' @param color An optional character indicating the font color of merging cell
#' @export
spanRow=function(z,col,from,to,bg=NULL,color=NULL){
    if(length(col)!=1) {
        warning("Only one col is permitted")
return(z)
}
if(col<0 | col > (ncol(z$x)+1)) {
warning("Out of range : col")
return(z)
}
if(from>to){
warning("\"to\" must be equal to or greater than \"from\"")
return(z)
}
if(is.null(z$spanRow)) z$spanRow=matrix(c(col,from,to),nrow=1)
else z$spanRow=rbind(z$spanRow,c(col,from,to))
#colnames(z$spanRow)=c("col","from","to")
#if(!is.null(color)) z=addCellColor(z,cols=col,rows=from,color=color)
z=addCellColor(z,cols=col,rows=from,bg=bg,color=color)
z
}
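# Bookkeeping sketch (illustrative): spanCol()/spanRow() only record the
# merge as a row of a small matrix; the LaTeX \multicolumn/\multirow is
# emitted later at print time.
if(FALSE){
    z=ztable(head(iris))
    z=spanCol(z,row=2,from=3,to=4)  # appends c(2,3,4) to z$spanCol
    z=spanRow(z,col=2,from=4,to=6)  # appends c(2,4,6) to z$spanRow
    # indices include the header row and rowname column (both count as 1)
}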
#' Identify the spanCol status of a cell
#'
#'@param z An object of ztable
#'@param i An integer indicating the row of specific cell
#'@param j An integer indicating the column of specific cell
#'@return The column count (plus any group spacer columns) when a spanCol starts
#' at this cell, 0 when the cell is covered by the span, NULL when no span.
isspanCol=function(z,i,j){
if(is.null(z$spanCol)) return(NULL)
newspan=getNewSpanCol(z)
for(k in 1:nrow(newspan)) {
if(newspan[k,1]!=i) next
if(newspan[k,2]>j) next
if(newspan[k,2]==j) return(newspan[k,3]-newspan[k,2]+1)
else if((newspan[k,2]<j) & (z$spanCol[k,3]>=j)) return(0)
else next
}
return(NULL)
}
#' Calculate the spanColWidth when spanCol start
#'
#'@param z An object of ztable
#'@param i An integer indicating the row of specific cell
#'@param j An integer indicating the column of specific cell
#'
#'@return The column count when a spanCol starts at this cell
spanColWidth=function(z,i,j){
if(is.null(z$spanCol)) return(NULL)
newspan=z$spanCol
for(k in 1:nrow(newspan)) {
if(newspan[k,1]!=i) next
if(newspan[k,2]>j) next
if(newspan[k,2]==j) return(newspan[k,3]-newspan[k,2]+1)
else next
}
return(NULL)
}
#' Calculating new spanCol with spanCol plus space made by column group
#'
#'@param z An object of ztable
#'@export
getNewSpanCol=function(z){
    result=z$spanCol
    result1=result
    if(is.null(z$cgroup)) return(result)
    if(is.null(colGroupCount(z))) return(result)
    vlines=align2lines(z$align)
    addcol=ifelse(z$include.rownames,1,0)
    colCount=colGroupCount(z)+addcol
    newCount=c()
for(i in 1:length(colCount)) {
if(vlines[colCount[i]+1]==0) newCount=c(newCount,colCount[i])
}
if(is.null(newCount)) return(result)
for(i in 1:nrow(result)) {
for(j in 1:(length(newCount))) {
if((result[i,2]<=newCount[j]) & (result[i,3]>newCount[j])) {
result1[i,3]=result1[i,3]+1
}
}
}
return(result1)
}
#' Identify the spanRow status of a cell
#'
#'@param z An object of ztable
#'@param i An integer indicating the row of specific cell
#'@param j An integer indicating the column of specific cell
#'@return columns count plus spaces by rgroup when spanRow starts, 0 when row spans,
#' minus value when spanRow ends, NULL when no span.
isspanRow=function(z,i,j){
if(is.null(z$spanRow)) return(NULL)
newspanRow=getNewSpanRow(z)
for(k in 1:nrow(z$spanRow)) {
if(z$spanRow[k,1]!=j) next
if(z$spanRow[k,2]>i) next
if(z$spanRow[k,2]==i) return(newspanRow[k,3]-newspanRow[k,2]+1)
else if(z$spanRow[k,3]==i) return(-(newspanRow[k,3]-newspanRow[k,2]+1))
else if((z$spanRow[k,2]<i) & (z$spanRow[k,3]>i)) return(0)
else next
}
return(NULL)
}
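# Return-value sketch for isspanRow (illustrative): given a z$spanRow
# entry c(2,4,7) -- column 2 merged over table rows 4..7, in the same
# header-inclusive coordinates used by spanRow() -- isspanRow(z,4,2) is
# positive (span starts), isspanRow(z,5,2) is 0 (cell covered),
# isspanRow(z,7,2) is negative (span ends), and other cells give NULL.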
#'Gets the spanRow start row
#'
#'@param z An object of ztable
#'@param i An integer indicating the row of specific cell
#'@param j An integer indicating the column of specific cell
#'
#'@return An integer indicating the row where the spanRow starts. This function
#' supports LaTeX multirow.
getspanRowData=function(z,i,j){
for(k in 1:nrow(z$spanRow)) {
if(z$spanRow[k,1]!=j) next
if(z$spanRow[k,2]>=i) next
if(z$spanRow[k,3]==i) return(z$spanRow[k,2])
}
return(NULL)
}
#' Calculating new spanRow with spanRow plus space made by row group
#'
#'@param z An object of ztable
getNewSpanRow=function(z){
result=z$spanRow
result1=result
if(is.null(z$rgroup)) return(result)
if(is.null(z$n.rgroup)) return(result)
#colCount=colGroupCount(z)+1
printrgroup=1
if(!is.null(z$n.rgroup)){
if(length(z$n.rgroup)>1) {
for(i in 2:length(z$n.rgroup)) {
printrgroup=c(printrgroup,printrgroup[length(printrgroup)]+z$n.rgroup[i-1])
}
}
}
for(i in 1:nrow(result)) {
for(j in 2:(length(printrgroup))) {
if((result[i,2]<=printrgroup[j]) & (result[i,3]>printrgroup[j])) {
result1[i,3]=result1[i,3]+1
}
}
}
result1
}
#' Returns whether or not the column at position start plus length is a group column
#'
#' @param start An integer indicating start column position
#' @param length An integer indicating spanCol length
#' @param colCount An integer vector calculating from colGroupCount()
#' @export
isGroupCol=function(start,length,colCount){
if(is.null(colCount)) return(0)
newstart=start
for(i in 1:length(colCount)){
if(colCount[i]<start) newstart=start+1
}
result=colCount
for(i in 1:length(colCount)){
result[i]=colCount[i]+(i-1)+1
}
if((newstart+length) %in% result[-length(result)]) return(1)
else return(0)
}
#' Add a adjunctive name below column name in a ztable
#'
#'@param z An object of ztable
#'@param subcolnames A character vector
#'@export
addSubColNames=function(z,subcolnames){
if(length(subcolnames)!=length(z$x))
warning("length of subconames is different from length of z$x")
else z$subcolnames=subcolnames
z
}
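# Usage sketch (illustrative): supply one label per column of z$x,
# using NA for columns without a sub-label.
if(FALSE){
    z=ztable(head(iris))
    z=addSubColNames(z,c("(cm)","(cm)","(cm)","(cm)",NA))
}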
#' Add row color or cell color for rows or cells with p-value less than level in a ztable
#'
#'@param z An object of ztable
#'@param level A numeric significance level (p-value cutoff)
#'@param bg A character indicating background color
#'@param color A character indicating color
#'@export
addSigColor=function(z,level=0.05,bg="lightcyan",color="black"){
if("ztable.mytable" %in% class(z)) {
if(is.null(z$cgroup)){
temp=z$x[[ncol(z$x)]]
temp[temp=="< 0.001"]=0
below05=which(as.numeric(temp)<level)+1
if(length(below05)>0) {
z1=addRowColor(z,rows=below05,bg=bg,color=color)
} else{
z1=z
}
} else{
            count=length(z$cgroup[[1]])-1
            colpergroup=(ncol(z$x)-1)/count
z1<-z
for(i in 2:(count+1)){
pcol=1+colpergroup*(i-1)
temp=z$x[[pcol]]
temp[temp=="< 0.001"]=0
below05=which(as.numeric(temp)<level)+1
if(length(below05)>0) for(j in 1:length(below05))
z1=addCellColor(z1,rows=below05[j],
cols=(pcol+1-(colpergroup-1)):(pcol+1),bg=bg,color=color)
}
}
} else {
if(!is.null(z$pcol)){
temp=z$x[[z$pcol]]
below05=which(as.numeric(temp)<level)+1
if(length(below05)>0){
z1=addRowColor(z,rows=below05,bg=bg,color=color)
} else{
z1<-z
}
} else{
z1=z
}
}
z1
}
| /scratch/gouwar.j/cran-all/cranData/ztable/R/ztable2.R |
#' Convert a named color into a hexadecimal color with rgb value
#' @param color A named color
#' @return a hexadecimal color
#' @importFrom stringr str_flatten str_detect
#' @importFrom grDevices col2rgb
#' @export
#' @examples
#' color2hex("green")
#' color2hex("red")
color2hex <- function(color) {
## return (directly):
if(str_detect(color,"#")) {
color
} else {
temp <- ztable::zcolors$rgb[ztable::zcolors$name == tolower(color)]
if(length(temp)==1) {
paste0("#",temp)
} else { # not found in zcolors, assume base R color:
res <- col2rgb(color)
paste0("#", paste(sprintf("%02x", res), collapse=""))
}
}
}
#' Extract hexadecimal colors from a color palette
#' @param name The name of color palette from RColorBrewer package
#' @param reverse Whether or not to reverse the order of colors
#' @return hexadecimal colors
#' @importFrom RColorBrewer brewer.pal brewer.pal.info
#' @export
#' @examples
#' require(RColorBrewer)
#' require(magrittr)
#' palette2colors("Reds")
#' ztable(head(mtcars,10)) %>%
#' addColColor(cols=1:12,bg=palette2colors("Set3"))
palette2colors=function (name, reverse = FALSE)
{
colors = brewer.pal(RColorBrewer::brewer.pal.info[rownames(brewer.pal.info) ==
name, "maxcolors"], name)
if (reverse)
colors = rev(colors)
colors
}
#' Convert cgroup of ztable into data.frame
#' @param z An object of ztable
#' @return A data.frame
cgroup2df=function(z){
cgroup=z$cgroup
n.cgroup=z$n.cgroup
layer=length(cgroup)
name=list()
for(i in 1:layer){
temp=c()
for(j in 1:length(cgroup[[i]])){
temp=c(temp,rep(cgroup[[i]][j],n.cgroup[[i]][j]))
}
name[[i]]=temp
}
header=data.frame(
col_keys=colnames(z$x),stringsAsFactors=FALSE
)
for(i in 1:layer){
temp=name[[i]]
header=cbind(header,temp,stringsAsFactors=FALSE)
colnames(header)[ncol(header)]=paste0("name",i)
}
header=cbind(header,colnames(z$x),stringsAsFactors=FALSE)
header
}
#' Round the numbers of a data.frame
#' @param df A data.frame
#' @param digits A vector of integer indicating the number of decimal places
#' @return a rounded data.frame
#' @export
roundDf=function(df,digits=2){
if(length(digits)==1){
digits<-rep(digits,ncol(df))
}
else if(length(digits)<ncol(df)) {
digits<-c(digits,rep(0,ncol(df)-length(digits)))
}
df[]<-lapply(1:ncol(df),function(i){
if(is.integer(df[[i]])) {
df[[i]]<-df[[i]]
} else if(is.numeric(df[[i]])) {
fmt=paste0("%0.",sprintf("%d",digits[i]),"f")
df[[i]]=sprintf(fmt,df[[i]])
} else{
df[[i]]<-df[[i]]
}
})
df
}
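# Usage sketch (illustrative):
if(FALSE){
    roundDf(head(mtcars),digits=2)
    # numeric columns are formatted with sprintf(), so trailing zeros
    # are kept (e.g. "21.00"); integer and character columns pass
    # through, and a short digits vector is padded with 0
}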
#' Convert an object of ztable into an object of flextable
#' @param z An object of class ztable
#' @return An object of class flextable
#' @importFrom flextable regulartable delete_part bg color border align merge_at
#' @importFrom flextable set_header_df merge_h padding hline_top hline border_remove add_footer add_header
#' @importFrom officer fp_border
#' @export
#' @examples
#' z=ztable(head(mtcars))
#' ztable2flextable(z)
ztable2flextable=function(z){
df=data2table(z)
addcol=0
if(z$include.rownames) {
df=cbind(rowname=rownames(df),df)
addcol=1
}
# if(addcol){
# digits=z$digits
# } else{
# digits=z$digits[-1]
# }
# df<-roundDf(df,digits)
if(sum(colnames(df)=="")>0){
colnames(df)[which(colnames(df)=="")]=rep(" ",which(colnames(df)==""))
}
big_border = fp_border(color="black", width = 2)
std_border = fp_border(color="black", width = 1)
ft<-regulartable(df) %>% border_remove() %>%
hline_top(border=big_border,part="header") %>%
hline(border=std_border,part="header") %>%
hline(border=std_border,i=nrow(df),part="body")
if(z$include.rownames) {
ft<-ft %>% color(i=1,j=1,color="white",part="header")
}
if(z$include.colnames==FALSE) ft<- ft %>% delete_part("header")
for(i in 1:(nrow(df)+1)){
for(j in 1:(ncol(df))){
if(z$cellcolor[i,j]!="white") {
if(i==1) {
ft<-ft %>% bg(i=i,j=j-ifelse(addcol,0,1),bg=color2hex(z$cellcolor[i,j]),part="header")
} else{
ft<-ft %>% bg(i=i-1,j=j-ifelse(addcol,0,1),bg=color2hex(z$cellcolor[i,j]),part="body")
}
}
}
}
for(i in 1:(nrow(df)+1)){
for(j in 1:(ncol(df))){
if(z$frontcolor[i,j]!="black") {
#cat("i=",i,",j=",j,",color=",z$frontcolor[i,j],"\n")
if(i==1) {
ft<-ft %>% color(i=i,j=j-ifelse(addcol,0,1),color=color2hex(z$frontcolor[i,j]),part="header")
} else{
ft<-ft %>% color(i=i-1,j=j-ifelse(addcol,0,1),color=color2hex(z$frontcolor[i,j]),part="body")
}
}
}
}
if(!is.null(attr(z$x,"footer"))){
footer=attr(z$x,"footer")
rowname=ft$header$col_keys[1]
rowname
ft<-eval(parse(text=paste0("add_footer(ft,",rowname,"=footer)"))) %>%
merge_at(i=1,j=1:ncol(df),part="footer")
}
if(!is.null(z$caption)){
header=z$caption
rowname=ft$header$col_keys[1]
ft<-eval(parse(text=paste0("add_header(ft,",rowname,"=header)"))) %>%
merge_at(i=1,j=1:ncol(df),part="header") %>%
color(i=1,j=1,color="black",part="header") %>%
bg(i=1,j=1,bg="white",part="header") %>%
border(border.top=fp_border(color="white"),part="header")
}
if(!is.null(z$cgroup)){
header=cgroup2df(z)
ft<-ft %>% set_header_df(mapping=header,key="col_keys") %>%
merge_h(part="header") %>%
border(border.top=fp_border(),border.bottom=fp_border(),part="header") %>%
padding(padding.left=4,padding.right=4,part="header")
for(i in 1:length(z$cgroupcolor)){
for(j in 1:length(z$cgroupcolor[[i]])){
if(z$cgroupcolor[[i]][j]!="black") {
if(addcol){
if(j <= 1+addcol) {
mycol=j
} else{
mycol=cumsum(z$n.cgroup[[i]])[j-1-addcol]+1+addcol
}
} else{
if(j<=2){
mycol=1
} else{
mycol=cumsum(z$n.cgroup[[i]])[j-2]+1
}
}
# cat("z$cgroupcolor[[",i,"]][",j,"]=",z$cgroupcolor[[i]][j],",mycol=",mycol,"\n")
ft<-ft %>% color(i=i,j=mycol,color=color2hex(z$cgroupcolor[[i]][j]),part="header")
}
if(z$cgroupbg[[i]][j]!="white") {
# cat("z$cgroupbg[[",i,"]][",j,"]=",z$cgroupbg[[i]][j],"\n")
if(addcol){
if(j <= 1+addcol) {
mycol=j
} else{
mycol=cumsum(z$n.cgroup[[i]])[j-1-addcol]+1+addcol
}
} else{
if(j<=2){
mycol=1
} else{
mycol=cumsum(z$n.cgroup[[i]])[j-2]+1
}
}
ft<-ft %>% bg(i=i,j=mycol,bg=color2hex(z$cgroupbg[[i]][j]),part="header")
}
}
}
}
if(!is.null(z$spanCol)){
for(i in 1 :nrow(z$spanCol)){
if(addcol){
from=z$spanCol[i,2]
to=z$spanCol[i,3]
#cat("z$spanCol[i,]=",z$spanCol[i,],",from=",from,",to=",to,"\n")
ft=merge_at(ft,i=z$spanCol[i,1]-1,j=from:to,part="body")
} else{
from=z$spanCol[i,2]-1
to=z$spanCol[i,3]-1
myi=z$spanCol[i,1]-1
#cat("z$spanCol[i,]=",z$spanCol[i,],",myi=",myi,",from=",from,",to=",to,"\n")
ft=merge_at(ft,i=myi,j=from:to,part="body")
}
}
}
if(!is.null(z$spanRow)){
for(i in 1 :nrow(z$spanRow)){
ft=merge_at(ft,j=z$spanRow[i,1]-ifelse(addcol,0,1),i=(z$spanRow[i,2]-1):(z$spanRow[i,3]-1))
}
}
ft<- ft %>% align(align="center",part="header")
ft
}
| /scratch/gouwar.j/cran-all/cranData/ztable/R/ztable2flextable.R |
#'Hooks for Namespace Events
#'
#'Functions to be called when loaded, attached, detached or unloaded
#'@param libname a character string giving the library directory where the package was found.
#'@param pkgname a character string giving the name of the package.
.onAttach<-function(libname,pkgname){
packageStartupMessage("Welcome to package ztable ver 0.2.3")
}
.onLoad<-function(libname,pkgname){
options(ztable.include.rownames=TRUE)
options(ztable.include.colnames=TRUE)
options(ztable.type="html")
options(ztable.color="black")
options(ztable.show.heading=TRUE)
options(ztable.show.footer=TRUE)
options(ztable.caption.placement="top")
options(ztable.caption.position="c")
options(ztable.caption.bold=FALSE)
options(ztable.booktabs=FALSE)
options(ztable.zebra=NULL)
options(ztable.zebra.color=NULL)
options(ztable.zebra.type=1)
options(ztable.zebra.colnames=FALSE)
options(ztable.zebra.rownames=TRUE)
options(ztable.colnames.bold=FALSE)
invisible()
}
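# These package-level defaults can be overridden per session, e.g.:
if(FALSE){
    options(ztable.type="latex")
    options(ztable.caption.placement="bottom")
}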
| /scratch/gouwar.j/cran-all/cranData/ztable/R/zzz.R |
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = NA,
message=FALSE
)
## ----eval=FALSE---------------------------------------------------------------
# install.packages("ztable")
## ----eval=FALSE---------------------------------------------------------------
# if(!require(devtools)) install.packages("devtools")
# devtools::install_github("cardiomoon/ztable")
## -----------------------------------------------------------------------------
require(moonBook)
x=table(acs$Dx,acs$smoking)
x
## ----results="asis"-----------------------------------------------------------
library(ztable)
library(magrittr)
options(ztable.type="html")
z=ztable(x)
print(z,caption="Table 1. Basic Table")
## ----results="asis"-----------------------------------------------------------
z %>%
addCellColor(4,3,bg="orange",color="white") %>%
print(caption="Table 2. Add Cell Color")
## ----results='asis'-----------------------------------------------------------
ztable(head(iris),caption="Table 3. Conditinoal Formatting: Sepal.Width >= 3.5") %>%
addRowColor(rows=1,bg="#C90000",color="white") %>%
addCellColor(condition=Sepal.Width>=3.5,cols=Sepal.Width,color="red")
## ----results="asis"-----------------------------------------------------------
z %>% makeHeatmap() %>% print(caption="Table 4. Heatmap Table")
## ----results='asis'-----------------------------------------------------------
ztable(head(mtcars)) %>%
makeHeatmap(palette="Blues") %>%
print(caption="Table 5. Heatmap table with 'Blue' palette")
## ----fig.width=8,out.width='100%'---------------------------------------------
mycolor=gradientColor(low="yellow",mid="orange",high="red",n=20,plot=TRUE)
mycolor
## ----results='asis'-----------------------------------------------------------
ztable(head(mtcars[1:5])) %>%
makeHeatmap(mycolor=mycolor) %>%
print(caption="Table 6. Heatmap table with user-defined palette")
## ----results='asis'-----------------------------------------------------------
ztable(head(acs[1:10])) %>%
makeHeatmap %>%
print(caption="Table 7. Heatmap table with non-numeric data")
## ----results='asis'-----------------------------------------------------------
ztable(head(mtcars)) %>%
makeHeatmap(palette="YlOrRd",cols=c(1,3,4),margin=2) %>%
print(caption="Table 8. Columnwise heatmap table")
## ----results='asis'-----------------------------------------------------------
ztable(t(head(mtcars))) %>%
makeHeatmap(palette="YlOrRd",rows=c(1,3,4),margin=1) %>%
print(caption="Table 9. Rowwise heatmap table")
| /scratch/gouwar.j/cran-all/cranData/ztable/inst/doc/heatmapTable.R |
---
title: "Make a Heatmap Table using `ztable`"
author: "Keon-Woong Moon"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{heatmapTable}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = NA,
message=FALSE
)
```
## Installation
You can install the R package "ztable" from CRAN. Current version is 0.1.8.
```{r,eval=FALSE}
install.packages("ztable")
```
To make a heatmap table, you have to install the development version of ztable from GitHub. Current GitHub version is 0.2.0.
```{r,eval=FALSE}
if(!require(devtools)) install.packages("devtools")
devtools::install_github("cardiomoon/ztable")
```
## Introduction
A heat map (or heatmap) is a graphical representation of data where the individual values contained in a matrix are represented as colors. You can summarize the diagnosis and smoking status of 857 patients with acute coronary syndrome (acs) using the table() function.
```{r}
require(moonBook)
x=table(acs$Dx,acs$smoking)
x
```
## Basic Table
You can make an `html` or `LaTeX` table easily with ztable.
```{r,results="asis"}
library(ztable)
library(magrittr)
options(ztable.type="html")
z=ztable(x)
print(z,caption="Table 1. Basic Table")
```
## Formatting the Table
You can change the background color and font color of a `ztable` using the addCellColor() function. For example, you can change the cell color of the 3rd row, 2nd column. Please keep in mind that ztable counts the column names and row names, so row 1 is the header row and column 1 is the rowname column.
```{r,results="asis"}
z %>%
addCellColor(4,3,bg="orange",color="white") %>%
print(caption="Table 2. Add Cell Color")
```
## Conditional Formatting
You can select rows with a logical expression. You can select columns by column name.
```{r,results='asis'}
ztable(head(iris),caption="Table 3. Conditinoal Formatting: Sepal.Width >= 3.5") %>%
addRowColor(rows=1,bg="#C90000",color="white") %>%
addCellColor(condition=Sepal.Width>=3.5,cols=Sepal.Width,color="red")
```
## Make a Heatmap Table
You can make a heatmap table in which the background colors represent the values. With the makeHeatmap() function, you can make a heatmap table easily. The makeHeatmap() function applies the "Reds" palette from the RColorBrewer package.
```{r,results="asis"}
z %>% makeHeatmap() %>% print(caption="Table 4. Heatmap Table")
```
## Heatmap Table with desired palette
You can change the palette with the palette argument. For example, you can use the "Blues" palette.
```{r,results='asis'}
ztable(head(mtcars)) %>%
makeHeatmap(palette="Blues") %>%
print(caption="Table 5. Heatmap table with 'Blue' palette")
```
## Heatmap Table with user-defined palette
With the gradientColor() function, you can make a sequential color gradient palette easily.
```{r,fig.width=8,out.width='100%'}
mycolor=gradientColor(low="yellow",mid="orange",high="red",n=20,plot=TRUE)
mycolor
```
```{r,results='asis'}
ztable(head(mtcars[1:5])) %>%
makeHeatmap(mycolor=mycolor) %>%
print(caption="Table 6. Heatmap table with user-defined palette")
```
## Heatmap Table with non-numeric data
You can make a heatmap table with data containing non-numeric columns. Only columns with numeric data are affected by this function.
```{r,results='asis'}
ztable(head(acs[1:10])) %>%
makeHeatmap %>%
print(caption="Table 7. Heatmap table with non-numeric data")
```
## Selected Columnwise Heatmap Table
You can make a selected columnwise heatmap table. You can select columns with the `cols` argument. To make a columnwise heatmap table, set the `margin` argument to 2.
```{r,results='asis'}
ztable(head(mtcars)) %>%
makeHeatmap(palette="YlOrRd",cols=c(1,3,4),margin=2) %>%
print(caption="Table 8. Columnwise heatmap table")
```
## Selected Rowwise Heatmap Table
You can make a selected rowwise heatmap table. You can select rows with the `rows` argument. To make a rowwise heatmap table, set the `margin` argument to 1.
```{r,results='asis'}
ztable(t(head(mtcars))) %>%
makeHeatmap(palette="YlOrRd",rows=c(1,3,4),margin=1) %>%
print(caption="Table 9. Rowwise heatmap table")
```
| /scratch/gouwar.j/cran-all/cranData/ztable/inst/doc/heatmapTable.Rmd |
## ----results='asis'-----------------------------------------------------------
library(ztable)
library(magrittr)
options(ztable.type="html")
z=ztable(head(iris))
z
## ----results='asis'-----------------------------------------------------------
z=ztable(head(iris),align="cccccc")
z
## ----results='asis'-----------------------------------------------------------
z <- ztable(head(iris))
z <- addRowColor(z, rows=1,bg="#C90000",color="white")
print(z)
## ----results='asis'-----------------------------------------------------------
ztable(head(iris)) %>%
addRowColor(rows=1,bg="#C90000",color="white") %>%
print
## ----results='asis'-----------------------------------------------------------
cgroup=c("Sepal","Petal","Species")
n.cgroup=c(2,2,1)
z <- ztable(head(iris)) %>%
addcgroup(cgroup=cgroup,n.cgroup=n.cgroup)
z
## ----results='asis'-----------------------------------------------------------
rgroup=c("OneToThree","Four","FiveToSix")
n.rgroup=c(3,1,2)
z <- z %>%
addrgroup(rgroup=rgroup,n.rgroup=n.rgroup,cspan.rgroup=1)
z
print(z,type="latex")
## ----results='asis'-----------------------------------------------------------
ncount=c(123,120,123,124)
sub=paste("(N=",ncount,")",sep="")
z=addSubColNames(z,c(sub,NA))
z
## ----results='asis'-----------------------------------------------------------
z=spanRow(z,col=2,from=4,to=7,bg="lightcyan",color="red")
z=spanRow(z,col=3,from=5,to=7,"platinum","blue")
z=spanRow(z,col=4,from=6,to=7,"cyan")
z=spanRow(z,col=5,from=5,to=7,"yellow")
z=spanRow(z,col=6,from=3,to=5,"yellow")
z
z=spanCol(z,row=2,from=3,to=4,"yellow")
z=spanCol(z,row=3,from=4,to=5,"lightblue")
z
## ----results='asis'-----------------------------------------------------------
vlines(z,type="all") # type=1 gets same result
z <- vlines(z,type="none") # type=0 gets same result
z
z <- z %>% vlines(add=c(1,2,5))
z
## ----results='asis'-----------------------------------------------------------
t1=head(iris,10)[,c(1,3,5)]
t2=tail(iris,10)[,c(1,3,5)]
t=cbind(t1,t2)
z=ztable(t,caption="Table 1. Top 10 and Last 10 Data from iris",align="ccccccc")
z
## ----results='asis'-----------------------------------------------------------
cgroup=c("Top 10","Last 10")
n.cgroup=c(3,3)
z=addcgroup(z,cgroup=cgroup,n.cgroup=n.cgroup)
z
rgroup=c("Top 1-3","Top 4-5",NA," Top 7-10")
n.rgroup=c(3,2,1,4)
z=addrgroup(z,rgroup=rgroup,n.rgroup=n.rgroup,cspan.rgroup=1)
z
z <- z %>%
addRowColor(c(5,10),"pink") %>%
addColColor(4,"amber") %>%
addCellColor(rows=c(5,10),cols=4,"red","white")
z
z <- z %>%
spanCol(row=2,from=2,to=3,"lightcyan","red") %>%
spanRow(col=7,from=7,to=8,"cyan")
z
hlines(z,type=1)
## ----results='asis'-----------------------------------------------------------
vlines(z,type=0) # No vertical lines
vlines(z,type=1) # Vertical lines for all column
## ---- eval=FALSE--------------------------------------------------------------
# options(ztable.type="html")
## ----results="asis",message=FALSE---------------------------------------------
require(ztable)
options(ztable.type="html")
options(ztable.zebra=1)
options(ztable.zebra.color="platinum")
options(ztable.colnames.bold=TRUE)
ztable(head(mtcars))
## ----results='asis'-----------------------------------------------------------
ztable(head(mtcars),zebra=NULL,size=3,
caption="Table 1. Non-zebra Table with small size")
## ----results='asis'-----------------------------------------------------------
ztable(head(mtcars[c(1:7)]),zebra=2,zebra.color="lightcyan",size=7,
        caption="Table 2. Left-sided caption at bottom with large font",
caption.placement="bottom",caption.position="l")
## ----results="asis"-----------------------------------------------------------
out <- aov(mpg ~ ., data=mtcars)
ztable(out)
## ----results='asis'-----------------------------------------------------------
fit <- lm(mpg ~ cyl + disp + wt + drat + am, data=mtcars)
ztable(fit)
## ----results='asis'-----------------------------------------------------------
a=anova(fit)
ztable(a)
## ----results='asis'-----------------------------------------------------------
fit2 <- lm(mpg ~ cyl+wt, data=mtcars)
b=anova(fit2,fit)
ztable(b)
ztable(b,show.heading=FALSE)
## ----results='asis',warning=FALSE---------------------------------------------
require(survival)
data(cancer)
attach(colon)
out <- glm(status ~ rx+obstruct+adhere+nodes+extent, data=colon, family=binomial)
ztable(out)
## ----results='asis'-----------------------------------------------------------
anova(out)
ztable(anova(out))
## ----results='asis'-----------------------------------------------------------
op <- options(contrasts = c("contr.helmert", "contr.poly"))
npk.aov <- aov(yield ~ block + N*P*K, npk)
ztable(npk.aov,zebra=1)
## ----results='asis'-----------------------------------------------------------
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
ztable(lm.D9)
ztable(anova(lm.D9),align="|c|rrrr|r|")
## ----results='asis'-----------------------------------------------------------
counts <- c(18,17,15,20,10,20,25,13,12)
outcome <- gl(3,1,9)
treatment <- gl(3,3)
d.AD <- data.frame(treatment, outcome, counts)
glm.D93 <- glm(counts ~ outcome + treatment, family = poisson())
ztable(glm.D93)
## ----results='asis',message=FALSE---------------------------------------------
data(USArrests)
pr1 <- prcomp(USArrests)
ztable(pr1)
ztable(summary(pr1))
## ----results='asis',message=FALSE---------------------------------------------
colon$TS = Surv(time,status==1)
out=coxph(TS~rx+obstruct+adhere+differ+extent+surg+node4,data=colon)
ztable(out)
## ----comment=NA---------------------------------------------------------------
require(graphics)
DNase1 <- subset(DNase, Run == 1)
## using a selfStart model
fm1DNase1 <- nls(density ~ SSlogis(log(conc), Asym, xmid, scal),DNase1)
summary(fm1DNase1)
## ----results='asis',message=FALSE---------------------------------------------
ztable(fm1DNase1)
## ----results='asis'-----------------------------------------------------------
require(MASS)
set.seed(123)
x <- rgamma(100, shape = 5, rate = 0.1)
a=fitdistr(x, "gamma")
ztable(a)
x3 <- rweibull(100, shape = 4, scale = 100)
b=fitdistr(x3, "weibull")
ztable(b)
## ----results='asis',message=FALSE---------------------------------------------
ztable(head(mtcars,15),zebra=0,zebra.color=NULL)
## ----results='asis'-----------------------------------------------------------
z1=ztable(head(iris),zebra=2)
z1
print(z1,zebra.type=2)
print(z1,zebra=1,zebra.type=2,zebra.colnames=TRUE)
## ----results='asis'-----------------------------------------------------------
options(ztable.zebra.color=NULL)
(z1=ztable(head(iris),zebra=0,zebra.type=2))
## ----results='asis'-----------------------------------------------------------
update_ztable(z1,colnames.bold=TRUE,zebra.colnames=TRUE)
## ----results='asis'-----------------------------------------------------------
print(z1,zebra.color=c(rep("white",5),"peach"),zebra.colnames=TRUE)
## ----results='asis'-----------------------------------------------------------
ztable(head(iris),zebra=0,zebra.type=0)
ztable(head(iris),zebra=0,zebra.type=0,zebra.color=zcolors$name,zebra.colnames=TRUE)
## ----results='asis'-----------------------------------------------------------
ztable(head(iris),zebra=0,zebra.type=0,zebra.color=1:7,zebra.colnames=TRUE)
ztable(head(mtcars[,1:9]),zebra=0,zebra.type=0,zebra.color=1:9,zebra.colnames=TRUE)
## ----results='asis'-----------------------------------------------------------
mycolor=rep("white",6)
for(i in 1:149){
mycolor=c(mycolor,"white",zcolors$name[((i-1)*5+1):((i-1)*5+5)])
}
mycolor=c(mycolor,"white",zcolors$name[c(746:749,1)])
a=c(zcolors$name[1:5])
for(i in 2:149){
a=rbind(a,zcolors$name[((i-1)*5+1):((i-1)*5+5)])
}
a=rbind(a,zcolors$name[c(746:749,1)])
a=data.frame(a,stringsAsFactors=FALSE,row.names=NULL)
ztable(a,zebra=0,zebra.type=0,zebra.color=mycolor,include.rownames=FALSE,
include.colnames=FALSE,longtable=TRUE)
## ----results='asis'-----------------------------------------------------------
z=ztable(head(mtcars[1:3]),tabular=TRUE,zebra.color="peach-orange")
z1=ztable(head(iris[1:3]),tabular=TRUE,zebra=2)
parallelTables(width=c(0.5,0.5),list(z,z1),type="html")
parallelTables(width=c(0.5,0.5),list(z,"figures/ztable3.png"),type="html")
## ----results='asis'-----------------------------------------------------------
require(moonBook)
res=mytable(Dx~.,data=acs)
options(ztable.zebra=NULL)
z=ztable(res)
z
vlines(z,type="all")
## ----results='asis'-----------------------------------------------------------
res1=mytable(sex+DM~.,data=acs)
z=ztable(res1)
z
vlines(z,type="all")
## ----results='asis'-----------------------------------------------------------
z=addRowColor(z,c(13,16),"platinum")
z=addColColor(z,c(5,8),"pink")
z=addCellColor(z,rows=16,cols=c(5,8),bg="orange")
z=addCellColor(z,rows=13,cols=5,bg="orange")
z
## ----results='asis'-----------------------------------------------------------
require(magrittr)
res1=mytable(sex+DM~.,data=acs)
z=ztable(res1)
z %>%
addRowColor(c(13,16),"platinum") %>%
addColColor(c(5,8),"pink") %>%
addCellColor(rows=16,cols=c(5,8),bg="orange") %>%
addCellColor(rows=13,cols=5,bg="orange") %>%
print
## ----results='asis'-----------------------------------------------------------
res1=mytable(sex~.,data=acs)
res1 %>% ztable %>%
addSigColor %>%
print
res2<-mytable(sex+DM~.,data=acs)
res2 %>%
ztable %>%
addSigColor(level=0.1,bg="yellow",color="red") %>%
print
| /scratch/gouwar.j/cran-all/cranData/ztable/inst/doc/ztable.R |
---
title: "Package ztable"
author: "Keon-Woong Moon"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{ztable}
%\VignetteEngine{knitr::rmarkdown}
\usepackage[utf8]{inputenc}
---
# Introduction
## Table Show
The "ztable" package makes almost everything possible with tables. Basically, an object of class "ztable" is made from a data.frame. The default output format of ztable is the RStudio viewer or a web browser (type="viewer"). If you want to use ztable in "html" format, set the option ztable.type to "html"; if you want LaTeX output, set ztable.type to "latex".
```{r,results='asis'}
library(ztable)
library(magrittr)
options(ztable.type="html")
z=ztable(head(iris))
z
```
You can change the alignment of data in each cell by adjusting the parameter "align".
```{r,results='asis'}
z=ztable(head(iris),align="cccccc")
z
```
You can change the background color and font color with the addRowColor() function.
```{r,results='asis'}
z <- ztable(head(iris))
z <- addRowColor(z, rows=1,bg="#C90000",color="white")
print(z)
```
The pipe operator ("%>%") from the magrittr package can simplify your R code.
```{r,results='asis'}
ztable(head(iris)) %>%
addRowColor(rows=1,bg="#C90000",color="white") %>%
print
```
You can add column groups to a ztable using the addcgroup() function. The n.cgroup argument specifies how many columns are included in each column group.
```{r,results='asis'}
cgroup=c("Sepal","Petal","Species")
n.cgroup=c(2,2,1)
z <- ztable(head(iris)) %>%
addcgroup(cgroup=cgroup,n.cgroup=n.cgroup)
z
```
You can add row groups to a ztable using the addrgroup() function. The n.rgroup argument specifies how many rows are included in each row group. The cspan.rgroup argument specifies how many columns the row group name occupies.
```{r,results='asis'}
rgroup=c("OneToThree","Four","FiveToSix")
n.rgroup=c(3,1,2)
z <- z %>%
addrgroup(rgroup=rgroup,n.rgroup=n.rgroup,cspan.rgroup=1)
z
print(z,type="latex")
```
You can add another row of column names (sub-column names), for example the N count. The length of the sub-column names should equal the column count of the data.frame. If you use NA, the corresponding column name spans two rows.
```{r,results='asis'}
ncount=c(123,120,123,124)
sub=paste("(N=",ncount,")",sep="")
z=addSubColNames(z,c(sub,NA))
z
```
You can merge cells with the spanRow() or spanCol() functions.
```{r,results='asis'}
z=spanRow(z,col=2,from=4,to=7,bg="lightcyan",color="red")
z=spanRow(z,col=3,from=5,to=7,"platinum","blue")
z=spanRow(z,col=4,from=6,to=7,"cyan")
z=spanRow(z,col=5,from=5,to=7,"yellow")
z=spanRow(z,col=6,from=3,to=5,"yellow")
z
z=spanCol(z,row=2,from=3,to=4,"yellow")
z=spanCol(z,row=3,from=4,to=5,"lightblue")
z
```
You can add or adjust the vertical lines of a table with the vlines() function.
```{r,results='asis'}
vlines(z,type="all") # type=1 gets same result
z <- vlines(z,type="none") # type=0 gets same result
z
z <- z %>% vlines(add=c(1,2,5))
z
```
Please note that if you add vertical lines between groups, the space between groups (empty columns) disappears, and vice versa; see the sketch below.
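A minimal sketch of this behavior, reusing the grouped table `z` from above. The position index 3 is only an assumed example; the exact index of the line between two column groups depends on your table.
```{r,results='asis'}
# adding a vertical line between two column groups removes
# the empty spacer column that normally separates them
z %>% vlines(add=3)
```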
## Merge two tables
You can bind two or more data.frames with the cbind() function.
```{r,results='asis'}
t1=head(iris,10)[,c(1,3,5)]
t2=tail(iris,10)[,c(1,3,5)]
t=cbind(t1,t2)
z=ztable(t,caption="Table 1. Top 10 and Last 10 Data from iris",align="ccccccc")
z
```
Then you can add column groups, row groups, row colors, column colors and cell colors, and merge cells.
```{r,results='asis'}
cgroup=c("Top 10","Last 10")
n.cgroup=c(3,3)
z=addcgroup(z,cgroup=cgroup,n.cgroup=n.cgroup)
z
rgroup=c("Top 1-3","Top 4-5",NA," Top 7-10")
n.rgroup=c(3,2,1,4)
z=addrgroup(z,rgroup=rgroup,n.rgroup=n.rgroup,cspan.rgroup=1)
z
z <- z %>%
addRowColor(c(5,10),"pink") %>%
addColColor(4,"amber") %>%
addCellColor(rows=c(5,10),cols=4,"red","white")
z
z <- z %>%
spanCol(row=2,from=2,to=3,"lightcyan","red") %>%
spanRow(col=7,from=7,to=8,"cyan")
z
hlines(z,type=1)
```
And you can adjust vertical lines, too.
```{r,results='asis'}
vlines(z,type=0) # No vertical lines
vlines(z,type=1) # Vertical lines for all column
```
# Basic Use
Package "ztable" centers on one function: ztable(). Its main purpose is to create zebra striping tables (tables with alternating row colors) easily, in both LaTeX and html formats, mainly from a data.frame or from an R object such as a matrix, lm, aov, anova, glm or coxph object. It is fully customizable, and you can get similar tables in both latex and html format without changing the source. The default output is the RStudio viewer, but you can get html format by adding just one line.
```{r, eval=FALSE}
options(ztable.type="html")
```
Its usage is somewhat similar to xtable, but very simple.
## data.frame
### Basic Use
Its use is very simple: just call the ztable() function. You can get a zebra striping table by setting the parameter zebra=1 (the default value is NULL).
```{r,results="asis",message=FALSE}
require(ztable)
options(ztable.type="html")
options(ztable.zebra=1)
options(ztable.zebra.color="platinum")
options(ztable.colnames.bold=TRUE)
ztable(head(mtcars))
```
### Tailoring zebra striping
You can get a non-zebra table by setting the parameter zebra=NULL, or stripe the even rows instead with zebra=2.
```{r,results='asis'}
ztable(head(mtcars),zebra=NULL,size=3,
caption="Table 1. Non-zebra Table with small size")
```
### Customize the caption and the font size
You can change the position of the table with the parameter position: "r" for right, "l" for left and "c" for center (the default). You can change the color of the zebra striping with the parameter zebra.color, and the font size from 1 to 10 (the default is 5). You can also set caption.placement ("top" or "bottom") and caption.position ("c" for center, "r" for right, "l" for left); see the example and the short sketch below.
```{r,results='asis'}
ztable(head(mtcars[c(1:7)]),zebra=2,zebra.color="lightcyan",size=7,
       caption="Table 2. Left-sided caption at bottom with large font",
caption.placement="bottom",caption.position="l")
```
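The table position itself can be set in the same way. This is a brief sketch based on the 'position' parameter described above:
```{r,results='asis'}
# position="r" is assumed to right-align the whole table on the page
ztable(head(mtcars[c(1:3)]),zebra=1,position="r",
       caption="Table 2-1. Right-positioned table")
```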
## aov object
'ztable()' can be used for an 'aov' object. In this case, the function call is added as a footer to the table. The parameter 'show.footer' controls whether or not the footer is included in the table; the default value is TRUE, and a sketch with the footer suppressed follows the example below.
```{r,results="asis"}
out <- aov(mpg ~ ., data=mtcars)
ztable(out)
```
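If you prefer the table without the call, the footer can be suppressed. This is a short sketch using the 'show.footer' parameter described above:
```{r,results="asis"}
ztable(out,show.footer=FALSE)
```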
## Linear model : 'lm' object
'ztable()' can be used for an 'lm' object. In this case too, the function call is added as a footer to the table.
```{r,results='asis'}
fit <- lm(mpg ~ cyl + disp + wt + drat + am, data=mtcars)
ztable(fit)
```
## Analysis of Variance Table : 'anova' object
'ztable()' can be used for an 'anova' object to show the anova table. In this case, the headings of the anova object are added as headings to the table. The parameter 'show.footer' controls whether or not the footer is included in the table; the default value is TRUE.
```{r,results='asis'}
a=anova(fit)
ztable(a)
```
Here is an example with another 'anova' object. The models in this anova table are shown as table headings. You can decide whether or not to include the headings with the parameter 'show.heading' (default: TRUE).
```{r,results='asis'}
fit2 <- lm(mpg ~ cyl+wt, data=mtcars)
b=anova(fit2,fit)
ztable(b)
ztable(b,show.heading=FALSE)
```
## Generalized linear model ; 'glm' object
'ztable()' can be used for a 'glm' (generalized linear model) object. In this case, 'ztable()' shows the **odds ratio (OR) and 95% confidence interval** as well as the standard R output.
```{r,results='asis',warning=FALSE}
require(survival)
data(cancer)
attach(colon)
out <- glm(status ~ rx+obstruct+adhere+nodes+extent, data=colon, family=binomial)
ztable(out)
```
Again, 'ztable()' also shows the anova table of this model.
```{r,results='asis'}
anova(out)
ztable(anova(out))
```
## More 'aov' object
```{r,results='asis'}
op <- options(contrasts = c("contr.helmert", "contr.poly"))
npk.aov <- aov(yield ~ block + N*P*K, npk)
ztable(npk.aov,zebra=1)
```
## More 'lm' object
```{r,results='asis'}
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
ztable(lm.D9)
ztable(anova(lm.D9),align="|c|rrrr|r|")
```
## More 'glm' object
```{r,results='asis'}
counts <- c(18,17,15,20,10,20,25,13,12)
outcome <- gl(3,1,9)
treatment <- gl(3,3)
d.AD <- data.frame(treatment, outcome, counts)
glm.D93 <- glm(counts ~ outcome + treatment, family = poisson())
ztable(glm.D93)
```
## Principal Components Analysis : 'prcomp' object
'ztable()' can be used in principal components analysis. The following are examples of ztable() applied to a 'prcomp' object.
```{r,results='asis',message=FALSE}
data(USArrests)
pr1 <- prcomp(USArrests)
ztable(pr1)
ztable(summary(pr1))
```
## Survival Analysis : 'coxph' object
'ztable()' can be used in survival analysis. When used for a Cox proportional hazards model, 'ztable()' shows the hazard ratio and 95% confidence interval, ready for publication in a medical journal.
```{r,results='asis',message=FALSE}
colon$TS = Surv(time,status==1)
out=coxph(TS~rx+obstruct+adhere+differ+extent+surg+node4,data=colon)
ztable(out)
```
## Nonlinear Least Squares: 'nls' object
'ztable()' can be used to display the nonlinear (weighted) least-squares estimates of the parameters of a nonlinear model. The following is an example of ztable() applied to an 'nls' object.
```{r,comment=NA}
require(graphics)
DNase1 <- subset(DNase, Run == 1)
## using a selfStart model
fm1DNase1 <- nls(density ~ SSlogis(log(conc), Asym, xmid, scal),DNase1)
summary(fm1DNase1)
```
```{r,results='asis',message=FALSE}
ztable(fm1DNase1)
```
## Maximum-likelihood Fitting of Univariate Distributions
'ztable()' can be used in maximum-likelihood fitting of univariate distributions. The following are examples of ztable() applied to 'fitdistr' objects.
```{r,results='asis'}
require(MASS)
set.seed(123)
x <- rgamma(100, shape = 5, rate = 0.1)
a=fitdistr(x, "gamma")
ztable(a)
x3 <- rweibull(100, shape = 4, scale = 100)
b=fitdistr(x3, "weibull")
ztable(b)
```
## Customize the zebra striping colors
If you want to use several colors for zebra striping, set the parameter 'zebra' to zero (zebra=0) and set the 'zebra.color' parameter to a vector of your favorite colors, which are then used for the striping. For your convenience, ten colors are predefined for this purpose. The predefined colors are:
c("peach","peach-orange","peachpuff","peach-yellow","pear","pearl","peridot","periwinkle","pastelred", "pastelgray").
```{r,results='asis',message=FALSE}
ztable(head(mtcars,15),zebra=0,zebra.color=NULL)
```
The color names used for this purpose are predefined in the data set 'zcolors' included in the 'ztable' package. Please type '?zcolors' in the R console for the help file, or just type 'zcolors'. You can see the 749 color names defined in 'zcolors'; a short preview follows.
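A quick way to preview the available names (a minimal sketch; 'zcolors' is the data set shipped with the package):
```{r}
head(zcolors$name)
```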
## Vertical striping
If you want a vertically striped table, set the parameter zebra.type to 2. You can also change a ztable's parameters when printing.
```{r,results='asis'}
z1=ztable(head(iris),zebra=2)
z1
print(z1,zebra.type=2)
print(z1,zebra=1,zebra.type=2,zebra.colnames=TRUE)
```
## More tailoring zebra striping
You can update the parameters of a ztable with the 'update_ztable' function.
```{r,results='asis'}
options(ztable.zebra.color=NULL)
(z1=ztable(head(iris),zebra=0,zebra.type=2))
```
You can change the background color of the column names row by setting zebra.colnames=TRUE.
```{r,results='asis'}
update_ztable(z1,colnames.bold=TRUE,zebra.colnames=TRUE)
```
You can customize the striping when printing.
```{r,results='asis'}
print(z1,zebra.color=c(rep("white",5),"peach"),zebra.colnames=TRUE)
```
## Change the background color of all cells
You can change the background color of all cells by setting zebra.type=0.
```{r,results='asis'}
ztable(head(iris),zebra=0,zebra.type=0)
ztable(head(iris),zebra=0,zebra.type=0,zebra.color=zcolors$name,zebra.colnames=TRUE)
```
## Diagonal striping
You can make diagonal striping by supplying a zebra.color vector whose length is one more or one less than the column count.
```{r,results='asis'}
ztable(head(iris),zebra=0,zebra.type=0,zebra.color=1:7,zebra.colnames=TRUE)
ztable(head(mtcars[,1:9]),zebra=0,zebra.type=0,zebra.color=1:9,zebra.colnames=TRUE)
```
## All background colors
This is a demonstration of all background colors. All 749 colors are available in the ztable package; please type ?zcolors.
```{r,results='asis'}
mycolor=rep("white",6)
for(i in 1:149){
mycolor=c(mycolor,"white",zcolors$name[((i-1)*5+1):((i-1)*5+5)])
}
mycolor=c(mycolor,"white",zcolors$name[c(746:749,1)])
a=c(zcolors$name[1:5])
for(i in 2:149){
a=rbind(a,zcolors$name[((i-1)*5+1):((i-1)*5+5)])
}
a=rbind(a,zcolors$name[c(746:749,1)])
a=data.frame(a,stringsAsFactors=FALSE,row.names=NULL)
ztable(a,zebra=0,zebra.type=0,zebra.color=mycolor,include.rownames=FALSE,
include.colnames=FALSE,longtable=TRUE)
```
## Place two or more ztables or figures side by side
If you want to place two or more ztables or figures side by side, you can use the **parallelTables()** function. It takes three parameters. The first parameter, width, is a numeric vector that specifies the widths to which the tables or figures should be scaled. The second parameter is a list of ztables or names of valid figure files. The third parameter, 'type', is the type of table to produce; possible values are "latex" or "html", and the default is "latex". See the examples.
```{r,results='asis'}
z=ztable(head(mtcars[1:3]),tabular=TRUE,zebra.color="peach-orange")
z1=ztable(head(iris[1:3]),tabular=TRUE,zebra=2)
parallelTables(width=c(0.5,0.5),list(z,z1),type="html")
parallelTables(width=c(0.5,0.5),list(z,"figures/ztable3.png"),type="html")
```
## mytable object from "moonBook" package
'ztable()' can be used for a 'mytable' object made by the mytable() function from the "moonBook" package.
```{r,results='asis'}
require(moonBook)
res=mytable(Dx~.,data=acs)
options(ztable.zebra=NULL)
z=ztable(res)
z
vlines(z,type="all")
```
## cbind.mytable object
'ztable()' can be used for a 'cbind.mytable' object made by the mytable() function from the "moonBook" package.
```{r,results='asis'}
res1=mytable(sex+DM~.,data=acs)
z=ztable(res1)
z
vlines(z,type="all")
```
You can use all ztable-related functions on this table.
```{r,results='asis'}
z=addRowColor(z,c(13,16),"platinum")
z=addColColor(z,c(5,8),"pink")
z=addCellColor(z,rows=16,cols=c(5,8),bg="orange")
z=addCellColor(z,rows=13,cols=5,bg="orange")
z
```
You can use the pipe operator from the "magrittr" package.
```{r,results='asis'}
require(magrittr)
res1=mytable(sex+DM~.,data=acs)
z=ztable(res1)
z %>%
addRowColor(c(13,16),"platinum") %>%
addColColor(c(5,8),"pink") %>%
addCellColor(rows=16,cols=c(5,8),bg="orange") %>%
addCellColor(rows=13,cols=5,bg="orange") %>%
print
```
You can use the addSigColor() function to colorize the significant rows of a ztable.mytable object.
```{r,results='asis'}
res1=mytable(sex~.,data=acs)
res1 %>% ztable %>%
addSigColor %>%
print
res2<-mytable(sex+DM~.,data=acs)
res2 %>%
ztable %>%
addSigColor(level=0.1,bg="yellow",color="red") %>%
print
```
| /scratch/gouwar.j/cran-all/cranData/ztable/inst/doc/ztable.Rmd |
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = NA
)
## ----eval=FALSE---------------------------------------------------------------
# install.packages("ztable")
## ----eval=FALSE---------------------------------------------------------------
# if(!require(devtools)) install.packages("devtools")
# devtools::install_github("cardiomoon/ztable")
## ----results='asis'-----------------------------------------------------------
library(ztable)
library(magrittr)
options(ztable.type="html")
z=ztable(head(iris),caption="Table 1. Basic Table")
z
## ----results='asis'-----------------------------------------------------------
ztable(head(iris),caption="Table 2. Table with desired background and font colors") %>%
addRowColor(rows=1,bg="#C90000",color="white") %>%
addCellColor(rows=3,cols=c(4,6), bg="cyan",color="red")
## ----results='asis'-----------------------------------------------------------
ztable(head(iris),caption="Table 3. Conditional Formatting: Sepal.Width >= 3.5") %>%
addRowColor(rows=1,bg="#C90000",color="white") %>%
addCellColor(condition=Sepal.Width>=3.5,cols=Sepal.Width,color="red")
## ----results='asis'-----------------------------------------------------------
ztable(head(mtcars),caption="Table 4. Cars with mpg > 21 ") %>%
addCellColor(condition=mpg>21,cols=1:2,bg="cyan",color="red")
## -----------------------------------------------------------------------------
require(RColorBrewer)
reds=palette2colors("Reds")
reds
## ----results='asis'-----------------------------------------------------------
ztable(head(iris),caption="Table 5. Use of color palette") %>%
addColColor(bg=reds)
ztable(head(mtcars),caption="Table 6. Use of color palette(2)") %>%
addRowColor(bg=palette2colors("Set3"))
## -----------------------------------------------------------------------------
require(officer)
require(flextable)
ft=regulartable(head(iris))
ft
## ----eval=FALSE---------------------------------------------------------------
# read_docx() %>%
# body_add_flextable(ft) %>%
# print(target = "flextable.docx")
## ----results='asis'-----------------------------------------------------------
cgroup=c("Sepal","Petal","Species")
n.cgroup=c(2,2,1)
z <- ztable(head(iris),caption="Table 9. Use of column groups") %>%
addcgroup(cgroup=cgroup,n.cgroup=n.cgroup,color=c("red","green","blue")) %>%
spanRow(col=4,from=2,to=3,bg="cyan") %>%
spanCol(row=5,from=2,to=3,bg="cyan",color="blue")
z
ztable2flextable(z)
## ----results='asis'-----------------------------------------------------------
fit <- lm(mpg ~ cyl + disp + wt + drat + am, data=mtcars)
z=ztable(fit,caption="Table 10. Results of Multiple Regression Analysis ")
z
ztable2flextable(z)
## ----results='asis'-----------------------------------------------------------
z1=z %>% addSigColor
z1
ztable2flextable(z1) %>% autofit()
## ----results='asis'-----------------------------------------------------------
z2= z %>% addSigColor(level=0.01,bg="yellow",color="red")
z2
ztable2flextable(z2)
| /scratch/gouwar.j/cran-all/cranData/ztable/inst/doc/ztable_update.R |
---
title: "ztable Update"
author: "Keon-Woong Moon"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{ztable_update}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = NA
)
```
# Introduction
If you are unfamiliar with ztable, please read the ztable vignette: https://CRAN.R-project.org/package=ztable/vignettes/ztable.html
## Installation
You can install the R package "ztable" from CRAN. The current version is 0.1.8.
```{r,eval=FALSE}
install.packages("ztable")
```
You can install the development version of ztable from github. The current github version is 0.1.9.
```{r,eval=FALSE}
if(!require(devtools)) install.packages("devtools")
devtools::install_github("cardiomoon/ztable")
```
## Make table from a data.frame
The "ztable" package makes almost everything possible with tables. Basically, an object of class "ztable" is made from a data.frame. The default output format of ztable is the RStudio viewer or a web browser (type="viewer"). If you want to use ztable in "html" format, set the option ztable.type to "html"; if you want LaTeX output, set ztable.type to "latex".
```{r,results='asis'}
library(ztable)
library(magrittr)
options(ztable.type="html")
z=ztable(head(iris),caption="Table 1. Basic Table")
z
```
## Use background and font color
You can change the background color and font color with the bg and color arguments of the addRowColor(), addColColor() and addCellColor() functions.
```{r,results='asis'}
ztable(head(iris),caption="Table 2. Table with desired background and font colors") %>%
addRowColor(rows=1,bg="#C90000",color="white") %>%
addCellColor(rows=3,cols=c(4,6), bg="cyan",color="red")
```
## Conditional Formatting
You can select rows with a logical expression and select columns by column name.
```{r,results='asis'}
ztable(head(iris),caption="Table 3. Conditional Formatting: Sepal.Width >= 3.5") %>%
addRowColor(rows=1,bg="#C90000",color="white") %>%
addCellColor(condition=Sepal.Width>=3.5,cols=Sepal.Width,color="red")
```
```{r,results='asis'}
ztable(head(mtcars),caption="Table 4. Cars with mpg > 21 ") %>%
addCellColor(condition=mpg>21,cols=1:2,bg="cyan",color="red")
```
## Use of color palette
You can use color palettes from the RColorBrewer package. You can extract the colors of a palette with the palette2colors() function.
```{r}
require(RColorBrewer)
reds=palette2colors("Reds")
reds
```
You can apply the extracted colors to your ztable.
```{r,results='asis'}
ztable(head(iris),caption="Table 5. Use of color palette") %>%
addColColor(bg=reds)
ztable(head(mtcars),caption="Table 6. Use of color palette(2)") %>%
addRowColor(bg=palette2colors("Set3"))
```
## Make a flextable from a ztable
You can use ztable for html and latex output, but it is impossible to use ztable in `Microsoft Word` or `Microsoft Powerpoint` output directly. The `officer` package by David Gohel makes it possible to access and manipulate `Microsoft Word` and `Microsoft Powerpoint` documents, and you can insert a flextable object into office documents with the officer package.
```{r}
require(officer)
require(flextable)
ft=regulartable(head(iris))
ft
```
You can make a 'Microsoft Word' document with this flextable.
```{r,eval=FALSE}
read_docx() %>%
body_add_flextable(ft) %>%
print(target = "flextable.docx")
```
You can convert an object of class ztable to a flextable object.
```{r,results='asis'}
cgroup=c("Sepal","Petal","Species")
n.cgroup=c(2,2,1)
z <- ztable(head(iris),caption="Table 9. Use of column groups") %>%
addcgroup(cgroup=cgroup,n.cgroup=n.cgroup,color=c("red","green","blue")) %>%
spanRow(col=4,from=2,to=3,bg="cyan") %>%
spanCol(row=5,from=2,to=3,bg="cyan",color="blue")
z
ztable2flextable(z)
```
```{r,results='asis'}
fit <- lm(mpg ~ cyl + disp + wt + drat + am, data=mtcars)
z=ztable(fit,caption="Table 10. Results of Multiple Regression Analysis ")
z
ztable2flextable(z)
```
You can change the color of rows in which the p value is below the desired level (the default value is 0.05).
```{r,results='asis'}
z1=z %>% addSigColor
z1
ztable2flextable(z1) %>% autofit()
```
You can change the significance level and the background and font colors.
```{r,results='asis'}
z2= z %>% addSigColor(level=0.01,bg="yellow",color="red")
z2
ztable2flextable(z2)
```
For more options of flextable, please read the flextable vignette at https://davidgohel.github.io/flextable/index.html.
| /scratch/gouwar.j/cran-all/cranData/ztable/inst/doc/ztable_update.Rmd |
---
classoption: b5paper
geometry: margin=1in
header-includes:
- \usepackage{multirow}
- \usepackage{colortbl}
- \usepackage{pdflscape}
- \usepackage[table]{xcolor}
- \usepackage{tabularx,booktabs}
- \usepackage{boxedminipage}
- \usepackage{graphicx}
- \usepackage{rotating}
- \usepackage{longtable}
output: html_document
---
```{r,echo=FALSE,message=FALSE,results='asis' }
require(ztable)
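# 'input' comes from the surrounding Shiny app (ztableDemo): latexOption is
# assumed to be a checkbox group where "1" enables longtable and "2" enables
# sidewaystable, and input$format selects PDF (latex) versus HTML output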
longt=ifelse("1" %in% input$latexOption,TRUE,FALSE)
sidew=ifelse("2" %in% input$latexOption,TRUE,FALSE)
if(input$format=="PDF") {
print(z,type="latex",longtable=longt,sidewaystable=sidew)
} else {
print(z,type="html")
}
```
| /scratch/gouwar.j/cran-all/cranData/ztable/inst/ztableDemo/report.Rmd |