library("aroma.affymetrix");
library("ACNE");
log <- verbose <- Arguments$getVerbose(-8, timestamp=TRUE);
dataSetName <- "Affymetrix_2006-TumorNormal";
chipType <- "Mapping250K_Nsp";
pairs <- matrix(c(
"CRL-2325D", "CRL-2324D",
"CRL-5957D", "CRL-5868D",
"CCL-256.1D", "CCL-256D",
"CRL-2319D", "CRL-2320D",
"CRL-2362D", "CRL-2321D",
"CRL-2337D", "CRL-2336D",
"CRL-2339D", "CRL-2338D",
"CRL-2341D", "CRL-2340D",
"CRL-2346D", "CRL-2314D"
), ncol=2, byrow=TRUE);
colnames(pairs) <- c("normal", "tumor");
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Setting up data set
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
cdf <- AffymetrixCdfFile$byName(chipType);
csR <- AffymetrixCelSet$byName(dataSetName, cdf=cdf);
print(csR);
# Reorder arrays according to 'pairs' matrix
csR <- extract(csR, indexOf(csR, pairs));
acc <- AllelicCrosstalkCalibration(csR, model="CRMAv2");
print(acc);
csC <- process(acc, verbose=log);
print(csC);
bpn <- BasePositionNormalization(csC, target="zero");
print(bpn);
csN <- process(bpn, verbose=log);
print(csN);
plm <- NmfSnpPlm(csN, mergeStrands=TRUE);
print(plm);
fit(plm, verbose=log);
ces <- getChipEffectSet(plm);
print(ces);
fln <- FragmentLengthNormalization(ces, target="zero");
print(fln);
cesN <- process(fln, verbose=log);
print(cesN);
| /scratch/gouwar.j/cran-all/cranData/ACNE/inst/testScripts/system/chipTypes/Mapping250K_Nsp,Sty/test20090128,250K,TumorNormal,NMF.R |
library("ACNE");
verbose <- Arguments$getVerbose(-8, timestamp=TRUE);
dataSet <- "Affymetrix_2006-TumorNormal";
chipType <- "Mapping250K_Nsp";
res <- doACNE(dataSet, chipType=chipType, verbose=verbose);
print(res);
ds <- res$total;
dfR <- getAverageFile(ds, verbose=verbose);
df <- getFile(ds, 1);
baf <- getFile(res$fracB, 1);
ugp <- getAromaUgpFile(ds);
fig <- sprintf("%s", getFullName(df));
if (!devIsOpen(fig)) {
devSet(fig, width=10, height=5);
subplots(2*3, nrow=2, byrow=FALSE);
par(mar=c(3,4,2,1)+0.1, pch=".");
for (chr in 1:3) {
units <- getUnitsOnChromosome(ugp, chr);
pos <- getPositions(ugp, units=units);
beta <- extractMatrix(baf, units=units, drop=TRUE);
fracB <- RawAlleleBFractions(beta, pos, chromosome=chr);
theta <- extractMatrix(df, units=units, drop=TRUE);
thetaR <- extractMatrix(dfR, units=units, drop=TRUE);
C <- 2 * theta/thetaR;
cn <- RawCopyNumbers(C, pos, chromosome=chr);
plot(cn, col="gray", cex=0.8, ylim=c(0,4));
xOut <- seq(xMin(cn), xMax(cn), by=0.5e6);
cnS <- gaussianSmoothing(cn, xOut=xOut, sd=1e6);
points(cnS, col="black");
stext(side=3, pos=0, getName(df));
stext(side=3, pos=1, sprintf("Chr%d", chr));
plot(fracB, ylim=c(0,1));
box(col="blue");
stext(side=3, pos=0, getTags(ds, collapse=","));
stext(side=3, pos=1, sprintf("Chr%d", chr));
} # for (chr ...)
devDone();
}
| /scratch/gouwar.j/cran-all/cranData/ACNE/inst/testScripts/system/chipTypes/Mapping250K_Nsp,Sty/test20100517,250K,doACNE.R |
if (interactive()) savehistory();
library("aroma.affymetrix");
library("ACNE");
# - - - - - - - - - - - - - - - - - - - - - - -
# setup dataset and chip names
# - - - - - - - - - - - - - - - - - - - - - - -
log <- Arguments$getVerbose(-10, timestamp=TRUE);
dataSetName <- "HapMap270,100K,CEU,5trios"
chipType <- "Mapping50K_Hind240"
# - - - - - - - - - - - - - - - - - - - - - - -
# Setup annotation data
# - - - - - - - - - - - - - - - - - - - - - - -
cdf <- AffymetrixCdfFile$byChipType(chipType);
print(cdf);
gi <- getGenomeInformation(cdf);
print(gi);
si <- getSnpInformation(cdf);
print(si);
# - - - - - - - - - - - - - - - - - - - - - - -
# Setup data set
# - - - - - - - - - - - - - - - - - - - - - - -
csR <- AffymetrixCelSet$byName(dataSetName, cdf=cdf);
print(csR);
# - - - - - - - - - - - - - - - - - - - - - - -
# Calibrate and normalize
# - - - - - - - - - - - - - - - - - - - - - - -
acc <- AllelicCrosstalkCalibration(csR, model="CRMAv2");
print(acc);
csC <- process(acc, verbose=log);
print(csC);
bpn <- BasePositionNormalization(csC, target="zero");
print(bpn);
csN <- process(bpn, verbose=log);
print(csN);
# - - - - - - - - - - - - - - - - - - - - - - -
# Summarize replicated probes
# - - - - - - - - - - - - - - - - - - - - - - -
chromosomes <- 19:22;
chromosome <- 2;
units <- getUnitsOnChromosome(gi, chromosome);
str(units);
plm <- NmfSnpPlm(csN, mergeStrands=TRUE);
print(plm);
fit(plm, units=units, verbose=log);
ces <- getChipEffectSet(plm);
print(ces);
# - - - - - - - - - - - - - - - - - - - - - - -
# Comparing to default AS-CRMA v2
# - - - - - - - - - - - - - - - - - - - - - - -
plm0 <- RmaSnpPlm(csN, mergeStrands=TRUE);
fit(plm0, units=units, verbose=log);
ces0 <- getChipEffectSet(plm0);
# - - - - - - - - - - - - - - - - - - - - - - -
# Plotting (x,B)
# - - - - - - - - - - - - - - - - - - - - - - -
pos <- getPositions(gi, units);
pos <- pos / 1e6;
array <- 1;
ce <- getFile(ces, array);
freqB <- extractTotalAndFreqB(ce, units=units)[,"freqB"];
ce0 <- getFile(ces0, array);
freqB0 <- extractTotalAndFreqB(ce0, units=units)[,"freqB"];
subplots(2, ncol=1);
par(mar=c(3.7,3.7,1,1)+0.1, mgp=c(2.5,0.8,0), xaxs="i");
plotFreqB(pos, freqB);
stext(side=3, pos=0, sprintf("%s/%s", getName(ces), getName(ce)));
stext(side=3, pos=1, sprintf("Chromosome %d", chromosome));
stext(side=4, pos=1, getChipType(ces, fullname=FALSE), cex=0.8, line=-0.01);
plotFreqB(pos, freqB0);
| /scratch/gouwar.j/cran-all/cranData/ACNE/inst/testScripts/system/chipTypes/Mapping50K_Hind240,Xba240/test20090122,50K,NMF,freqB.R |
if (interactive()) savehistory();
library("aroma.affymetrix");
library("ACNE");
# - - - - - - - - - - - - - - - - - - - - - - -
# setup dataset and chip names
# - - - - - - - - - - - - - - - - - - - - - - -
log <- Arguments$getVerbose(-10, timestamp=TRUE);
dataSetName <- "HapMap270,100K,CEU,5trios"
chipType <- "Mapping50K_Hind240"
# - - - - - - - - - - - - - - - - - - - - - - -
# Setup annotation data
# - - - - - - - - - - - - - - - - - - - - - - -
cdf <- AffymetrixCdfFile$byChipType(chipType);
print(cdf);
gi <- getGenomeInformation(cdf);
print(gi);
si <- getSnpInformation(cdf);
print(si);
# - - - - - - - - - - - - - - - - - - - - - - -
# Setup data set
# - - - - - - - - - - - - - - - - - - - - - - -
csR <- AffymetrixCelSet$byName(dataSetName, cdf=cdf);
print(csR);
# - - - - - - - - - - - - - - - - - - - - - - -
# Calibrate and normalize
# - - - - - - - - - - - - - - - - - - - - - - -
acc <- AllelicCrosstalkCalibration(csR, model="CRMAv2");
print(acc);
csC <- process(acc, verbose=log);
print(csC);
bpn <- BasePositionNormalization(csC, target="zero");
print(bpn);
csN <- process(bpn, verbose=log);
print(csN);
# - - - - - - - - - - - - - - - - - - - - - - -
# Summarize replicated probes
# - - - - - - - - - - - - - - - - - - - - - - -
plm <- NmfSnpPlm(csN, mergeStrands=TRUE);
print(plm);
fit(plm, verbose=log);
ces <- getChipEffectSet(plm);
print(ces);
# - - - - - - - - - - - - - - - - - - - - - - -
# Fragment-length normalization
# - - - - - - - - - - - - - - - - - - - - - - -
fln <- FragmentLengthNormalization(ces, target="zero");
print(fln);
cesN <- process(fln, verbose=log);
print(cesN);
# - - - - - - - - - - - - - - - - - - - - - - -
# Segmentation
# - - - - - - - - - - - - - - - - - - - - - - -
# CBS needs theta = thetaA + thetaB
as <- AlleleSummation(cesN);
cesT <- process(as, verbose=log);
cbs <- CbsModel(cesT);
print(cbs);
ce <- ChromosomeExplorer(cbs);
print(ce);
process(ce, chromosomes=1:5, verbose=log);
| /scratch/gouwar.j/cran-all/cranData/ACNE/inst/testScripts/system/chipTypes/Mapping50K_Hind240,Xba240/test20090128,50K,NMF.R |
library("ACNE");
verbose <- Arguments$getVerbose(-8, timestamp=TRUE);
dataSet <- "HapMap270,100K,CEU,5trios";
chipType <- "Mapping50K_Hind240";
res <- doACNE(dataSet, chipType=chipType, verbose=verbose);
print(res);
ds <- res$total;
dfR <- getAverageFile(ds, verbose=verbose);
df <- getFile(ds, 1);
baf <- getFile(res$fracB, 1);
ugp <- getAromaUgpFile(ds);
fig <- sprintf("%s", getFullName(df));
if (!devIsOpen(fig)) {
devSet(fig, width=10, height=5);
subplots(2*3, nrow=2, byrow=FALSE);
par(mar=c(3,4,2,1)+0.1, pch=".");
for (chr in 1:3) {
units <- getUnitsOnChromosome(ugp, chr);
pos <- getPositions(ugp, units=units);
beta <- extractMatrix(baf, units=units, drop=TRUE);
fracB <- RawAlleleBFractions(beta, pos, chromosome=chr);
theta <- extractMatrix(df, units=units, drop=TRUE);
thetaR <- extractMatrix(dfR, units=units, drop=TRUE);
C <- 2 * theta/thetaR;
cn <- RawCopyNumbers(C, pos, chromosome=chr);
plot(cn, col="gray", cex=0.8, ylim=c(0,4));
xOut <- seq(xMin(cn), xMax(cn), by=0.5e6);
cnS <- gaussianSmoothing(cn, xOut=xOut, sd=1e6);
points(cnS, col="black");
stext(side=3, pos=0, getName(df));
stext(side=3, pos=1, sprintf("Chr%d", chr));
plot(fracB, ylim=c(0,1));
box(col="blue");
stext(side=3, pos=0, getTags(ds, collapse=","));
stext(side=3, pos=1, sprintf("Chr%d", chr));
} # for (chr ...)
devDone();
}
| /scratch/gouwar.j/cran-all/cranData/ACNE/inst/testScripts/system/chipTypes/Mapping50K_Hind240,Xba240/test20100517,50K,doACNE.R |
Binom_Sim <-
function(size,p,N) {
q <- 1-p
x <- numeric(N)
for(i in 1:N){
temp <- runif(1)
j <- 0; cc <- p/(1-p); prob <- (1-p)^size; F <- prob
while(temp >= F){
prob <- cc*(size-j)*prob/(j+1); F <- F+prob; j <- j+1
}
x[i] <- j
}
return(x)
}
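# Illustrative usage (not part of the original ACSWR source): Binom_Sim() draws from a
# Binomial(size, p) distribution by the inverse-transform method, updating the pmf with
# the recursion P(X = j + 1) = P(X = j) * (p/(1 - p)) * (size - j)/(j + 1).
# A quick sanity check on hypothetical inputs: the sample mean should approach size * p.
set.seed(1)
x <- Binom_Sim(size = 10, p = 0.3, N = 1000)
mean(x)   # expected to be close to 10 * 0.3 = 3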
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/Binom_Sim.R |
Ehrenfest <-
function(n) {
States <- c(0, seq(1,2*n))
TPM <- matrix(0,nrow=length(States),ncol=length(States),dimnames=
list(seq(0,2*n),seq(0,2*n)))
tran_prob <- function(i,n) {
tranRow <- rep(0,2*n+1)
if(i==0) tranRow[2] <- 1
if(i==2*n) tranRow[(2*n+1)-1] <- 1
if(i!=0 & i!=2*n) {
j=i+1
tranRow[j-1] <- i/(2*n)
tranRow[j+1] <- 1-i/(2*n)
}
return(tranRow)
}
for(j in 0:(2*n))TPM[j+1,] <- tran_prob(j,n)
return(TPM)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/Ehrenfest.R |
Geom_Sim <-
function(p,n){
q <- 1-p
x <- numeric(n)
for(i in 1:n){
temp <- runif(1)
temp <- 1-temp
j <- 0
while(((temp>q^j) & (temp <= q^{j-1}))==FALSE)j <- j+1
x[i] <- j
}
return(x)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/Geom_Sim.R |
LRNormal2Mean <-
function(x,y,alpha){
xbar <- mean(x); ybar <- mean(y)
nx <- length(x); ny <- length(y)
Sx <- var(x); Sy <- var(y)
Sp <- ((nx-1)*Sx+(ny-1)*Sy)/(nx+ny-2)
tcalc <- abs(xbar-ybar)/sqrt(Sp*(1/nx+1/ny))
conclusion <- ifelse(tcalc>qt(df=nx+ny-2,p=1-alpha/2),
"Reject Hypothesis H","Fail to Reject Hypothesis H")
return(c(tcalc,conclusion,Sp))
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/LRNormal2Mean.R |
LRNormalMean_KV <-
function(x,mu0,alpha,sigma) {
ifelse(abs(sqrt(length(x))*(mean(x)-mu0)/sigma)>qnorm(1-alpha/2),"Reject Hypothesis H","Fail to Reject Hypothesis H")
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/LRNormalMean_KV.R |
LRNormalMean_UV <-
function(x,mu0,alpha){
S <- sd(x); n <- length(x)
ifelse(abs(sqrt(length(x))*(mean(x)-mu0)/S)>qt(1-alpha/2,df=n-1),"Reject Hypothesis H","Fail to Reject Hypothesis H")
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/LRNormalMean_UV.R |
LRNormalVariance_UM <-
function(x,sigma0,alpha){
S <- var(x); n <- length(x)
chidata <- ((n-1)*S)/(sigma0^2)
ifelse((chidata<qchisq(df=n-1,p=alpha/2)|| (chidata>qchisq(df=n-1,p=1-alpha/2))),"Reject Hypothesis H","Fail to Reject Hypothesis H")
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/LRNormalVariance_UM.R |
MPNormal <-
function(mu0, mu1, sigma, n,alpha) {
if(mu0<mu1) k <- qnorm(alpha,lower.tail = FALSE)*sigma/sqrt(n) + mu0
if(mu0>mu1) k <- mu0 - qnorm(alpha,lower.tail = FALSE)*sigma/sqrt(n)
return(k)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/MPNormal.R |
MPPoisson <-
function(Hlambda, Klambda, alpha,n) {
Hlambda <- n*Hlambda
Klambda <- n*Klambda
nn <- n*Hlambda
if(Hlambda<Klambda) {
k <- min(which((1-ppois(0:nn,lambda=Hlambda))<alpha))-1
gamma <- (alpha-1+ppois(k,lambda=Hlambda))/dpois(k,lambda=Hlambda)
return(list=c(k,gamma))
}
else {
k <- max(which((ppois(0:nn,lambda=Hlambda))<alpha))
gamma <- (alpha-ppois(k-1,lambda=Hlambda))/dpois(k,lambda=Hlambda)
return(list=c(k,gamma))
}
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/MPPoisson.R |
MPbinomial <-
function(Hp, Kp, alpha,n) {
if(Hp<Kp){
k <- min(which((1-pbinom(0:n,size=n,prob=Hp))<alpha))-1
gamma <- (alpha-1+pbinom(k,size=n,prob=Hp))/dbinom(k,size=n,prob=Hp)
return(list=c(k,gamma))
}
else {
k <- max(which((pbinom(0:n,size=n,prob=Hp))<alpha))
gamma <- (alpha-pbinom(k-1,size=n,prob=Hp))/dbinom(k,size=n,prob=Hp)
return(list=c(k,gamma))
}
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/MPbinomial.R |
Poisson_Sim <-
function(lambda,n) {
x = numeric(n)
for(i in 1:n){
j = 0; p = exp(-lambda); F = p
temp = runif(1)
while((F>temp)==FALSE){
p = lambda*p/(j+1); F = F+p; j=j+1
}
x[i] = j
}
return(x)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/Poisson_Sim.R |
QH_CI <-
function(x,alpha) {
k <- length(x); n <- sum(x)
QH_lcl <- (1/(2*(sum(x)+qchisq(1-alpha/k,k-1))))*{qchisq(1-alpha/k,k-1)+2*x-sqrt( qchisq(1-alpha/k,k-1)*(qchisq(1-alpha/k,k-1)+ 4*x*(sum(x)-x)/sum(x))) }
QH_ucl <- (1/(2*(sum(x)+qchisq(1-alpha/k,k-1))))*{qchisq(1-alpha/k,k-1)+2*x+sqrt( qchisq(1-alpha/k,k-1)*(qchisq(1-alpha/k,k-1)+ 4*x*(sum(x)-x)/sum(x))) }
return(cbind(QH_lcl,QH_ucl))
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/QH_CI.R |
ST_Ordered <-
function(N,x,p_x){
x <- x[order(p_x,decreasing=TRUE)]
F_x <- cumsum(sort(p_x,decreasing=TRUE))
disc_sim <- numeric(length=N)
for(i in 1:N){
temp <- runif(1)
disc_sim[i] <- x[min(which(F_x>temp))]
}
return(disc_sim)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/ST_Ordered.R |
ST_Unordered <-
function(N,x,p_x) {
F_x <- cumsum(p_x)
disc_sim <- numeric(length=N)
for(i in 1:N){
temp <- runif(1)
disc_sim[i] <- x[min(which(F_x>temp))]
}
return(disc_sim)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/ST_Unordered.R |
TM <-
function(x) {
qs <- quantile(x,c(0.25,0.5,0.75))
return(as.numeric((qs[2]+(qs[1]+qs[3])/2)/2))
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/TM.R |
TMH <-
function(x) {
qh <- fivenum(x)
return((qh[3]+(qh[2]+qh[4])/2)/2)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/TMH.R |
UMPExponential <-
function(theta0, n, alpha){
t <- qgamma(1-alpha, shape=n,scale=theta0)
return(t)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/UMPExponential.R |
UMPNormal <-
function(mu0, sigma, n,alpha) {
mu0-qnorm(alpha)*sigma/sqrt(n)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/UMPNormal.R |
UMPUniform <-
function(theta0,n,alpha) return(theta0*(1-alpha)^{1/n})
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/UMPUniform.R |
WilsonCI <-
function(x,n,alpha) {
phat <- x/n
z <- qnorm(1-alpha/2)
nz2 <- n + z^2
firstterm <- phat*n/nz2
secondterm <- 0.5*z^2/nz2
commonterm <- phat*(1-phat)/n
commonterm <- commonterm * (n^2) * (z^2) / (nz2^2)
commonterm <- commonterm + (0.25 * (z^4) )/ (nz2^2)
commonterm <- sqrt(commonterm)
return(c(firstterm+secondterm-commonterm,firstterm+secondterm+commonterm))
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/WilsonCI.R |
kurtcoeff <-
function (x) {
x <- x[!is.na(x)]
n <- length(x)
mx <- mean(x); sx <- sd(x)*sqrt((n-1)/n)
kurt <- mean((x-mx)^4)/sx^4
return(kurt)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/kurtcoeff.R |
lval <-
function (x, na.rm = TRUE)
{
xna <- is.na(x)
if (na.rm)
x <- x[!xna]
else if (any(xna))
return(rep(NA, 5))
x <- sort(x)
n <- length(x)
cpos <- n
depth <- c()
while (cpos > 1) {
cpos <- (floor(cpos) + 1)/2
if (cpos != 1.5)
depth <- c(depth, cpos)
}
lo <- (x[floor(depth)] + x[ceiling(depth)])/2
hi <- (x[floor(n + 1 - depth)] + x[ceiling(n + 1 - depth)])/2
mids <- (lo + hi)/2
spreads <- hi - lo
out = data.frame(depth, lo, hi, mids, spreads)
labels = c("M", "H", "E", "D", "C", "B", "A", "Z", "Y", "X",
rep("", 1000))
row.names(out) = labels[1:length(dimnames(out)[[1]])]
return(out)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/lval.R |
msteptpm <-
function(TPM,m){
if(m==1) return(TPM) else {
temp <- TPM
for(i in 1:(m-1)) temp=temp%*%TPM
return(temp)
}
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/msteptpm.R |
pareto_density <-
function(x,scale,shape) {
lpd <- ifelse(x<scale, -Inf, log(shape) + shape*log(scale) - (shape+1)*log(x))
return(exp(lpd))
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/pareto_density.R |
pareto_quantile <-
function(p,scale,shape) scale/(1-p)^{1/shape}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/pareto_quantile.R |
powertestplot <-
function(mu0,sigma,n,alpha) {
mu0seq <- seq(mu0-3*sigma, mu0+3*sigma,(6*sigma/100))
betamu <- pnorm(sqrt(n)*(mu0-mu0seq)/sigma-qnorm(1-alpha))
plot(mu0seq,betamu,"l",xlab=expression(mu),ylab="Power of UMP Test",main=expression(paste("H:",mu >= mu[0]," vs K:",mu<mu[0])))
abline(h=alpha)
abline(v=mu0)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/powertestplot.R |
resistant_line <-
function(x,y,iterations) {
three_medians <- function(x,y) {
n <- length(x)
k <- n %% 3
dix <- sort(x,index.return=TRUE)$ix
x <- x[dix]; y <- y[dix]
if(k==0) {
t <- n/3
xleft <- x[1:t]; xmid <- x[(t+1):(2*t)]; xright <- x[(2*t+1):n]
yleft <- y[1:t]; ymid <- y[(t+1):(2*t)]; yright <- y[(2*t+1):n]
}
if(k==1) {
t <- (n-1)/3
xleft <- x[1:t]; xmid <- x[(t+1):(2*t+1)]; xright <- x[(2*t+2):n]
yleft <- y[1:t]; ymid <- y[(t+1):(2*t+1)]; yright <- y[(2*t+2):n]
}
if(k==2) {
t <- (n-2)/3
xleft <- x[1:(t+1)]; xmid <- x[(t+2):(2*t+1)]; xright <- x[(2*t+2):n]
yleft <- y[1:(t+1)]; ymid <- y[(t+2):(2*t+1)]; yright <- y[(2*t+2):n]
}
xlm <- median(xleft); xmm <- median(xmid); xrm <- median(xright)
ylm <- median(yleft); ymm <- median(ymid); yrm <- median(yright)
xmed = c(xlm,xmm,xrm); ymed = c(ylm,ymm,yrm)
b1 <- (yrm-ylm)/(xrm-xlm)
# b0 <- ((ylm+ymm+yrm)-b1*(xlm+xmm+xrm))/3
b0 <- mean(ymed-b1*(xmed-xmed[2]))
bl <- (ymm-ylm)/(xmm-xlm); br <- (yrm-ymm)/(xrm-xmm)
the_medians = data.frame(xmed=xmed,ymed=ymed)
return(list(the_medians=the_medians,coeffs = c(b0,b1),bl=bl,br=br,xCenter=xmm))
}
tms <- three_medians(x,y)$the_medians
b0=b1=0
bl=br=0
resid <- y
for(i in 1:iterations) {
currmodel <- three_medians(x,resid)
b0 <- b0+currmodel$coeffs[1]
b1 <- b1+currmodel$coeffs[2]
if(i==1) {
bl <- currmodel$bl
br <- currmodel$br
}
resid <- y - (b0+b1*(x-currmodel$xCenter))
}
return(list(coeffs=c(b0,b1),tms=tms,xCenter = currmodel$xCenter, hsr = br/bl))
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/resistant_line.R |
siegel.tukey <-
function(x,y) {
m <- length(x);n <- length(y)
N <- m+n
an <- function(N){
TEMP <- NULL
for(i in 1:N){
if(i<=N/2){
if(i%%2==0) TEMP[i] <- 2*i else TEMP[i] <- 2*i-1
}
if(i>N/2){
if(i%%2==0) TEMP[i] <- 2*(N-i)+2 else TEMP[i] <- 2*(N-i)+1
}
}
return(TEMP)
}
z <- function(x,y){
TEMP2 <- cbind(c(x,y),c(rep(1,length(x)),rep(0,length(y))))
return(TEMP2[order(TEMP2[,1]),2])
}
sn <- sum(z(x,y)*an(N))
zz <- (sn-m*(N+1)/2)/sqrt(m*n*(N+1)/12)
return(c(zz,pnorm(zz)))
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/siegel.tukey.R |
skewcoeff <-
function(x) {
x <- x[!is.na(x)]
n <- length(x)
mx <- mean(x); sx <- sd(x)*sqrt((n-1)/n)
skew <- mean((x-mx)^3)/sx^3
return(skew)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/skewcoeff.R |
stationdistTPM <-
function(M) {
eigenprob <- eigen(t(M))
temp <- which(round(eigenprob$values,1)==1)
stationdist <- eigenprob$vectors[,temp]
stationdist <- stationdist/sum(stationdist)
return(stationdist)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/stationdistTPM.R |
vonNeumann <-
function(x,n) {
rx <- NULL
d <- max(2,length(unlist(strsplit(as.character(x),""))));
getNext <- function(x,d) {
temp <- x^2
tbs <- as.numeric(unlist(strsplit(as.character(temp),""))) # to be split
tbs_n <- length(tbs);
diff_n <- 2*d - tbs_n;
dn <- ceiling(d/2)
ifelse(diff_n == 0, tbs <- tbs, tbs <- c(rep(0,diff_n),tbs))
tbs_n <- length(tbs)
NEXT <- tbs[-c(1:dn,((tbs_n-dn+1):tbs_n))]
return(as.numeric(paste(NEXT,collapse="")))
}
rx[1] <- x
for(i in 2:(n+1)) rx[i] <- getNext(rx[i-1],d)
return(rx)
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/vonNeumann.R |
ww.test <-
function(x,y) {
runfunction <- function(x,y){
xind <- rep(1,length(x))
yind <- rep(2,length(y))
xy <- c(x,y); xyind <- c(xind,yind);grand <- cbind(xy,xyind)
grand <- grand[order(grand[,1]),]
num_of_runs <- sum(diff(grand[,2])!=0)+1
return(num_of_runs)
}
m <- length(x); n <- length(y)
mu0 <- 1+2*m*n/(m+n)
var0 <- 2*m*n*(2*m*n-m-n)/((m+n-1)*(m+n)^2)
test.statistic <- (runfunction(x,y)-mu0)/sqrt(var0)
return(2*(1-pnorm(abs(test.statistic))))
}
| /scratch/gouwar.j/cran-all/cranData/ACSWR/R/ww.test.R |
DistIdealPatt <-
function(Y,Q,weight){
#-----------------------Basic variables----------------------#
#N:number of examinees
#J:number of items
#K:number of attributes
#M:number of ideal attribute patterns, which is equal to 2^K
N <- dim(Y)[1]
J <- dim(Y)[2]
K <- dim(Q)[2]
M <- 2^K
#------------------------------------------------------------#
#A:alpha matrix
#E:eta matrix
A <- alpha (K)
E <- eta (K,J,Q)
#=============================Compute weight===================================#
if (is.null(weight)){ #unspecified weights
p <- apply(Y,2,mean)
if (min(p) == 0 | max(p) == 1)
{
warning("Cannot compute weights because some weights equal to NA or Inf, unweighted Hamming distance will be used.")
weight <- c(rep(1,times = J))
}else{
weight <- 1/(p*(1-p))
}
}
#============================compute distance===================================#
dis <- c(rep(NA,M))
for (i in 1:M){
dis[i] <- apply(as.matrix(apply(abs(Y-E[rep(i,times = N),]),2,sum)*weight),2,sum)/N
}
output <- list(dist = dis,weight = weight)
return(output)
}
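# Illustrative usage sketch (not part of the original ACTCD source). Y and Q below are
# randomly generated, purely hypothetical inputs; with weight = NULL the function derives
# the weights 1/(p*(1-p)) from the item means, as in the code above.
set.seed(1)
Q <- matrix(rbinom(10 * 3, 1, 0.5), nrow = 10, ncol = 3)
Q[rowSums(Q) == 0, 1] <- 1                                 # every item measures at least one attribute
Y <- matrix(rbinom(50 * 10, 1, 0.6), nrow = 50, ncol = 10) # 50 examinees, 10 items
DistIdealPatt(Y, Q, weight = NULL)$dist                    # average weighted Hamming distance to each of the 2^3 ideal patterns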
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/DistIdealPatt.R |
alpha <- function(K){
GDINA::attributepattern(K)
}
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/alpha.R |
cd.cluster <-
function(Y, Q, method = c("HACA","Kmeans"), Kmeans.centers = NULL,
Kmeans.itermax = 10,Kmeans.nstart = 1,
HACA.link = c("complete", "ward", "single","average",
"mcquitty", "median", "centroid"),HACA.cut = NULL) {
cluster.method <- match.arg(method)
HACA.link <- match.arg(HACA.link)
#------------------------Input check-------------------------#
input.check(Y,Q,cluster.method=cluster.method)
#-----------------------Basic variables----------------------#
#N:number of examinees
#J:number of items
#K:number of attributes
#M:number of ideal attribute patterns, which is equal to 2^K
N <- dim(Y)[1]
J <- dim(Y)[2]
K <- dim(Q)[2]
M <- 2^K
#------------------------------------------------------------#
# a vector of summed scores for items that measure each of the K attributes
w <- as.matrix(Y)%*%as.matrix(Q)
#========================KMEANS==============================#
if (cluster.method=="Kmeans") {
if (is.null(Kmeans.centers)){
Kmeans.centers <- M
}
kmns <- kmeans(w, Kmeans.centers, Kmeans.itermax, Kmeans.nstart)
Kmeans.class <- kmns$cluster
Kmeans.size <- kmns$size
Kmeans.mean.w <- kmns$centers
Kmeans.wss.w <- kmns$withinss
Kmeans.class <- kmns$cluster
Kmeans.sqmwss.w <- NULL
Kmeans.mean.y <- NULL
for (i in 1:Kmeans.centers)
{
c <- which(Kmeans.class==i)
if (length(c)>1)
{
#the mean of total score for each cluster
mean.y <- mean(apply(Y[c,],1,sum))
#the average within-cluster sum of squares
sqmwss.w <- sqrt(mean(apply((w[c,]-matrix(rep(Kmeans.mean.w[i,], length(c)), length(c),K,byrow=TRUE))^2,1,sum)))
}
if (length(c)==1)
{
mean.y <- sum(Y[c,])
sqmwss.w <- 0
wss.w <- 0
}
if (length(c)==0)
{
mean.y <- NA
sqmwss.w <- NA
wss.w <- NA
}
Kmeans.mean.y <- c(Kmeans.mean.y, mean.y)
Kmeans.sqmwss.w <- c(Kmeans.sqmwss.w, sqmwss.w)
}
output <- list(W=w,size=Kmeans.size, mean.w=Kmeans.mean.w,
wss.w=Kmeans.wss.w, sqmwss.w=Kmeans.sqmwss.w,
mean.y=Kmeans.mean.y, class=Kmeans.class,cluster.method=cluster.method)
}else if(cluster.method=="HACA")
#========================HACA==============================#
{
hc <- hclust(dist(w), method=HACA.link)
if (is.null(HACA.cut)){
HACA.cut<-M
}
HACA.class <- cutree(hc, k=HACA.cut)
HACA.size <- NULL
HACA.mean.w <- NULL
HACA.sqmwss.w <- NULL
HACA.mean.y <- NULL
HACA.wss.w <- NULL
for (i in 1:HACA.cut)
{
c <- which(HACA.class==i)
if (length(c)>1)
{
mean.w <- apply(w[c,],2,mean) #the average w within each group:a vector with K elements
mean.y <- mean(apply(Y[c,],1,sum)) #the average y of each group:a scalar
sqmwss.w <- sqrt(mean(apply((w[c,]-matrix(rep(mean.w,length(c)),
length(c),K, byrow=TRUE))^2,1,sum)))
wss.w <- sum((w[c,]-matrix(rep(mean.w,length(c)),length(c),
K, byrow=TRUE))^2)
}
if (length(c)==1)
{
mean.w <- w[c,]
mean.y <- sum(Y[c,])
sqmwss.w <- 0
wss.w <- 0
}
if (length(c)==0)
{
mean.w <- c(NA,NA,NA)
mean.y <- NA
sqmwss.w <- NA
wss.w <- NA
}
HACA.size <- c(HACA.size, length(c))
HACA.mean.w <- rbind(HACA.mean.w, mean.w)
HACA.mean.y <- c(HACA.mean.y, mean.y)
HACA.sqmwss.w <- c(HACA.sqmwss.w, sqmwss.w)
HACA.wss.w <- c(HACA.wss.w, wss.w)
}
output <- list(W=w,size=HACA.size, mean.w=HACA.mean.w,
wss.w=HACA.wss.w, sqmwss.w=HACA.sqmwss.w,
mean.y=HACA.mean.y,class=HACA.class,cluster.method=cluster.method)
}
class(output) <- "cd.cluster"
return(output)
}
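# Illustrative usage sketch (not part of the original ACTCD source); the response matrix Y
# and Q-matrix below are randomly generated, purely hypothetical data.
set.seed(1)
Q <- matrix(rbinom(15 * 3, 1, 0.5), nrow = 15, ncol = 3)
Q[rowSums(Q) == 0, 1] <- 1
Y <- matrix(rbinom(100 * 15, 1, 0.6), nrow = 100, ncol = 15)
cl <- cd.cluster(Y, Q, method = "HACA", HACA.link = "complete")
print(cl)   # number of examinees in each of the 2^3 = 8 clusters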
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/cd.cluster.R |
eta <-
function (K,J,Q)
{
M <- 2^K
A <- alpha(K)
tmp <- matrix(NA,M,J)
for (g in 1:M){ #g is latent pattern
for (j in 1:J){ #j is item
tmp[g,j] <- ifelse(all(as.logical(A[g,]^Q[j,])),1,0)
}
}
return(tmp)
}
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/eta.R |
input.check <-
function(Y, Q, cluster.method="HACA", HACA.link="complete",
label.method="2a",perm=NULL)
{
##################################################
# Check Y and Q #
##################################################
if (!is.matrix(Y)){
Y <- as.matrix(Y)
}
if (!is.matrix(Q)){
Q <- as.matrix(Q)
}
if (ncol(Y) != nrow(Q)) {
return(warning("Item numbers in the response matrix are not equal to that in Q-matrix."))
}
if (!all(Y %in% c(1, 0))) {
return(warning("Only 0 and 1 are allowed in the response matrix."))
}
if (!all(Q %in% c(1, 0))) {
return(warning("Only 0 and 1 are allowed in the Q-matrix."))
}
##################################################
# Check method #
##################################################
if (cluster.method != "Kmeans" && cluster.method != "HACA")
{
return(warning("Only Kmeans or HACA can be used as cluster method options."))
}
if (label.method == "1" && ncol(Q) != 3 && ncol(Q) != 4)
{
return(warning('label method "1" is only available for 3 or 4 attributes.'))
}
if (label.method == "1" && is.null(perm))
{
return(warning('when label method "1" used, the "perm" is needed to be specified.'))
}
}
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/input.check.R |
labeling <-
function(Y,Q,cd.cluster.object,method = c("2b","2a","1","3"),perm=NULL){
label.method <- match.arg(method)
#distance <- match.arg(distance)
#------------------------Input check-------------------------#
input.check(Y,Q,label.method = label.method,perm=perm)
Y <- as.matrix(Y)
Q <- as.matrix(Q)
#-----------------------Basic variables----------------------#
#N:number of examinees
#J:number of items
#K:number of attributes
#M:number of ideal attribute patterns, which is equal to 2^K
N <- dim(Y)[1]
J <- dim(Y)[2]
K <- dim(Q)[2]
if (label.method=="3"|label.method=="1") {
M <- 2^K
}else {
M <- length(cd.cluster.object$size)
}
if (M!=length(cd.cluster.object$size)) {
return(warning("The number of latent clusters cannot be
specified if label method is not '2a' or '2b'."))
}
#------------------------------------------------------------#
cluster <- as.matrix(cd.cluster.object$class) #cluster memberships
#read alpha
A <- alpha(K)
Y <- cbind(Y,cluster) #attach cluster membership for each person
matchgroup <- matrix(NA,M,3)
#compute the distance matrix between clusters and ideal patterns
#=======================Calculate common weights==============================#
p <- apply(Y[,1:J],2,mean)
if (min(p) == 0 | max(p) == 1)
{
warning("Cannot compute weights because some weights equal to NA or Inf, unweighted Hamming distance will be used.")
weight <- c(rep(1,times = J))
}else{
weight <- 1/(p * (1-p))
}
#=======================Calculate distance matrix==============================#
#dist will be M by M matrix, rows represent clusters and cols represent ideal #
#pattern, cell(i,j) represents the distance between ith cluster and jth ideal #
#pattern. #
#==============================================================================#
dist <- NULL
for (i in 1:M)
{
temp <- DistIdealPatt(Y[which(Y[,(J+1)] == i),1:J],Q,weight)$dist
dist <- rbind(dist,temp) #this matrix is ordered from 1 to M
}
#==================================Labeling====================================#
if (label.method == "2b")
{
matchgroup[,1] <- c(1:M) #the number of clusters
matchgroup[,2] <- as.matrix(apply(dist,1,which.min))#assign labels
} else if (label.method == "1")
{
# Partially order mean.w based on the order of mean.y
order <- order(cd.cluster.object$mean.y)
W <- ((cd.cluster.object$mean.w)[,1:K])[order,]## partial order
P <- perm
#change mode
storage.mode(W) <- "numeric"
storage.mode(A) <- "integer"
storage.mode(P) <- "integer"
K <-as.integer(K)
length <- as.integer(dim(P)[1])
#length <- as.integer(2^K)
res <- matrix(as.integer(0),length,1)
#Compute the min IC
#dyn.load("ICIndex.dll")
rownum <- .Fortran("ICIndex",res,K,length,P,A,W)[[1]]
#Get the min location
minloc <- which(rownum[,1] == min(rownum),arr.ind = TRUE)
#Get the pattern of min IC
if (length(minloc) == 1){
pattern <- P[minloc,]
}else{
pattern <- P[minloc[sample(length(minloc),size = 1)],]
}
matchgroup[,1] <- order
matchgroup[,2] <- pattern
}else if(label.method == "2a")
{
matchgroup[,1] <- c(1:M)
#Compute cluster size
for (i in 1:M)
{
matchgroup[i,3] <-length(Y[which(Y[,(J+1)] == i),(J+1)])
}
#Order the clusters based on cluster size decreasingly
#randomize it first in case of ties
matchgroup <- matchgroup[sample(1:nrow(matchgroup)),]
matchgroup <- matchgroup[order(matchgroup[,3],decreasing = TRUE,na.last = NA),]
for (i in matchgroup[,1])
{
col <- setdiff(c(1:M),matchgroup[,2])
if (length(col) != 0)
{
if (length(which(dist[i,] == min(dist[i,col]),arr.ind = TRUE)) == 1) #no ties
{
matchgroup[which(matchgroup[,1] == i,arr.ind = TRUE),2] <- which(dist[i,] == min(dist[i,col]),arr.ind = TRUE)
}else #there exist ties
{
x <- setdiff(which(dist[i,] == min(dist[i,col]),arr.ind = TRUE),matchgroup[,2])
if (length(x) == 1){
matchgroup[which(matchgroup[,1] == i,arr.ind = TRUE),2] <- x
} else {
matchgroup[which(matchgroup[,1] == i,arr.ind = TRUE),2] <- sample(x,size = 1)
}
}
}
}
} else if (label.method == "3"){
#Compute Mean for each cluster
meanScore <- aggregate(apply(Y[,1:J],1,sum),by = list(Y[,(J+1)]),FUN = "mean")
matchgroup[,1] <- meanScore[,1]#number of cluster
matchgroup[,3] <- meanScore[,2]#size of cluster
#randomize it in case of ties
matchgroup <- matchgroup[sample(1:nrow(matchgroup)),]
matchgroup <- matchgroup[order(matchgroup[,3]),]
matchgroup[1,2] <- 1 #min value
matchgroup[M,2] <- M #max value
#========================================IF nested================================================#
# test if one cluster is nested within another one cell(i,j)=0 represents ith cluster is NOT nested
# within jth cluster. cell(i,j)!=0 represents ith cluster is nested with jth cluster.
#=================================================================================================#
if.nested <- alpha(K)%*%t(alpha(K))
for (i in 2:M){
for (j in 2:M){
if (if.nested[i,j] == max(if.nested[i,])){
if.nested[i,j] <- 1
}else{
if.nested[i,j] <- 0
}
}
}
#========================initialize gi========================#
# gi is the clusters of which rowsum of alpha is 1 #
# gi is the group in which person only master one skill #
#=============================================================#
index <- matrix(NA,M,2)
index[,1] <- as.matrix(seq(1:M))
index[,2] <- as.matrix(apply(alpha(K),1,sum))
gi <- index[which(index[,2] == 1),1]
for (i in 2:(M-1))
{
#================finalize gi========================#
gi <- setdiff(gi,matchgroup[,2])
#look for gplus
for (gplus in 2:M){
if (apply(as.matrix(if.nested[gi,gplus]),2,sum) == 0){ #gi is not nested within gplus
gi <- setdiff(union(gi,gplus),matchgroup[,2]) #update gi
}
}
#================labelling========================#
g <- gi #all possible clusters for labelling at current step
if (length(g) != 0)
{
if (length(which(dist[matchgroup[i,1],g] == min(dist[matchgroup[i,1],g]),arr.ind = TRUE)) == 1) # no ties
{
matchgroup[i,2] <- g[which(dist[matchgroup[i,1],g] == min(dist[matchgroup[i,1],g]),arr.ind = TRUE)]
}else #there are ties
{
x <- which(dist[matchgroup[i,1],g] == min(dist[matchgroup[i,1],g]),arr.ind = TRUE)
if (length(x) == 1){
matchgroup[i,2] <- g[x]
}else{
matchgroup[i,2] <- g[sample(x,size = 1)]
}
}
}
}
}
#================Calculate output================#
label.cluster <- array(NA,dim = length(cluster))
for (i in 1:length(cluster)){
if (cluster[i]%in%matchgroup[,1]){
label.cluster[i] <- matchgroup[which(matchgroup[,1] == cluster[i]),2]
}
}
label <- matrix(NA,N,K)
for (i in 1:N)
{
label[i,] <- alpha(K)[label.cluster[i],]
}
att.patt <- rep(NA,times=2^K)
if (M==2^K){
freq <- rep(0,times=2^K)
} else {
freq <- rep(NA,times=2^K)
}
for (i in 1:2^K){
att.patt[i] <- paste(A[i,],collapse=" ")
freq[i] <- sum(label.cluster==i)
}
att.dist <- data.frame(att.patt,freq)
output <- list(att.pattern = label, att.class = label.cluster, att.dist = att.dist, label.method=label.method)
class(output) <- "labeling"
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/labeling.R |
npar.CDM <-
function(Y, Q, cluster.method = c("HACA","Kmeans"), Kmeans.centers = NULL,
Kmeans.itermax = 10, Kmeans.nstart = 1, HACA.link =
c("complete", "ward", "single","average", "mcquitty",
"median", "centroid"),HACA.cut = NULL,label.method =
c("2b","2a","1","3"),perm=NULL)
{
s1 <- Sys.time()
cluster.method <- match.arg(cluster.method)
HACA.link <- match.arg(HACA.link)
label.method <- match.arg(label.method)
#-----------------------Input check--------------------------#
input.check(Y = Y,Q = Q,cluster.method = cluster.method,label.method = label.method,perm=perm)
#-----------------------Basic variables----------------------#
#N:number of examinees
#J:number of items
#K:number of attributes
#M:number of ideal attribute patterns, which is equal to 2^K
N <- dim(Y)[1]
J <- dim(Y)[2]
K <- dim(Q)[2]
M <- 2^K
#------------------------------------------------------------#
#A:alpha matrix
#E:eta matrix
A <- alpha (K)
E <- eta (K,J,Q)
#====================cluster analysis================================#
cd.cluster.object <- cd.cluster(Y, Q, cluster.method, Kmeans.centers,
Kmeans.itermax, Kmeans.nstart, HACA.link,
HACA.cut)
#===============================Labelling=============================#
labels <- labeling(Y, Q, cd.cluster.object, method = label.method,perm=perm)
s2 <- Sys.time()
output <- list(att.pattern = labels$att.pattern, att.class = labels$att.class,
att.dist = labels$att.dist, alpha = A, eta = E,
cluster.size = cd.cluster.object$size,
cluster.class = cd.cluster.object$class,
starting.time = s1, end.time = s2, cluster.method = cluster.method,
label.method = label.method)
class(output) <- "npar.CDM"
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/npar.CDM.R |
print.labeling <-
structure(function(x, ...)
{
output2 <- x$att.dist
cat("-------------------------------------------\n")
cat("labeling for ACTCD\n")
cat(paste(paste("based on", x$label.method, "label method"),"\n"))
cat("-------------------------------------------\n")
cat("The distribution of attribute patterns:\n")
print(output2)
}, export = FALSE, S3class = "labeling", modifiers = "public")
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/print.Labeling.R |
print.cd.cluster <-
structure(function(x, ...)
{
output1 <- cbind(c(1:length(x$size)),x$size)
colnames(output1) <- c("clusters #","freq")
cat("-------------------------------------------\n")
cat("Cluster analysis for ACTCD\n")
cat(paste(paste("based on", x$cluster.method, "algorithm"),"\n"))
cat("-------------------------------------------\n")
cat("Number of examinees within each cluster:\n")
print(output1)
}, export = FALSE, S3class = "cd.cluster", modifiers = "public")
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/print.cd.cluster.R |
print.npar.CDM <-
structure(function(x, ...)
{
output <- x$att.dist
cat("ACTCD: Asymptotic Classification Theory for Cognitive Diagnosis\n")
cat("-------------------------------------------\n")
cat(paste(paste("Analysis starts at", x$starting.time),"\n"))
cat(paste(paste("Analysis ends at", x$end.time),"\n"))
cat(paste(paste("based on", x$cluster.method, "cluster algorithm and", x$label.method, "label method"),"\n"))
cat("-------------------------------------------\n")
cat("The distribution of estimated attribute patterns\n")
print(output)
}, export = FALSE, S3class = "npar.CDM", modifiers = "public")
| /scratch/gouwar.j/cran-all/cranData/ACTCD/R/print.npar.CDM.R |
#' Construct shift matrix
#'
#' Internal function for creation of sparse shift matrix.
#'
#' @param n Integer specifying dimensions of the shift matrix.
#' @param q Integer specifying the order of the shift matrix. Value `q = 1` (resp. `q = -1`) indicates the upper (resp. lower) shift matrix. Larger (resp. smaller) values represent higher powers of the respective shift matrices.
#'
#' @return Returns a sparse matrix (class `"ngCMatrix"`).
#'
#' @export
#' @keywords internal
shiftMatrix <- function(n, q) {
Indices <- cbind(
max(-q, 0) + seq_len(max(n - abs(q), 0)),
max(q, 0) + seq_len(max(n - abs(q), 0))
)
Output <- Matrix::sparseMatrix(i = Indices[, 1], j = Indices[, 2], dims = c(n, n))
return(Output)
}
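# Illustrative usage (not part of the original source): q = 1 produces the upper shift
# matrix (non-zero entries on the first superdiagonal), while negative q places the
# entries |q| positions below the diagonal.
as.matrix(shiftMatrix(4, 1))    # entries at (1,2), (2,3), (3,4)
as.matrix(shiftMatrix(4, -2))   # entries at (3,1), (4,2)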
| /scratch/gouwar.j/cran-all/cranData/ACV/R/ShiftMatrix.R |
#' Estimate out-of-sample loss
#'
#' Function `estimateL()` estimates the out-of-sample loss of a given algorithm on specified time-series. By default, it uses the optimal weighting scheme which exploits also the in-sample performance in order to deliver a more precise estimate than the conventional estimator.
#'
#' @param y Univariate time-series object.
#' @param algorithm Algorithm which is to be applied to the time-series. The object which the algorithm produces should respond to `fitted` and `forecast` methods.
#' Alternatively in the case of more complex custom algorithms, the algorithm may be a function which takes named arguments `("yInSample", "yOutSample", "h")` or `("yInSample", "yOutSample", "h", "xregInSample", "xregOutSample")` as inputs and produces a list with named elements `("yhatInSample", "yhatOutSample")` containing vectors of in-sample and out-of-sample forecasts.
#' @param m Length of the window on which the algorithm should be trained.
#' @param h Number of predictions made after a single training of the algorithm.
#' @param v Number of periods by which the estimation window progresses forward once the predictions are generated.
#' @param xreg Matrix of exogenous regressors supplied to the algorithm (if applicable).
#' @param lossFunction Loss function used to compute contrasts (defaults to squared error).
#' @param method Can be set to either `"optimal"` for the estimator which optimally utilizes also the in-sample performance or `"convetional"` for the conventional loss estimator.
#' @param Phi User can also directly supply `Phi`; the matrix of contrasts produced by `tsACV`. In this case parameters: `y`, `algorithm`, `m`, `h`, `v`, `xreg`, `lossFunction` are ignored.
#' @param bw Bandwidth for the long run variance estimator. If `NULL`, `bw` is selected according to `(3/4)*n^(1/3)`.
#' @param rhoLimit Parameter `rhoLimit` limits to the absolute value of the estimated `rho` coefficient. This is useful as estimated values very close to 1 might cause instability.
#' @param ... Other parameters passed to the algorithm.
#'
#' @return List containing loss estimate and its estimated variance along with some other auxiliary information like the matrix of contrasts `Phi` and the weights used for computation.
#'
#' @examples
#' set.seed(1)
#' y <- rnorm(40)
#' m <- 36
#' h <- 1
#' v <- 1
#' estimateL(y, forecast::Arima, m = m, h = h, v = v)
#'
#' @export
estimateL <- function(y, algorithm, m, h = 1, v = 1, xreg = NULL, lossFunction = function(y, yhat) {(y - yhat)^2}, method = "optimal", Phi = NULL, bw = NULL, rhoLimit = 0.99, ...) {
if (is.null(Phi)) {
Phi <- tsACV(y, algorithm, m, h, v, xreg, lossFunction, ...)
}
temp <- infoPhi(Phi)
K <- temp$K
mn <- temp$mn
m <- temp$m
v <- temp$v
h <- temp$h
mh <- temp$mh
J <- temp$J
switch(method,
"conventional" = {
lambda <- do.call(c, J) > m
lambda <- lambda / sum(lambda)
if (v != h) {
warning("Currently, standart error estimation is supported only for h = v.")
var <- NA
} else {
phiOutSample <- stats::na.omit(c(Phi))[do.call(c, J) > m]
var <- estimateLongRunVar(phiOutSample, bw) / length(phiOutSample)
}
rho <- NA
},
"optimal" = {
b <- sapply(1:mh, function(x) {
ifelse(x > m, sum(sapply(J, function(Jk) {
x %in% Jk
})), 0)
})
b <- b / sum(b)
rho <- estimateRho(Phi, rhoLimit)
I <- shiftMatrix(mh, 0)
Z1 <- shiftMatrix(mh, 0) + rho^2 / (1 - rho^2) * shiftMatrix(mh, -v) %*% shiftMatrix(mh, v)
Z2 <- shiftMatrix(mh, 0) + rho^2 / (1 - rho^2) * (shiftMatrix(mh, -v) %*% shiftMatrix(mh, v) + shiftMatrix(mh, v) %*% shiftMatrix(mh, -v))
Z3 <- shiftMatrix(mh, 0) + rho^2 / (1 - rho^2) * shiftMatrix(mh, v) %*% shiftMatrix(mh, -v)
Zu <- -rho / (1 - rho^2) * shiftMatrix(mh, v)
Zl <- -rho / (1 - rho^2) * shiftMatrix(mh, -v)
BViB <- I[, J[[1]]] %*% Z1[J[[1]], J[[1]]] %*% I[J[[1]], ] + I[, J[[2]]] %*% Zu[J[[2]], J[[1]]] %*% I[J[[1]], ]
for (k in seq(2, K - 1, length = max(0, K - 2))) {
BViB <- BViB + I[, J[[k - 1]]] %*% Zl[J[[k - 1]], J[[k]]] %*% I[J[[k]], ] + I[, J[[k]]] %*% Z2[J[[k]], J[[k]]] %*% I[J[[k]], ] + I[, J[[k + 1]]] %*% Zu[J[[k + 1]], J[[k]]] %*% I[J[[k]], ]
}
BViB <- BViB + I[, J[[K - 1]]] %*% Zl[J[[K - 1]], J[[K]]] %*% I[J[[K]], ] + I[, J[[K]]] %*% Z3[J[[K]], J[[K]]] %*% I[J[[K]], ]
BViBi <- solve(BViB)
ViB <- do.call(rbind, c(
list(Z1[J[[1]], J[[1]]] %*% I[J[[1]], ] + Zl[J[[1]], J[[2]]] %*% I[J[[2]], ]),
lapply(seq(2, K - 1, length = max(0, K - 2)), function(k) {
Zu[J[[k]], J[[k - 1]]] %*% I[J[[k - 1]], ] + Z2[J[[k]], J[[k]]] %*% I[J[[k]], ] + Zl[J[[k]], J[[k + 1]]] %*% I[J[[k + 1]], ]
}),
list(Zu[J[[K]], J[[K - 1]]] %*% I[J[[K - 1]], ] + Z3[J[[K]], J[[K]]] %*% I[J[[K]], ])
))
lambda <- as.vector(ViB %*% BViBi %*% b)
if (v != h) {
warning("Currently, standart error estimation is supported only for h = v.")
var <- NA
} else {
phiOutSample <- stats::na.omit(c(Phi))[do.call(c, J) > m]
varRatio <- c(t(b) %*% BViBi %*% b) / (1 / length(phiOutSample))
var <- estimateLongRunVar(phiOutSample, bw) / length(phiOutSample) * varRatio
}
}
)
output <- list(
estimate = sum(stats::na.omit(c(Phi)) * lambda),
var = var,
lambda = lambda,
Phi = Phi,
rho = rho
)
class(output) <- "estimateL"
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ACV/R/estimateL.R |
#' Estimate long-run variance
#'
#' Internal function for estimating the long-run variance.
#'
#' @param x Univariate time-series object.
#' @param bw Bandwidth for long run variance estimation.
#'
#' @return Estimated long run variance (numeric vector of length 1).
#'
#' @export
#' @keywords internal
estimateLongRunVar <- function(x, bw = NULL) {
if (is.null(bw)) {
bw <- floor(0.75 * length(x)^(1 / 3))
}
weightsACF <- seq(1, 0, by = -(1 / (bw + 1)))[-(bw + 2)]
ACF <- as.vector(stats::acf(x, type = "covariance", plot = F, lag.max = bw)$acf)
LongRunVar <- sum(ACF * weightsACF[1:length(ACF)] * c(1, rep(2, bw))[1:length(ACF)])
return(LongRunVar)
}
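# Illustrative usage (not part of the original source): for white noise the long-run
# variance estimate is close to the ordinary variance, while a positively autocorrelated
# series yields a larger value; the bandwidth defaults to floor(0.75 * n^(1/3)).
set.seed(1)
e <- rnorm(500)
estimateLongRunVar(e)                                             # roughly 1
estimateLongRunVar(stats::filter(e, 0.7, method = "recursive"))   # noticeably larger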
| /scratch/gouwar.j/cran-all/cranData/ACV/R/estimateLongRunVar.R |
#' Estimate `rho` coefficient
#'
#' Internal function for estimating the rho coefficient.
#'
#' @param Phi Matrix of computed contrasts generated by `tsACV()`.
#' @param rhoLimit Parameter `rhoLimit` limits to the absolute value of the estimated `rho` coefficient. This is useful as estimated values very close to 1 might cause instability.
#'
#' @return Estimated `rho` coefficient (numeric vector of length 1).
#'
#' @export
#' @keywords internal
estimateRho <- function(Phi, rhoLimit) {
temp <- infoPhi(Phi)
K <- temp$K
mn <- temp$mn
m <- temp$m
v <- temp$v
h <- temp$h
mh <- temp$mh
J <- temp$J
Target <- 1 - 1 / (2 * stats::var(c(Phi), na.rm = T)) * sapply(1:K, function(k) {
apply((Phi - Phi[, k])^2, 2, mean, na.rm = T)
})
Target <- sapply(1:K, function(k) mean(Target[cbind((k):K, 1:(K - k + 1))]))
Weights <- pmax(mh - (0:(K - 1)) * v, 0) * (K:1)
moments <- function(par) {
return(ifelse(is.na(Target), 0, Target - par^(v * (0:(K - 1)))))
}
objective <- function(par) {
return(sum(moments(par)^2 * Weights))
}
fit <- stats::optimize(objective, interval = c(-rhoLimit, rhoLimit))
rho <- fit$minimum
return(rho)
}
| /scratch/gouwar.j/cran-all/cranData/ACV/R/estimateRho.R |
#' Recover information about `Phi`
#'
#' Internal function which recovers all the necessary parameters using which the `Phi` was constructed and some additional useful variables derived from these parameters.
#'
#' @param Phi Matrix of computed contrasts generated by `tsACV()`.
#'
#' @return List of parameters that were used to generate `Phi`.
#'
#' @export
#' @keywords internal
infoPhi <- function(Phi) {
K <- ncol(Phi)
mn <- nrow(Phi)
m <- sum(!is.na(Phi[, K]))
v <- sum(cumprod(is.na(Phi[, 2])))
h <- sum(!is.na(Phi[, 1])) - m
mh <- m + h
J <- lapply(1:K, function(k) 1:sum(!is.na(Phi[, k])))
return(
list(
K = K,
mn = mn,
m = m,
v = v,
h = h,
mh = mh,
J = J
)
)
}
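# Illustrative usage (not part of the original source), mirroring the tsACV() example
# from this package: infoPhi() recovers the window parameters from the contrast matrix.
set.seed(1)
Phi <- tsACV(rnorm(40), forecast::Arima, m = 36, h = 1, v = 1)
str(infoPhi(Phi))   # K, m, h, v and the index sets J used to build Phi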
| /scratch/gouwar.j/cran-all/cranData/ACV/R/infoPhi.R |
#' Printing method for class `"estimateL"`
#'
#' Internal printing method for `"estimateL"` object generated by `estimateL()`.
#'
#' @param x Object of class `"estimateL"`.
#'
#' @return Does not return a value. It is used to print out the loss estimate along its standard error and confidence interval.
#'
#' @export
#' @keywords internal
print.estimateL <- function(x, ...) {
temp <- data.frame(
"Estimate" = x$estimate,
"Std.Error" = sqrt(x$var),
"CI.2.5" = stats::qnorm(0.025, mean = x$estimate, sd = sqrt(x$var)),
"CI.97.5" = stats::qnorm(0.975, mean = x$estimate, sd = sqrt(x$var))
)
row.names(temp) <- "Loss"
print(temp)
return()
}
| /scratch/gouwar.j/cran-all/cranData/ACV/R/print.estimateL.R |
#' Printing method for class `"testL"`
#'
#' Internal printing method for `"testL"` object generated by `testL()`.
#'
#' @param x Object of class `"testL"`.
#'
#' @return Does not return a value. It is used to print out the test results.
#'
#' @export
#' @keywords internal
print.testL <- function(x, ...) {
cat(x$test, "test\n")
cat("\nEstimated difference L(algorithm1) - L(algorithm2):\n", x$estimate)
cat("\nAlternative hypothesis:\n L(algorithm1) - L(algorithm2) ", x$Ha)
cat("\nt-stat:\n", x$tval)
cat("\np-val:\n", x$pval)
return()
}
| /scratch/gouwar.j/cran-all/cranData/ACV/R/print.testL.R |
#' Test equality of out-of-sample losses of two algorithms
#'
#' Function `testL()` tests the null hypothesis of equal predictive ability of `algorithm1` and `algorithm2` on time series `y`. By default, it uses the optimal weighting scheme which exploits also the in-sample performance in order to deliver more power than the conventional tests.
#'
#' @param y Univariate time-series object.
#' @param algorithm1 First algorithm which is to be applied to the time-series. The object which the algorithm produces should respond to `fitted` and `forecast` methods.
#' Alternatively in the case of more complex custom algorithms, the algorithm may be a function which takes named arguments `("yInSample", "yOutSample", "h")` or `("yInSample", "yOutSample", "h", "xregInSample", "xregOutSample")` as inputs and produces a list with named elements `("yhatInSample", "yhatOutSample")` containing vectors of in-sample and out-of-sample forecasts.
#' @param algorithm2 Second algorithm. See above.
#' @param m Length of the window on which the algorithm should be trained.
#' @param h Number of predictions made after a single training of the algorithm.
#' @param v Number of periods by which the estimation window progresses forward once the predictions are generated.
#' @param xreg Matrix of exogenous regressors supplied to the algorithm (if applicable).
#' @param lossFunction Loss function used to compute contrasts (defaults to squared error).
#' @param method Can be set to either `"optimal"` for the test which optimally utilizes also the in-sample performance or `"convetional"` for the conventional test.
#' @param test Type of the test which is to be executed. Can attain values `"Diebold-Mariano"` for the canonical test of equal predictive ability or `"Ibragimov-Muller"` for the sub-sampling t-test.
#' @param Ha Alternative hypothesis. Can attain values `"!=0"` for two sided test or `"<0"` and `">0"` for one sided tests.
#' @param Phi User can also directly supply `Phi=Phi1-Phi2`; the matrix of contrasts differentials produced by `tsACV`. In this case parameters: `y`, `algorithm`, `m`, `h`, `v`, `xreg`, `lossFunction` are ignored.
#' @param bw Applicable to `"Diebold-Mariano"` test. Bandwidth for the long run variance estimator. If `NULL`, `bw` is selected according to `(3/4)*n^(1/3)`.
#' @param groups Applicable to `"Ibragimov-Muller"` test. The number of groups to which the data is to be divided.
#' @param rhoLimit Parameter `rhoLimit` limits to the absolute value of the estimated `rho` coefficient. This is useful as estimated values very close to 1 might cause instability.
#' @param ... Other parameters passed to algorithms.
#'
#' @return List containing loss differential estimate and associated p-value along with some other auxiliary information like the matrix of contrasts differentials `Phi` and the weights used for computation.
#'
#' @examples
#' set.seed(1)
#' y <- rnorm(40)
#' m <- 36
#' h <- 1
#' v <- 1
#' algorithm1 <- function(y) {
#' forecast::Arima(y, order = c(1, 0, 0))
#' }
#' algorithm2 <- function(y) {
#' forecast::Arima(y, order = c(2, 0, 0))
#' }
#' testL(y, algorithm1, algorithm2, m = m, h = h, v = v)
#'
#' @export
testL <- function(y, algorithm1, algorithm2, m, h = 1, v = 1, xreg = NULL, lossFunction = function(y, yhat) {(y - yhat)^2}, method = "optimal", test = "Diebold-Mariano", Ha = "!=0", Phi = NULL, bw = NULL, groups = 2, rhoLimit = 0.99, ...) {
if (is.null(Phi)) {
Phi1 <- tsACV(y, algorithm1, m, h, v, xreg, lossFunction, ...)
Phi2 <- tsACV(y, algorithm2, m, h, v, xreg, lossFunction, ...)
Phi <- Phi1 - Phi2
}
temp <- infoPhi(Phi)
K <- temp$K
mn <- temp$mn
m <- temp$m
v <- temp$v
h <- temp$h
mh <- temp$mh
J <- temp$J
if (v != h) {
stop("Currently, only predictive ability testing with h = v is supported.")
}
switch(test,
"Diebold-Mariano" = {
output <- estimateL(Phi = Phi, method = method, bw = bw, rhoLimit = rhoLimit)
estimate <- output$estimate
tval <- output$estimate / sqrt(output$var)
pval <- switch(Ha,
"<0" = stats::pnorm(tval),
"!=0" = (1 - stats::pnorm(abs(tval))) * 2,
">0" = 1 - stats::pnorm(tval)
)
},
"Ibragimov-Muller" = {
groupSize <- ceiling((K - 1) / groups)
estimates <- sapply(1:groups, function(g) {
colIndices <- 1:(groupSize + 1) + (g - 1) * groupSize
rowIndices <- 1:(m + v * (groupSize)) + (g - 1) * groupSize
PhiSubset <- Phi[rowIndices[rowIndices <= nrow(Phi)], colIndices[colIndices <= ncol(Phi)]]
output <- estimateL(Phi = PhiSubset, method = method, bw = bw, rhoLimit = rhoLimit)
return(output$estimate)
})
estimate <- mean(estimates)
tval <- estimate / sqrt((1 / (groups)) * (1 / (groups - 1)) * sum((estimates - estimate)^2))
pval <- switch(Ha,
"<0" = stats::pt(tval, df = groups - 1),
"!=0" = (1 - stats::pt(abs(tval), df = groups - 1)) * 2,
">0" = 1 - stats::pt(tval, df = groups - 1)
)
}
)
output <- list(
estimate = estimate,
tval = tval,
pval = pval,
Ha = Ha,
test = test,
Phi = Phi
)
class(output) <- "testL"
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ACV/R/testL.R |
#' Perform time-series cross-validation
#'
#' Function `tsACV()` computes contrasts between forecasts produced by a given algorithm and the original time-series on which the algorithm is trained.
#' This can then be used to estimate the loss of the algorithm.
#' Unlike the similar `tsCV()` function from the `'forecast'` package, `tsACV()` also records in-sample contrasts as these can be leveraged to produce more accurate out-of-sample loss estimates.
#'
#' @param y Univariate time-series object.
#' @param algorithm Algorithm which is to be applied to the time-series. The object which the algorithm produces should respond to `fitted` and `forecast` methods.
#' Alternatively in the case of more complex custom algorithms, the algorithm may be a function which takes named arguments `("yInSample", "yOutSample", "h")` or `("yInSample", "yOutSample", "h", "xregInSample", "xregOutSample")` as inputs and produces a list with named elements `("yhatInSample", "yhatOutSample")` containing vectors of in-sample and out-of-sample forecasts.
#' @param m Length of the window on which the algorithm should be trained.
#' @param h Number of predictions made after a single training of the algorithm.
#' @param v Number of periods by which the estimation window progresses forward once the predictions are generated.
#' @param xreg Matrix of exogenous regressors supplied to the algorithm (if applicable).
#' @param lossFunction Loss function used to compute contrasts (defaults to squared error).
#' @param ... Other parameters passed to the algorithm.
#'
#' @return Matrix of computed contrasts `Phi`. Each row corresponds to a particular period of the `y` time-series and each column corresponds to a particular location of the training window.
#'
#' @examples
#' set.seed(1)
#' y <- rnorm(40)
#' m <- 36
#' h <- 1
#' v <- 1
#' tsACV(y, forecast::Arima, m = m, h = h, v = v)
#'
#' @export
tsACV <- function(y, algorithm, m, h = 1, v = 1, xreg = NULL, lossFunction = function(y, yhat) {(y - yhat)^2}, ...) {
mn <- length(y)
if (any(is.na(y))) {
stop("y should not contain missing values")
}
y <- stats::ts(c(y, y[rep(1, h)]), start = stats::start(y), frequency = stats::frequency(y))
if (!is.null(xreg)) {
if (!ifelse(is.matrix(xreg), (nrow(xreg) == mn), F)) {
stop("xreg must be a matrix with the same number of rows as length(y)")
}
xreg <- stats::ts(rbind(xreg, xreg[rep(1, h), ]), start = stats::start(xreg), frequency = stats::frequency(xreg))
}
if (((mn - m) <= 0) | ((mn - m) %% v != 0)) {
stop("number of periods left for evaluation must be greater than 0 and divisble by h")
}
I <- seq(0, (mn - m), by = v)
yhat <- stats::ts(matrix(NA_real_, nrow = mn, ncol = length(I)))
times <- stats::time(y)
for (index in seq_along(I)) {
i <- I[index]
yInSample <- stats::window(y, times[1 + i], times[m + i])
yOutSample <- stats::window(y, times[1 + m + i], times[h + m + i])
if (is.null(xreg)) {
if (all(c("yInSample", "yOutSample", "h") %in% methods::formalArgs(algorithm))) {
model <- algorithm(yInSample = yInSample, yOutSample = yOutSample, h = h, ...)
yhatInSample <- model$yhatInSample
yhatOutSample <- model$yhatOutSample
} else {
model <- algorithm(yInSample, ...)
yhatOutSample <- forecast::forecast(model, h = h)$mean
yhatInSample <- stats::fitted(model)
}
} else {
xregInSample <- stats::window(xreg, times[1 + i], times[m + i])
xregOutSample <- stats::window(xreg, times[1 + m + i], times[h + m + i])
if (all(c("yInSample", "yOutSample", "h", "xregInSample", "xregOutSample") %in% methods::formalArgs(algorithm))) {
model <- algorithm(yInSample = yInSample, yOutSample = yOutSample, h = h, xregInSample = xregInSample, xregOutSample = xregOutSample, ...)
yhatInSample <- model$yhatInSample
yhatOutSample <- model$yhatOutSample
} else {
model <- algorithm(yInSample, xreg = xregInSample, ...)
yhatOutSample <- forecast::forecast(model, xreg = xregOutSample, h = h)$mean
yhatInSample <- stats::fitted(model)
}
}
timeindices <- (1:(m + h) + i)
yhat[timeindices[timeindices <= mn], index] <- c(yhatInSample, yhatOutSample)[timeindices <= mn]
}
output <- apply(yhat, 2, function(x) lossFunction(y[seq_len(mn)], x))
colnames(output) <- paste("i=", I, sep = "")
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ACV/R/tsACV.R |
#' Acute Chronic Workload Ratio
#'
#' @param db a data frame
#' @param ID ID of the subjects
#' @param TL training load
#' @param weeks training weeks
#' @param days training days
#' @param training_dates training dates
#' @param ACWR_method method to calculate ACWR
#'
#' @return a data frame with the acute & chronic training load and ACWR calculated
#' with the selected method/s appended as new columns of the data frame
#' @export
#'
#' @examples
#'
#' \dontrun{
#' # Get old working directory
#' oldwd <- getwd()
#'
#' # Set temporary directory
#' setwd(tempdir())
#'
#' # Read dfs
#' data("training_load", package = "ACWR")
#'
#' # Convert to data.frame
#' training_load <- data.frame(training_load)
#'
#' # Calculate ACWR
#' result_ACWR <- ACWR(db = training_load,
#' ID = "ID",
#' TL = "TL",
#' weeks = "Week",
#' days = "Day",
#' training_dates = "Training_Date",
#' ACWR_method = c("EWMA", "RAC", "RAU"))
#'
#' # set user working directory
#' setwd(oldwd)
#' }
#'
ACWR <- function(db,
ID,
TL,
weeks,
days,
training_dates,
ACWR_method = c("EWMA", "RAC", "RAU")) {
## Checks
# NULL
if(is.null(db)){
stop("you must provide a dataframe")
}
if(is.null(ID)){
stop("you must provide the column name of ID variable")
}
if(is.null(TL)){
stop("you must provide the column name of training load variable")
}
if(is.null(weeks)){
stop("you must provide the column name of week variable")
}
if(is.null(days)){
stop("you must provide the column name of day variable")
}
if(is.null(training_dates)){
stop("you must provide the column name of training dates variable")
}
# Loop over the subjects
for (i in unique(db[[ID]])) {
# Create individual dfs
db_ind <- db[db[[ID]] == i, c(ID, TL, weeks, days, training_dates) ]
# Loop over the methods
for (j in ACWR_method) {
# EWMA method
if(j == "EWMA") {
res_EWMA <- EWMA(TL = db_ind[[TL]])
db_ind$EWMA_chronic <- res_EWMA$EWMA_chronic
db_ind$EWMA_acute <- res_EWMA$EWMA_acute
db_ind$EWMA_ACWR <- res_EWMA$EWMA_ACWR
}
# Rolling Average Coupled
if(j == "RAC") {
res_RAC <- RAC(TL = db_ind[[TL]],
weeks = db_ind[[weeks]],
training_dates = db_ind[[training_dates]])
db_ind$RAC_acute = res_RAC$RAC_acute
db_ind$RAC_chronic = res_RAC$RAC_chronic
db_ind$RAC_ACWR = res_RAC$RAC_ACWR
}
# Rolling Average Uncoupled
if(j == "RAU") {
res_RAU <- RAU(TL = db_ind[[TL]],
weeks = db_ind[[weeks]],
training_dates = db_ind[[training_dates]])
db_ind$RAU_acute = res_RAU$RAU_acute
db_ind$RAU_chronic = res_RAU$RAU_chronic
db_ind$RAU_ACWR = res_RAU$RAU_ACWR
}
} # end loop over methods
if(i == 1){
db_final <- db_ind
} else {
db_final <- rbind(db_final, db_ind)
}
} # end loop over individuals
return(db_final)
} # end ACWR function
| /scratch/gouwar.j/cran-all/cranData/ACWR/R/ACWR.R |
#' Exponentially Weighted Moving Average
#'
#' @param TL training load
#'
#' @return {This function returns the following variables:
#' \itemize{
#' \item EWMA_chronic: EWMA - chronic training load.
#' \item EWMA_acute: EWMA - acute training load.
#' \item EWMA_ACWR: EWMA - Acute-Chronic Workload Ratio.
#' }}
#'
#' @export
#'
#' @examples
#'
#' \dontrun{
#' # Get old working directory
#' oldwd <- getwd()
#'
#' # Set temporary directory
#' setwd(tempdir())
#'
#' # Read db
#' data("training_load", package = "ACWR")
#'
#' # Convert to data.frame
#' training_load <- data.frame(training_load)
#'
#' # Select the first subject
#' training_load_1 <- training_load[training_load[["ID"]] == 1, ]
#'
#' # Calculate ACWR
#' result_EWMA <- EWMA(TL = training_load_1$TL)
#'
#' # set user working directory
#' setwd(oldwd)
#' }
#'
EWMA <- function(TL) {
# lambda <- 2/(N + 1)
# Initialize variables
EWMA_chronic <- c()
EWMA_acute <- c()
EWMA_ACWR <- c()
lambda_acute <- 2/(7+1)
lambda_chronic <- 2/(28+1)
# Loop over the TL
for (i in seq_along(TL)) {
# First training day: EWMA_chronic = TL / EWMA_acute = TL
if(i == 1){
EWMA_chronic[i] = TL[i]
EWMA_acute[i] = TL[i]
}
if(i > 1){
EWMA_chronic[i] = TL[i] * lambda_chronic + ((1- lambda_chronic)* EWMA_chronic[i-1])
EWMA_acute[i] = TL[i] * lambda_acute + ((1- lambda_acute)* EWMA_acute[i-1])
}
EWMA_ACWR <- EWMA_acute / EWMA_chronic
}
return(list(EWMA_acute = EWMA_acute,
EWMA_chronic = EWMA_chronic,
EWMA_ACWR = EWMA_ACWR))
} # end EWMA
| /scratch/gouwar.j/cran-all/cranData/ACWR/R/EWMA.R |
#' Rolling Average Coupled
#'
#' @param TL training load
#' @param weeks training weeks
#' @param training_dates training dates
#'
#' @return {This function returns the following variables:
#' \itemize{
#' \item RAC_chronic: RAC - chronic training load.
#' \item RAC_acute: RAC - acute training load.
#' \item RAC_ACWR: RAC - Acute-Chronic Workload Ratio.
#' }}
#' @export
#'
#' @examples
#'
#' \dontrun{
#' # Get old working directory
#' oldwd <- getwd()
#'
#' # Set temporary directory
#' setwd(tempdir())
#'
#' # Read db
#' data("training_load", package = "ACWR")
#'
#' # Convert to data.frame
#' training_load <- data.frame(training_load)
#'
#' # Select the first subject
#' training_load_1 <- training_load[training_load[["ID"]] == 1, ]
#'
#' # Calculate ACWR
#' result_RAC <- RAC(TL = training_load_1$TL,
#' weeks = training_load_1$Week,
#' training_dates = training_load_1$Training_Date)
#'
#' # set user working directory
#' setwd(oldwd)
#' }
#'
RAC <- function(TL,
weeks,
training_dates) {
# Count number of sessions / week
sessions_week <- as.data.frame(table(weeks))
# Initialize variables
RAC_chronic <- c()
RAC_acute <- c()
RAC_ACWR <- c()
# Initialize number of training sessions
n_sessions_total <- 0
# Loop over the total days of training
for (i in unique(weeks)) {
# First training week: RAC_chronic = RAC_acute
if(i == 1){
# loop over number of sessions / week
for (j in 1:sessions_week$Freq[unique(weeks)[i]]) {
# First training day: RAC_chronic = TL / RAC_acute = TL
if(j == 1){
# Count number of training sessions
n_sessions_total <- n_sessions_total + 1
RAC_chronic[n_sessions_total] = TL[n_sessions_total]
RAC_acute[n_sessions_total] = TL[n_sessions_total]
}
# Rest of the week
else if(j >= 2){
# Count number of training sessions
n_sessions_total <- n_sessions_total + 1
RAC_chronic[n_sessions_total] = (sum(TL[1:n_sessions_total]))/n_sessions_total
RAC_acute[n_sessions_total] = (sum(TL[1:n_sessions_total]))/n_sessions_total
}
}
} # end first week
# from second week to end of first month
else if(i >= 2 && i < 5){
# loop over number of sessions / week
for (j in 1:sessions_week$Freq[unique(weeks)[i]]) {
# Count number of training sessions
n_sessions_total <- n_sessions_total + 1
RAC_chronic[n_sessions_total] = (sum(TL[1:n_sessions_total]))/n_sessions_total
# RAC acute each 7 CALENDAR days
# Calculate 7 days training blocks
# Returns:
# n_sessions_acute = Number of training sessions include in the acute block
# previous_TL_acute = Position of the first session of the acute training block
acute_TB <- training_blocks(training_dates = training_dates,
actual_TL = n_sessions_total,
diff_dates = 6)
RAC_acute[n_sessions_total] = (sum(TL[acute_TB$previous_TL:n_sessions_total]))/acute_TB$n_sessions
}
} # end first month
# from second moth to end of data
else if(i >= 5){
# loop over number of sessions / week
for (j in 1:sessions_week$Freq[unique(weeks)[i]]) {
# Count number of training sessions
n_sessions_total <- n_sessions_total + 1
# RAC chronic each 28 CALENDAR days
# Calculate 28 days training blocks
chronic_TB <- training_blocks(training_dates = training_dates,
actual_TL = n_sessions_total,
diff_dates = 27)
RAC_chronic[n_sessions_total] = (sum(TL[chronic_TB$previous_TL:n_sessions_total]))/chronic_TB$n_sessions
# RAC acute each 7 CALENDAR days
# Calculate 7 days training blocks
acute_TB <- training_blocks(training_dates = training_dates,
actual_TL = n_sessions_total,
diff_dates = 6)
RAC_acute[n_sessions_total] = (sum(TL[acute_TB$previous_TL:n_sessions_total]))/acute_TB$n_sessions
}
} # end >= second moth
} # end of loop over the data
# Calculate ACWR
RAC_ACWR <- RAC_acute / RAC_chronic
return(list(RAC_acute = round(RAC_acute, 2),
RAC_chronic = round(RAC_chronic, 2),
RAC_ACWR = round(RAC_ACWR, 2)))
}# end RAC function
| /scratch/gouwar.j/cran-all/cranData/ACWR/R/RAC.R |
#' Rolling Average Uncoupled
#'
#' @param TL training load
#' @param weeks training weeks
#' @param training_dates training dates
#'
#' @return {This function returns the following variables:
#' \itemize{
#' \item RAU_chronic: RAU - chronic training load.
#' \item RAU_acute: RAU - acute training load.
#' \item RAU_ACWR: RAU - Acute-Chronic Workload Ratio.
#' }}
#' @export
#'
#' @examples
#'
#' \dontrun{
#' # Get old working directory
#' oldwd <- getwd()
#'
#' # Set temporary directory
#' setwd(tempdir())
#'
#' # Read db
#' data("training_load", package = "ACWR")
#'
#' # Convert to data.frame
#' training_load <- data.frame(training_load)
#'
#' # Select the first subject
#' training_load_1 <- training_load[training_load[["ID"]] == 1, ]
#'
#' # Calculate ACWR
#' result_RAU <- RAU(TL = training_load_1$TL,
#' weeks = training_load_1$Week,
#' training_dates = training_load_1$Training_Date)
#'
#' # set user working directory
#' setwd(oldwd)
#' }
#'
RAU <- function (TL,
weeks,
training_dates) {
# Count number of sessions / week
sessions_week <- as.data.frame(table(weeks))
# Initialize variables
RAU_chronic <- c()
RAU_acute <- c()
RAU_ACWR <- c()
# Initialize number of training sessions
n_sessions_total <- 0
# We also need a new counter for the number of training sessions
n_sessions_chronic <- 1
# Loop over the total days of training
for (i in unique(weeks)) {
# First training week: RAC_chronic = NA / RAC_acute = Training load
if(i == 1){
# loop over number of sessions / week
for (j in 1:sessions_week$Freq[unique(weeks)[i]]) {
# First training day: RAC_chronic = TL / RAC_acute = TL
if(j == 1){
# Count number of training sessions
n_sessions_total <- n_sessions_total + 1
# RAU_chronic[n_sessions_total] = TL[n_sessions_total]
RAU_chronic[n_sessions_total] = NA
RAU_acute[n_sessions_total] = TL[n_sessions_total]
}
# Rest of the week
else if(j >= 2){
# Count number of training sessions
n_sessions_total <- n_sessions_total + 1
# RAU_chronic[n_sessions_total] = (sum(TL[1:n_sessions_total]))/n_sessions_total
RAU_chronic[n_sessions_total] = NA
RAU_acute[n_sessions_total] = (sum(TL[1:n_sessions_total]))/n_sessions_total
}
# During first week of RAU ACWR = 0
RAU_ACWR[n_sessions_total] <- NA
}
} # end first week
# from the beginning of the second week to end of third week
else if(i >= 2 && i < 5){
# loop over number of sessions / week
for (j in 1:sessions_week$Freq[unique(weeks)[i]]) {
# Count number of training sessions
n_sessions_total <- n_sessions_total + 1
# RAU acute each 7 CALENDAR days
# Calculate 7 days training blocks
# Returns:
# n_sessions = Number of training sessions include in the acute block
# previous_TL = Position of the first session of the acute training block
acute_TB <- training_blocks(training_dates = training_dates,
actual_TL = n_sessions_total,
diff_dates = 6)
RAU_acute[n_sessions_total] = (sum(TL[acute_TB$previous_TL:n_sessions_total]))/acute_TB$n_sessions
# RAU chronic
# (acute_TB$previous_TL-1) indicates the position of the first session
# We are going to reuse this value to indicate the first value of the RAU chronic block
RAU_chronic[n_sessions_total] = (sum(TL[(acute_TB$previous_TL-1):1]))/n_sessions_chronic
n_sessions_chronic <- n_sessions_chronic + 1
}
} # end first 3 weeks
# from fourth week to end of data
else if(i >= 5){
# loop over number of sessions / week
for (j in 1:sessions_week$Freq[unique(weeks)[i]]) {
# Count number of training sessions
n_sessions_total <- n_sessions_total + 1
# RAU acute each 7 CALENDAR days
# Calculate 7 days training blocks
# Returns:
# n_sessions = Number of training sessions include in the acute block
# previous_TL = Position of the first session of the acute training block
acute_TB <- training_blocks(training_dates = training_dates,
actual_TL = n_sessions_total,
diff_dates = 6)
RAU_acute[n_sessions_total] = (sum(TL[acute_TB$previous_TL:n_sessions_total]))/acute_TB$n_sessions
# RAU chronic
#
chronic_TB <- training_blocks(training_dates = training_dates,
actual_TL = n_sessions_total,
diff_dates = 20)
# Number of sessions include in the chronic training block =
# Number of sessions in chronic - number of sessions in acute
RAU_chronic[n_sessions_total] = (sum(TL[acute_TB$previous_TL:chronic_TB$previous_TL]))/chronic_TB$n_session
}
}
} # end loop
# Calculate ACWR
RAU_ACWR <- RAU_acute / RAU_chronic
return(list(RAU_acute = round(RAU_acute, 2),
RAU_chronic = round(RAU_chronic, 2),
RAU_ACWR = round(RAU_ACWR, 2)))
} # end RAU function
| /scratch/gouwar.j/cran-all/cranData/ACWR/R/RAU.R |
#' ACWR plots using d3.js
#'
#' @param db a data frame
#' @param TL training load
#' @param ACWR Acute Chronic Workload Ratio
#' @param day training days
#' @param ID ID of the subjects
#' @param colour colour of the bars. By default "#87CEEB" (skyblue)
#' @param xLabel x-axis label. By default "Days"
#' @param y0Label left y-axis label. By default "Load [AU]"
#' @param y1Label right y-axis label. By default "Acute:chronic worload ratio"
#' @param plotTitle Title of the plot. By default "ACWR"
#'
#' @import r2d3
#'
#' @return This function returns a d3.js object for a single subject.
#' For several subjects it returns a list of d3.js objects.
#' @export
#'
#' @examples
#'
#' \dontrun{
#' # Get old working directory
#' oldwd <- getwd()
#'
#' # Set temporary directory
#' setwd(tempdir())
#'
#' # Read db
#' data("training_load", package = "ACWR")
#'
#' # Convert to data.frame
#' training_load_db <- data.frame(training_load)
#'
#' # Calculate ACWR
#' result_ACWR <- ACWR(db = training_load_db,
#' ID = "ID",
#' TL = "TL",
#' weeks = "Week",
#' days = "Day",
#' training_dates = "Training_Date",
#' ACWR_method = c("EWMA", "RAC", "RAU"))
#'
#' # Plot for 1 subject
#' # Select the first subject
#' result_ACWR_1 <- result_ACWR[result_ACWR[["ID"]] == 1, ]
#'
#' # plot ACWR (e.g. EWMA)
#' ACWR_plot_1 <- plot_ACWR(db = result_ACWR_1,
#' TL = "TL",
#' ACWR = "EWMA_ACWR",
#' day = "Day")
#'
#' # Plot for several subjects
#' # plot ACWR (e.g. RAC)
#' ACWR_plot <- plot_ACWR(db = result_ACWR,
#' TL = "TL",
#' ACWR = "RAC_ACWR",
#' day = "Day",
#' ID = "ID")
#'
#' # set user working directory
#' setwd(oldwd)
#' }
#'
plot_ACWR <- function(db,
TL,
ACWR,
day,
ID = NULL,
colour = NULL,
xLabel = NULL,
y0Label = NULL,
y1Label = NULL,
plotTitle = NULL) {
# Check variables
if(is.null(db)){
stop("you must provide a db")
}
if(is.null(TL)){
stop("you must provide the name of the training load column in the database")
}
if(is.null(ACWR)){
stop("you must provide the name of the ACWR column in the database")
}
if(is.null(day)){
stop("you must provide the name of the day training column in the database")
}
if(is.null(colour)){
colour <- "#87CEEB"
}
if(is.null(xLabel)){
xLabel <- "Days"
}
if(is.null(y0Label)){
y0Label <- "Load [AU]"
}
if(is.null(y1Label)){
y1Label <- "Acute:chronic worload ratio"
}
if(is.null(plotTitle)){
plotTitle <- "ACWR"
}
# Rename db columns
# TL
names(db)[names(db) == TL] <- "TL"
# ACWR
names(db)[names(db) == ACWR] <- "ACWR"
# day
names(db)[names(db) == day] <- "day"
# Single plot
if(is.null(ID)){
# Day / TL / EWMA_ACWR
d3_ACWR <- r2d3(data = db,
script = system.file("ACWR_plot.js", package = "ACWR"),
options = list(margin = 50,
#barPadding = 0.1,
colour = colour,
xLabel = xLabel,
y0Label = y0Label,
y1Label = y1Label,
plotTitle = plotTitle
),
)
# return d3 object
return(d3_ACWR)
} # end if is.null(ID)
# Multiple plots
if(!is.null(ID)){
# Create an empty list to store the plots
d3_ACWR_list <- list()
# Loop over the subjects
for (i in unique(db[[ID]])) {
# Create individual dfs
db_ind <- db[db[[ID]] == i, c("TL", "ACWR", "day") ]
# Individual plots
d3_ACWR <- r2d3(data = db_ind,
script = system.file("ACWR_plot.js", package = "ACWR"),
options = list(margin = 50,
#barPadding = 0.1,
colour = colour,
xLabel = xLabel,
y0Label = y0Label,
y1Label = y1Label,
plotTitle = plotTitle
),
)
# Store the plots inside the list
d3_ACWR_list[[paste0("ID = ", i)]] <- d3_ACWR
}
# return a list
return(d3_ACWR_list)
} # end if !is.null(ID)
}
| /scratch/gouwar.j/cran-all/cranData/ACWR/R/plot_ACWR.R |
#' @title Training load dataframe
#'
#' @description A dataframe with the training load of 3 subjects.
#'
#' @docType data
#'
#' @usage data("training_load", package = "ACWR")
#'
#' @section Variables:
#' \describe{
#' \item{ID}{ID of the subjects}
#' \item{Week}{training weeks}
#' \item{Day}{training days}
#' \item{TL}{training load (arbitrary units)}
#' \item{Training_Date}{training dates}
#' }
"training_load"
| /scratch/gouwar.j/cran-all/cranData/ACWR/R/training_load.R |
#' Create Training Blocks
#'
#' @param training_dates training dates
#' @param actual_TL position of the actual training load
#' @param diff_dates difference in days
#'
#'
training_blocks <- function(training_dates,
actual_TL,
diff_dates
){
# Initialize variables
n_sessions <- 0
# Create blocks BACKWARDS
# loop over the training days backwards, starting from actual training day - 1
for (previous_TL in rev(1:actual_TL-1)) {
# Count total number of sessions include in a acute training block
n_sessions <- n_sessions + 1
# Calculate the difference in days between 2 dates (as integer)
diff_dates_calculated <- as.integer(training_dates[actual_TL] - training_dates[previous_TL])
# If the difference between 2 dates are X days or more more then stop the loop
if(diff_dates_calculated >= diff_dates) {
break
}
} # end loop
# Number of training sessions include in the training block
return(list(n_sessions = n_sessions +1,
# Position of the first session of the training block
previous_TL = previous_TL)
)
} # end training blocks
| /scratch/gouwar.j/cran-all/cranData/ACWR/R/utils.R |
# Ugly workaround to make foreach pass CRAN syntax check
#http://r.789695.n4.nabble.com/R-CMD-check-and-foreach-code-td4687660.html
globalVariables(c('fe_cType', 'fe_curGene'))
#' Use parallel missForest to impute missing values.
#' @description This wrapper is helpful because missForest crashes if you have more cores than variables.
#' This will default to no parallelization on Windows.
#'
#' newMatrix <- missForest.par(dataMat)
#'
#' @param dataMat Columns are features, Rows examples. The data with NA values. 'xmis' in missForest
#' @param parallelize split on 'forests' or 'variables' (DEFAULT: 'variables')
#'
#' @export
#' @return a matrix including imputed values
#' @examples
#' library(ADAPTS)
#' LM22 <- ADAPTS::LM22
#' LM22[2,3] <- as.numeric(NA) #Make some missing data to impute
#' LM22.imp <- missForest.par(LM22)
missForest.par <- function(dataMat, parallelize = "variables") {
if (.Platform$OS.type == 'windows') {
parallelize <- 'no'
fixCores <- FALSE
} else {
fixCores <- ncol(dataMat) < foreach::getDoParWorkers()
if (fixCores) {
oldCores <- foreach::getDoParWorkers()
if(ncol(dataMat) == 1) {
parallelize <- 'no'
} else {
options(mc.cores = ncol(dataMat))
options(cores = ncol(dataMat))
doParallel::registerDoParallel(cores = ncol(dataMat))
} #if(ncol(dataMat) == 1 ) {
} #if (fixCores) {
} #if (.Platform$OS.type == 'windows') {
newMatrix <- try(missForest::missForest(dataMat, parallelize = parallelize)$ximp)
if(inherits(x=newMatrix,'try-error')) {
message('missForest error')
}
if(fixCores) {
options(mc.cores = oldCores)
options(cores = oldCores)
doParallel::registerDoParallel(cores = oldCores);
}
return(newMatrix)
}
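# A minimal usage sketch (kept as a comment so nothing runs at package load). missForest.par()
# consults foreach::getDoParWorkers(), so registering a doParallel backend first is one way to
# enable parallel imputation; the core count of 4 below is purely illustrative.
#   library(doParallel)
#   doParallel::registerDoParallel(cores = 4)
#   LM22.imp <- missForest.par(ADAPTS::LM22)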
#' Make an Augmented Signature Matrix
#' @description With the ADAPTSdata package, it will use the full LM22 data matrix and add a few
#' additional genes to cover osteoblasts, osteoclasts, Plasma.memory, MM. In many ways this is
#' just a convenient wrapper for AugmentSigMatrix that calculates and caches a gList.
#'
#'
#' @param exprData The gene expression data to use to augment LM22, e.g. ADAPTSdata::addMGSM27
#' @param fullLM22 LM22 data with all genes. Available in ADAPTSdata2::fullLM22
#' @param smallLM22 The small LM22 matrix; if it includes new cell types in exprData those will not be overwritten (DEFAULT: NULL, i.e. buildLM22plus(useLM22genes = TRUE))
#' @param plotToPDF TRUE: pdf, FALSE: standard display (DEFAULT: TRUE)
#' @param condTol The tolerance in the reconstruction algorithm. 1.0 = no tolerance, 1.05 = 5\% tolerance (DEFAULT: 1.01)
#' @param postNorm Set to TRUE to normalize new signatures to match old signatures. To Do: Redo Kappa curve? (DEFAULT: TRUE)
#' @param autoDetectMin Set to TRUE to automatically detect the first local minimum. GOOD PRELIMINARY RESULTS (DEFAULT: FALSE)
#' @param pdfDir A folder to write the pdf file to if plotToPDF=TRUE (DEFAULT: tempdir())
#' @param oneCore Set to TRUE to disable parallelization (DEFAULT: FALSE)
#' @param cache_gList Set to TRUE to cache slow gList calculations (DEFAULT: TRUE)
#' @export
#' @return a cell type signature matrix
#' @examples
#' #This toy example treats the LM22 deconvolution matrix as if it were all of the data
#' # For a real example, look at the vignette or comments in exprData, fullLM22, small LM22
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:200, 1:8]
#' #Make a fake signature matrix out of 100 genes and the first 8 cell types
#' smallLM22 <- fullLM22[1:100, 1:8]
#'
#' #Make fake data representing two replicates of purified Mast.cells types
#' exprData <- ADAPTS::LM22[1:200, c("Mast.cells.resting","Mast.cells.activated")]
#' colnames(exprData) <- c("Mast.cells", "Mast.cells")
#' newSig <- remakeLM22p(exprData=exprData, fullLM22=fullLM22, smallLM22=smallLM22,
#' plotToPDF=FALSE, oneCore=TRUE, cache_gList=FALSE)
remakeLM22p <- function(exprData, fullLM22, smallLM22=NULL, plotToPDF=TRUE, condTol = 1.01, postNorm=TRUE, autoDetectMin = FALSE, pdfDir=tempdir(), oneCore=FALSE, cache_gList=TRUE) {
exprData <- as.data.frame(exprData)
if (is.null(smallLM22)) {
smallLM22 <- ADAPTS::LM22
}
  #Combine the additional MM data and the full LM22 dataset
colnames(exprData) <- sub('.[0-9]+$', '', colnames(exprData))
cNames <- c(colnames(fullLM22), colnames(exprData))
  #Problem 04-19-17 - If I just use the original LM22 data, there are too many NAs; I end up adding 950 genes
  # and 913 of them have NA values in the original data.  I will artificially limit the dataset to genes that I
# have data for
##rNames <- unique(c(rownames(fullLM22), rownames(exprData)))
rNames.1 <- rownames(fullLM22)[apply(fullLM22, 1, function(x) {mean(is.na(x)) < 0.25})]
rNames.2 <- rownames(exprData)[apply(exprData, 1, function(x) {mean(is.na(x)) < 0.25})]
rNames <- rNames.1[rNames.1 %in% rNames.2]
geneExpr <- cbind(fullLM22[rNames,], as.data.frame(exprData)[rNames,])
colnames(geneExpr) <- sub('.[0-9]+$', '', colnames(geneExpr))
fName <- paste('gList', paste(rev(unique(colnames(geneExpr))), collapse="_"), 'RData', sep='.')
if(nchar(fName) > 240) { print('Truncating name list. File name may not be unique') }
  fName <- paste0(strtrim(fName, 240),'.RData') #Avoid overly long file names, but this introduces a possible bug where two specs can generate the same file
fName <- file.path(tempdir(), fName)
if(file.exists(fName) & cache_gList == TRUE) {
gList <- get(load(fName)[1])
} else {
gList <- rankByT(geneExpr = geneExpr, qCut=0.3, oneCore=oneCore)
if(cache_gList == TRUE) { save(gList, file=fName, compress = TRUE) }
}
#Normalize the new expression data against the full data?
newMatData <- AugmentSigMatrix(origMatrix=smallLM22, fullData=fullLM22[rNames,], newData=exprData[rNames,], gList=gList, plotToPDF = plotToPDF, condTol = condTol, postNorm=postNorm, autoDetectMin=autoDetectMin, pdfDir=pdfDir)
return(newMatData=as.data.frame(newMatData))
}
#' Make an augmented signature matrix
#' @description Build an augmented signature matrix from an initial signature matrix, source data, and a list of
#' differentially expressed genes (gList). The user might want to modify gList to make certain that particular
#' genes are included in the matrix. The algorithm will add one additional gene from each new cell type at a time,
#' record the condition numbers, and plot them. It will only consider adding rows shared by fullData and newData.
#'
#' newMatData <- AugmentSigMatrix(origMatrix, fullData, newData, gList)
#'
#' @param origMatrix The original signature matrix
#' @param fullData The full data for the signature matrix
#' @param newData The new data to add signatures from
#' @param gList The ordered list of genes from running rankByT() on newData. NOTE: best genes at the bottom!!
#' @param nGenes The number of additional genes to consider (DEFAULT: 1:100)
#' @param plotToPDF Plot the output condition numbers to a pdf file. (DEFAULT: TRUE)
#' @param imputeMissing Set to TRUE to impute missing values. NOTE: adds stochasticity (DEFAULT: TRUE)
#' @param condTol Setting higher tolerances will result in smaller numbers of extra genes. 1.00 minimizes the condition number (DEFAULT: 1.00)
#' @param postNorm Set to TRUE to normalize new signatures to match old signatures. (DEFAULT: FALSE)
#' @param minSumToRem Set to non-NA to remove any row with the sum(abs(row)) < minSumToRem (DEFAULT: NA)
#' @param addTitle An optional string to add to the plot and savefile (DEFAULT: NULL)
#' @param autoDetectMin Set to TRUE to automatically detect the first local minimum. GOOD PRELIMINARY RESULTS (DEFAULT: FALSE)
#' @param calcSpillOver Use the training data to calculate a spillover matrix (DEFAULT: FALSE)
#' @param pdfDir A folder to write the pdf file to if plotToPDF=TRUE (DEFAULT: tempdir())
#' @param plotIt Set to FALSE to suppress non-PDF plotting (DEFAULT: TRUE)
#' @export
#' @return an augmented cell type signature matrix
#' @examples
#' #This toy example treats the LM22 deconvolution matrix as if it were all of the data
#' # For a real example, look at the vignette or comments in exprData, fullLM22, small LM22
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:200, 1:8]
#' #Make a fake signature matrix out of 100 genes and the first 8 cell types
#' smallLM22 <- fullLM22[1:100, 1:8]
#'
#' #Make fake data representing two replicates of purified Mast.cells
#' exprData <- ADAPTS::LM22[1:200, c("Mast.cells.resting","Mast.cells.activated")]
#' colnames(exprData) <- c("Mast.cells", "Mast.cells")
#'
#' #Fake source data with replicates for all purified cell types.
#' # Note in this fake data set, many cell types have exactly one replicate
#' fakeAllData <- cbind(fullLM22, as.data.frame(exprData))
#' gList <- rankByT(geneExpr = fakeAllData, qCut=0.3, oneCore=TRUE)
#'
#' newSig <- AugmentSigMatrix(origMatrix=smallLM22, fullData=fullLM22, newData=exprData,
#' gList=gList, plotToPDF=FALSE)
AugmentSigMatrix <- function(origMatrix, fullData, newData, gList, nGenes=1:100, plotToPDF=TRUE, imputeMissing=TRUE, condTol=1.01, postNorm=FALSE, minSumToRem=NA, addTitle=NULL, autoDetectMin=FALSE, calcSpillOver=FALSE, pdfDir=tempdir(), plotIt=TRUE) {
origMatrix <- as.data.frame(origMatrix)
if(autoDetectMin == TRUE) {
if(!is.null(addTitle)) {
addTitle <- paste('Auto', addTitle, sep='.')
} else {
addTitle <- 'Auto'
}
} #if(autoDetectMin == TRUE) {
if(any(!(colnames(origMatrix) %in% c(colnames(fullData), colnames(newData))))) {
missingData <- colnames(origMatrix)[!(colnames(origMatrix) %in% c(colnames(fullData), colnames(newData)))]
print(paste(missingData, 'in origMatrix but not fullData or newData'))
return(NULL)
}
#Make sure that all columns in the newData already exist in the origMatrix
missingCols <- unique(colnames(newData)[!(colnames(newData) %in% colnames(origMatrix))])
if(length(missingCols) > 0) {
augData <- matrix(as.numeric(NA), ncol=length(missingCols), nrow=nrow(origMatrix),
dimnames=list(rownames(origMatrix), missingCols))
olGenes <- rownames(origMatrix)[rownames(origMatrix) %in% rownames(newData)]
for(missingCol in missingCols) {
newExp <- apply(newData[olGenes, colnames(newData) == missingCol, drop=FALSE],1, stats::median, na.rm=TRUE)
augData[names(newExp),missingCol] <- newExp
} #for(missingCol in missingCols) {
origMatrix <- cbind(origMatrix, augData)
}
unAug <- origMatrix[colnames(origMatrix)[colnames(origMatrix) %in% colnames(fullData)]]
cNums.orig <- kappa(as.matrix(unAug))
#Baseline condition number.
newCtypes <- unique(colnames(newData))
if(any(!(newCtypes %in% names(gList)))) {
    outStr <- paste(paste(newCtypes[!(newCtypes %in% names(gList))], collapse=', '), 'missing')
print(outStr)
return(NULL)
}
gList <- gList[newCtypes]
allGenes <- rownames(fullData)[rownames(fullData) %in% rownames(newData)]
fullData <- fullData[allGenes,]
newData <- newData[allGenes,]
if(any(is.na(origMatrix))) {
if(imputeMissing == TRUE) {
origMatrix.imp <- t(missForest.par(t(origMatrix)))
} else {
remBool <- apply(origMatrix, 1, function(x){any(is.na(x))})
origMatrix.imp <- origMatrix[!remBool,]
}
} else {
origMatrix.imp <- origMatrix
}
cNums.new <- kappa(as.matrix(origMatrix.imp))
selGenes <- list()
newMatrix <- origMatrix
for(gNum in nGenes) {
newGenes <- NULL
for (cType in newCtypes) {
gNames <- rev(rownames(gList[[cType]]))
#New matrix will be iteratively built and code will make sure that new genes are not in matrix
newGenes <- c(newGenes, gNames[which(!(gNames %in% rownames(newMatrix)))[1]])
} #for (cType in newCtypes) {
newGenes <- unique(newGenes)
if(all(is.na(newGenes))) { next; }
newGenes <- newGenes[!is.na(newGenes)]
augData.new <- cbind(fullData[newGenes,,drop=FALSE], newData[newGenes,,drop=FALSE])
augData <- apply(augData.new, 1, function(x) {
tapply(x, colnames(augData.new), stats::median, na.rm=TRUE)
})
augData <- t(augData)
newMatrix <- rbind(newMatrix, augData[, colnames(newMatrix), drop=FALSE])
selGenes[[as.character(gNum)]] <- newGenes
} #for(gNum in nGenes) {
#Impute the full matrix and back-calculate the kappa
if(any(is.na(newMatrix))) {
if(imputeMissing == TRUE) {
impMatrix <- t(missForest.par(t(newMatrix)))
} else {
remBool <- apply(newMatrix, 1, function(x){any(is.na(x))})
impMatrix <- newMatrix[!remBool,]
}
} else {
impMatrix <- newMatrix
}
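  # Cap imputed values at the observed range of the original matrix so imputation cannot
  # introduce expression values more extreme than anything seen in origMatrix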
impMatrix[impMatrix > max(origMatrix, na.rm=TRUE)] <- max(origMatrix, na.rm=TRUE)
impMatrix[impMatrix < min(origMatrix, na.rm=TRUE)] <- min(origMatrix, na.rm=TRUE)
cNums <- cNums.new
nGenes <- nrow(origMatrix)
for (i in 1:length(selGenes)) {
newGenes <- unlist(selGenes[1:i])
newGenes <- newGenes[!is.na(newGenes)]
curMatGenes <- c(rownames(origMatrix), newGenes)
curMat <- impMatrix[curMatGenes[curMatGenes %in% rownames(impMatrix)],]
cNums <- c(cNums, kappa(as.matrix(curMat)))
nGenes <- c(nGenes, nrow(curMat))
}
#Plot the results
smData <- stats::smooth(cNums)
#smData <- ges(cNums)
#Modification: use standard smoothing and then pick point with smallest # features that is no more than 1% higher
if(autoDetectMin == TRUE) {
smData2 <- stats::predict(stats::smooth.spline(smData))$y
mins <- quantmod::findPeaks(-smData2)
bestMin <- mins[1]
mVal <- smData[bestMin]
if(is.na(mVal)) {
message('autoDetectMin failed, reverting to absolute min')
}
}
if(autoDetectMin == FALSE || is.na(mVal)) {
#mVal will be NA if autoDetectMin fails.
mVal <- min(smData)
bestMin <- which(smData == mVal)[1]
}
smallMin <- which(smData <= condTol*mVal)[1]
#newGenes <- unlist(selGenes[1:which.min(smData)])
#newGenes <- unique(unlist(selGenes[1:smallMin]))
newGenes <- unique(unlist(selGenes[1:(smallMin-1)]))
if(imputeMissing == TRUE) {
sigMatrix <- impMatrix[rownames(impMatrix) %in% c(rownames(origMatrix), newGenes),]
} else {
sigMatrix <- newMatrix[rownames(newMatrix) %in% c(rownames(origMatrix), newGenes),]
sigMatrix <- sigMatrix[apply(sigMatrix, 1, function(x){!any(is.na(x))}),]
} #if(imputeMissing == TRUE) {
if (postNorm==TRUE) {
#message(paste('Pre-normalization Kappa:', kappa(sigMatrix)))
newPartBool <- colnames(sigMatrix) %in% colnames(newData)
if(sum(!newPartBool)==0) {
renormNewPart <- preprocessCore::normalize.quantiles(x=as.matrix(sigMatrix[,newPartBool]))
} else {
renormNewPart <- preprocessCore::normalize.quantiles.use.target(x=as.matrix(sigMatrix[,newPartBool]), target=as.vector(as.matrix(sigMatrix[,!newPartBool])))
} #if(sum(!newPartBool)) {
sigMatrix[,newPartBool] <- renormNewPart
#message(paste('Post-normalization Kappa:', kappa(sigMatrix)))
}
titleStr <- paste('Augmenting Signature Matrix ( tol =', condTol, ')\n# Cell-types:', ncol(unAug), '->', ncol(newMatrix),
'| # Genes:', nrow(unAug), '->', nrow(sigMatrix))
if(!is.null(addTitle)) { titleStr <- paste(addTitle, titleStr) }
if(plotToPDF == TRUE) {
pdfFile <- paste('AugmentSigMatrix', condTol, Sys.Date(), 'pdf', sep='.')
pdfFile <- file.path(pdfDir, pdfFile)
if(!is.null(addTitle)) { pdfFile <- sub('.pdf$', paste0('.', addTitle, '.pdf'), pdfFile) }
if(imputeMissing) { pdfFile <- sub('.pdf', '.impute.pdf', pdfFile) }
if(postNorm) { pdfFile <- sub('.pdf', '.postNorm.pdf', pdfFile) }
grDevices::pdf(pdfFile)
}
if(plotIt == TRUE) {
    legText <- c('Unaugmented Signature Matrix', 'Minimum Smoothed Condition Number', 'Best Augmented Signature Matrix')
pchs <- c('o', 'x', 'x')
cols <- c('red', 'purple', 'blue')
ylims <- c(min(cNums.orig,cNums,kappa(as.matrix(sigMatrix)))*0.95, max(cNums.orig, cNums)*1.05)
graphics::plot(x=nGenes, y=cNums,
xlab='Number of Genes', ylab='Condition Number (lower is more stable)',
main=titleStr, ylim=ylims)
graphics::points(x=nrow(unAug), y=cNums.orig, col='red', pch='o', cex=1.5 )
graphics::lines(x=nGenes, y=smData, col='green')
graphics::points(x=nGenes[bestMin], y=cNums[bestMin], col='purple', pch='x', cex=1.5)
graphics::points(x=nGenes[smallMin], y=cNums[smallMin], col='blue', pch='x', cex=1.5)
if (postNorm==TRUE) {
legText <- c(legText, 'Post-Normalized')
pchs <- c(pchs, 'x')
cols <- c(cols, 'violetred')
graphics::points(x=nGenes[smallMin], y=kappa(as.matrix(sigMatrix)), col='violetred', pch='x', cex=1.5)
}
graphics::legend('topright', legend=legText, pch=pchs, col=cols)
origTitle <- 'Original Matrix'
if(!is.null(addTitle)) { origTitle <- paste(origTitle, titleStr) }
pheatmap::pheatmap(origMatrix, main=origTitle, fontsize_row = 4)
pheatmap::pheatmap(sigMatrix, main=titleStr, fontsize_row = 4)
if(!is.na(minSumToRem)) {
sigMatrix <- sigMatrix[rowSums(abs(sigMatrix)) > minSumToRem,]
titleStr <- paste('Augmenting Signature Matrix ( tol =', condTol, 'filter =', minSumToRem, ')\n# Cell-types:',
ncol(unAug), '->', ncol(sigMatrix), '| # Genes:', nrow(unAug), '->', nrow(sigMatrix))
if(!is.null(addTitle)) { titleStr <- paste(addTitle, titleStr) }
pheatmap::pheatmap(sigMatrix, main=titleStr, fontsize_row = 4)
}
  } #if(plotIt == TRUE) {
if(plotToPDF == TRUE) {
grDevices::dev.off()
}
  #Remove genes one at a time until the condition number stops improving
#note, this is a bad idea, it pretty much monotonically shrinks.
postShrink <- FALSE
if(postShrink==TRUE) {
curComp <- kappa(as.matrix(sigMatrix))
names(curComp) <- ""
newSigMatrix <- sigMatrix
for(i in 1:(nrow(sigMatrix)-1)) {
#Remove genes one at a time and determine which will minimize the condition number
newComps <- foreach(fe_curGene = rownames(newSigMatrix), .combine=c) %dopar% {
kappa(as.matrix(newSigMatrix[rownames(newSigMatrix)!=fe_curGene,]))
}
names(newComps) <- rownames(newSigMatrix)
remGene <- names(newComps)[which.min(newComps)]
curComp <- c(curComp, newComps[remGene])
newSigMatrix <- newSigMatrix[rownames(newSigMatrix)!=remGene,]
} #for(i in 1:(nrow(sigMatrix)-1)) {
}
#Optional, calculate a spillover matrix
if(calcSpillOver == TRUE) {
curAllDat <- cbind(fullData, newData)
res <- buildSpilloverMat(sigMatrix, geneExpr=curAllDat)
pheatmap::pheatmap(res, cluster_rows = FALSE, cluster_cols = FALSE, main=paste0('Spillover Matrix: ', nrow(res), ' Cell Types'))
rv <- list(sigMatrix=sigMatrix, spillOver=res)
} else {
rv <- sigMatrix
}
return(rv)
}
#' Rank genes for each cell type
#' @description Use a t-test to rank the features for each cell type
#'
#' gList <- rankByT(geneExpr, qCut=0.3)
#'
#' @param geneExpr The gene expression data
#' @param qCut The FDR-adjusted p-value (q-value) cutoff used to filter genes (DEFAULT: 0.3)
#' @param oneCore Set to TRUE to disable parallelization (DEFAULT: FALSE)
#' @param secondPval Set to TRUE to use p-Values as a second sort criterion (DEFAULT: TRUE)
#' @param remZinf Set to TRUE to remove any gene with a ratio of zero or infinity. Good for scRNAseq. (DEFAULT: FALSE)
#' @param reqRatGT1 Set to TRUE to remove any gene with a ratio less than 1. Good for scRNAseq. (DEFAULT: FALSE)
#' @export
#' @return a list of cell types with data frames ranking genes
#' @examples
#' #This toy example treats the LM22 deconvolution matrix as if it were all of the data
#' # For a real example, look at the vignette or comments in exprData, fullLM22, small LM22
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:200, 1:8]
#' #Make a fake signature matrix out of 100 genes and the first 8 cell types
#' smallLM22 <- fullLM22[1:100, 1:8]
#'
#' #Make fake data representing two replicates of purified Mast.cells
#' exprData <- ADAPTS::LM22[1:200, c("Mast.cells.resting","Mast.cells.activated")]
#' colnames(exprData) <- c("Mast.cells", "Mast.cells")
#'
#' #Fake source data with replicates for all purified cell types.
#' # Note in this fake data set, many cell types have exactly one replicate
#' fakeAllData <- cbind(fullLM22, as.data.frame(exprData))
#' gList <- rankByT(geneExpr = fakeAllData, qCut=0.3, oneCore=TRUE, reqRatGT1=FALSE)
rankByT <- function(geneExpr, qCut=0.3, oneCore=FALSE, secondPval=TRUE, remZinf=FALSE, reqRatGT1=FALSE) {
colnames(geneExpr) <- sub("\\.[0-9]+$", '', colnames(geneExpr)) #Strip any trailing numbers added by make.names()
cTypes <- unique(colnames(geneExpr))
if(length(cTypes) > 2 & oneCore==FALSE) {
gList <- foreach (fe_cType = cTypes) %dopar% {
print(fe_cType)
isType <- colnames(geneExpr) == fe_cType
geneExpr.cur <- geneExpr
tRes <- lapply(rownames(geneExpr.cur), function(x) {
rv <- try(stats::t.test(geneExpr.cur[x,isType], geneExpr.cur[x,!isType], na.action=stats::na.omit), silent=TRUE)
if(inherits(rv, 'try-error')) {rv <- list(estimate=c(1,1), statistic=0, p.value=1)}
return(rv)
}) #tRes <- mclapply(gNames, function(x) {
geneDF <- do.call(rbind, lapply(tRes, function(x) {data.frame(rat=x$estimate[1]/x$estimate[2], t=x$statistic, pVal=x$p.value)}))
rownames(geneDF) <- rownames(geneExpr.cur)
geneDF$qVal <- stats::p.adjust(geneDF$pVal, method = 'fdr')
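      # Sort so the most discriminative genes (largest |log2 ratio|, then smallest p-value) end up
      # at the BOTTOM of the data frame, matching the 'best genes at the bottom' convention used by AugmentSigMatrix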
if(secondPval==TRUE) {
geneDF <- geneDF[order(abs(log2(geneDF$rat)), -1*log(geneDF$pVal)),]
} else {
geneDF <- geneDF[order(abs(log2(geneDF$rat))),]
}
geneDF <- geneDF[geneDF$qVal <= qCut,]
geneDF <- geneDF[!is.na(geneDF$rat), ]
#gList[[cType]] <- geneDF
if(reqRatGT1==TRUE) { geneDF <- geneDF[geneDF$rat>1, ,drop=FALSE] }
if (remZinf==TRUE) {
isZ <- geneDF$rat == 0
isInf <- is.infinite(geneDF$rat)
geneDF <- geneDF[!(isZ | isInf), ,drop=FALSE]
}
return(geneDF)
} #for (cType in unique(colnames(geneExpr))) {
names(gList) <- cTypes
} else {
if(length(cTypes) <= 2) {cTypes <- cTypes[2]}
gList <- lapply(cTypes, function(fe_cType) {
print(fe_cType)
isType <- colnames(geneExpr) == fe_cType
#if (remZinf) {
# isZ <- rowSums(geneExpr[,isType,drop=FALSE]) == 0
# notZ <- rowSums(geneExpr[,!isType,drop=FALSE]) == 0
# remBool <- isZ | notZ
# geneExpr.cur <- geneExpr[!remBool,]
#} else {
geneExpr.cur <- geneExpr
#}
if(oneCore==TRUE) {
tRes <- lapply(rownames(geneExpr.cur), function(x) {
          rv <- try(stats::t.test(geneExpr.cur[x,isType], geneExpr.cur[x,!isType], na.action=stats::na.omit), silent=TRUE)
if(inherits(rv, 'try-error')) {rv <- list(estimate=c(1,1), statistic=0, p.value=1)}
return(rv)
}) #tRes <- lapply(gNames, function(x) {
} else {
tRes <- parallel::mclapply(rownames(geneExpr.cur), function(x) {
          rv <- try(stats::t.test(geneExpr.cur[x,isType], geneExpr.cur[x,!isType], na.action=stats::na.omit), silent=TRUE)
if(inherits(rv, 'try-error')) {rv <- list(estimate=c(1,1), statistic=0, p.value=1)}
return(rv)
}) #tRes <- mclapply(gNames, function(x) {
} #if(oneCore==TRUE) {
geneDF <- do.call(rbind, lapply(tRes, function(x) {data.frame(rat=x$estimate[1]/x$estimate[2], t=x$statistic, pVal=x$p.value)}))
rownames(geneDF) <- rownames(geneExpr.cur)
geneDF$qVal <- stats::p.adjust(geneDF$pVal, method = 'fdr')
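      # As above, sort so the most discriminative genes for this cell type land at the bottom of the data frame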
if(secondPval==TRUE) {
geneDF <- geneDF[order(abs(log2(geneDF$rat)), -1*log(geneDF$pVal)),]
} else {
geneDF <- geneDF[order(abs(log2(geneDF$rat))),]
}
geneDF <- geneDF[geneDF$qVal <= qCut,]
geneDF <- geneDF[!is.na(geneDF$rat), ]
#gList[[cType]] <- geneDF
if(reqRatGT1==TRUE) { geneDF <- geneDF[geneDF$rat>1, ,drop=FALSE] }
if (remZinf==TRUE) {
isZ <- geneDF$rat == 0
isInf <- is.infinite(geneDF$rat)
geneDF <- geneDF[!(isZ | isInf), ,drop=FALSE]
}
return(geneDF)
}) #for (cType in unique(colnames(geneExpr))) {
names(gList) <- cTypes
} #if(length(cTypes) > 1) {
return(gList)
}
#' Load MGSM27
#' @description Load the MGSM27 signature matrix
#'
#' @export
#' @return The MGSM27 signature matrix from Identifying a High-risk Cellular Signature in the Multiple Myeloma Bone Marrow Microenvironment
#' @examples
#' MGSM27 <- loadMGSM27()
loadMGSM27 <- function() {
MGSM27 <- ADAPTS::MGSM27
return(MGSM27)
}
#' LM22 look up table
#' @description Load a map of cell type names
#'
#' @export
#' @return a map of cell type names
#' @examples
#' cellMap <- getLM22cells()
getLM22cells <- function() {
mapTypes <- list('naive B-cells', # B.cells.naive
'Memory B-cells', # B.cells.memory
'Plasma cells', # Plasma.cells
c('CD8+ T-cells', 'CD8+ Tcm', 'CD8+ Tem', 'CD8+ naive T-cells'), # T.cells.CD8
'CD8+ T-cells',
'CD4+ naive T-cells', # T.cells.CD4.naive
'CD4+ memory T-cells', # T.cells.CD4.memory.resting NOTE divide by 2
'CD4+ memory T-cells', # T.cells.CD4.memory.activated NOTE divide by 2
c('CD4+ T-cells', 'CD4+ Tem', 'CD4+ Tcm', 'Th1 cells', 'Th2 cells'), # T.cells.follicular.helper
'Tregs', # T.cells.regulatory..Tregs
'Tgd cells', # T.cells.gamma.delta
'NK cells', # NK.cells.resting NOTE divide by 2
'NK cells', # NK.cells.activated NOTE divide by 2
'Monocytes', # Monocytes
'Macrophages', # Macrophages.M0
'Macrophages M1', # Macrophages.M1
'Macrophages M2', # Macrophages.M2
c('cDC','pDC','DC'), # Dendritic.cells.resting
'aDC', # Dendritic.cells.activated
'Mast cells', # Mast.cells.resting NOTE divide by 2
'Mast cells', # Mast.cells.activated NOTE divide by 2
'Eosinophils', # Eosinophils
'Neutrophils', # Neutrophils
'Plasma cells', # MM.plasma.cell
'Macrophages', # osteoclast
'Plasma cells', # PlasmaMemory
'') # Other, need something special here
mapTypes <- unique(unlist(mapTypes))
return(mapTypes)
}
#' LM22 to xCell LUT
#' @description Load the LM22 xCell map
#'
#' @export
#' @return A map between xCell cell type names and LM22 cell type names
#' @examples
#' xcellMap <- loadModMap()
loadModMap <- function() {
modMap <- rbind(c('naive B-cells', 'B.cells.naive'),
c('Memory B-cells','B.cells.memory'),
c('Plasma cells','Plasma.cells'),
c('CD8+ T-cells','T.cells.CD8'),
c('CD4+ naive T-cells','T.cells.CD4.naive'),
c('CD4+ memory T-cells', 'T.cells.CD4.memory.resting'),
c('CD4+ memory T-cells', 'T.cells.CD4.memory.activated'),
c('CD4+ Tcm','T.cells.follicular.helper'),
c('Tregs','T.cells.regulatory..Tregs.'),
c('Tgd cells','T.cells.gamma.delta'),
c('NK cells', 'NK.cells.resting'),
c('NK cells', 'NK.cells.activated'),
c('Monocytes','Monocytes'),
c('Macrophages', 'Macrophages.M0'),
c('Macrophages M1','Macrophages.M1'),
c('Macrophages M2', 'Macrophages.M2'),
c('iDC','Dendritic.cells.resting'),
c('aDC', 'Dendritic.cells.activated'),
c('Mast cells', 'Mast.cells.resting'),
c('Mast cells', 'Mast.cells.activated'),
c('Eosinophils', 'Eosinophils'),
c('Neutrophils', 'Neutrophils'),
c('Adipocytes', 'adipocyte'),
c('Plasma cells', 'MM.plasma.cell'),
c('Osteoblast', 'osteoblast'),
c('Plasma cells', 'PlasmaMemory'))
return(modMap)
}
#New functions to work into ADAPTS
#' Plot condition numbers
#' @description Plot the condition numbers during the growing and shrinking of signature matrices.
#'
#' bonusPoints <- data.frame(legText = c('Unaugmented Signature Matrix', 'Minimum Smoothed Condition Number', 'Best Augmented Signature Matrix'),
#' pchs = c('o', 'x', 'x'),
#' cols = c('red', 'purple', 'blue'),
#' kappa = c(10, 15, 20),
#' nGene = c(5, 10, 15))
#'
#' @param kappas The condition numbers to plot
#' @param nGenes The number of genes associated with each kappa
#' @param smData Smoothed data to plot as a green line (DEFAULT: NULL)
#' @param titleStr The title of the plot (DEFAULT: 'Shrink Signature Matrix')
#' @param bonusPoints Set to plot additional points on the plot, see description (DEFAULT: NULL)
#' @param maxCond Cap the condition number to maxCond (DEFAULT: 100)
#'
#' @export
#' @return No return value; called for the side effect of plotting condition numbers
#' @examples
#' nGenes <- 1:300
#' kappas <- log(abs(nGenes-250))
#' kappas[is.infinite(kappas)] <- 0
#' kappas <- kappas+runif(300, 0, 1)
#' smData <- stats::smooth(kappas)
#' bonusPoints <- data.frame(legText = 'Minimum Smoothed ', pchs='x', cols='purple',
#' kappa=min(smData), nGenes=nGenes[which.min(smData)])
#' plotKappas(kappas=kappas, nGenes=nGenes, smData=smData, bonusPoints=bonusPoints, maxCond=100)
#'
plotKappas <- function(kappas, nGenes, smData=NULL, titleStr='Shrink Signature Matrix', bonusPoints=NULL, maxCond=100) {
testKappa <- kappas
if(!is.null(bonusPoints)) {testKappa <- c(testKappa, bonusPoints$kappa)}
if(any(kappas > maxCond)) {
message(paste('Capping condition number to', maxCond))
kappas[kappas > maxCond] <- maxCond
}
ylims <- c(min(testKappa)*0.95, max(testKappa)*1.05)
graphics::plot(x=nGenes, y=kappas,
xlab='Number of Genes', ylab='Condition Number (lower is more stable)',
main=titleStr, ylim=ylims)
if(!is.null(smData)) {graphics::lines(x=nGenes, y=smData, col='green')}
if(!is.null(bonusPoints)) {
graphics::points(x=bonusPoints$'nGene', y=bonusPoints$'kappa',
col=as.character(bonusPoints$'cols'), pch=as.character(bonusPoints$'pchs'), cex=1.5)
graphics::legend('topright', legend=as.character(bonusPoints$'legText'),
pch=as.character(bonusPoints$'pchs'), col=as.character(bonusPoints$'cols'))
}
}
#' Calculate condition numbers for signature subsets
#' @description Remove genes in chunks by picking those that most improve the condition number.
#' Will set any infinite condition numbers to max(kappas[!is.infinite(kappas)])+1
#' Return the condition numbers with their gene lists
#'
#' @param sigMatrix The original signature matrix
#' @param numChunks The number of groups of genes to remove (DEFAULT: NULL)
#' @param verbose Print out the current chunk as it's being calculated (DEFAULT: TRUE)
#' @param plotIt Set to TRUE to plot the condition numbers as genes are removed (DEFAULT: TRUE)
#' @param singleCore Set to FALSE to use multiple cores to calculate condition numbers (DEFAULT: FALSE)
#' @param fastStop Halt early when the condition number changes by less than 1 for 3 iterations (DEFAULT: TRUE)
#'
#' @export
#' @return A list with condition numbers and gene lists
#' @examples
#' library(ADAPTS)
#' LM22 <- ADAPTS::LM22
#' sigGenesList <- shrinkByKappa(sigMatrix=LM22[1:100,1:5], numChunks=4,
#' verbose=FALSE, plotIt=FALSE, singleCore=TRUE, fastStop=TRUE)
#'
shrinkByKappa <- function(sigMatrix, numChunks=NULL, verbose=TRUE, plotIt=TRUE, singleCore=FALSE, fastStop=TRUE) {
curComp <- kappa(as.matrix(sigMatrix))
names(curComp) <- ""
newSigMatrix <- sigMatrix
  #It is much too slow for 4000+ genes in a sig matrix with one gene at a time
if(is.null(numChunks)){numChunks <- nrow(sigMatrix)}
stepSize <- max(floor(nrow(sigMatrix) / numChunks), 1)
sigGenesList <- list()
for(i in 1:numChunks) {
if(verbose==TRUE){message(paste(i, '/', numChunks))}
#Remove genes one at a time and determine which will minimize the condition number,
    #  It's really too slow to recalculate this every iteration.
if(singleCore==TRUE) {
newComps <- foreach(fe_curGene = rownames(newSigMatrix), .combine=c) %do% {
kappa(as.matrix(newSigMatrix[rownames(newSigMatrix)!=fe_curGene,]))
}
} else {
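      # Note: %dopar% requires a registered parallel backend (e.g. doParallel::registerDoParallel());
      # without one, foreach falls back to sequential execution with a warning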
newComps <- foreach(fe_curGene = rownames(newSigMatrix), .combine=c) %dopar% {
kappa(as.matrix(newSigMatrix[rownames(newSigMatrix)!=fe_curGene,]))
}
} #if(singleCore==TRUE) {
names(newComps) <- rownames(newSigMatrix)
#Alternative: Why not just rank them by variance? Remove the lowest variance first??
    #  Really, we need some clustering here to not remove, say, B-cell specific genes
# That would require some prior information about clusters??
#remGene <- names(newComps)[which.min(newComps)]
remGene <- names(utils::head(sort(newComps), stepSize))
curComp <- c(curComp, newComps[remGene])
#Problem: if we remove all non-zero genes for a single sample, that will increase the kappa to Inf.
# This loop makes sure that this doesn't happen.
for(curRemGene in remGene) {
tempSigMat <- newSigMatrix[rownames(newSigMatrix) != curRemGene,,drop=FALSE]
if(nrow(tempSigMat)>0 && !is.infinite(kappa(as.matrix(tempSigMat)))) {newSigMatrix <- tempSigMat}
}
#newSigMatrix <- newSigMatrix[!rownames(newSigMatrix) %in% remGene,]
condNum <- kappa(as.matrix(newSigMatrix))
if( i == 1) {
deltaKappa <- as.numeric(NA)
} else {
deltaKappa <- condNum - sigGenesList[[i-1]]$condNum
} #if( i == 1) {
if(nrow(newSigMatrix)>0){
sigGenesList[[i]] <- list(sigGenes=rownames(newSigMatrix), condNum=condNum, deltaKappa=deltaKappa)
}
if(fastStop == TRUE) {
if (i > 3) {
kappas <- c(sigGenesList[[i-2]]$deltaKappa, sigGenesList[[i-1]]$deltaKappa, sigGenesList[[i]]$deltaKappa)
if(all(abs(kappas) < 1)) { break; }
} #if (i > 3) {
} #if(fastStop == TRUE) {
} #for(i in 1:(nrow(sigMatrix)-1)) {
kappas <- sapply(sigGenesList, function(x){x$condNum})
if (any(is.infinite(kappas))) {
kappas[is.infinite(kappas)] <- max(kappas[!is.infinite(kappas)])+1
for (i in 1:length(sigGenesList)) { sigGenesList[[i]]$condNum <- kappas[i] }
}
#plot(y=kappas, x=nGenes, xlab='Number of Genes', ylab='Condition Number')
if(plotIt==TRUE) {
kappas <- sapply(sigGenesList, function(x){x$condNum}) #Redo to make sure Inf is gone
nGenes <- sapply(sigGenesList, function(x){length(x$sigGenes)})
plotKappas(kappas, nGenes=nGenes)
}
return(sigGenesList)
}
#' Shrink a signature matrix
#' @description Use shrinkByKappa and automatic minima detection to reduce a signature matrix.
#' Select the new signature matrix with the minima and the maximum number of genes.  There is an
#' inherent difficulty in that the condition number will tend to have a second peak at a relatively
#' small number of genes, and then crash so that the smallest condition number has more or less one gene.
#'
#' By default, the algorithm will tend to pick the detected minimum with the largest number of genes.
#' aggressiveMin=TRUE will try to find the minimum number of genes that has more genes than the
#' maxima at the smallest number of genes.
#'
#' @param sigMatrix The original signature matrix
#' @param numChunks The number of groups of genes to remove. NULL is all genes (DEFAULT: 100)
#' @param verbose Print out the current chunk as it's being calculated (DEFAULT: FALSE)
#' @param plotIt Set to TRUE to plot (DEFAULT: FALSE)
#' @param aggressiveMin Set to TRUE to aggresively seek the smallest number of genes (DEFAULT: TRUE)
#' @param sigGenesList Set to use precomputed results from shrinkByKappa (DEFAULT: NULL)
#' @param singleCore Set to FALSE to use multiple cores to calculate condition numbers (DEFAULT: FALSE)
#' @param fastStop Halt early when the condition number changes by less than 1 for 3 iterations (DEFAULT: TRUE)
#'
#' @export
#' @return A list with condition numbers and gene lists
#' @examples
#' library(ADAPTS)
#' LM22 <- ADAPTS::LM22
#' newSigMat <- shrinkSigMatrix(sigMatrix=LM22[1:100,1:5], numChunks=4, verbose=FALSE,
#' plotIt=FALSE, aggressiveMin=TRUE, sigGenesList=NULL, singleCore=TRUE, fastStop=FALSE)
#'
shrinkSigMatrix <- function(sigMatrix, numChunks=100, verbose=FALSE, plotIt=FALSE, aggressiveMin=TRUE, sigGenesList=NULL, singleCore=FALSE, fastStop=TRUE) {
if(fastStop == TRUE) {
message('fastStop==TRUE overwriting aggressiveMin option.')
aggressiveMin <- TRUE #The logic is getting convoluted. Please refactor
}
else{
if(is.null(numChunks) || numChunks > nrow(sigMatrix)-1 ){
numChunks<-nrow(sigMatrix)-1
}
}
if(is.null(sigGenesList)) {
sigGenesList <- shrinkByKappa(sigMatrix=sigMatrix, numChunks=numChunks, verbose=verbose, plotIt=FALSE, singleCore=singleCore, fastStop=fastStop)
}
kappas <- sapply(sigGenesList, function(x){x$condNum})
nGenes <- sapply(sigGenesList, function(x){length(x$sigGenes)})
kappas <- kappas[order(nGenes)]
nGenes <- nGenes[order(nGenes)]
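  # Smooth the condition-number curve and use its local minima/maxima as candidate cut points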
smData <- stats::smooth(kappas)
smData2 <- try(stats::predict(stats::smooth.spline(smData))$y)
if(inherits(smData2, 'try-error')) {
smData2 <- stats::smooth(smData)
}
mins <- quantmod::findValleys(smData2)
maxs <- quantmod::findPeaks(smData2)
if(length(mins)==0) {mins <- which.min(kappas)}
if(length(maxs)==0) {maxs <- which.max(kappas)}
if (fastStop== TRUE) {
legText <- 'min'
pchs <- 2
cols <- 'orange'
bonusGenes <- min(nGenes)
bonusKappas <- kappas[which(nGenes==bonusGenes)[1]]
bonusPoints <- data.frame(legText = legText, pchs=pchs, cols=cols, kappa=bonusKappas, nGenes=bonusGenes, stringsAsFactors = FALSE)
} else {
legText <- c(make.names(rep('min', length(mins)), unique=TRUE), make.names(rep('max', length(maxs)), unique=TRUE))
pchs <- c(rep(2, length(mins)), rep(6, length(maxs)))
cols <- c(rep('red', length(mins)), rep('blue', length(maxs)))
bonusKappas <- kappas[c(mins, maxs)]
bonusGenes <- nGenes[c(mins, maxs)]
bonusPoints <- data.frame(legText = legText, pchs=pchs, cols=cols, kappa=bonusKappas, nGenes=bonusGenes, stringsAsFactors = FALSE)
    #There are two possible algorithms,
# A) Find the min with the minimum condition number
# B) Find the min with maximum number of genes
# First: use B
} # if (fastStop== TRUE) {
minIdx <- sub('\\.+[0-9]$', '', bonusPoints$legText) == 'min'
bonus.min <- bonusPoints[minIdx, ]
if (aggressiveMin == TRUE) {
chosenPoint <- bonus.min$legText[which.max(bonus.min$nGenes)]
bonusPoints[bonusPoints$legText==chosenPoint,'cols'] <- 'orange'
bonusPoints[bonusPoints$legText==chosenPoint,'pchs'] <- 17
} else {
maxIdx <- sub('\\.+[0-9]$', '', bonusPoints$legText) == 'max'
bonus.max <- bonusPoints[maxIdx, ]
chosenMax <- bonus.max$legText[which.min(bonus.max$nGenes)]
nGene.bool <- nGenes > bonus.max$nGenes[bonus.max$legText==chosenMax]
if(all(nGene.bool == FALSE)) { nGene.bool <- nGenes >= bonus.max$nGenes[bonus.max$legText==chosenMax] }
relMinKappa <- min(kappas[nGene.bool])
chosenPointDF <- data.frame(legText = 'Chosen', pchs=17, cols='orange',
kappa=relMinKappa, nGenes=nGenes[kappas==relMinKappa],
stringsAsFactors = FALSE)
bonusPoints <- rbind(bonusPoints, chosenPointDF)
chosenPoint <- 'Chosen'
}
if (plotIt==TRUE) {
plotKappas(kappas=kappas, nGenes=nGenes, smData = smData2, bonusPoints=bonusPoints)
}
numChosenGenes <- bonusPoints[bonusPoints$legText==chosenPoint,'nGenes']
#Check the bestSig calculation
bestSig <- sigGenesList[sapply(sigGenesList, function(x){length(x$sigGenes)}) == numChosenGenes][[1]]
smallMatrix <- sigMatrix[bestSig$sigGenes,]
return(smallMatrix)
}
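# A possible augment-then-shrink workflow (illustrative sketch kept as a comment so nothing runs at
# package load; fullLM22, smallLM22, exprData and gList are assumed to be built as in the
# AugmentSigMatrix examples above):
#   augSig <- AugmentSigMatrix(origMatrix=smallLM22, fullData=fullLM22, newData=exprData,
#                              gList=gList, plotToPDF=FALSE)
#   smallSig <- shrinkSigMatrix(sigMatrix=augSig, numChunks=20, singleCore=TRUE, fastStop=TRUE)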
#' Split a single cell dataset into multiple sets
#' @description Take a matrix of single cell data with genes as rows and each column corresponding
#' to a single cell. Break it up into roughly equal subsets, taking care to make sure that each cell type is represented
#' in each set if possible
#'
#' @param RNAcounts The single cell matrix
#' @param cellIDs A vector with cell types for each column in scCountMatrix (DEFAULT: colnames(RNAcounts))
#' @param numSets The number of sets to break it up into (DEFAULT: 3)
#' @param verbose Set to TRUE to print cell counts as it goes (DEFAULT: TRUE)
#' @param randomize Set to TRUE to randomize the sets (DEFAULT: TRUE)
#'
#' @export
#' @return a list with multiple sets
#' @examples
#' RNAcounts <- matrix(0, nrow=10, ncol=30)
#' rownames(RNAcounts) <- make.names(rep('Gene', nrow(RNAcounts)), unique=TRUE)
#' colnames(RNAcounts) <- make.names(c('CellX', rep('CellY', 9),
#' rep('CellZ', 10), rep('CellB', 10)), unique=TRUE)
#' RNAcounts[, grepl('CellY', colnames(RNAcounts))] <- 1
#' RNAcounts[, grepl('CellZ', colnames(RNAcounts))] <- 2
#' RNAcounts[, grepl('CellB', colnames(RNAcounts))] <- 3
#' splitSCdata(RNAcounts, numSets=3)
#'
splitSCdata <- function(RNAcounts, cellIDs=colnames(RNAcounts), numSets=3, verbose=TRUE, randomize=TRUE) {
cellTypes <- unique(sub('\\.[0-9]+$', '', cellIDs))
seqList <- list()
for (cellType in cellTypes) {
matchStr <- sub("+",'\\+',paste0(cellType,'\\.*[0-9]*$'), fixed=TRUE)
idxs <- grep(matchStr, cellIDs)
maxSam <- ceiling(length(idxs)/numSets)
#https://stackoverflow.com/questions/3318333/split-a-vector-into-chunks-in-r
#> max <- 20
#> x <- seq_along(d)
#> d1 <- split(d, ceiling(x/max))
x <- seq_along(idxs)
if(randomize==TRUE) {x <- sample(x)}#Add option to not randomize?
idxList <- split(idxs, ceiling(x/maxSam))
message(c(paste(cellType, ':', length(idxs)), '; ', paste(sapply(idxList, length), collapse=', ')))
for (y in names(idxList)) {seqList[[y]] <- c(seqList[[y]], idxList[[y]])}
} #for (cellType in cellTypes) {
#Reshape into lists for each group
setList <- lapply(seqList, function(x){ RNAcounts[,x] })
return(setList)
} #splitSCdata <- function(RNAcounts, cellIDs=colnames(RNAcounts), numSets=3) {
#' Build groupSize pools according to cellIDs
#' @description This function is intended to collapse many single cells into 3 (groupSize) groups
#' with the average count across all cells in each of the groups. These groups can then be used to perform a
#' t-test (for example) between the 3 groups of CellX and the 3 groups of CellY
#'
#' @param RNAcounts The single cell matrix
#' @param cellIDs A vector with cell types for each column in scCountMatrix (DEFAULT: colnames(RNAcounts))
#' @param groupSize The number of sets to break it up into (DEFAULT: 3)
#' @param randomize Set to TRUE to randomize the sets (DEFAULT: TRUE)
#' @param mc.cores The number of cores to use (DEFAULT: 1)
#'
#' @export
#' @return a matrix of pooled expression profiles, with one cell-type-labeled column per group of cells
#' @examples
#' RNAcounts <- matrix(0, nrow=10, ncol=100)
#' rownames(RNAcounts) <- make.names(rep('Gene', nrow(RNAcounts)), unique=TRUE)
#' colnames(RNAcounts) <- make.names(c('CellX', rep('CellY', 39),
#' rep('CellZ', 30), rep('CellB', 30)), unique=TRUE)
#' RNAcounts[, grepl('CellY', colnames(RNAcounts))] <- 1
#' RNAcounts[, grepl('CellZ', colnames(RNAcounts))] <- 2
#' RNAcounts[, grepl('CellB', colnames(RNAcounts))] <- 3
#' scSample(RNAcounts, groupSize=3)
#'
scSample <- function(RNAcounts, cellIDs=colnames(RNAcounts), groupSize=3, randomize=TRUE, mc.cores=1) {
cellTypes <- unique(sub('\\.[0-9]+$', '', as.character(cellIDs)))
#combCellList <- list()
#for (cellType in cellTypes) {
combCellList <- parallel::mclapply(cellTypes, mc.cores=mc.cores, function(cellType) {
matchStr <- sub("+",'\\+',paste0(cellType,'\\.*[0-9]*$'), fixed=TRUE)
idxs <- grep(matchStr, cellIDs)
if(randomize==TRUE) {idxs <- sample(x=idxs, length(idxs))}
if(length(idxs) > groupSize) {
maxSam <- ceiling(length(idxs)/groupSize)
#https://stackoverflow.com/questions/3318333/split-a-vector-into-chunks-in-r
#> max <- 20
#> x <- seq_along(d)
#> d1 <- split(d, ceiling(x/max))
x <- seq_along(idxs)
idxList <- split(idxs, ceiling(x/maxSam))
curData.list <- lapply(idxList, function(x) {
rowMeans(RNAcounts[,x,drop=FALSE], na.rm=TRUE)
})
curData <- do.call(cbind, curData.list)
colnames(curData) <- rep(cellType, ncol(curData))
#combCellList[[cellType]] <- curData
} else {
curData <- RNAcounts[,idxs,drop=FALSE]
if(length(idxs) == 1) {
colnames(curData) <- cellType
} else {
#colnames(curData) <- 1:length(idxs)#paste(cellType, 1:length(idxs), sep='.')
colnames(curData) <- rep(cellType, ncol(curData))
}
#combCellList[[cellType]] <- curData
}
curData
} ) #for (cellType in cellTypes) {
#Now convert it to a data.frame
combDF <- do.call(cbind, combCellList)
colnames(combDF) <- sub('\\..*$', '', colnames(combDF)) #Remove <cell>.<cell> name. Need smarter RegEX
return(combDF)
}
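# Illustrative sketch (reuses the toy 'RNAcounts' from the @examples above, with a little
# noise added so the pooled values are not constant): pool the cells and then compare one
# gene between two cell types with a t-test, as the description suggests.
# noisy <- RNAcounts + runif(length(RNAcounts), 0, 0.5)
# pooled <- scSample(noisy, groupSize=3)
# t.test(pooled['Gene.1', colnames(pooled) == 'CellY'],
#        pooled['Gene.1', colnames(pooled) == 'CellZ'])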
#' Calculate prediction accuracy
#' @description Calculate correlation coefficients, p-values, MAE, and RMSE for deconvolution predictions
#'
#' @param estimates The estimated cell percentages
#' @param reference The reference cell percentages
#'
#' @export
#' @return A named vector with Pearson and Spearman correlations and p-values, MAE, RMSE, sensitivity, and the smallest reference value among non-zero estimates
#' @examples
#' estimates <- sample(c(runif(8), 0 ,0))
#' estimates <- 100 * estimates / sum(estimates)
#' reference <- sample(c(runif(7), 0 , 0, 0))
#' reference <- 100 * reference / sum(reference)
#' calcAcc(estimates, reference)
#'
calcAcc <- function(estimates, reference) {
ct <- stats::cor.test(estimates, reference)
ct.spear <- suppressWarnings(stats::cor.test(estimates, reference, method = 'spearman'))
mae <- mean(abs(estimates - reference), na.rm=TRUE)
rmse <- sqrt(mean((estimates - reference)^2, na.rm=TRUE))
#Binarize to get at sensitivity and specificity
F1mcc <- getF1mcc(estimate=estimates>0, reference=reference>0)
Zs <- estimates == 0
minForNon0 <- min(reference[!Zs & !is.na(reference)])
out <- c(rho=ct$estimate, pVal=ct$p.value, spear=ct.spear$estimate, pVal.spear=ct.spear$p.value,
mae=mae, rmse=rmse, sens=F1mcc[['sensitivity']], minForNon0=minForNon0)
return(out)
}
#' Get f1 / mcc
#' @description Get f1 / mcc and other accuracy measurements for binary predictions.
#' Provide either an estimate and reference vector
#' e.g. getF1mcc(estimate, reference)
#' Or TPs, FPs, etc.
#' e.g. getF1mcc(tps=3, fps=4, tns=7, fns=2)
#'
#' @param estimate A binary vector of predictions
#' @param reference a binary vector of actual values
#' @param tps The number of TPs
#' @param fps The number of FPs
#' @param tns The number of TNs
#' @param fns The number of FNs
#'
#' @export
#' @return A vector with sensitivity, specificity, fpr, fdr, f1, agreement, p.value, mcc, and mcc.p
#' @examples
#' estimates <- sample(c(runif(8), 0 ,0))
#' reference <- sample(c(runif(7), 0 , 0, 0))
#' accuracyStats <- getF1mcc(estimate=estimates>0, reference=reference>0)
getF1mcc <- function(estimate=NULL, reference=NULL, tps=NULL, fps=NULL, tns=NULL, fns=NULL) {
if(is.null(tps) | is.null(fps) | is.null(tns) | is.null(fns)) {
if(is.null(estimate) | is.null(reference)) {
      stop('Must specify either estimate and reference or tps, fps, tns, and fns.')
} else {
naIdx <- is.na(estimate) | is.na(reference) | is.null(estimate) | is.null(reference)
estimate <- estimate[!naIdx]
reference <- reference[!naIdx]
tps <- sum(estimate==TRUE & reference==TRUE)
fps <- sum(estimate==TRUE & reference==FALSE)
tns <- sum(estimate==FALSE & reference==FALSE)
fns <- sum(estimate==FALSE & reference==TRUE)
}
} #if(is.null(tps) | is.null(fps) | is.null(tns) | is.null(fns)) {
sensitivity <- tps/(tps+fns)
specificity <- tns/(fps+tns)
fpr <- fps / (fps+tns)
fdr <- fps / (fps+tps)
f1 <- (2*tps) / (2*tps + fps + fns)
N <- tns+tps+fns+fps
S <- (tps+fns) / N
P <- (tps+fps) / N
mcc <- (tps/N - S * P) / sqrt(P*S*(1-S)*(1-P))
mccData <- rbind( t(matrix(c(1,1), 2, max(tps,1))), t(matrix(c(0,0), 2, max(tns,1))),
t(matrix(c(1,0), 2, max(fps,1))), t(matrix(c(0,1), 2, max(fns,1))) )
#Note: max assumes that you will get a correlation
mcc.p <- stats::cor.test(mccData[,1], mccData[,2])$p.value
if (is.na(mcc) | mcc==Inf) { mcc <- 0; mcc.p <- 1 }
agreement <- (tps+tns)/(tps+fps+tns+fns)
bgProb <- max(c( (tps+fns)/(tps+fps+tns+fns) , 1-(tps+fns)/(tps+fps+tns+fns) ))
p.value <- stats::pbinom(q=tps+tns, size=tps+fps+tns+fns, prob=bgProb, lower.tail = FALSE)
return(c(sensitivity=sensitivity, specificity=specificity, fpr=fpr, fdr=fdr, f1=f1, agreement=agreement, p.value=p.value, mcc=mcc, mcc.p=mcc.p))
}
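# Worked sketch of the count-based interface: with tps=3, fps=4, tns=7, fns=2 the function
# returns sensitivity = 3/(3+2) = 0.6, specificity = 7/(4+7) ~ 0.64, fdr = 4/(4+3) ~ 0.57,
# and f1 = (2*3)/(2*3+4+2) = 0.5.
# getF1mcc(tps=3, fps=4, tns=7, fns=2)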
#' Build a gList using random forest
#' @description Use ranger to select features and build a genesInSeed gene matrix
#'
#' @param trainSet Each row is a gene, and each column is an example of a particular cell type, e.g. ADAPTS::scSample(trainSet, groupSize=30)
#' @param oneCore Set to TRUE to disable multicore (DEFAULT: FALSE)
#'
#' @export
#' @return A cell specific geneList for ADAPTS::AugmentSigMatrix()
#' @examples
#' library(ADAPTS)
#' ct1 <- runif(1000, 0, 100)
#' ct2 <- runif(1000, 0, 100)
#' dataMat <- cbind(ct1, ct1, ct1, ct1, ct1, ct1, ct2, ct2, ct2, ct2)
#' rownames(dataMat) <- make.names(rep('gene', nrow(dataMat)), unique=TRUE)
#' noise <- matrix(runif(nrow(dataMat)*ncol(dataMat), -2, 2), nrow = nrow(dataMat), byrow = TRUE)
#' dataMat <- dataMat + noise
#' gList <- gListFromRF(trainSet=dataMat, oneCore=TRUE)
#'
gListFromRF <- function(trainSet, oneCore=FALSE) {
clusterNames <- unique(sub('\\.[0-9]+$', '', colnames(trainSet)))
trainSet.4reg <- t(trainSet)
if(oneCore==TRUE) {
gList.fromRF <- lapply (clusterNames, function(cn) {
clusterBool <- colnames(trainSet) == cn
rf1 <- ranger::ranger(x=trainSet.4reg, y=clusterBool, num.trees=1000, importance='impurity')
imp <- ranger::importance(rf1)
imp <- sort(imp[imp>0], decreasing = FALSE)
curDF <- data.frame(rat=imp, t=0, pVal=0, qVal=0)
})
} else {
gList.fromRF <- parallel::mclapply (clusterNames, function(cn) {
clusterBool <- colnames(trainSet) == cn
rf1 <- ranger::ranger(x=trainSet.4reg, y=clusterBool, num.trees=1000, importance='impurity')
imp <- ranger::importance(rf1)
imp <- sort(imp[imp>0], decreasing = FALSE)
curDF <- data.frame(rat=imp, t=0, pVal=0, qVal=0)
})
} #if(oneCore==TRUE) {
names(gList.fromRF) <- clusterNames
return(gList.fromRF)
}
#' Make a GSVA genelist
#' @description Provide a gList and signature matrix with matched cell types to get signatures
#' gene lists for GSVA and similar algorithms.
#' With gList=NULL, the highest genes are selected for each cell type, with a minimum of 3.
#'
#' @param sigMat A signature matrix such as from ADAPTS::AugmentSigMatrix()
#' @param gList A list of prioritized genes such as from ADAPTS::gListFromRF() (DEFAULT:NULL)
#'
#' @export
#' @return A list of genes for each cell type present in both sigMat and gList
#' @examples
#' library(ADAPTS)
#' ct1 <- runif(1000, 0, 100)
#' ct2 <- runif(1000, 0, 100)
#' dataMat <- cbind(ct1, ct1, ct1, ct1, ct1, ct1, ct2, ct2, ct2, ct2)
#' rownames(dataMat) <- make.names(rep('gene', nrow(dataMat)), unique=TRUE)
#' noise <- matrix(runif(nrow(dataMat)*ncol(dataMat), -2, 2), nrow = nrow(dataMat), byrow = TRUE)
#' dataMat <- dataMat + noise
#' gList <- ADAPTS::gListFromRF(trainSet=dataMat, oneCore=TRUE)
#' newSigMat <- ADAPTS::buildSeed(trainSet=dataMat, plotIt=FALSE)
#' geneLists <- matrixToGenelist(sigMat=newSigMat, gList=gList)
#'
matrixToGenelist <- function(sigMat, gList=NULL) {
geneLists <- list() #The output variable
if(is.null(gList)) {
bestCols <- apply(sigMat, 1, which.max)
for (curName in colnames(sigMat)) {
seedGenes <- names(tail(sort(sigMat[, curName]),3))
bestGenes <- names(bestCols)[bestCols == which(colnames(sigMat)==curName)]
allGenes <- unique(c(seedGenes, bestGenes))
rats <- sapply(allGenes, function(x){ sigMat[x,curName] / sum(sigMat[x,colnames(sigMat) != curName])})
geneLists[[curName]] <- names(sort(rats, decreasing = TRUE))
}
} else {
olNames <- names(gList)[names(gList) %in% colnames(sigMat)]
if(length(olNames) < length(gList) | length(olNames) < ncol(sigMat)) {
message('Not all cell-type names match between gList and sigMat')
message(' Only matched cell-types will be calculated')
}
for (curName in olNames) {
olGenes <- rownames(gList[[curName]])[rownames(gList[[curName]]) %in% rownames(sigMat)]
#Make sure that genes are high in current cell type.
rats <- sapply(olGenes, function(x){ sigMat[x,curName] / sum(sigMat[x,colnames(sigMat) != curName])})
rats <- rats[rats>1]
geneLists[[curName]] <- names(sort(rats, decreasing = TRUE))
}
}
return(geneLists)
}
# Function removed to pass R CMD check --as-cran because xCell is on GitHub not CRAN.
#
# #' Use xCellSignifcanceBetaDist in xCell to estimate the probability that a cell type is in a sample.
# #'
# #' cellPvals <- estxCellSig(geneExpr.pbmc, rnaseq = FALSE)
# #'
# #' @param geneExpr.pbmc The gene expression data
# #' @param rnaseq Set to TRUE if the data is RNAseq data (DEFAULT: FALSE)
# #' @export
# #' @return cell type p-values
# estxCellSig <- function(geneExpr.pbmc, rnaseq = FALSE) {
# if(!require('xCell')) {
# message('This function requires the xCell package')
# message('It can be found here: https://github.com/dviraran/xCell')
# return(NULL)
# }
# xCells <- xCellAnalysis(geneExpr.pbmc,rnaseq = rnaseq)
# xCellSigs <- xCellSignifcanceBetaDist(xCells, rnaseq = rnaseq)
#Map to LM22 where possible.
# xCellMap <- loadModMap()
# xCellMap <- xCellMap[xCellMap[,1] %in% rownames(xCellSigs),]
# mappable <- xCellSigs[xCellMap[,1],]
# rownames(mappable) <- xCellMap[,2]
# notmappable <- xCellSigs[!(rownames(xCellSigs) %in% xCellMap[,1]),]
# cellPvals <- rbind(mappable, notmappable)
# return(cellPvals)
#}
# Function removed to pass R CMD check --as-cran because xCell is on GitHub not CRAN
#
# #' Deconvolve PBMCs and see what you get.
# #' This requires the ADAPTSdata packages from GitHub
# #' https://github.com/sdanzige/ADAPTSdata
# #' https://github.com/sdanzige/ADAPTSdata2
# #'
# #'
# #' matData <- deconvolvePBMC(geneExpr.pbmc=ADAPTSdata::PBMC)
# #'
# #' @param geneExpr.pbmc The PBMC data to look at, for example: ADAPTSdata::PBMC
# #' @param refExpr The reference matrix (DEFAULT: NULL, ie load LM22)
# #' @param deconName The deconvolved matrix name (DEFAULT: 'LM22')
# #' @param dsName The sample name (DEFAULT: 'PBMC')
# #' @param decons Set to include already deconvolved values (DEFAULT: NULL)
# #' @param incXcell Set to TRUE to include xCell estimates of the fraction of samples with this cell type (DEFAULT: TRUE)
# #' @param plotIT Set to TRUE to plot it (DEFAULT: FALSE)
# #' @export
# #' @return cell type estimates for each sample
# deconvolvePBMC <- function(geneExpr.pbmc, refExpr=NULL, deconName="LM22", dsName='PBMC', decons=NULL, incXcell=TRUE, plotIT=FALSE) {
# if(is.null(decons)) {
# if(is.null(refExpr)) {
# refExpr <- utils::read.csv(gzfile('/GitHub/Deconvolution/LM22.csv.gz'), row.names = 1)
# }
# decons <- estCellPercent.DCQ(refExpr, geneExpr.pbmc, marker_set = NULL, number_of_repeats = 10)
# } #if(!is.null(decons)) {
#Plot it
# means <- apply(decons, 1, base::mean, na.rm=TRUE)
# sds <- apply(decons, 1, stats::sd, na.rm=TRUE)
# medians <- apply(decons, 1, stats::median, na.rm=TRUE)
#Also test with xCell
# if(incXcell == TRUE) {
# cellPvals <- estxCellSig(geneExpr.pbmc)
# fracSig <- apply(cellPvals, 1, mean)
# fracSig.inc <- fracSig[rownames(decons)[rownames(decons) %in% names(fracSig)]]
# inBoth <- rownames(decons)[rownames(decons) %in% names(fracSig.inc)]
# for (curBoth in inBoth) {
# i <- which(rownames(decons) == curBoth)
# rownames(decons)[i] <- paste0(rownames(decons)[i], ' (', round(100*fracSig.inc[curBoth]), '%)')
# }
# } #if(incXcell == TRUE) {
# for(iter in 1:2) {
# if(iter==2 && plotIT==TRUE) { grDevices::pdf(paste('deconvolvePBMC', deconName, dsName, Sys.Date(), 'pdf', sep='.')) }
# titleStr <- paste('DCQ', deconName, 'plot of', dsName)
# newMax <- max(means) + max(sds) * 1.1
# graphics::par(mar=c(12,4.1,4.1,2.1))
# graphics::plot(x=means, ylim=c(0, newMax), axes=FALSE, xlab=NA, main=titleStr)
# graphics::points(medians, col='red',pch='x')
# graphics::par(las=2)
# graphics::axis(1, at=1:nrow(decons), labels=rownames(decons), cex.axis=0.75)
# graphics::axis(2, labels=TRUE) #default way
# graphics::box()
# graphics::segments(x0=1:nrow(decons), y0=means-sds, y1=means+sds)
# epsilon = 0.2
# graphics::segments(x0=(1:nrow(decons))-epsilon, means-sds ,(1:nrow(decons))+epsilon, means-sds)
# graphics::segments(x0=(1:nrow(decons))-epsilon, means+sds ,(1:nrow(decons))+epsilon, means+sds)
# if(incXcell == TRUE) {
# graphics::legend('topleft', legend=c('median','() xCell %'), pch=c('x', ''), col=c('red','black'))
# } else {
# graphics::legend('topleft', legend=c('median'), pch=c('x'), col=c('red'))
# }
# if(iter==2 && plotIT==TRUE) { grDevices::dev.off() }
# } #for(iter = 1:2) {
# invisible(decons)
# }
| /scratch/gouwar.j/cran-all/cranData/ADAPTS/R/MakeSigMatrix.R |
#' Hierarchical Deconvolution
#' @description Deconvolve cell types based on clusters detected by an n-pass spillover matrix
#'
#' @param sigMatrix The deconvolution matrix, e.g. LM22 or MGSM27
#' @param geneExpr The source gene expression matrix used to calculate sigMatrix
#' @param toPred The gene expression to ultimately deconvolve
#' @param hierarchData The results of hierarchicalSplit OR hierarchicalSplit.sc (DEFAULT: NULL, ie hierarchicalSplit)
#' @param pdfDir A folder to write the pdf file to (DEFAULT: tempdir())
#' @param oneCore Set to TRUE to disable parallelization (DEFAULT: FALSE)
#' @param nPasses The maximum number of iterations for spillToConvergence (DEFAULT: 100)
#' @param remZinf Set to TRUE to remove any ratio with zero or infinity when generating gList (DEFAULT: TRUE)
#' @param method One of 'DCQ', 'SVMDECON', 'DeconRNASeq', 'proportionsInAdmixture', 'nnls' (DEFAULT: DCQ)
#' @param useRF Set to TRUE to use ranger random forests to build the seed matrix (DEFAULT: TRUE)
#' @param incNonCluster Set to TRUE to include a 'nonCluster' in each of the sub matrices (DEFAULT: TRUE)
#' @export
#' @return a matrix of cell counts
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' cellCounts <- hierarchicalClassify(sigMatrix=smallLM22, geneExpr=fullLM22, toPred=fullLM22,
#' oneCore=TRUE, nPasses=10, method='DCQ')
hierarchicalClassify <- function(sigMatrix, geneExpr, toPred, hierarchData=NULL, pdfDir=tempdir(), oneCore=FALSE, nPasses=100, remZinf=TRUE, method='DCQ', useRF=TRUE, incNonCluster=TRUE) {
if(is.null(hierarchData)) {
hierarchData <- hierarchicalSplit(sigMatrix, geneExpr, oneCore=oneCore, nPasses=nPasses, remZinf=remZinf, useRF=useRF, incNonCluster=incNonCluster)
}
#Step 1: Baseline deconvolution
toPred.sub <- toPred[rownames(toPred) %in% rownames(sigMatrix),,drop=FALSE]
colnames(toPred.sub) <- make.names(colnames(toPred.sub), unique=TRUE)
toPred.sub.imp <- missForest.par(toPred.sub)
initDecon <- estCellPercent(refExpr = sigMatrix, geneExpr=toPred.sub.imp, method=method)
#Step 2: Build the clustered Deconvolution
clusters <- NULL
for (i in 1:length(hierarchData$allClusters)) {
clusters <- rbind(data.frame(cell=hierarchData$allClusters[[i]], clust=i), clusters)
}
clustIDs <- clusters$clust
names(clustIDs) <- clusters$cell
clustIDs <- clustIDs[rownames(initDecon)]
initDecon.clust <- apply(initDecon, 2, function(x){tapply(x,clustIDs, sum)})
#Step 3: Split the clusters based on the smaller groups
curDecon.break.list <- list()
for (i in as.numeric(rownames(initDecon.clust))) {
curCellTypes <- hierarchData$allClusters[[i]]
if(length(curCellTypes) > 1) {
#curSigMat <- hierarchData$sigMatList[[i]][,curCellTypes,drop=FALSE]
curSigMat <- hierarchData$sigMatList[[i]][,,drop=FALSE]
toPred.sub <- toPred[rownames(toPred) %in% rownames(curSigMat),,drop=FALSE] #Filter genes
colnames(toPred.sub) <- make.names(colnames(toPred.sub), unique=TRUE)
toPred.sub.imp <- missForest.par(toPred.sub)
curDecon <- estCellPercent(refExpr = curSigMat, geneExpr=toPred.sub.imp, method=method)
curDecon <- curDecon[curCellTypes,,drop=FALSE]
curDecon.frac <- apply(curDecon, 2, function(x){x/sum(x)})
curDecon.frac <- curDecon.frac[rownames(curDecon.frac) != 'others',,drop=FALSE]
nas <- apply(curDecon.frac, 2, function(x){ any(is.na(x)) })
curDecon.frac[,nas] <- rep(1/nrow(curDecon.frac), nrow(curDecon.frac))
#curDecon.frac. Each row is a component of the current cluster.
# Each column is a particular sample
# initDecon.clust[as.character(i),] The fraction of cells in the current sample
curDecon.break <- apply(curDecon.frac, 1, function(x){x * initDecon.clust[as.character(i),]}) #initDecon.clust[as.character(i),,drop=FALSE] * curDecon.frac
if(ncol(toPred) > 1) {
curDecon.break <- t(curDecon.break)
} else {
curDecon.break <- data.frame(first=curDecon.break)
colnames(curDecon.break) <- colnames(toPred)
}
#curDecon.break should have broken up the amount in each column initDecon.clust[as.character(i),] scaled by each column of curDecon.frac
} else {
#Note, potentially for each single cluster, we could rescale this to self vs other, and rescale accordingly.
      # This could then be fixed in the later renormalization. However this does get a little weird.
curDecon.break <- initDecon.clust[as.character(i),,drop=FALSE]
rownames(curDecon.break) <- curCellTypes
} #if(length(curCellTypes) > 1) {
curDecon.break.list[[as.character(i)]] <- curDecon.break
} #for (i in as.numeric(rownames(initDecon.clust))) {
curDecon.new <- do.call(rbind, curDecon.break.list)
rownames(curDecon.new) <- unlist(sapply(curDecon.break.list, rownames))
curDecon.new <- rbind(curDecon.new, initDecon['others',,drop=FALSE])
curDecon.new.rescale <- apply(curDecon.new, 2, function(x){100*x/sum(x)}) #Fix rounding errors.
outFile <- paste('hierarchicalSplit', Sys.Date(),'pdf', sep='.')
outFile <- file.path(pdfDir, outFile)
grDevices::pdf(outFile)
res <- try(pheatmap::pheatmap(t(curDecon.new.rescale), fontsize=6, fontsize_row = 4, cluster_cols=FALSE, cluster_rows = TRUE), silent=TRUE)
if(inherits(res, 'try-error')) { try(pheatmap::pheatmap(t(curDecon.new.rescale), fontsize=6, fontsize_row = 4, cluster_cols=FALSE, cluster_rows = FALSE), silent=TRUE) }
grDevices::dev.off()
return(curDecon.new.rescale)
}
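# Illustrative sketch (reusing 'smallLM22' and 'fullLM22' from the @examples above):
# hierarchicalSplit() is the expensive step, so when several expression matrices must be
# deconvolved against the same signature matrix the cluster structure can be computed once
# and passed in via 'hierarchData'.
# hierarchData <- hierarchicalSplit(sigMatrix=smallLM22, geneExpr=fullLM22,
#                                   oneCore=TRUE, nPasses=10)
# cellCounts <- hierarchicalClassify(sigMatrix=smallLM22, geneExpr=fullLM22,
#                                    toPred=fullLM22, hierarchData=hierarchData,
#                                    oneCore=TRUE, method='DCQ')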
#' Build hierarchical cell clusters.
#' @description Attempt to deconvolve cell types by building a hierarchy of cell types using
#' spillToConvergence to determine cell types that are not signficantly different.
#' First deconvolve those clusters of cell types.
#' Deconvolution matrices are then built to separate the cell types that formerly could
#' not be separated.
#'
#' @param sigMatrix The deconvolution matrix, e.g. LM22 or MGSM27
#' @param geneExpr The source gene expression matrix used to calculate sigMatrix
#' @param oneCore Set to TRUE to disable parallelization (DEFAULT: FALSE)
#' @param nPasses The maximum number of iterations for spillToConvergence (DEFAULT: 100)
#' @param deconMatrices Optional pre-computed results from spillToConvergence (DEFAULT: NULL)
#' @param remZinf Set to TRUE to remove any ratio with zero or infinity when generating gList (DEFAULT: TRUE)
#' @param method One of 'DCQ', 'SVMDECON', 'DeconRNASeq', 'proportionsInAdmixture', 'nnls' (DEFAULT: DCQ)
#' @param useRF Set to TRUE to use ranger random forests to build the seed matrix (DEFAULT: TRUE)
#' @param incNonCluster Set to TRUE to include a 'nonCluster' in each of the sub matrices (DEFAULT: TRUE)
#' @export
#' @return A list of clusters and a list of signature matrices for breaking those clusters
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' clusters <- hierarchicalSplit(sigMatrix=smallLM22, geneExpr=fullLM22, oneCore=TRUE, nPasses=10,
#' deconMatrices=NULL, remZinf=TRUE, method='DCQ', useRF=TRUE, incNonCluster=TRUE)
hierarchicalSplit <- function(sigMatrix, geneExpr, oneCore=FALSE, nPasses=100, deconMatrices=NULL, remZinf=TRUE, method='DCQ', useRF=TRUE, incNonCluster=TRUE) {
allClusters.rv <- clustWspillOver(sigMatrix, geneExpr, nPasses=nPasses, deconMatrices=deconMatrices, method=method)
allClusters <- allClusters.rv$allClusters
deconMatrices <- allClusters.rv$deconMatrices
#Step 1: Do the level 1 deconvolution
cNames <- sub('\\.+[0-9]+$', '', colnames(geneExpr))
if(!all(cNames == colnames(geneExpr))) {
message('Stripping .[0-9]+ from the end of gene expression column names.')
colnames(geneExpr) <- cNames
}
#Make new signature matrices for each split. How to determine # of genes for only 2 cell types?
sigMatList <- list()
for (i in 1:length(allClusters)) {
curLen <- length(allClusters[[i]])
if (curLen == 1) {
sigMatList[[i]] <- as.matrix(sigMatrix)
next;
}
curGeneExpr <- geneExpr[,colnames(geneExpr) %in% allClusters[[i]]]
if(incNonCluster == TRUE) {
#Add the other cell types
curGeneExpr.other <- geneExpr[,!(colnames(geneExpr) %in% allClusters[[i]])]
colnames(curGeneExpr.other) <- rep('nonCluster', length=ncol(curGeneExpr.other))
curGeneExpr <- cbind(curGeneExpr, curGeneExpr.other)
} #if(incNonCluster == TRUE) {
naBool <- apply(curGeneExpr, 1, function(x){ any(is.na(x)) })
if(any(naBool)) {
message(paste('Removing', sum(naBool), 'genes due to NAs'))
curGeneExpr <- curGeneExpr[!naBool,]
#Note: Why is it imputing later if I've removed all of these??? I need to fix the NA problem better.
}
colnames(curGeneExpr) <- sub('\\.[0-9]+$', '', colnames(curGeneExpr))
if(useRF==TRUE) {
gList <- gListFromRF(trainSet = curGeneExpr, oneCore=oneCore)
} else {
gList <- rankByT(geneExpr = curGeneExpr, qCut=0.3, oneCore=oneCore, remZinf=remZinf)
} # if(useRF==TRUE) {
if(length(gList) == 1) {
otherCellType <- allClusters[[i]][!allClusters[[i]] %in% names(gList)]
gList[[otherCellType]] <- gList[[1]]
}
origMatrix <- sigMatrix[,allClusters[[i]]]
origMatrix.sm <- origMatrix[names(utils::tail(sort(apply(origMatrix,1,stats::var)),ceiling(nrow(sigMatrix)/10))),]
newMatData <- try(AugmentSigMatrix(origMatrix=origMatrix.sm, fullData=curGeneExpr, newData=curGeneExpr, gList=gList,
nGenes=1:100, plotToPDF=TRUE, imputeMissing=TRUE, condTol=1.01, postNorm=FALSE,
minSumToRem=NA, addTitle=paste(allClusters[[i]],collapse='_'), autoDetectMin=TRUE,
calcSpillOver=FALSE), silent = TRUE)
if(inherits(newMatData, 'try-error')) {
sigMatList[[i]] <- as.matrix(origMatrix.sm)
} else {
#Revision 08-20-18 - Add in all of cell types, but with just genes in newMatData
sigMatList[[i]] <- newMatData
}
} #for (i in 1:length(allClusters)) {
return(list(allClusters=allClusters, sigMatList=sigMatList, deconMatrices=deconMatrices))
}
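# Illustrative sketch (using 'clusters' from the @examples above): 'allClusters' lists which
# cell types were grouped together and 'sigMatList' holds one signature matrix per cluster for
# separating its members; hierarchicalClassify() consumes this whole list via 'hierarchData'.
# clusters$allClusters
# clusters$sigMatList[[1]]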
#' Cluster with spillover
#' @description Build clusters based on n-pass spillover matrix
#'
#' @param sigMatrix The deconvolution matrix, e.g. LM22 or MGSM27
#' @param geneExpr The source gene expression matrix used to calculate sigMatrix.
#' @param nPasses The maximum number of iterations for spillToConvergence (DEFAULT: 100)
#' @param deconMatrices Optional pre-computed results from spillToConvergence (DEFAULT: NULL)
#' @param method One of 'DCQ', 'SVMDECON', 'DeconRNASeq', 'proportionsInAdmixture', 'nnls' (DEFAULT: DCQ)
#' @export
#' @return Cell types grouped by cluster
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' clusters <- clustWspillOver(sigMatrix=smallLM22, geneExpr=fullLM22, nPasses=10)
clustWspillOver <- function(sigMatrix, geneExpr, nPasses=100, deconMatrices=NULL, method='DCQ') {
if(is.null(deconMatrices)) {
curGeneExpr <- geneExpr
naBool <- apply(curGeneExpr, 1, function(x){ any(is.na(x)) })
if(any(naBool)) {
message(paste('clustWspillOver: Removing', sum(naBool), 'genes due to NAs'))
curGeneExpr <- curGeneExpr[!naBool,,drop=FALSE]
keepBool <- rownames(sigMatrix) %in% rownames(geneExpr)
message(paste('clustWspillOver: Trimming', sum(!keepBool), '/', length(keepBool), 'genes from sigMatrix due to missingness'))
sigMatrix <- sigMatrix[keepBool,,drop=FALSE]
#Note: Why is it imputing later if I've removed all of these??? I need to fix the NA problem better.
} #if(is.null(deconMatrices)) {
deconMatrices <- spillToConvergence(sigMatrix=sigMatrix, geneExpr=curGeneExpr,plotIt=FALSE, nPasses=nPasses, method=method)
}
curExpr <- estCellCounts.nPass(geneExpr=sigMatrix, deconMatrices=deconMatrices, method=method)
#Any two identical columns belong in a cluster.
curCor <- stats::cor(curExpr)
allClusters <- list()
while(nrow(curCor) > 0) {
curLabel <- rownames(curCor)[1]
clustIdx <- (round(curCor[,curLabel],2) - 1) == 0
curRows <- rownames(curCor)[clustIdx]
allClusters[[length(allClusters)+1]] <- curRows
curCor <- curCor[!clustIdx,,drop=FALSE]
  } #while(nrow(curCor) > 0) {
return(list(allClusters=allClusters, deconMatrices=deconMatrices))
}
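# Illustrative sketch (using 'clusters' from the @examples above): 'allClusters' is a list of
# character vectors, one per cluster of cell types, and 'deconMatrices' can be passed back into
# clustWspillOver() or hierarchicalSplit() to avoid re-running spillToConvergence().
# clusters$allClusters
# splits <- hierarchicalSplit(sigMatrix=smallLM22, geneExpr=fullLM22, oneCore=TRUE,
#                             nPasses=10, deconMatrices=clusters$deconMatrices)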
#' Deconvolve with an n-pass spillover matrix
#' @description curExpr <- estCellCounts.nPass(sigMatrix, deconMatrices)
#'
#' @param geneExpr The gene expression matrix
#' @param deconMatrices The results from spillToConvergence()
#' @param method One of 'DCQ', 'SVMDECON', 'DeconRNASeq', 'proportionsInAdmixture', 'nnls' (DEFAULT: DCQ)
#' @export
#' @return An estimate of cell counts
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' deconMatrices <- spillToConvergence(sigMatrix=smallLM22, geneExpr=fullLM22, nPasses=10)
#' cellCounts <- estCellCounts.nPass(geneExpr=fullLM22, deconMatrices=deconMatrices, method='DCQ')
estCellCounts.nPass <- function(geneExpr, deconMatrices, method='DCQ') {
curExpr <- geneExpr
for (curDecon in deconMatrices) {
curExpr <- estCellPercent(refExpr = curDecon, geneExpr = curExpr, method=method)
}
return(curExpr)
}
#' Spillover to convergence
#' @description Build an n-pass spillover matrix, continuing until the results converge into clusters of cell types
#'
#' deconMatrices <- spillToConvergence(sigMatrix, geneExpr, 100, FALSE, TRUE)
#'
#' @param sigMatrix The deconvolution matrix, e.g. LM22 or MGSM27
#' @param geneExpr The source gene expression matrix used to calculate sigMatrix
#' @param nPasses The maximum number of iterations (DEFAULT: 100)
#' @param plotIt Set to TRUE to plot it (DEFAULT: FALSE)
#' @param imputNAs Set to TRUE to impute genes with missing values & cache the imputed values. FALSE will just remove them (DEFAULT: FALSE)
#' @param method One of 'DCQ', 'SVMDECON', 'DeconRNASeq', 'proportionsInAdmixture', 'nnls' (DEFAULT: DCQ)
#' @export
#' @return A list of signature matrices
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' deconMatrices <- spillToConvergence(sigMatrix=smallLM22, geneExpr=fullLM22, nPasses=10, plotIt=TRUE)
spillToConvergence <- function(sigMatrix, geneExpr, nPasses=100, plotIt=FALSE, imputNAs=FALSE, method='DCQ') {
keepBool <- sub('\\.[0-9]+$', '', colnames(geneExpr)) %in% colnames(sigMatrix)
if (!all(keepBool)) {
message(paste('Removing', sum(!keepBool), '/', length(keepBool), 'from geneExpr that are not in sigMatrix'))
geneExpr <- geneExpr[,keepBool,drop=FALSE]
}
keepBool <- sub('\\.[0-9]+$', '', colnames(sigMatrix)) %in% colnames(geneExpr)
if (!all(keepBool)) {
message(paste('Removing', sum(!keepBool), '/', length(keepBool), 'from sigMatrix that are not in geneExpr'))
sigMatrix <- sigMatrix[,keepBool,drop=FALSE]
}
missingGeneBool <- !rownames(sigMatrix) %in% rownames(geneExpr)
if (sum(missingGeneBool) > 0) {
message(paste('Removing', sum(missingGeneBool), 'that are in sigMatrix but not geneExpr'))
sigMatrix <- sigMatrix[!missingGeneBool,]
}
geneExpr.sub <- geneExpr[rownames(sigMatrix),]
naGeneBool <- apply(geneExpr.sub, 1, function(x) {any(is.na(x))})
if(any(naGeneBool)) {
if (imputNAs==TRUE) {
message(paste('Imputing for', sum(naGeneBool), '/', length(naGeneBool), 'genes with missing values'))
saveFile <- paste('spillToConvergence.geneExpr.sub.imp',nrow(sigMatrix),ncol(sigMatrix),nrow(geneExpr),ncol(geneExpr),sum(naGeneBool),length(naGeneBool),'RData',sep='.')
saveFile <- file.path(tempdir(), saveFile)
newMatrix <- NULL
if(file.exists(saveFile)) {
message(paste('Loading pre-imputed', saveFile))
newMatrix <- get(load(saveFile)[1])
if(any(rownames(newMatrix) != rownames(geneExpr.sub)) | any(colnames(newMatrix) != colnames(geneExpr.sub))) {
message(paste('Loaded matrix does not match genes/experiments in input matrix.'))
newMatrix <- NULL
}
} #if(file.exists(saveFile)) {
if(is.null(newMatrix)) {
message('Imputing')
newMatrix <- missForest.par(dataMat = geneExpr.sub, parallelize = "variables")
save(newMatrix, file=saveFile)
}
geneExpr.sub <- newMatrix
} else {
message(paste('Removing', sum(naGeneBool), '/', length(naGeneBool), 'genes with missing values'))
keepGenes <- names(naGeneBool)[!naGeneBool]
geneExpr.sub <- geneExpr.sub[keepGenes,]
sigMatrix <- sigMatrix[keepGenes,]
} #if (imputNAs==TRUE) {
}
#E_0
cellEst <- estCellPercent(refExpr=sigMatrix, geneExpr=geneExpr.sub, method=method)
if(is.null(cellEst)) {message('spillToConvergence deconvolution failed'); return(NULL)}
#S_1
newSig <- t(apply(cellEst, 1, function(x) { tapply(x, sub('\\.[0-9]+$', '', colnames(cellEst)), mean, na.rm=TRUE)}))
#estToPlot <- newSig[c(sort(colnames(newSig)),'others'),sort(colnames(newSig))]
if(plotIt==TRUE) {pheatmap::pheatmap(t(cellEst), main='First Decon Results\n y=Purified, x=Decon As', fontsize = 4)}#, cluster_rows = FALSE, cluster_cols = FALSE)
cellEst.last <- cellEst
cellEst.last2 <- cellEst
addPassList <- list()
addPassList[[1]] <- sigMatrix
addPassList[[2]] <- newSig
for (curPass in 3:nPasses) {
#E_1, etc
cellEst.next <- estCellPercent(refExpr=addPassList[[curPass-1]], geneExpr=cellEst.last, method=method)
#S_2
newSig.next <- t(apply(cellEst.next, 1, function(x) { tapply(x, sub('\\.[0-9]+$', '', colnames(cellEst.next)), mean, na.rm=TRUE)}))
rnames <- c(sort(rownames(cellEst.next)[rownames(cellEst.next) %in% colnames(cellEst.next)]), sort(rownames(cellEst.next)[!rownames(cellEst.next) %in% colnames(cellEst.next)]))
cnames <- c(sort(colnames(cellEst.next)[colnames(cellEst.next) %in% rownames(cellEst.next)]), sort(colnames(cellEst.next)[!colnames(cellEst.next) %in% rownames(cellEst.next)]))
cellEst.next <- cellEst.next[rnames,colnames(cellEst.next) %in% cnames]
pairedTypes <- sort(rownames(cellEst.next)[rownames(cellEst.next) %in% colnames(cellEst.next)])
estWself3 <- sapply(pairedTypes, function(i) {cellEst.next[i,i]})
if(plotIt==TRUE) {graphics::barplot(estWself3, col=grDevices::rainbow(length(estWself3)), main=paste('Self Identification in Purified Samples\nPass',curPass), ylim=c(0,100), cex.names=0.66, las=2)}
titleStr3 <- paste0(curPass, 'x-Re-Deconvolving results with spillover matrix\nMean Self % = ', round(mean(estWself3)))
if(plotIt==TRUE) {pheatmap::pheatmap(t(newSig.next),cluster_rows = FALSE, cluster_cols = FALSE, main=titleStr3, xlab='DCQ', ylab='Reference', fontsize = 4)} #xlab and ylab don't work
#addPassList[[curPass]] <- list(res=res3, cellEst=cellEst.third, estWself=estWself3, titleStr=titleStr3)
addPassList[[curPass]] <- newSig.next
#Test to see if there was any change from the last pass. If not, then stop
diffs <- sapply(colnames(cellEst.next), function(x){ sqrt(mean((cellEst.last[,x]-cellEst.next[,x])^2))})
if(all(diffs == 0)) { break;}
#Test to see if the signature matrices are oscilating
diffs <- sapply(colnames(cellEst.next), function(x){ sqrt(mean((cellEst.last2[,x]-cellEst.next[,x])^2))})
if(all(diffs == 0)) { break;}
cellEst.last2 <- cellEst.last
cellEst.last <- cellEst.next
} #for (curPass in 3:nPasses) {
if(plotIt==TRUE) {pheatmap::pheatmap(t(newSig.next),cluster_rows = TRUE, cluster_cols = TRUE, main=titleStr3, xlab='DCQ', ylab='Reference', fontsize = 4)} #xlab and ylab don't work
return(addPassList)
}
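# Illustrative sketch (using 'deconMatrices' from the @examples above): the returned list holds
# one signature matrix per pass, with element 1 being the input sigMatrix, so its length shows
# how many passes were run before the estimates converged or began to oscillate.
# length(deconMatrices)
# dim(deconMatrices[[2]])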
#' Build a spillover matrix
#' @description Build a spillover matrix, i.e. what do purified samples deconvolve as?
#'
#' spillExpr <- buildSpilloverMat(refExpr, geneExpr, method='DCQ')
#'
#' @param refExpr The deconvolution matrix, e.g. LM22 or MGSM27
#' @param geneExpr The full gene expression for purified cell types. Multiple columns (examples) for each column in the reference expr.
#' @param method One of 'DCQ', 'SVMDECON', 'DeconRNASeq', 'proportionsInAdmixture', 'nnls' (DEFAULT: DCQ)
#' @export
#' @return A spillover matrix showing how purified cell types deconvolve
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' spillover <- buildSpilloverMat(refExpr=smallLM22, geneExpr=fullLM22, method='DCQ')
buildSpilloverMat <- function(refExpr, geneExpr, method='DCQ') {
if(any(grepl('\\.[0-9]+$', unique(colnames(geneExpr))))) {
print('###Note: Some of the column names in geneExpr end in a . followed by numbers###')
print('Different samples with the same cell types should have exactly the same column names')
print('')
}
olGenes <- rownames(refExpr)[rownames(refExpr) %in% rownames(geneExpr)]
refExpr <- refExpr[olGenes,]
failedGene1 <- apply(refExpr, 1, function(x){any(is.na(x))})
geneExpr <- geneExpr[olGenes,]
failedGene2 <- apply(geneExpr, 1, function(x){any(is.na(x))})
keepGenes <- !failedGene1 & !failedGene2
cellEst <- estCellPercent(refExpr=refExpr[keepGenes,], geneExpr=geneExpr[keepGenes,], method=method)
res <- res.bk <- apply(cellEst, 1, function(x) { tapply(x, colnames(cellEst), mean, na.rm=TRUE)})
res <- res[,colnames(res)!='others']
res <- res[order(toupper(rownames(res))),order(toupper(colnames(res)))]
res <- cbind(res, data.frame(others = res.bk[,'others']))
return(res)
}
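# Illustrative sketch (using 'spillover' from the @examples above): each row of the spillover
# matrix is a purified input cell type and each column is the reference cell type it deconvolves
# as, so a quick heatmap shows which cell types bleed into each other.
# pheatmap::pheatmap(spillover, cluster_rows=FALSE, cluster_cols=FALSE)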
#' Estimate cell percentage from spillover
#' @description Use a spillover matrix to deconvolve samples
#'
#' @param spillExpr A spill over matrix, as calculated by buildSpilloverMat(). (e.g. LM22.spillover.csv.gz)
#' @param refExpr a data frame representing immune cell expression profiles. Each row represents an expression of a gene, and each column represents a different immune cell type. colnames contains the name of each immune cell type and the rownames includes the genes' symbol. The names of each immune cell type and the symbol of each gene should be unique. Any gene with missing expression values must be excluded.
#' @param geneExpr a data frame representing RNA-seq or microarray gene-expression profiles of a given complex tissue. Each row represents an expression of a gene, and each column represents a different experimental sample. colnames contain the name of each sample and rownames includes the genes' symbol. The name of each individual sample and the symbol of each gene should be unique. Any gene with missing expression values should be excluded.
#' @param method One of 'DCQ', 'SVMDECON', 'DeconRNASeq', 'proportionsInAdmixture', 'nnls' (DEFAULT: DCQ)
#' @param ... Parameters for estCellPercent.X (e.g. number_of_repeats for .DCQ)
#' @export
#' @return a matrix of estimate cell type percentages in samples
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' spillover <- buildSpilloverMat(refExpr=smallLM22, geneExpr=fullLM22)
#' cellEst <- estCellPercent.spillOver(spillExpr=spillover, refExpr=smallLM22, geneExpr=fullLM22)
estCellPercent.spillOver <- function(spillExpr, refExpr, geneExpr, method='DCQ', ...) {
if(method == 'DCQ') {
estCellPercent.X <- estCellPercent.DCQ
} else if (method == 'SVMDECON') {
estCellPercent.X <- estCellPercent.svmdecon
} else if (method == 'DeconRNASeq') {
estCellPercent.X <- estCellPercent.DeconRNASeq
} else if (method == 'proportionsInAdmixture') {
estCellPercent.X <- estCellPercent.proportionsInAdmixture
} else if (method == 'nnls') {
estCellPercent.X <- estCellPercent.nnls
}
cellEst <- estCellPercent.X(refExpr=refExpr, geneExpr=geneExpr, ...)
cellEst2 <- estCellPercent.X(refExpr=t(spillExpr), geneExpr=cellEst, ...)
return(cellEst2)
}
#' DCQ Deconvolution
#' @description Use DCQ to estimate the cell count percentage
#' Requires installation of package 'ComICS'
#' To Do: Also report the standard deviation as a confidence metric
#'
#' @param refExpr a data frame representing immune cell expression profiles. Each row represents an expression of a gene, and each column represents a different immune cell type. colnames contains the name of each immune cell type and the rownames includes the genes' symbol. The names of each immune cell type and the symbol of each gene should be unique. Any gene with missing expression values must be excluded.
#' @param geneExpr a data frame representing RNA-seq or microarray gene-expression profiles of a given complex tissue. Each row represents an expression of a gene, and each column represents a different experimental sample. colnames contain the name of each sample and rownames includes the genes' symbol. The name of each individual sample and the symbol of each gene should be unique. Any gene with missing expression values should be excluded.
#' @param marker_set data frames of one column, that includes a preselected list of genes that likely discriminate well between the immune-cell types given in the reference data. (DEFAULT: NULL, i.e. one for each gene in the refExpr)
#' @param number_of_repeats using one repeat will generate only one output model. Using many repeats, DCQ calculates a collection of models, and outputs the average and standard deviation for each predicted relative cell quantity. (DEFAULT: 10)
#' @param alpha The elasticnet mixing parameter, with 0 <= alpha <= 1. alpha=1 is the lasso penalty, and alpha=0 the ridge penalty. (DEFAULT: 0.05)
#' @param lambda A minimum value for the elastic net lambda parameter (DEFAULT: 0.2)
#' @export
#' @return A matrix with cell type estimates for each sample
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' cellEst <- estCellPercent.DCQ(refExpr=smallLM22, geneExpr=fullLM22)
estCellPercent.DCQ <- function(refExpr, geneExpr, marker_set=NULL, number_of_repeats=10, alpha=0.05, lambda=0.2) {
if(any(is.na(geneExpr))) {
message('There are some NAs in geneExpr, please impute or remove NAs')
return(NULL)
}
if(is.null(marker_set)) {marker_set <- data.frame(marker_set=rownames(refExpr))}
if(ncol(geneExpr)==1) {geneExpr <- cbind(geneExpr, geneExpr); fixOneCol<-TRUE} else {fixOneCol<-FALSE}
suppressWarnings(sink(""))
cellCounts <- try(ComICS::dcq(reference_data = refExpr, mix_data = geneExpr, marker_set = marker_set, number_of_repeats=number_of_repeats,lambda_min = lambda,alpha_used = alpha))
sink()
if(inherits(cellCounts, 'try-error')) {return(cellCounts)}
#DCQ returns a list with two matrices: average quantities for each cell type, stdev over all repeats for each cell type
#Question for MM-010 data, how many cells?? Frank says 1-10 million
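  #DCQ can report negative cell-type weights; the magnitude of every negative weight is folded
  # into an 'others' row, the negatives are then zeroed out, and each column is rescaled so the
  # estimates sum to 100.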
cellCoefVar <- cellCoefVar.m <- t(cellCounts$stdev / cellCounts$average)
cellCountsPercent <- cellCountsPercent.m <- t(cellCounts$average*100)
cellCountsPercent.m[cellCountsPercent.m > 0] <- 0
cellCoefVar.m[cellCoefVar.m > 0] <- 0
others <- abs(colSums(cellCountsPercent.m))
others.m <- abs(colMeans(cellCoefVar.m, na.rm=TRUE))
cellCountsPercent[cellCountsPercent < 0] <- 0
cellCountsPercent <- rbind(cellCountsPercent, others)
cellCountsPercent <- round(apply(cellCountsPercent, 2, function(x){100*x/sum(x)}),2)
cellCoefVar <- rbind(cellCoefVar, others.m)
cellCoefVar[is.na(cellCoefVar)] <- 0
cellCountsPercent.std <- cellCoefVar * cellCountsPercent
#Note: cellCountsPercent.std is very small. This DCQ error is not good enough for the estimate
if(fixOneCol==TRUE) {
cellCountsPercent <- cellCountsPercent[,1,drop=FALSE]
}
return (cellCountsPercent)
}
#' SVMDECON deconvolution
#' @description Use SVMDECON to estimate the cell count percentage
#' Performs considerably worse in deconvolution than DCQ
#'
#' cellEst <- estCellPercent.svmdecon(refExpr, geneExpr)
#'
#' @param refExpr a data frame representing immune cell expression profiles. Each row represents an expression of a gene, and each column represents a different immune cell type. colnames contains the name of each immune cell type and the rownames includes the genes' symbol. The names of each immune cell type and the symbol of each gene should be unique. Any gene with missing expression values must be excluded.
#' @param geneExpr a data frame representing RNA-seq or microarray gene-expression profiles of a given complex tissue. Each row represents an expression of a gene, and each column represents a different experimental sample. colnames contain the name of each sample and rownames includes the genes' symbol. The name of each individual sample and the symbol of each gene should be unique. Any gene with missing expression values should be excluded.
#' @param marker_set data frames of one column, that includes a preselected list of genes that likely discriminate well between the immune-cell types given in the reference data. (DEFAULT: NULL, i.e. one for each gene in the refExpr)
#' @param useOldVersion Set to TRUE to skip the 2^ transformation of the data (DEFAULT: FALSE)
#' @param progressBar Set to TRUE to show a progress bar (DEFAULT: TRUE)
#' @export
#' @return A matrix with cell type estimates for each sample
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' cellEst <- estCellPercent.svmdecon(refExpr=smallLM22, geneExpr=fullLM22)
estCellPercent.svmdecon <- function(refExpr, geneExpr, marker_set=NULL, useOldVersion=F,progressBar = T) {
if(is.null(marker_set)) {marker_set <- data.frame(marker_set=rownames(refExpr))}
#if(ncol(geneExpr)==1) {geneExpr <- cbind(geneExpr, geneExpr)}
refExprMatrix <- as.matrix(refExpr)
refGenes <- rownames(refExprMatrix)[rownames(refExprMatrix) %in% rownames(geneExpr)]
refExprMatrix <- refExprMatrix[refGenes,]
geneExpr <- geneExpr[refGenes,,drop=FALSE]
if(useOldVersion == F){
geneExpr <- 2^geneExpr
refExprMatrix <- 2^refExprMatrix
}
proportions <- matrix(nrow=ncol(refExprMatrix), ncol=ncol(geneExpr))
for (column in 1:ncol(geneExpr)) {
# SVMDECON returns the estimated proportions of each cell-type in the given sample
dataCol <- as.matrix(geneExpr[,column,drop=FALSE])
proportions[, column] <- t(SVMDECON(dataCol, refExprMatrix))
}
others <- numeric(ncol(proportions))
proportions <- rbind(proportions, others)
colnames(proportions) <- colnames(geneExpr)
rownames(proportions) <- c(colnames(refExprMatrix), "others")
cellCountsPercent <- round(proportions * 100, 2)
return(cellCountsPercent)
}
#' DeconRNASeq deconvolution
#' @description Use DeconRNASeq to estimate the cell count percentage
#' Performs with similar effectiveness as DCQ, but identifies different proportions of cell-types
#' Requires installation of package 'DeconRNASeq':
#' source("https://bioconductor.org/biocLite.R")
#' biocLite("DeconRNASeq")
#'
#' Ting Gong and Joseph D. Szustakowski (2013). DeconRNASeq: Deconvolution of Heterogeneous Tissue Samples for mRNA-Seq data. R package version 1.18.0.
#'
#' cellEst <- estCellPercent.DeconRNASeq(refExpr, geneExpr, marker_set=NULL)
#'
#' @param refExpr a data frame representing immune cell expression profiles. Each row represents an expression of a gene, and each column represents a different immune cell type. colnames contains the name of each immune cell type and the rownames includes the genes' symbol. The names of each immune cell type and the symbol of each gene should be unique. Any gene with missing expression values must be excluded.
#' @param geneExpr a data frame representing RNA-seq or microarray gene-expression profiles of a given complex tissue. Each row represents an expression of a gene, and each column represents a different experimental sample. colnames contain the name of each sample and rownames includes the genes' symbol. The name of each individual sample and the symbol of each gene should be unique. Any gene with missing expression values should be excluded.
#' @param marker_set data frames of one column, that includes a preselected list of genes that likely discriminate well between the immune-cell types given in the reference data. (DEFAULT: NULL, i.e. one for each gene in the refExpr)
#' @export
#' @return A matrix with cell type estimates for each sample
#' @examples
#' \donttest{
#' #This toy example, donttest due to performance issues in windows development build
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' cellEst <- estCellPercent.DeconRNASeq(refExpr=smallLM22, geneExpr=fullLM22)
#' }
estCellPercent.DeconRNASeq <- function(refExpr, geneExpr, marker_set=NULL) {
if(!'DeconRNASeq' %in% rownames(utils::installed.packages()) | !exists('DeconRNASeq')) {
message('estCellPercent.DeconRNASeq requires DeconRNASeq')
message('https://www.bioconductor.org/packages/release/bioc/html/DeconRNASeq.html')
message('Run library(DeconRNASeq) after installation')
return(NULL)
}
if(!exists('DeconRNASeq')) {
DeconRNASeq <- function(datasets = NULL, signatures=NULL) { stop('DeconRNASeq not loaded') }
}
if(is.null(marker_set)) {marker_set <- data.frame(marker_set=rownames(refExpr))}
if(ncol(geneExpr)==1) {geneExpr <- cbind(geneExpr, geneExpr)}
pca <- pcaMethods::pca #Something has clobbered PCA
curDecon <- try(DeconRNASeq(datasets=as.data.frame(geneExpr), signatures=refExpr))
if(inherits(curDecon, 'try-error')) {
message('Please update all packages called by DeconRNASeq')
return(NULL)
}
cellProportions <- t(curDecon$out.all)
cellCountsPercent <- round(cellProportions * 100, 2)
others <- numeric(ncol(cellCountsPercent))
cellCountsPercent <- rbind(cellCountsPercent, others)
colnames(cellCountsPercent) <- colnames(geneExpr)
rownames(cellCountsPercent) <- c(colnames(refExpr), "others")
return (cellCountsPercent)
}
#' WGCNA::proportionsInAdmixture deconvolution
#' @description Use R function proportionsInAdmixture to estimate the cell count percentage
#' Uses the 'WGCNA' package
#'
#' cellEst <- estCellPercent.proportionsInAdmixture(refExpr)
#'
#' @param refExpr a data frame representing immune cell expression profiles. Each row represents an expression of a gene, and each column represents a different immune cell type. colnames contains the name of each immune cell type and the rownames includes the genes' symbol. The names of each immune cell type and the symbol of each gene should be unique. Any gene with missing expression values must be excluded.
#' @param geneExpr a data frame representing RNA-seq or microarray gene-expression profiles of a given complex tissue. Each row represents an expression of a gene, and each column represents a different experimental sample. colnames contain the name of each sample and rownames includes the genes' symbol. The name of each individual sample and the symbol of each gene should be unique. Any gene with missing expression values should be excluded.
#' @param marker_set data frames of one column, that includes a preselected list of genes that likely discriminate well between the immune-cell types given in the reference data. (DEFAULT: NULL, i.e. one for each gene in the refExpr)
#' @export
#' @return A matrix with cell type estimates for each sample
#' @examples
#' \donttest{
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' cellEst <- estCellPercent.proportionsInAdmixture(refExpr=smallLM22, geneExpr=fullLM22)
#' }
estCellPercent.proportionsInAdmixture <- function(refExpr, geneExpr, marker_set=NULL) {
if(!'WGCNA' %in% rownames(utils::installed.packages()) | !exists('proportionsInAdmixture')) {
message('WGCNA required for proportionsInAdmixture deconvolution')
message('https://cran.r-project.org/web/packages/WGCNA/')
message('Run library(WGCNA) after installation')
return(NULL)
}
if(!exists('proportionsInAdmixture')) {
proportionsInAdmixture <- function(MarkerMeansPure = NULL, datE.Admixture=NULL) { stop('proportionsInAdmixture not loaded') }
}
if(is.null(marker_set)) {marker_set <- data.frame(marker_set=rownames(refExpr))}
if(ncol(geneExpr)==1) {geneExpr <- cbind(geneExpr, geneExpr)}
# Filter the gene expression matrix and reference expression matrix to only having the same genes
refExprMatrix <- as.matrix(refExpr)
refGenes <- rownames(refExprMatrix)[rownames(refExprMatrix) %in% rownames(geneExpr)]
refExprMatrix <- refExprMatrix[refGenes,]
geneExprMatrix <- geneExpr[refGenes,]
# Changing the input matrices into relevant formats (data frames of corresponding sizes)
refExprDF <- as.data.frame(refExprMatrix)
refExprDF <- cbind(rownames(refExprDF), refExprDF)
geneExprDF <- as.data.frame(t(geneExprMatrix))
# function proportionsInAdmixture takes in a data frame whose first column reports the gene names and remaining columns report
# the gene expression in specific cell types, and a data frame whose rows represent each sample and columns represent the gene expression.
# proportionInAdmixture returns a list where rows are samples and columns are cell types
proportionList <- try(proportionsInAdmixture(MarkerMeansPure = refExprDF, datE.Admixture = geneExprDF))
if(inherits(proportionList, 'try-error')) { message('proportionsInAdmixture failed'); return(NULL); }
proportionList <- proportionList["PredictedProportions"]
proportionMatrix <- t(matrix(unlist(proportionList), ncol = ncol(refExprMatrix), byrow = FALSE))
cellCountsPercent <- round(proportionMatrix * 100, 2)
others <- numeric(ncol(cellCountsPercent))
cellCountsPercent <- rbind(cellCountsPercent, others)
colnames(cellCountsPercent) <- colnames(geneExprMatrix)
rownames(cellCountsPercent) <- c(colnames(refExprMatrix), "others")
return (cellCountsPercent)
}
#' Non-negative least squares deconvolution
#' @description Use non-negative least squares regression to deconvolve a sample
#' This is going to be too simple to be useful
#' This might be more interesting if I used non-positive least squares to detect 'other'
#'
#' @param refExpr a data frame representing immune cell expression profiles. Each row represents an expression of a gene, and each column represents a different immune cell type. colnames contains the name of each immune cell type and the rownames includes the genes' symbol. The names of each immune cell type and the symbol of each gene should be unique. Any gene with missing expression values must be excluded.
#' @param geneExpr a data frame representing RNA-seq or microarray gene-expression profiles of a given complex tissue. Each row represents an expression of a gene, and each column represents a different experimental sample. colnames contain the name of each sample and rownames includes the genes' symbol. The name of each individual sample and the symbol of each gene should be unique. Any gene with missing expression values should be excluded.
#' @export
#' @return A matrix with cell type estimates for each sample
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' cellEst <- estCellPercent.nnls(refExpr=smallLM22, geneExpr=fullLM22)
estCellPercent.nnls <- function(refExpr, geneExpr) {
marker_set <- data.frame(marker_set=rownames(refExpr))
#if(ncol(geneExpr)==1) {geneExpr <- cbind(geneExpr, geneExpr)}
refExprMatrix <- as.matrix(refExpr)
refGenes <- rownames(refExprMatrix)[rownames(refExprMatrix) %in% rownames(geneExpr)]
refExprMatrix <- refExprMatrix[refGenes,,drop=FALSE]
geneExpr <- geneExpr[refGenes,,drop=FALSE]
proportions <- matrix(nrow=ncol(refExprMatrix), ncol=ncol(geneExpr))
for (column in 1:ncol(geneExpr)) {
    # nnls returns the estimated proportions of each cell-type in the given sample
dataCol <- as.matrix(geneExpr[,column,drop=FALSE])
reg <- nnls::nnls(refExprMatrix, dataCol)
curDec <- reg$x
names(curDec) <- colnames(refExprMatrix)
proportions[, column] <- curDec
}
others <- numeric(ncol(proportions))
proportions <- rbind(proportions, others)
colnames(proportions) <- colnames(geneExpr)
rownames(proportions) <- c(colnames(refExprMatrix), "others")
cellCountsPercent <- round(proportions * 100, 2)
return(cellCountsPercent)
}
#' SVMDECONV helper function
#' @description Use weightNorm to normalize the SVM weights. Used for SVMDECONV
#'
#' w1 <- weightNorm(w)
#'
#' @param w The weight vector from fitting an SVM, something like t(fit1$coefs) \%*\% fit1$SV, where fit1 comes from fit1 <- svm(m~B, nu=0.25, kernel="linear")
#' @return a weight vector
weightNorm <- function(w) {
w[w<0] <- 0
return(w/sum(w))
}
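# Worked sketch: negative SVM weights are clipped to zero before normalizing, e.g.
# weightNorm(c(-0.2, 0.3, 0.7)) returns c(0, 0.3, 0.7) since the remaining positive
# weights already sum to 1.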
#' Support vector machine deconvolution
#' @description Use SVMDECONV to estimate the cell count percentage
#' David L Gibbs, dgibbs@systemsbiology.org
#' June 9, 2017
#'
#' v-SVR is applied with a linear kernel to solve for f,
#' and the best result from three values of v = {0.25, 0.5, 0.75}
#' is saved, where ‘best’ is defined as the lowest root mean squared error
#' between m and the deconvolution result, f x B.
#'
#' Our current implementation executes v-SVR using the
#' ‘svm’ function in the R package, ‘e1071’.
#'
#' w2 <- SVMDECON(m, B)
#'
#' @param m a matrix represenging the mixture (genes X 1 sample)
#' @param B a matrix representing the references (genes X cells), m should be subset to match B
#'
#' @return A normalized weight vector with one cell-fraction estimate per reference cell type
SVMDECON <- function(m,B) {
# three models are fit with different values of nu
fit1 <- e1071::svm(m~B, nu=0.25, kernel="linear", scale=T, type="nu-regression")
fit2 <- e1071::svm(m~B, nu=0.50, kernel="linear", scale=T, type="nu-regression")
fit3 <- e1071::svm(m~B, nu=0.75, kernel="linear", scale=T, type="nu-regression")
# these w's are the cell fractions
w1 <- weightNorm(t(fit1$coefs) %*% fit1$SV)
w2 <- weightNorm(t(fit2$coefs) %*% fit2$SV)
w3 <- weightNorm(t(fit3$coefs) %*% fit3$SV)
# return the model with the smallest mean sq error
err1 <- sqrt( sum( (m - B %*% t(w1))^2 )/nrow(m) )
err2 <- sqrt( sum( (m - B %*% t(w2))^2 )/nrow(m) )
err3 <- sqrt( sum( (m - B %*% t(w3))^2 )/nrow(m) )
resIdx <- which(c(err1,err2,err3) == min(c(err1,err2,err3)))[1]
return(list(w1,w2,w3)[[resIdx]])
}
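# Illustrative sketch (hypothetical objects): SVMDECON() expects a single-sample mixture 'm'
# (genes x 1) and a reference matrix 'B' (genes x cells) restricted to the same genes;
# estCellPercent.svmdecon() above wraps this call per sample and is normally the easier entry point.
# B <- as.matrix(ADAPTS::LM22[1:30, 1:4])
# m <- B[, 1, drop=FALSE] # a 'mixture' that is 100% the first reference cell type
# SVMDECON(m, B)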
#' Collapse cell types
#' @description Collapse the cell types (in rows) to super-classes
#' Including MGSM36 cell types
#'
#' @param cellCounts A matrix with cell counts
#' @param method The method for combining cell types (DEFAULT: 'Pheno4')
#' Pheno1: Original cell-type based combinations
#' Pheno2: Original cell-type based combinations, omitting Macrophages
#' Pheno3: Alt Phenotype definitions based on WMB deconvolution correlations
#' Pheno4: Consensus cell types
#' Pheno5: Consensus cell types, combined myeloma & plasma
#' Spillover1: Empirical combinations based on compToLM22source
#' Spillover2: More agressive combination based on empirical combinations based on compToLM22source
#' Spillover3: Combinations determined by spillToConvergence on 36 cell types
#' @export
#' @return a cell estimate matrix with the names changed
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' cellEst <- estCellPercent.DCQ(refExpr=smallLM22, geneExpr=fullLM22)
#' collapseCounts <- collapseCellTypes(cellCounts=cellEst)
collapseCellTypes <- function(cellCounts, method='Pheno4') {
if(method == "Pheno1" | method == 'Pheno2') {
combList <- list(MastCells=c('Mast.cells.activated', 'Mast.cells.resting'),
NKcells=c('NK.cells.activated', 'NK.cells.resting'),
HealthyBcells=c('B.cells.memory', 'B.cells.naive'),
CD8s=c('T.cells.CD8'),
CD4s=c('T.cells.CD4.memory.resting', 'T.cells.CD4.memory.activated', 'T.cells.CD4.naive',
'T.cells.follicular.helper', 'T.cells.regulatory..Tregs.'),
DendriticCells=c('Dendritic.cells.resting', 'Dendritic.cells.activated'),
Myeloma=c('CustomMM', 'PlasmaMemory', 'Plasma.cells', 'MM.plasma.cell'),
BMFibroblast=c('BMFibroblast','uamsBMFibroblast')
)
}
if(method == "Pheno1" | method == 'Pheno2') {
newList <- list(Macrophages=c('Macrophages.M0', 'Macrophages.M1', 'Macrophages.M2'))
combList <- c(combList, newList)
}
if(method == 'Pheno3') {
combList <- list(MastCells=c('Mast.cells.activated', 'Mast.cells.resting'),
NKcells=c('NK.cells.activated', 'NK.cells.resting'),
HealthyBcells=c('B.cells.memory', 'B.cells.naive'),
CD8s=c('T.cells.CD8'),
CD4s=c('T.cells.CD4.memory.resting', 'T.cells.CD4.naive',
'T.cells.follicular.helper', 'T.cells.regulatory..Tregs.'), #Keep 'T.cells.CD4.memory.activated' separate
DendriticCells=c('Dendritic.cells.resting', 'Dendritic.cells.activated'),
PlasmaCellMemory=c('PlasmaMemory', 'Plasma.cells'),
Myeloma=c('CustomMM', 'MM.plasma.cell'),
MonocyteNeutrophil=c('Monocytes', 'Neutrophils'),
BMFibroblast=c('BMFibroblast','uamsBMFibroblast')
)
}
if(method == 'Pheno4') {
combList <- list(MastCells=c('Mast.cells.activated', 'Mast.cells.resting'),
NKcells=c('NK.cells.activated', 'NK.cells.resting'),
Bcells=c('B.cells.memory', 'B.cells.naive'),
CD8s=c('T.cells.CD8'),
CD4s=c('T.cells.CD4.memory.resting', 'T.cells.CD4.naive', 'T.cells.CD4.memory.activated',
'T.cells.follicular.helper', 'T.cells.regulatory..Tregs.'),
DendriticCells=c('Dendritic.cells.resting', 'Dendritic.cells.activated'),
HealthyPlasmaCell=c('PlasmaMemory', 'Plasma.cells'),
Myeloma=c('CustomMM', 'MM.plasma.cell'),
BMFibroblast=c('BMFibroblast','uamsBMFibroblast')
)
}
if(method == 'Pheno5') {
combList <- list(MastCells=c('Mast.cells.activated', 'Mast.cells.resting'),
NKcells=c('NK.cells.activated', 'NK.cells.resting'),
Bcells=c('B.cells.memory', 'B.cells.naive'),
CD8s=c('T.cells.CD8'),
CD4s=c('T.cells.CD4.memory.resting', 'T.cells.CD4.naive', 'T.cells.CD4.memory.activated',
'T.cells.follicular.helper', 'T.cells.regulatory..Tregs.'),
DendriticCells=c('Dendritic.cells.resting', 'Dendritic.cells.activated'),
Myeloma=c('CustomMM', 'MM.plasma.cell', 'PlasmaMemory', 'Plasma.cells'),
BMFibroblast=c('BMFibroblast','uamsBMFibroblast')
)
}
if(method == 'Spillover1') {
#Comb B-cells
#Comb DC.act & M1
#Comb DC.rest & M0 & M2
#Comb Mast
#Comb Mono/Neutro
#Comb CD4.resting, CD4.naive, FHs, Tregs
#Comb NKs
#Plasma Cells alone
#CD4.activated alone
#Leave Eo alone
#Leave CD8s alone
#Leave gdTs alone
combList <- list(MastCells=c('Mast.cells.activated', 'Mast.cells.resting'),
NKcells=c('NK.cells.activated', 'NK.cells.resting'),
HealthyBcells=c('B.cells.memory', 'B.cells.naive'),
MonocyteNeutrophil=c('Monocytes', 'Neutrophils'),
APC.activated=c('Dendritic.cells.activated', 'Macrophages.M1'),
APC.inactive=c('Dendritic.cells.resting', 'Macrophages.M0', 'Macrophages.M2'),
CD8s=c('T.cells.CD8'),
CD4s.MemoryActivated= c('T.cells.CD4.memory.activated'),
CD4s.others=c('T.cells.CD4.memory.resting', 'T.cells.CD4.naive',
'T.cells.follicular.helper', 'T.cells.regulatory..Tregs.'), #Keep separate
PlasmaCellMemory=c('PlasmaMemory', 'Plasma.cells'),
Myeloma=c('CustomMM', 'MM.plasma.cell'),
BMFibroblast=c('BMFibroblast','uamsBMFibroblast')
)
}
if(method == 'Spillover2') {
    # Eosinophils alone
# Monocytes+Neutrophils
# Mast.cells.activated/resting
# M1 macrophages & activated dendritic cells
# Adipocyte / Osteoblast
# B.cells memory/naive
# cd138p/PlasmaMemory/Plasma.cell/MM.plasma.cell
# T.cells.gamma.delta
# NK.cells act/resting
# osteoclast/M2/resting dendritic/M0
# CD8s
# Treg / TcelFH / memory resting / naive
combList <- list(Eosinophils=c('Eosinophils'),
MonocyteNeutrophil=c('Monocytes', 'Neutrophils'),
MastCells=c('Mast.cells.activated', 'Mast.cells.resting'),
APC.activated=c('Dendritic.cells.activated', 'Macrophages.M1'),
AdipoOsteo=c('adipocyte', 'osteoblast'),
HealthyBcells=c('B.cells.memory', 'B.cells.naive'),
Myeloma=c('CustomMM', 'MM.plasma.cell', 'PlasmaMemory', 'Plasma.cells'),
GammaDeltaT=c('T.cells.gamma.delta'),
NKcells=c('NK.cells.activated', 'NK.cells.resting'),
APC.inactive=c('osteoblast', 'Dendritic.cells.resting', 'Macrophages.M0', 'Macrophages.M2'),
CD8s=c('T.cells.CD8'),
CD4s.MemoryActivated= c('T.cells.CD4.memory.activated'),
CD4s.others=c('T.cells.CD4.memory.resting', 'T.cells.CD4.naive',
'T.cells.follicular.helper', 'T.cells.regulatory..Tregs.'), #Keep separate
BMFibroblast=c('BMFibroblast','uamsBMFibroblast')
) #combList
}
if(method == 'Spillover3') {
#MDSCs and CAFs should be left alone
combList <- list(EosinoMegakaryoEryth=c('Eosinophils','Megakaryocyte','Erythroid'),
MonocyteNeutrophil=c('Monocytes', 'Neutrophils'),
MastCells=c('Mast.cells.activated', 'Mast.cells.resting'),
APC.activated=c('Dendritic.cells.activated', 'Macrophages.M1'),
APC.resting=c('Dendritic.cells.resting', 'Macrophages.M0', 'Macrophages.M2', 'osteoclast'),
AdipoLym=c('adipocyte', 'LymEndothelial'),
FibroOsteoBlast = c('uamsBMFibroblast','BMFibroblast','osteoblast'),
HealthyBcells=c('B.cells.memory', 'B.cells.naive', 'Plasma.cells'),
Myeloma=c('CustomMM', 'MM.plasma.cell', 'PlasmaMemory', 'Haemetopoetic'),
GammaDeltaT.CD4naive.FH=c('T.cells.gamma.delta','T.cells.CD4.naive', 'T.cells.follicular.helper'),
NKcells=c('NK.cells.activated', 'NK.cells.resting'),
CD8s.CD4sOthers=c('T.cells.CD8', 'T.cells.CD4.memory.resting', 'T.cells.CD4.memory.activated', 'T.cells.regulatory..Tregs.')
) #combList
}
countMatrix <- cellCounts[!grepl('_', rownames(cellCounts)),]
#Add tumor & make it optional (DEFAULT to collapse to these types)
countMatrix.comT <- countMatrix
for(combCell in names(combList)) {
curTypes <- combList[[combCell]]
curTypes <- curTypes[curTypes %in% rownames(countMatrix)]
if(length(curTypes) == 0) { next; }
combCounts <- colSums(countMatrix[curTypes,,drop=FALSE])
newDF <- data.frame(newData=combCounts)
colnames(newDF) <- combCell
countMatrix.comT <- rbind(countMatrix.comT[!(rownames(countMatrix.comT) %in% curTypes),], t(newDF))
}
countMatrix.comT <- countMatrix.comT[order(toupper(rownames(countMatrix.comT))),]
countMatrix.comT <- rbind(countMatrix.comT, cellCounts[grepl('_', rownames(cellCounts)),])
return(countMatrix.comT)
}
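# Illustrative sketch (guarded with if(FALSE) so it never runs at package load): the same
# toy reference used in the roxygen example above, but collapsed with the 'Spillover2'
# grouping instead of the default 'Pheno4'. All sizes and choices here are arbitrary and
# only demonstrate the calling pattern.
if (FALSE) {
  fullLM22 <- ADAPTS::LM22[1:30, 1:4]
  smallLM22 <- fullLM22[1:25, ]
  cellEst <- estCellPercent.DCQ(refExpr=smallLM22, geneExpr=fullLM22)
  collapsedSpill <- collapseCellTypes(cellCounts=cellEst, method='Spillover2')
}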
#' Wrapper for deconvolution methods
#' @description A wrapper function to call any of the estCellPercent functions
#' Modified on June 16th 2021 to quantile normalize the geneExpr data to match refExpr
#' Set preNormalize to FALSE for previous behavior.
#'
#' @param refExpr a data frame representing immune cell expression profiles. Each row represents an expression of a gene, and each column represents a different immune cell type. colnames contains the name of each immune cell type and the rownames includes the genes' symbol. The names of each immune cell type and the symbol of each gene should be unique. Any gene with missing expression values must be excluded.
#' @param geneExpr a data frame representing RNA-seq or microarray gene-expression profiles of a given complex tissue. Each row represents an expression of a gene, and each column represents a different experimental sample. colnames contain the name of each sample and rownames includes the genes' symbol. The name of each individual sample and the symbol of each gene should be unique. Any gene with missing expression values should be excluded.
#' @param method One of 'DCQ', 'SVMDECON', 'DeconRNASeq', 'proportionsInAdmixture', 'nnls' (DEFAULT: DCQ)
#' @param preNormalize Set to TRUE to quantile normalize geneExpr to match refExpr (DEFAULT: TRUE)
#' @param verbose Set to TRUE to echo the results of parameters (DEFAULT: TRUE)
#' @param ... Parameters for estCellPercent.X (e.g. number_of_repeats for .DCQ)
#' @export
#' @return A matrix with cell type estimates for each samples
#' @examples
#' #This toy example
#' library(ADAPTS)
#' fullLM22 <- ADAPTS::LM22[1:30, 1:4]
#' smallLM22 <- fullLM22[1:25,]
#'
#' cellEst <- estCellPercent(refExpr=smallLM22, geneExpr=fullLM22, preNormalize=FALSE, verbose=TRUE)
#'
estCellPercent <- function(refExpr, geneExpr, preNormalize=TRUE, verbose=TRUE, method='DCQ', ...) {
if (preNormalize == TRUE) {
if(verbose==TRUE) {message('Quantile Normalizing geneExpr to match refExpr')}
rns <- rownames(geneExpr)
cns <- colnames(geneExpr)
geneExpr <- preprocessCore::normalize.quantiles.use.target(as.matrix(geneExpr), as.numeric(as.matrix(refExpr)))
rownames(geneExpr) <- rns
colnames(geneExpr) <- cns
}
if(verbose==TRUE) {message(paste('Setting method to:', method))}
if(method == 'DCQ') {
cellEst <- estCellPercent.DCQ(refExpr = refExpr, geneExpr=geneExpr, ...)
} else if (method == 'SVMDECON') {
cellEst <- estCellPercent.svmdecon(refExpr = refExpr, geneExpr=geneExpr, ...)
} else if (method == 'DeconRNASeq') {
cellEst <- estCellPercent.DeconRNASeq(refExpr = refExpr, geneExpr=geneExpr, marker_set=NULL, ...)
} else if (method == 'proportionsInAdmixture') {
cellEst <- estCellPercent.proportionsInAdmixture(refExpr = refExpr, geneExpr=geneExpr, marker_set=NULL, ...)
} else if (method == 'nnls') {
cellEst <- estCellPercent.nnls(refExpr = refExpr, geneExpr=geneExpr, ...)
}
return(cellEst)
}
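# Illustrative sketch (if(FALSE)-guarded): calling the wrapper with a different
# deconvolution engine and with quantile pre-normalization turned off. The toy matrices
# mirror the roxygen example; method='nnls' is just one of the supported options listed
# above.
if (FALSE) {
  fullLM22 <- ADAPTS::LM22[1:30, 1:4]
  smallLM22 <- fullLM22[1:25, ]
  cellEst.nnls <- estCellPercent(refExpr=smallLM22, geneExpr=fullLM22,
                                 method='nnls', preNormalize=FALSE, verbose=FALSE)
}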
| /scratch/gouwar.j/cran-all/cranData/ADAPTS/R/onlyDeconAlgorithms.R |
#' Build a deconvolution seed matrix, add the proportional option
#' @description Use ranger to select features and build a genesInSeed gene matrix
#'
#' @param trainSet Each row is a gene, and each column is an example of a particular cell type, ie from single cell data
#' @param genesInSeed The maximum number of genes in the returned seed matrix (DEFAULT: 200)
#' @param groupSize The number of groups to break the trainSet into by ADAPTS::scSample (DEFAULT: 30)
#' @param randomize Set to TRUE to randomize the sets selected by ADAPTS::scSample (DEFAULT: TRUE)
#' @param num.trees The number of trees to be used by ranger (DEFAULT: 1000)
#' @param plotIt Set to TRUE to plot (DEFAULT: TRUE)
#' @param trainSet.3sam Optional pre-calculated ADAPTS::scSample(trainSet, groupSize = 3) (DEFAULT: NULL)
#' @param trainSet.30sam Optional pre-calculated ADAPTS::scSample(trainSet, groupSize=groupSize, randomize=randomize) (DEFAULT: NULL)
#' @param proportional Set to TRUE to make the training set cell type proportional. Ignores group size (DEFAULT: FALSE)
#'
#' @export
#' @return A list with condition numbers and gene lists
#' @examples
#' library(ADAPTS)
#' ct1 <- runif(1000, 0, 100)
#' ct2 <- runif(1000, 0, 100)
#' dataMat <- cbind(ct1, ct1, ct1, ct1, ct1, ct1, ct2, ct2, ct2, ct2)
#' rownames(dataMat) <- make.names(rep('gene', nrow(dataMat)), unique=TRUE)
#' noise <- matrix(runif(nrow(dataMat)*ncol(dataMat), -2, 2), nrow = nrow(dataMat), byrow = TRUE)
#' dataMat <- dataMat + noise
#' newSigMat <- buildSeed(trainSet=dataMat)
#'
buildSeed <- function(trainSet, genesInSeed=200, groupSize=30, randomize=TRUE, num.trees=1000, plotIt=TRUE, trainSet.3sam=NULL, trainSet.30sam=NULL, proportional=FALSE) {
if(is.null(trainSet.3sam)) {trainSet.3sam <- ADAPTS::scSample(RNAcounts = trainSet, groupSize = 3, randomize = randomize)}
if (proportional==TRUE) {
#colnames(trainSet) <- sub('\\.[0-9]+$', '', colnames(trainSet))
tsNames <- sub('\\.[0-9]+$', '', colnames(trainSet))
cellProps <- table(tsNames)
cellSampleCounts <- 3*ceiling(100*cellProps / sum(cellProps))
trainList <- list()
for (curCount in unique(cellSampleCounts)) {
curClusts <- names(cellSampleCounts)[cellSampleCounts==curCount]
trainList[[as.character(curCount)]] <- ADAPTS::scSample(RNAcounts = trainSet[, tsNames %in% curClusts], groupSize = curCount, randomize = randomize)
}
trainSet.30sam <- do.call(cbind, trainList)
} else {
if(is.null(trainSet.30sam)) {trainSet.30sam <- ADAPTS::scSample(RNAcounts = trainSet, groupSize = groupSize, randomize = randomize)}
}
clusterIDs <- factor(colnames(trainSet.30sam))
trainSet.4reg <- t(trainSet.30sam)
rf1 <- ranger::ranger(x=trainSet.4reg, y=clusterIDs, num.trees=num.trees, importance='impurity')
imp <- ranger::importance(rf1)
imp <- sort(imp[imp>0], decreasing = TRUE)
topGenes <- names(imp)[1:min(genesInSeed, length(imp))]
#topGenes[!topGenes %in% rownames(trainSet.3sam)]
seedMat <- trainSet.3sam[rownames(trainSet.3sam) %in% topGenes,]
cellTypes <- sub('\\.[0-9]+$', '', colnames(seedMat))
seedMat <- t(apply(seedMat, 1, function(x){tapply(x, cellTypes, mean, na.rm=TRUE)}))
  if(plotIt==TRUE) {pheatmap::pheatmap(seedMat, main=paste('Seed Matrix','\n# Cell Types:',ncol(seedMat),'| # Genes:',nrow(seedMat))) }
return(seedMat)
}
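# Illustrative sketch (if(FALSE)-guarded): pre-computing the scSample() subsets once and
# passing them in through the trainSet.3sam/trainSet.30sam arguments above, which is useful
# when buildSeed() is called repeatedly on the same training data. The data are the same toy
# two-cell-type matrix as the roxygen example; genesInSeed=50 is an arbitrary choice.
if (FALSE) {
  ct1 <- runif(1000, 0, 100)
  ct2 <- runif(1000, 0, 100)
  dataMat <- cbind(ct1, ct1, ct1, ct1, ct1, ct1, ct2, ct2, ct2, ct2)
  rownames(dataMat) <- make.names(rep('gene', nrow(dataMat)), unique=TRUE)
  pre3  <- ADAPTS::scSample(RNAcounts=dataMat, groupSize=3,  randomize=TRUE)
  pre30 <- ADAPTS::scSample(RNAcounts=dataMat, groupSize=30, randomize=TRUE)
  seedMat <- buildSeed(trainSet=dataMat, genesInSeed=50, plotIt=FALSE,
                       trainSet.3sam=pre3, trainSet.30sam=pre30)
}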
#' Generate all the signature matrices one time with the option to leave out half of the data as a test set
#' @description This wrapper is helpful for repetitively matrix generation. It generates seed matrix, all-gene matrix, augmented matrix, shrunk matrix,
#' and all the clustered matrices in one call.
#'
#' @param exprData The gene expression data. Each row is a gene, and each column is an example of a particular cell type.
#' @param randomize Set to TRUE to randomize the sets selected by ADAPTS::scSample (DEFAULT: TRUE)
#' @param skipShrink Set to TRUE to skip shrinking the signature matrix (DEFAULT: FALSE)
#' @param proportional Set to TRUE to make the training set cell type proportional. Ignores group size (DEFAULT: FALSE)
#' @param handMetaCluster A list of pre-defined meta clusters. Set to NULL to automatically group indistinguishable cells
#'  into the same cluster using clustWspillOver (DEFAULT: NULL)
#' @param testOnHalf Set to TRUE to leave half the data as a test set
#' @param condTol The tolerance in the reconstruction algorithm. 1.0 = no tolerance, 1.05 = 5\% tolerance (DEFAULT: 1.01)
#' @param numChunks The number of groups of genes to remove while shrinking (DEFAULT: 100)
#' @param plotIt Set to FALSE to suppress plots (DEFAULT: TRUE)
#' @param fastStop Halt early when the condition number changes by less than 1 for 3 iterations (DEFAULT: TRUE)
#' @param singleCore TRUE for a single core (DEFAULT: TRUE)
#'
#' @export
#' @return A list of results including prediction accuracy and cell enrichment
#'
#' @examples
#' ct1 <- runif(1000, 0, 100)
#' ct2 <- runif(1000, 0, 100)
#' ct3 <- runif(1000, 0, 100)
#' ct4 <- runif(1000, 0, 100)
#' dataMat <- cbind(ct1, ct1, ct1, ct1, ct1, ct1, ct2, ct2, ct2, ct2, ct3, ct3, ct3,ct3,ct4,ct4)
#' rownames(dataMat) <- make.names(rep('gene', nrow(dataMat)), unique=TRUE)
#' noise <- matrix(runif(nrow(dataMat)*ncol(dataMat), -2, 2), nrow = nrow(dataMat), byrow = TRUE)
#' dataMat <- dataMat + noise
#' metaList <- list()
#' colnames(dataMat) <- sub('\\..*','', colnames(dataMat))
#' metaList[[1]] <- c(unique(colnames(dataMat))[1]) #Cell Type 1
#' metaList[[2]] <- c(unique(colnames(dataMat))[2]) #Cell Type 2
#' metaList[[3]] <- c(unique(colnames(dataMat))[3]) #Cell Type 3
#' metaList[[4]] <- c(unique(colnames(dataMat))[4:length(unique(colnames(dataMat)))]) #Cell Type 4
#' #options(mc.cores=2)
#' # This is a meta-function that calls other functions,
#' # The execution speed is too slow for the CRAN automated check
#' #testAllSigMatrices(exprData=dataMat, randomize = TRUE, skipShrink=FALSE,
#' # proportional=FALSE, handMetaCluster=metaList, testOnHalf=TRUE, numChunks=NULL)
testAllSigMatrices <- function(exprData, randomize = TRUE, skipShrink=FALSE, proportional=FALSE, handMetaCluster=NULL, testOnHalf=TRUE, condTol=1.01, numChunks=100, plotIt=TRUE, fastStop=TRUE, singleCore=TRUE) {
if(randomize==TRUE) {set.seed(Sys.time())}
resList <- list()
if(testOnHalf == TRUE){
trainTestSet <- ADAPTS::splitSCdata(exprData, numSets=2, randomize = randomize)
trainSet <- trainTestSet[[1]]
testSet <-trainTestSet[[2]]
}
else {
trainSet <- exprData
testSet <- exprData
}
trainSet.30sam <- ADAPTS::scSample(RNAcounts = trainSet, groupSize = 30, randomize = randomize)
trainSet.3sam <- ADAPTS::scSample(RNAcounts = trainSet, groupSize = 3, randomize = randomize)
pseudobulk.test <- data.frame(test=rowSums(testSet))
pseudobulk.test.counts<-table(sub('\\..*','',colnames(testSet)))
actFrac.test <- 100 * pseudobulk.test.counts / sum(pseudobulk.test.counts)
#seed
genesInSeed<-100
seedMat <- buildSeed(trainSet, genesInSeed=genesInSeed, groupSize=30, randomize=TRUE, num.trees=1000, plotIt=plotIt, trainSet.3sam=trainSet.3sam, trainSet.30sam=trainSet.30sam,proportional = proportional)
resList[['matrix.seed']] <- seedMat
estimates.onTest <- as.data.frame(ADAPTS::estCellPercent.DCQ(seedMat, pseudobulk.test))
colnames(estimates.onTest) <- paste('Seed Matrix')
estimates.onTest$actFrac.test <- round(actFrac.test[rownames(estimates.onTest)],2)
resList[['estimates.onTest']] <- estimates.onTest
resList[['testAcc.seed']] <- seed2TestAcc <- ADAPTS::calcAcc(estimates=estimates.onTest[,1], reference=estimates.onTest[,2])
#All gene
allGeneSig <- apply(trainSet.3sam, 1, function(x){tapply(x, colnames(trainSet.3sam), mean, na.rm=TRUE)})
estimates.allGene <- as.data.frame(ADAPTS::estCellPercent.DCQ(t(allGeneSig), pseudobulk.test))
colnames(estimates.allGene)<-'All Gene Matrix'
estimates.onTest<-cbind(estimates.allGene,estimates.onTest)
resList[['estimates.onTest']] <- estimates.onTest
resList[['testAcc.all']] <- seed2TestAcc <- ADAPTS::calcAcc(estimates=estimates.onTest[,1],reference=estimates.onTest[,ncol(estimates.onTest)])
# Aug
gList <- ADAPTS::gListFromRF(trainSet=trainSet.30sam)
resList[['gList']] <- gList
augTrain <- ADAPTS::AugmentSigMatrix(origMatrix = seedMat, fullData = trainSet.3sam, gList = gList, nGenes = 1:100, newData = trainSet.3sam, plotToPDF = FALSE, pdfDir = '.', condTol=condTol, plotIt=plotIt)
resList[['matrix.aug']] <- augTrain
resList[['matrix.aug.bestGenes']] <- ADAPTS::matrixToGenelist(augTrain, gList)
estimates.augment <- as.data.frame(ADAPTS::estCellPercent.DCQ(augTrain, pseudobulk.test))
colnames(estimates.augment) <- paste('Augmented Matrix')
estimates.onTest <- cbind(estimates.augment, estimates.onTest)
resList[['estimates.onTest']] <- estimates.onTest
resList[['testAcc.aug']] <- seed2TestAcc <- ADAPTS::calcAcc(estimates=estimates.onTest[,1], reference=estimates.onTest[,ncol(estimates.onTest)])
#shrink
if(skipShrink == FALSE) {
#augTrain.shrink <- ADAPTS::shrinkSigMatrix(sigMatrix=augTrain, numChunks=NULL, verbose=FALSE, plotIt = FALSE, aggressiveMin=TRUE,sigGenesList=NULL, fastStop=TRUE, singleCore=TRUE)
augTrain.shrink <- ADAPTS::shrinkSigMatrix(sigMatrix=augTrain, verbose=FALSE, plotIt = plotIt, aggressiveMin=TRUE, fastStop=fastStop, singleCore=singleCore, numChunks=numChunks)
#pheatmap(augTrain.shrink)
resList[['matrix.shrink']] <- augTrain.shrink
resList[['matrix.shrink.bestGenes']] <- ADAPTS::matrixToGenelist(augTrain.shrink, gList)
estimates.shrink <- as.data.frame(ADAPTS::estCellPercent.DCQ(augTrain.shrink, pseudobulk.test))
colnames(estimates.shrink) <- paste('Shrunk Matrix')
resList[['estimates.onTest']] <- estimates.onTest <- cbind(estimates.shrink, estimates.onTest)
resList[['testAcc.shrink']] <- seed2TestAcc<- ADAPTS::calcAcc(estimates=estimates.onTest[,1], reference=estimates.onTest[,ncol(estimates.onTest)])
} else {
augTrain.shrink <- augTrain #Used for later clustering
}
#Clustering
if(!is.null(handMetaCluster)) {
resList[['allClusters']] <- handMetaCluster
} else {
varClusts <- ADAPTS::clustWspillOver(sigMatrix = augTrain.shrink, geneExpr = trainSet.3sam)
resList[['allClusters']] <- varClusts$allClusters
}
resList[['allClusters']]
metaCluster.id <- list()
for(i in 1:length(resList[['allClusters']])) {
for (x in resList[['allClusters']][[i]]) {
metaCluster.id[[x]] <- paste('Meta',i,sep='_')
}
}
metaClust.LUT <- resList[['metaClust.LUT']] <- unlist(metaCluster.id)
names(resList[['allClusters']]) <- metaClust.LUT[sapply(resList[['allClusters']], function(x){x[1]})]
#Update 05-19-20: More informative names
metaNames <- sapply(unique(metaClust.LUT), function(x) { paste(names(metaClust.LUT)[metaClust.LUT == x], collapse='_')})
metaClust.LUT.MI <- metaClust.LUT
metaClust.LUT.MI <- metaNames[match(metaClust.LUT.MI, names(metaNames))]
names(metaClust.LUT.MI) <- names(metaClust.LUT)
metatrainSet<-trainSet
metatestSet<-testSet
colnames(metatrainSet) <- metaClust.LUT.MI[sub('\\..*','',colnames(metatrainSet))]
colnames(metatestSet) <- metaClust.LUT.MI[sub('\\..*','',colnames(metatestSet))]
metatrainSet.3sam <- ADAPTS::scSample(RNAcounts = metatrainSet, groupSize = 3, randomize = TRUE)
metatrainSet.30sam <- ADAPTS::scSample(RNAcounts = metatrainSet, groupSize = 30, randomize = TRUE)
metaclusterIDs <- factor(colnames(metatrainSet.30sam))
metatrainSet.4reg <- t(metatrainSet.30sam)
metapseudobulk.test <- data.frame(test=rowSums(metatestSet))
metapseudobulk.test.counts <- table(sub('\\..*','',colnames(metatestSet)))
meta.actFrac <- 100 * metapseudobulk.test.counts / sum(metapseudobulk.test.counts)
#Metaseed
genesInSeed<-100
metaseedMat <-buildSeed(metatrainSet, genesInSeed=genesInSeed, groupSize=30, randomize=TRUE, num.trees=1000, plotIt=plotIt, trainSet.3sam=metatrainSet.3sam, trainSet.30sam=metatrainSet.30sam,proportional = proportional)
resList[['matrix.metaSeed']] <- metaseedMat
estimates.Meta.onTest <- as.data.frame(ADAPTS::estCellPercent.DCQ(metaseedMat, metapseudobulk.test))
colnames(estimates.Meta.onTest) <- paste('Seed Meta')
estimates.Meta.onTest$actFrac.test <- round(meta.actFrac[rownames(estimates.Meta.onTest)],2)
resList[['estimates.onTest.meta']] <- estimates.Meta.onTest
resList[['testAcc.metaSeed']] <- seed2TestAcc.meta <- ADAPTS::calcAcc(estimates=estimates.Meta.onTest[,1], reference=estimates.Meta.onTest[,2])
#meta all gene
metaallGeneSig <- apply(metatrainSet.3sam, 1, function(x){tapply(x, colnames(metatrainSet.3sam), mean, na.rm=TRUE)})
metaestimates.allGene <- as.data.frame(ADAPTS::estCellPercent.DCQ(t(metaallGeneSig), metapseudobulk.test))
colnames(metaestimates.allGene)<-paste('All Gene Meta')
estimates.Meta.onTest <- cbind(metaestimates.allGene, estimates.Meta.onTest)
resList[['estimates.onTest.meta']] <- estimates.Meta.onTest
resList[['testAcc.metaAll']] <- seed2TestAcc <- ADAPTS::calcAcc(estimates=estimates.Meta.onTest[,1], reference=estimates.Meta.onTest[,ncol(estimates.Meta.onTest)])
#meta aug
metagList <- ADAPTS::gListFromRF(trainSet=metatrainSet.30sam)
resList[['gList.meta']] <- metagList
sapply(gList,dim)
meta.augTrain <- ADAPTS::AugmentSigMatrix(origMatrix = metaseedMat, fullData = metatrainSet.3sam, gList = metagList, nGenes = 1:100, newData = metatrainSet.3sam, plotToPDF = FALSE, pdfDir = '.', condTol = condTol, plotIt=plotIt)
resList[['matrix.metaAug']] <- meta.augTrain
resList[['matrix.metaAug.bestGenes']] <- ADAPTS::matrixToGenelist(meta.augTrain, metagList)
estimates.Meta.augment <- as.data.frame(ADAPTS::estCellPercent.DCQ(meta.augTrain, metapseudobulk.test))
colnames(estimates.Meta.augment) <- paste('Augmented Meta')
resList[['estimates.onTest.meta']] <- estimates.Meta.onTest <- cbind(estimates.Meta.augment, estimates.Meta.onTest)
resList[['testAcc.metaAug']] <- seed2TestAcc.aug.meta <- ADAPTS::calcAcc(estimates=estimates.Meta.onTest[,1], reference=estimates.Meta.onTest[,ncol(estimates.Meta.onTest)])
#meta shrink
if(skipShrink == FALSE) {
gc()
#meta.augTrain.shrink <- ADAPTS::shrinkSigMatrix(sigMatrix=meta.augTrain, numChunks=NULL, verbose=FALSE, plotIt = FALSE, aggressiveMin=TRUE, sigGenesList=NULL,fastStop=TRUE, singleCore=TRUE)
meta.augTrain.shrink <- ADAPTS::shrinkSigMatrix(sigMatrix=meta.augTrain, verbose=FALSE, plotIt = plotIt, aggressiveMin=TRUE, fastStop=fastStop, singleCore=singleCore, numChunks=numChunks)
dim(meta.augTrain.shrink)
#pheatmap(augTrain.shrink)
resList[['matrix.metaAugShrink']] <- meta.augTrain.shrink
resList[['matrix.metaAugShrink.bestGenes']] <- ADAPTS::matrixToGenelist(meta.augTrain.shrink, metagList)
estimates.Meta.shrink <- as.data.frame(ADAPTS::estCellPercent.DCQ(meta.augTrain.shrink, metapseudobulk.test))
colnames(estimates.Meta.shrink) <- paste('Shrunk Meta')
resList[['estimates.onTest.meta']] <- estimates.Meta.onTest <- cbind(estimates.Meta.shrink, estimates.Meta.onTest)
resList[['testAcc.metaAugShrink']] <- seed2TestAcc.shrink.meta <- ADAPTS::calcAcc(estimates=estimates.Meta.onTest[,1], reference=estimates.Meta.onTest[,ncol(estimates.Meta.onTest)])
}
return(resList)
}
#' Find out at which iteration the results converge, i.e. the mean results are stable.
#'
#' @param curSeq A sequence of results that generated from each iteration of the loop
#' @param changePer The maximum percentage of change allowed
#' @param winSize The window size for mean calculation
#'
#' @return The minimum number of iterations needed for the results to converge
findConvergenceIter <- function(curSeq, changePer=1, winSize=5) {
#Note, this will remove NAs. Is that best? They're caused by bad correlations
runMean <- sapply(1:length(curSeq), function(x) {sum(curSeq[1:x], na.rm=TRUE)/x})
  #Criterion: running mean has changed by less than changePer percent over the last winSize points
convIter <- as.numeric(NA)
if (length(runMean) > winSize) {
winOffset <- winSize-1
maxWinChange <- sapply(winSize:length(runMean), function(x) {
win <- runMean[(x-winOffset):x]
max(abs((win - mean(win))/mean(win)))
}) #maxWinChange
mwcBool <- maxWinChange < changePer/100
if(any(mwcBool)) {convIter <- winOffset + which(mwcBool)[1]}
}
return (convIter)
}
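# Illustrative sketch (if(FALSE)-guarded): a made-up sequence that wanders early and then
# settles near 0.8; findConvergenceIter() returns the first index at which the running mean
# stays within changePer percent across a winSize-point window, or NA if it never settles.
if (FALSE) {
  set.seed(1)
  curSeq <- c(runif(10, 0.4, 1.0), 0.8 + rnorm(40, sd=0.01))
  findConvergenceIter(curSeq=curSeq, changePer=1, winSize=5)
}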
#' A meta analysis for the results from multiple iterations
#' @description Calculate the mean and the standard deviation of the results from all the iterations, and also
#' test for convergence by % of change with each additional iteration.
#'
#' @param allResList A list of results generated from all the iterative calls of testAllSigMatrices
#' @param changePer The maximum percentage of change allowed for convergence
#' @export
#'
#' @return The mean and standard deviation of all the results, along with the number of iterations needed for the results to converge.
meanResults <- function (allResList,changePer=1) {
testNames <- unique(sub('^.*\\.', '', names(allResList[[1]])))
testNames <- testNames[!testNames %in% c("onTest", "allClusters", "LUT", "meta", "gList")]
compTypes <- names(allResList[[1]][[paste0('testAcc.', testNames[1])]])
allResList <- allResList[!sapply(allResList, function(x){inherits(x,'try-error')})]
compList <- list()
for (curName in testNames) {
compList[[curName]] <- list()
for (curComp in compTypes) {
compList[[curName]][[curComp]] <- sapply(allResList, function(x){
y <- x[[paste0('testAcc.', curName)]]
if(!inherits(y, 'try-error')) {return(y[[curComp]])} else {return(as.numeric(NA))}
#If only two clusters, set correlation to zero or NA?
}) #compList[[curName]][[curComp]] <- sapply(allResList, function(x){
} #for (curComp in compTypes) {
} #for (curName in testNames) {
curColN <- unlist(lapply(compTypes, function(x){c(x, paste0(x,'.sd'))}))
resMat <- matrix(as.numeric(NA), nrow=length(testNames), ncol=length(curColN),
dimnames=list(testNames, curColN))
convMat <- matrix(as.numeric(NA), nrow=length(testNames), ncol=length(compTypes),
dimnames=list(testNames, compTypes))
for (curName in testNames) {
if(is.null(compList[[curName]][[compTypes[1]]][[1]])) { message(paste('Omitting', curName, 'due to NULL results')) ; next; }
for (curComp in compTypes) {
curMean <- mean(compList[[curName]][[curComp]], na.rm = TRUE)
curSD <- stats::sd(compList[[curName]][[curComp]], na.rm = TRUE)
resMat[curName,curComp] <- curMean
resMat[curName,paste0(curComp,'.sd')] <- curSD
#Also test for convergence by % of change with each additional
convMat[curName, curComp] <- findConvergenceIter(curSeq=compList[[curName]][[curComp]], changePer=changePer, winSize=5)
} #for (curComp in compTypes) {
} #for (curName in testNames) {
colnames(convMat) <- paste0('convIt.',colnames(convMat))
resMat <- as.data.frame(resMat)
resMat$N <- length(allResList)
resMat <- cbind(resMat, as.data.frame(convMat))
remBool <- apply(resMat, 1, function(x){all(is.na(x))})
resMat <- resMat[!remBool,]
return(resMat)
}
#' Loop testAllSigMatrices until convergence
#' @description Iteratively call testAllSigMatrices numLoops times with the option to fast stop
#' if correlation, correlation spear, mae and rmse all converge
#' @param numLoops The number of iterations. Set to NULL to loop until results converge.
#' @param fastStop Set to TRUE to break the loop when correlation, correlation spear, mae and rmse all converge
#' @param exprData The single cell matrix
#' @param changePer The maximum percentage of change allowed for convergence
#' @param handMetaCluster A list of pre-defined meta clusters. Set to NULL to automatically group indistinguishable
#' cells into the same cluster using clustWspillOver (DEFAULT: NULL)
#' @param testOnHalf Set to TRUE to leave half the data as a test set to validate all the matrices
#' @param condTol The tolerance in the reconstruction algorithm. 1.0 = no tolerance, 1.05 = 5\% tolerance (DEFAULT: 1.01)
#'
#' @return A list of results generated from all the iterative calls of testAllSigMatrices
#' @export
#'
#' @examples
#' ct1 <- runif(1000, 0, 100)
#' ct2 <- runif(1000, 0, 100)
#' ct3 <- runif(1000, 0, 100)
#' ct4 <- runif(1000, 0, 100)
#' dataMat <- cbind(ct1, ct1, ct1, ct1, ct1, ct1, ct2, ct2, ct2, ct2, ct3, ct3, ct3,ct3,ct4,ct4)
#' rownames(dataMat) <- make.names(rep('gene', nrow(dataMat)), unique=TRUE)
#' noise <- matrix(runif(nrow(dataMat)*ncol(dataMat), -2, 2), nrow = nrow(dataMat), byrow = TRUE)
#' dataMat <- dataMat + noise
#' #options(mc.cores=2)
#' # This is a meta-function that calls other functions,
#' # The execution speed is too slow for the CRAN automated check
#' #loopTillConvergence(numLoops=10, fastStop=TRUE, exprData=dataMat,
#' # changePer=10,handMetaCluster=NULL, testOnHalf=TRUE)
loopTillConvergence <- function(numLoops, fastStop, exprData, changePer, handMetaCluster, testOnHalf, condTol=1.01){
if(is.null(numLoops)){
fastStop<-TRUE
numLoops<-1000000
}
allResListOut <- list()
for (i in 1:numLoops) {
curName <- paste0('res', i)
allResListOut[[curName]] <- try(testAllSigMatrices(exprData, randomize = TRUE, proportional=FALSE, handMetaCluster=handMetaCluster, testOnHalf=testOnHalf, condTol = condTol))
    if(fastStop==TRUE){
      covtmp<-meanResults(allResList=allResListOut,changePer)[ ,c("convIt.rho.cor", "convIt.spear.rho", "convIt.mae","convIt.rmse")]
      if(all(!is.na(covtmp))) break
    }
  }
return(allResListOut)
}
| /scratch/gouwar.j/cran-all/cranData/ADAPTS/R/runLoops.R |
#' @importFrom foreach %do% %dopar% getDoParWorkers foreach
#' @importFrom stats na.omit t.test var
#' @importFrom utils tail
.onLoad <- function(libname, pkgname){
if (.Platform$OS.type == 'unix') {
doParallel::registerDoParallel(cores = parallel::detectCores())
options(mc.cores=parallel::detectCores())
options(cores=parallel::detectCores())
}
}
| /scratch/gouwar.j/cran-all/cranData/ADAPTS/R/zzz.R |
#' Power calculation for Biomarker-Informed Design with Hierarchical Model
#'
#' Given the Biomarker-Informed design information, returns the overall power and probability of the arm is selected as the winner.
#'
#' @usage
#' BioInfo.Power(uCtl, u0y, u0x, rhou, suy, sux, rho, sy, sx, Zalpha, N1, N, nArms, nSims)
#' @param uCtl mean value for the control group.
#' @param u0y mean parameter of the group 1 for the parent model.
#' @param u0x mean parameter of the group 2 for the parent model.
#' @param rhou correlation coefficient between two groups for the parent model.
#' @param suy standard deviation of the group 1 for the parent model.
#' @param sux standard deviation of the group 2 for the parent model.
#' @param rho correlation coefficient between two groups for the lower level model.
#' @param sy standard deviation of the group 1 for the lower level model.
#' @param sx standard deviation of the group 2 for the lower level model.
#' @param Zalpha critical point for rejection.
#' @param N1 sample size per group at interim analysis.
#' @param N sample size per group at final analysis.
#' @param nArms number of active groups.
#' @param nSims number of simulation times.
#' @return
#' The evaluated power and probability of selecting the arm as the winner.
#' @author Yalin Zhu
#' @references Chang, M. (2014). Adaptive design theory and implementation using SAS and R.
#' \emph{CRC Press}.
#'
#' @examples
#' ## Determine critical value Zalpha for alpha (power) =0.025
#' u0y=c(0,0,0); u0x=c(0,0,0)
#' BioInfo.Power(uCtl=0, u0y, u0x, rhou=1, suy=0, sux=0, rho=1, sy=4, sx=4,
#' Zalpha=2.772, N1=100, N=300, nArms=3, nSims=1000)
#' ## Power simulation
#' u0y=c(1,0.5,0.2)
#' u0x=c(2,1,0.5)
#' BioInfo.Power(uCtl=0, u0y, u0x, rhou=0.2, suy=0.2, sux=0.2, rho=0.2, sy=4, sx=4,
#' Zalpha=2.772, N1=100, N=300, nArms=3, nSims=500)
#'
#'@import stats
#'@import mvtnorm
#' @export
BioInfo.Power <- function(uCtl, u0y, u0x, rhou, suy, sux, rho, sy, sx, Zalpha, N1, N, nArms, nSims){
uy=rep(0,nArms); ux=rep(0,nArms); probWinners=rep(0,nArms); power = 0
varcov0=matrix(c(suy^2,rhou*suy*sux,rhou*suy*sux, sux^2),2,2)
varcov=matrix(c(sy^2, rho*sy*sx, rho*sx*sy, sx^2),2,2)
for (i in 1: nSims) {
winnerMarker= -Inf
for (j in 1: nArms) {
u= rmvnorm(1,mean=c(u0y[j],u0x[j]), sigma=varcov0)
uy[j]=u[1]; ux[j]=u[2]
dataStg1= rmvnorm(N1, mean=c(uy[j], ux[j]), sigma=varcov)
meanxMarker=mean(dataStg1[,2])
if (meanxMarker>winnerMarker)
{winner=j; winnerMarker=meanxMarker; winnerY=dataStg1[,1]}
} ## End of j ##
for (j in 1:nArms) {if (winner==j) {probWinners[j]=probWinners[j]+1/nSims}}
yStg1=winnerY
yStg2=rnorm(N-N1, mean=uy[winner], sd=sy)
yTrt=c(yStg1+yStg2)
yCtl=rnorm(N, mean=uCtl, sd=sy)
tValue=t.test(yTrt,yCtl)$statistic
    if (tValue>=Zalpha) {power=power+1/nSims}
  } ## End of i ##
return (c(power, probWinners))
}
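# Illustrative sketch (if(FALSE)-guarded): the returned vector is c(power, probWinners),
# i.e. element 1 is the overall power and the remaining nArms elements are the probabilities
# that each arm is selected as the winner. Parameter values follow the roxygen
# power-simulation example; nSims is reduced here only to keep the sketch fast.
if (FALSE) {
  out <- BioInfo.Power(uCtl=0, u0y=c(1,0.5,0.2), u0x=c(2,1,0.5), rhou=0.2,
                       suy=0.2, sux=0.2, rho=0.2, sy=4, sx=4,
                       Zalpha=2.772, N1=100, N=300, nArms=3, nSims=100)
  power <- out[1]
  probWinners <- out[-1]
}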
| /scratch/gouwar.j/cran-all/cranData/ADCT/R/BID.R |
#' Power Calculation for Two Coprimary Endpoints.
#'
#' Given the group sequential design information, returns the overall power.
#'
#' @usage
#' CopriEndpt.Power(n, tau, mu1, mu2, rho, alpha1, alpha2, alternative)
#' @param n sample size for the design.
#' @param tau information time for the interim analysis.
#' @param mu1 mean value for coprimary endpoint 1.
#' @param mu2 mean value for coprimary endpoint 2.
#' @param rho correlation coefficient between two coprimary endpoints.
#' @param alpha1 significant level for the first stage.
#' @param alpha2 significant level for the second stage.
#' @param alternative indicates the alternative hypothesis and must be one of \code{"two.sided"} or \code{"one.sided"}.
#' @return
#' The evaluated power with attributes and computational error.
#' @author Yalin Zhu
#' @references Chang, M. (2014). Adaptive design theory and implementation using SAS and R.
#' \emph{CRC Press}.
#'
#' @examples
#' # Example in Chang (2014) page 272
#' CopriEndpt.Power(n=197, tau=0.5, mu1=0.2, mu2=0.2, rho=0.5,
#' alpha1=0.0025, alpha2=0.024, alternative="one.sided")
#' sapply(c(-0.8,-0.5,-0.2,0,0.2,0.5,0.8),CopriEndpt.Power,
#' n=197, tau=0.5, mu1=0.2, mu2=0.2, alpha1=0.0025, alpha2=0.024, alternative="one.sided")
#' @import stats
#' @import mvtnorm
#' @export
CopriEndpt.Power <- function (n, tau, mu1, mu2, rho, alpha1, alpha2, alternative = c("two.sided", "one.sided")){
alternative <- match.arg(alternative)
za <- switch (alternative,
two.sided = {c(qnorm(1-alpha1/2), qnorm(1-alpha2/2))},
one.sided = {c(qnorm(1-alpha1), qnorm(1-alpha2))}
)
u1 <- sqrt(n*tau)*mu1; u2 <- sqrt(n)*mu2
r12 <- rho; r13 <- sqrt(tau); r14 <- r12*r13
r23 <- r14; r24 <- r13; r34 <- r12
s1 <- matrix(c(1,r12, r12,1), 2,2)
s2 <- matrix(c(1,r12,r13, r14, r12,1, r23, r24, r13, r23,1, r34, r14, r24, r34, 1), 4,4)
power1 <- pmvnorm(lower = c(za[1],za[1]), upper = c(Inf, Inf), rep(u1,2), s1)
power2 <- pmvnorm(lower = c(za[2],za[2]), upper = c(Inf, Inf), rep(u2,2), s1)
powerOverlap <- pmvnorm(lower = c(za[1], za[1], za[2], za[2]), upper = rep(Inf, 4), c(u1, u1, u2, u2), s2)
power <- power1 + power2 - powerOverlap
return (power)
}
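# Illustrative sketch (if(FALSE)-guarded): because CopriEndpt.Power() returns the overall
# power for a given n, it can be scanned over a grid of sample sizes to find the smallest n
# reaching a target power. The 0.80 target and the 150:250 grid are arbitrary illustrative
# choices, not values from Chang (2014).
if (FALSE) {
  n.grid <- 150:250
  pow.by.n <- sapply(n.grid, CopriEndpt.Power, tau=0.5, mu1=0.2, mu2=0.2, rho=0.5,
                     alpha1=0.0025, alpha2=0.024, alternative="one.sided")
  n.needed <- n.grid[which(pow.by.n >= 0.80)[1]]
}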
#' Power Simulation for Two Group Two Coprimary Endpoints Group Sequential Design.
#'
#' Given the group sequential design information, returns the simulated overall power.
#'
#' @usage
#' TwoGrpCopriEndpt.SimPower(mu11,mu12, mu21, mu22, rho, tau,
#' alpha1, alpha2, alternative , Nmax, B)
#' @param mu11 standardized mean value for coprimary endpoint 1 in group 1.
#' @param mu12 standardized mean value for coprimary endpoint 2 in group 1.
#' @param mu21 standardized mean value for coprimary endpoint 1 in group 2.
#' @param mu22 standardized mean value for coprimary endpoint 2 in group 2.
#' @param rho correlation coefficient between two coprimary endpoints.
#' @param tau information time for the interim analysis.
#' @param alpha1 significant level for the first stage.
#' @param alpha2 significant level for the second stage.
#' @param alternative indicates the alternative hypothesis and must be one of \code{"two.sided"} or \code{"one.sided"}.
#' @param Nmax maximum sample size per group.
#' @param B the simulation iterative time.
#' @return
#' The evaluated power with attributes and computational error.
#' @author Yalin Zhu
#' @references Chang, M. (2014). Adaptive design theory and implementation using SAS and R.
#' \emph{CRC Press}.
#'
#' @examples
#' # Example in Chang (2014) page 275
#' TwoGrpCopriEndpt.SimPower(mu11=0.2,mu12=0.25, mu21=0.005, mu22=0.015, rho=0.25,
#' tau=0.5, alpha1=0.0025, alpha2=0.024, alternative = "two.sided",Nmax=584, B=10000)
#' @export
TwoGrpCopriEndpt.SimPower <- function(mu11,mu12, mu21, mu22, rho, tau, alpha1, alpha2, alternative= c("two.sided", "one.sided"), Nmax, B=10000){
alternative <- match.arg(alternative)
za <- switch (alternative,
two.sided = {c(qnorm(1-alpha1/2), qnorm(1-alpha2/2))},
one.sided = {c(qnorm(1-alpha1), qnorm(1-alpha2))}
)
rej <- rep(0,B)
for (i in 1:B){
n <- round(Nmax*tau)
varcov <- matrix(c(1,rho,rho,1),2,2)
trtStg1 <- rmvnorm(n,mean=c(mu11,mu12), sigma=varcov)
ctStg1 <- rmvnorm(n,mean=c(mu21,mu22), sigma=varcov)
t11 <- t.test(trtStg1[,1],ctStg1[,1])$statistic
t12 <- t.test(trtStg1[,2],ctStg1[,2])$statistic
trtStg2 <- rmvnorm(Nmax-n, mean=c(mu11,mu12), sigma=varcov)
ctStg2 <- rmvnorm(Nmax-n, mean=c(mu21,mu22), sigma=varcov)
trt1 <- c(trtStg1[,1], trtStg2[,1]); trt2 <- c(trtStg1[,2], trtStg2[,2])
ct1 <- c(ctStg1[,1], ctStg2[,1]); ct2 <- c(ctStg1[,2], ctStg2[,2])
t21 <- t.test(trt1,ct1)$statistic; t22 <- t.test(trt2,ct2)$statistic
    rej[i]=(t11>za[1] & t12>za[1]) | (t21>za[2] & t22>za[2])
  }
  return (mean(rej))
}
#' Conditional power for one-arm, two-stage design with two primary endpoints
#'
#' Given the group sequential design information, returns the conditional power.
#'
#' @usage
#' OneArm.CondPower(mu1, mu2, n1, n2, rho, tau, alpha2, alternative)
#' @param mu1 mean value for the first stage (endpoint 1).
#' @param mu2 mean value for the second stage (endpoint 2).
#' @param n1 sample size for the first stage.
#' @param n2 sample size for the second stage.
#' @param rho correlation coefficient between two coprimary endpoints.
#' @param tau information time for the interim analysis.
#' @param alpha2 significant level for the second stage.
#' @param alternative indicates the alternative hypothesis and must be one of \code{"two.sided"} or \code{"one.sided"}.
#' @return
#' The evaluated power with attributes and computational error.
#' @author Yalin Zhu
#' @references Chang, M. (2014). Adaptive design theory and implementation using SAS and R.
#' \emph{CRC Press}.
#'
#' @examples
#' # Example in Chang (2014) page 277
#' OneArm.CondPower(mu1=0.1333, mu2=0.1605, n1=130, n2=130, rho=0.35,
#' tau=0.5, alpha2=0.024, alternative = "one.sided")
#' OneArm.CondPower(mu1=0.1333, mu2=0.1605, n1=130, n2=414, rho=0.35,
#' tau=0.5, alpha2=0.024, alternative = "one.sided")
#' @export
OneArm.CondPower <- function(mu1, mu2, n1, n2, rho, tau, alpha2, alternative= c("two.sided", "one.sided")){
alternative <- match.arg(alternative)
za2 <- switch (alternative,
two.sided = {qnorm(1-alpha2/2)},
one.sided = {qnorm(1-alpha2)}
)
Z1t=sqrt(n1)*mu1; Z2t=sqrt(n1)*mu2
mean= c(sqrt(n2)*mu1,sqrt(n2)*mu2)
s=matrix(c(1,rho, rho,1), 2,2)
c1=(za2-sqrt(tau)*Z1t)/(1-tau)
c2=(za2-sqrt(tau)*Z2t)/(1-tau)
return( pmvnorm(lower=c(c1,c2), upper=c(Inf, Inf), mean, s))
}
#' Conditional power for two-group design, two-stage design with two primary endpoints
#'
#' Given the group sequential design information, returns the conditional power.
#'
#'
#' @usage
#' TwoArms.CondPower(mu1, mu2, sigma1, sigma2, n1, n2, rho, tau, alpha2, alternative)
#' @param mu1 mean value for endpoint 1.
#' @param mu2 mean value for endpoint 2.
#' @param sigma1 standard deviation for endpoint 1.
#' @param sigma2 standard deviation for endpoint 2.
#' @param n1 sample size for the first stage.
#' @param n2 sample size for the second stage.
#' @param rho correlation coefficient between two coprimary endpoints.
#' @param tau information time for the interim analysis.
#' @param alpha2 significant level for the second stage.
#' @param alternative indicates the alternative hypothesis and must be one of \code{"two.sided"} or \code{"one.sided"}.
#' @return
#' The evaluated power with attributes and computational error.
#' @author Yalin Zhu
#' @references Chang, M. (2014). Adaptive design theory and implementation using SAS and R.
#' \emph{CRC Press}.
#'
#' @examples
#' # Example in Chang (2014) page 278
#' TwoArms.CondPower(mu1=0.28, sigma1=1.9, mu2=0.35, sigma2=2.2, n1=340, n2=340,
#' rho=0.3, tau=0.5, alpha2=0.024, alternative = "one.sided")
#' TwoArms.CondPower(mu1=0.28, sigma1=1.9, mu2=0.35, sigma2=2.2, n1=340, n2=482,
#' rho=0.3, tau=0.5, alpha2=0.024, alternative = "one.sided")
#' TwoArms.CondPower(mu1=0.32, sigma1=2, mu2=0.4, sigma2=1.8, n1=340, n2=340,
#' rho=0.3, tau=0.5, alpha2=0.024, alternative = "one.sided")
#' @export
TwoArms.CondPower <- function(mu1, mu2, sigma1, sigma2, n1, n2, rho, tau, alpha2, alternative= c("two.sided", "one.sided")){
alternative <- match.arg(alternative)
za2 <- switch (alternative,
two.sided = {qnorm(1-alpha2/2)},
one.sided = {qnorm(1-alpha2)}
)
delta1=mu1/sqrt(2)/sigma1; delta2=mu2/sqrt(2)/sigma2
Z1t=sqrt(n1)*delta1; Z2t=sqrt(n1)*delta2
mean= c(sqrt(n2)*delta1,sqrt(n2)*delta2)
s=matrix(c(1,rho, rho,1), 2,2)
c1=(za2-sqrt(tau)*Z1t)/(1-tau); c2=(za2-sqrt(tau)*Z2t)/(1-tau)
return ( pmvnorm(lower=c(c1,c2), upper=c(Inf, Inf), mean, s))
}
| /scratch/gouwar.j/cran-all/cranData/ADCT/R/CopriEndpt.R |
### lifetime.mle
## failure.threshold is the percentage
################################################################################
addt.fit=function(formula, data, initial.val=100, proc="All", failure.threshold, time.rti=100000, method="Nelder-Mead", subset, na.action, starts=NULL,fail.thres.vec=c(70,80), semi.control= list(cor=F,...) ,...)
{
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data", "subset", "na.action"), names(mf), 0L)
if (m[1]==0) stop("a formula argument is required")
mf <- mf[c(1, m)]
mf[[1]] <- as.name("model.frame")
mdat <- eval(mf, parent.frame())
n=nrow(mdat)
mt <- attr(mdat, "terms")
#y <- model.extract(mdat, "response")
#X <- model.matrix(mt, mdat)
#ll <- ncol(X)
#browser()
names(mdat)=c("Response", "Time", "Temp")
dat0=as.data.frame(cbind(Temp=mdat[,"Temp"], Time=mdat[,"Time"], Response=mdat[,"Response"]))
LS.obj=ML.obj=SemiPara.obj=NULL
if(proc=="LS"|proc=="All"){
# LSA
LS.obj=lsa.fit(dat=dat0, initial.val=initial.val,failure.threshold=failure.threshold, time.rti=time.rti)
}
if(proc=="ML"|proc=="All"){
# MLA
if(is.null(starts))
{
#fail.thres=50
#ok=0
#while(ok<1){
# temp=try(lsa.fit(dat=dat0, initial.val=initial.val,failure.threshold=fail.thres, time.rti=time.rti), silent=T)
# ok=ifelse(class(temp)=="try-error", 0, 1)
# fail.thres=fail.thres+10
#}
#fail.thres1=fail.thres-10
#fail.thres2=ifelse(fail.thres>100, 99, fail.thres)
starts=addt.mle.initial.val(dat=dat0, time.rti=time.rti, initial.val=initial.val, failure.threshold=fail.thres.vec[1], failure.threshold1=fail.thres.vec[2])
}
ML.obj=lifetime.mle(dat=dat0, minusloglik=minus.loglik.kinetics, starts=starts, method = method , control=list(maxit=100000))
ML.obj=mle.transform(obj=ML.obj)
ML.obj=c(ML.obj, rti=true.ti.compute(pars=ML.obj$coef,failure.threshold=failure.threshold,initial.val=initial.val,time.rti=time.rti)$rti, beta = true.ti.compute(pars=ML.obj$coef,failure.threshold=failure.threshold,initial.val=initial.val,time.rti=time.rti)$coef)
}
if(proc=="SemiPara"|proc=="All"){
#print("Succeeded")
#dat=data.frame(TempC=c(50, mdat[,"Temp"]), TimeH=c(0, mdat[,"Time"]),Response=c(100, mdat[,"Response"]))
dat=data.frame(TempC= mdat[,"Temp"], TimeH=mdat[,"Time"],Response=mdat[,"Response"])
#print(mdat)
#print(dat)
##############
# initial value of beta
# pineapple: This could cause problem
#beta.ini(dat)
#beta.ini(dat, smooth.spline=T)
colnames(mdat) <- c("response", "time", "temp")
if (missing(semi.control))
{
res=monotone.bspline.fit.nocor.knotselection.output(dat,cov.fun.type="cov.fun.REMLc")
bestmodel=bspline.fit.addt.nocor(test.dat=dat, n.knots=NULL, knots=as.numeric(unlist(strsplit(as.character(res[,"Knots"]), ",")))
, degree=res[,"Order"], cov.fun.type="cov.fun.REMLc")
TI.semi=try(TI.bspline.nocor(dat=dat, model.fit.obj=bestmodel,dd=time.rti, failure.threshold = failure.threshold/100 ), silent = T)
#TI.values <- semi.para.TI(bestmodel, dat, threshold = failure.threshold, td = time.rti)
#print(TI.semi)
SemiPara.obj <- c(bestmodel, TI= TI.semi)
time.rti <- 1e+05
initial.val <- NULL
failure.threshold <- failure.threshold
dat0 <- dat
}
else
{
if(semi.control$cor=="TRUE")
{
#print("corr succeed")
# knot selection result
res=monotone.bspline.fit.knotselection.output(dat,cov.fun.type="cov.fun.REMLc")
bestmodel=bspline.fit.addt(test.dat=dat, n.knots=NULL, knots=as.numeric(unlist(strsplit(as.character(res[,"Knots"]), ",")))
, degree=res[,"Order"], cov.fun.type="cov.fun.REMLc")
TI.semi=try(TI.bspline.nocor(dat=dat, model.fit.obj=bestmodel, dd=time.rti, failure.threshold = failure.threshold/100), silent = T)
SemiPara.obj <- c(bestmodel, TI= TI.semi)
time.rti <- 1e+05
initial.val <- NULL
failure.threshold <- failure.threshold
dat0 <- dat
}
}
}
addt.fit=list(LS.obj=LS.obj, ML.obj=ML.obj,SemiPara.obj=SemiPara.obj, dat=dat0, time.rti=time.rti, initial.val=initial.val,
failure.threshold=failure.threshold)
class(addt.fit)="addt.fit"
return(addt.fit)
}
################################################################################
power.exponential.fun=function(tt, temp, alpha, beta0,beta1,gamma)
{
x=11605/(temp+273.16)
bb=exp(beta0+beta1*x)
res=alpha*exp(-(tt/bb)^gamma)
return(res)
}
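# Illustrative sketch (if(FALSE)-guarded): evaluating the power-exponential degradation path
# on a time grid at a single temperature. The parameter values are arbitrary; they are chosen
# only so that the characteristic time exp(beta0 + beta1*11605/(temp+273.16)) falls inside the
# plotted time range and the decay from alpha is visible.
if (FALSE) {
  tt <- seq(0, 5000, by=100)
  deg <- power.exponential.fun(tt=tt, temp=70, alpha=100, beta0=-26, beta1=1, gamma=1)
  plot(tt, deg, type="l", xlab="Time", ylab="Response")
}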
################################################################################
true.ti.compute=function(pars, failure.threshold, initial.val, time.rti)
{
#browser()
pp=failure.threshold/100
beta0=pars[2]/log(10)
beta1=pars[3]/log(10)
gamma=exp(pars[4])
gamma.term=(1/gamma)*log((1-pp)/pp)/log(10)
dK=273.16
rti=beta1/(log10(time.rti)-beta0-gamma.term)-dK
beta0.log10=beta0+gamma.term
beta1.log10=beta1
names(beta0.log10)="beta0"
names(beta1.log10)="beta1"
res=list(coef=c(beta0.log10, beta1.log10),rti=rti,time.rti=time.rti,failure.threshold=failure.threshold)
return(res)
}
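# Illustrative sketch (if(FALSE)-guarded): true.ti.compute() expects the parameter vector on
# the transformed scale used by the ML fit, i.e. pars = c(log(alpha), beta0, beta1, log(gamma), ...);
# only elements 2-4 are used here. The numbers below are hypothetical and only show the call
# pattern and the returned coef/rti structure.
if (FALSE) {
  pars.demo <- c(log(100), -16, 9000, log(1))
  true.ti.compute(pars=pars.demo, failure.threshold=70, initial.val=100, time.rti=100000)
}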
####################################################################################
print.addt.fit=function(x,...){
obj=x
if(!is.null(obj$LS.obj)){
cat("Least Squares Approach: \n")
print(round(x$LS.obj$coef0, 4))
cat("est.TI:" , round(obj$LS.obj$rti), "\n")
}
if(!is.null(obj$ML.obj)){
cat("\n", "Maximum Likelihood Approach:", "\n")
alpha=exp(obj$ML.obj$coef[1])
beta0=obj$ML.obj$coef[2]
beta1=obj$ML.obj$coef[3]
gamma=exp(obj$ML.obj$coef[4])
sigma=sqrt(exp(obj$ML.obj$coef[5])^2+exp(obj$ML.obj$coef[6])^2)
rho=exp(obj$ML.obj$coef[6])^2/(sigma^2)
coef=c(alpha, beta0, beta1, gamma, sigma, rho)
names(coef)=c("alpha", "beta0", "beta1", "gamma", "sigma", "rho")
cat("Call:\n")
print(obj$ML.obj$call)
cat("\n")
cat("Coefficients:\n")
print(round(coef,4))
cat("est.TI:", round(obj$ML.obj$rti), "\n")
cat("\n")
cat("Loglikelihod:\n")
print(as.numeric(-obj$ML.obj$min))
}
if(!is.null(obj$SemiPara.obj)){
cat("\n", "Semi-Parametric Approach:", "\n")
betahat <- obj$SemiPara.obj$betahat
print("beta estimates:")
print(betahat)
rho <- obj$SemiPara.obj$fit.monotone.bspline$coef[13]
print("rho")
print(rho)
knots <- obj$SemiPara.obj$spline.inf$knots
print("knots")
print(knots)
Loglike <- obj$SemiPara.obj$fit.monotone.bspline$loglik
print("Log-likelihood")
print(Loglike)
print("TI")
TI.semi <- obj$SemiPara.obj$TI
print(TI.semi)
}
}
####################################################################################
summary.addt.fit=function(object,...)
{
obj=object
if(!is.null(obj$ML.obj)){
alpha=exp(obj$ML.obj$coef[1])
beta0=obj$ML.obj$coef[2]
beta1=obj$ML.obj$coef[3]
gamma=exp(obj$ML.obj$coef[4])
sigma=sqrt(exp(obj$ML.obj$coef[5])^2+exp(obj$ML.obj$coef[6])^2)
rho=exp(obj$ML.obj$coef[6])^2/(sigma^2)
sigma.dev=deriv(~sqrt(exp(x)^2+exp(y)^2),c("x","y"), function(x,y){})
rho.dev=deriv(~exp(y)^2/(exp(x)^2+exp(y)^2),c("x","y"), function(x,y){})
ll=length(obj$ML.obj$coef)
coef.dev=diag(rep(1, ll))
coef.dev[1,1]=alpha
coef.dev[4,4]=gamma
coef.dev[(ll-1),(ll-1):ll]=attr(sigma.dev(obj$ML.obj$coef[5], obj$ML.obj$coef[6]), "gradient")
coef.dev[ll,(ll-1):ll]=attr(rho.dev(obj$ML.obj$coef[5], obj$ML.obj$coef[6]), "gradient")
vcov=coef.dev%*%obj$ML.obj$vcov%*%t(coef.dev)
coef=c(alpha, beta0, beta1, gamma, sigma, rho)
names(coef)=c("alpha", "beta0", "beta1", "gamma", "sigma", "rho")
mat=matrix(0, ll, 4)
mat[,1]=coef
mat[,2]=sqrt(diag(vcov))
mat[,3]=mat[,1]*exp(-1.96*mat[,2]/mat[,1])
mat[,4]=mat[,1]*exp(+1.96*mat[,2]/mat[,1])
mat[c(2:3,ll),3]=mat[c(2:3,ll),1]-1.96*mat[c(2:3,ll),2]
mat[c(2:3,ll),4]=mat[c(2:3,ll),1]+1.96*mat[c(2:3,ll),2]
colnames(mat)=c("mean", "std", "95% Lower", "95% Upper")
rownames(mat)=names(coef)
ti.CI=addt.confint.ti.mle(obj=obj, conflevel=0.95)
names(ti.CI)=c("est", "std", "95% Lower", "95% Upper")
beta0 <- round(obj$ML.obj$beta.beta0 , 4)
beta1 <- round(obj$ML.obj$beta.beta1 , 4)
Temperature_time <- c(beta0, beta1)
names(Temperature_time) <- c("beta0", "beta1")
obj=c(obj, list(coef.mle.mat=mat), list(ti.CI=ti.CI), list(Temperature_time =Temperature_time))
}
if(!is.null(obj$SemiPara.obj)){
betahat <- obj$SemiPara.obj$betahat
rho <- as.numeric(obj$SemiPara.obj$fit.monotone.bspline$coef["rho"])
knots <- as.numeric(obj$SemiPara.obj$spline.inf$knots)
Boundary <- obj$SemiPara.obj$spline.inf$Boundary.knots
Loglike <- obj$SemiPara.obj$fit.monotone.bspline$loglik
aic <- obj$SemiPara.obj$aic
aicc <- obj$SemiPara.obj$aicc
TI.semi <- obj$SemiPara.obj$TI.TI
beta0 <- obj$SemiPara.obj$TI.beta0
beta1 <- obj$SemiPara.obj$TI.beta1
TI.value <- c(TI.semi, beta0, beta1)
names(TI.value) <- c("TI.semi", "beta0", "beta1")
Semi.knots <- c(knots, Boundary)
names(Semi.knots) <- c("knots", "Left Boundary", "Right Boundary")
parameter <- c(betahat, rho)
names(parameter) <- c("betahat", "rho")
evaluation <- c(Loglike , aic, aicc)
names(evaluation) <- c("Loglikelihood","AIC","AICC")
obj = c(obj , betahat, knots, Loglike, aic, aicc, rho, TI.semi, beta0, beta1)
}
class(obj)="summary.addt.fit"
return(obj)
}
####################################################################################
print.summary.addt.fit=function(x,...)
{
if(!is.null(x$LS.obj)){
cat("Least Squares Approach: \n")
print(round(x$LS.obj$coef0, 4))
cat("est.TI:" , round(x$LS.obj$rti), "\n")
cat("Interpolation time: \n")
print(x$LS.obj$interp.mat)
}
if(!is.null(x$ML.obj)){
cat("\n", "Maximum Likelihood Approach:", "\n")
cat("Call:\n")
print(x$ML.obj$call)
cat("\n")
cat("Parameters:\n")
print(round(x$coef.mle.mat, 4))
cat("\n")
cat("Temperature-Time Relationship: \n")
print(x$Temperature_time)
cat("\n")
#print(c("beta0", "beta1"))
#print(c(round(x$ML.obj$beta.beta0), round(x$ML.obj$beta.beta1)))
cat("TI: \n")
print(round(x$ti.CI, 4))
cat("\n")
cat("Loglikelihood:\n")
print(as.numeric(-x$ML.obj$min))
}
if(!is.null(x$SemiPara.obj)){
betahat <- round(x$SemiPara.obj$betahat,3)
rho <- round(as.numeric(x$SemiPara.obj$fit.monotone.bspline$coef["rho"]) ,3)
knots <- round(as.numeric(x$SemiPara.obj$spline.inf$knots) , 3)
Boundary <-round(x$SemiPara.obj$spline.inf$Boundary.knots ,3)
Loglike <- round(x$SemiPara.obj$fit.monotone.bspline$loglik,3)
aic <- round(x$SemiPara.obj$aic ,3)
aicc <- round(x$SemiPara.obj$aicc ,3)
TI.semi <- round(x$SemiPara.obj$TI.TI ,3)
beta0 <- round(x$SemiPara.obj$TI.beta0 , 3)
beta1 <- round(x$SemiPara.obj$TI.beta1 , 3)
TI.value <- c(TI.semi, beta0, beta1)
names(TI.value) <- c("TI.semi", "beta0", "beta1")
Semi.knots <- c(Boundary[1], knots, Boundary[2])
names(Semi.knots) <- c("Left Boundary",rep("knots", length(knots)), "Right Boundary")
if(!is.null(x$SemiPara.obj$fit.monotone.bspline$rho))
{
parameter <- c(betahat, rho)
names(parameter) <- c("betahat", "rho")
}
if(is.null(x$SemiPara.obj$fit.monotone.bspline$rho))
{
parameter <- c(betahat)
names(parameter) <- c("betahat")
}
evaluation <- c(Loglike , aicc)
names(evaluation) <- c("Loglikelihood","AICC")
cat("\n", "Semi-Parametric Approach:", "\n")
cat("\n")
cat("Parameters Estimates: \n")
print(parameter)
cat("\n")
cat("TI estimates: \n")
print(TI.value)
cat("\n")
cat("Model Evaluations: \n")
print(evaluation)
cat("\n")
cat("B-spline: \n")
print(Semi.knots)
}
}
####################################################################################
plot.addt.fit=function(x, type,...){
if(type=="data"){
addt.data.plot(x$ML.obj$dat)
}
if(type=="ML"){
addt.par.fitted.plot(x$ML.obj,mean.fun="kinetics",zero.correction=F,timeline.show=F,legend.pos=1)
}
if(type=="LS"){
polynomial.interpolation(dat=x$dat,initial.val=x$initial.val,failure.threshold=x$failure.threshold)
}
if(type=="SEMI"){
data0 <- x$dat
colnames(data0) <- c("Temp", "Time", "Response")
addt.data.plot(data0, xlab="Time in Weeks", ylab=expression(paste(Log, " of Strength (Newton)")), main="", legend.pos=1)
colnames(data0) <- c("temp", "time", "response")
monotone.bspline.data0.fitted=aggregate(x$SemiPara.obj$fit.monotone.bspline$yhat, by=list(data0$temp, data0$time), mean)
names(monotone.bspline.data0.fitted)=c("temp", "time", "fitted")
temp.uni=unique(data0$temp)
#monotone.bspline.data0.fitted=monotone.bspline.fit.obj$fit.mat
monotone.bspline.baseline=monotone.bspline.data0.fitted[monotone.bspline.data0.fitted$time==0, "fitted"]
for(i in 1:length(temp.uni)){
lines(c(0, monotone.bspline.data0.fitted[monotone.bspline.data0.fitted$temp==temp.uni[i], "time"]),
c(monotone.bspline.baseline, monotone.bspline.data0.fitted[monotone.bspline.data0.fitted$temp==temp.uni[i], "fitted"]),
lty=i, col=i)
}
}
}
################################################################################
addt.par.fitted.plot=function(obj,mean.fun,zero.correction=F,timeline.show=T,legend.pos=1,initial.val=100,...)
{
dat=obj$dat
addt.data.plot(dat=dat,zero.correction=zero.correction,timeline.show=timeline.show,legend.pos=legend.pos,...)
mtime=max(dat[,"Time"])
stime=ifelse(zero.correction,1,0)
ww=seq(stime,mtime,,1000)
coefs=obj$coef
switch(mean.fun,
"kinetics"={
mfun=kinetics.fun
alpha=exp(coefs[1])
beta0=coefs[2]
beta1=coefs[3]
gamma=exp(coefs[4])
},
"kinetics0"={
mfun=kinetics.fun
alpha=initial.val
beta0=coefs[1]
beta1=coefs[2]
gamma=exp(coefs[3])
},
"power.exponential"={
mfun=power.exponential.fun
alpha=exp(coefs[1])
beta0=coefs[2]
beta1=coefs[3]
gamma=exp(coefs[4])
},
"power.exponential0"={
mfun=power.exponential.fun
alpha=initial.val
beta0=coefs[1]
beta1=coefs[2]
gamma=exp(coefs[3])
},
)
cc2=unique(dat[,"Temp"])
for(i in 1:(length(cc2)))
{
yy=mfun(tt=ww, temp=cc2[i], alpha=alpha, beta0=beta0, beta1=beta1, gamma=gamma)
lines(ww,yy,lwd=3,col=i)
}
}
################################################################################
addt.data.plot=function(dat,zero.correction=F,timeline.show=T,legend.pos=1,xlab,ylab,...)
{
cc=dat[,"Temp"]
cc=as.factor(cc)
cc1=as.numeric(cc)
if(zero.correction)
{
dat[dat[,"Time"]==0,"Time"]=1
}
#browser()
mm=max(dat[,"Response"])
mmin=min(dat[,"Response"])
mtime=max(dat[,"Time"])
mintime=min(dat[,"Time"])
if(!zero.correction)
{
mintime=0
}
plot(dat[,"Time"], dat[,"Response"],col=cc1,xlim=c(mintime,1.05*mtime),ylim=c(.8*mmin,1.2*mm),pch=cc1,xlab="Time", ylab="Responses Values",las=1,lwd=2,...)
#browser()
if(legend.pos==1)
{
legend(0.7*mtime,1.23*mm,paste(levels(cc),"C",sep=""),col=unique(cc1),pch=unique(cc1),lwd=2,lty=NA,bty="n")
}
if(legend.pos==2)
{
legend(mintime,.5*mm,paste(levels(cc),"C",sep=""),col=unique(cc1),pch=unique(cc1),lwd=2,lty=NA,bty="n")
}
if(timeline.show)
{
tt=unique(dat[,"Time"])
abline(v=tt,col="grey",lty=2)
text(tt,rep(.9*mmin,length(tt)),tt,cex=.9)
}
}
################################################################################
lsa.fit=function(dat, initial.val, failure.threshold, time.rti)
{
dK=273.16
# polynominal fit to the data
dat0=polynomial.interpolation(dat=dat,initial.val=initial.val,failure.threshold=failure.threshold,plot=F,plot.all=F)
dat0=dat0[!is.na(dat0[,2]),]
# least square fit to the data
fit0=lm(I(log10(dat0[,2]))~I(1/(dat0[,1]+dK)))
coef0=fit0$coef
names(coef0)=c("beta0", "beta1")
rti0=coef0[2]/(log10(time.rti)-coef0[1])-dK
res=list(coef0=coef0, interp.mat=dat0, rti0=rti0)
class(res)="addt.fit.lsa"
return(res)
}
################################################################################
lifetime.mle=function(dat, minusloglik, starts, method = "BFGS",hessian = TRUE,...)
{
call=match.call()
f = function(p) {
minusloglik(dat,p)
}
oout = optim(starts, f, method = method, hessian = hessian,...)#,control=list(trace=T))
coef = oout$par
#browser()
if(hessian)
{
vcov =solve(oout$hessian)
}else{
vcov=NULL
}
min = oout$value
invisible(list(call = call, coef = coef,vcov = vcov, min = min,dat=dat,minusloglik=minusloglik))
}
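# Illustrative sketch (if(FALSE)-guarded): lifetime.mle() is a thin wrapper around optim(), so
# any minus log-likelihood written as a function of (dat, pars) can be plugged in. The toy
# normal likelihood below is made up purely to show the calling convention and the returned
# coef/vcov/min elements; it is not part of the ADDT degradation model.
if (FALSE) {
  demo.dat <- data.frame(y=rnorm(50, mean=5, sd=2))
  demo.negll <- function(dat, pars) {
    -sum(dnorm(dat$y, mean=pars[1], sd=exp(pars[2]), log=TRUE))
  }
  demo.fit <- lifetime.mle(dat=demo.dat, minusloglik=demo.negll, starts=c(0, 0), method="BFGS")
  demo.fit$coef
  sqrt(diag(demo.fit$vcov))
}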
################################################################################
# sample mean at each combo of time and temp
addt.mean.summary=function(dat)
{
aa=tapply(dat[,3],dat[,1:2],"mean")
bb=rownames(aa)
bb=as.numeric(bb)
cc=colnames(aa)
cc=as.numeric(cc)
mm=dim(aa)[1]
nn=dim(aa)[2]
res=data.frame(Temp=rep(bb,nn), Time=rep(cc,rep(mm,nn)), Response=as.vector(aa))
res=res[!is.na(res[,3]),]
res=res[order(res[,1],res[,2]),]
rownames(res)=NULL
return(res)
}
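# Illustrative sketch (if(FALSE)-guarded): averaging replicate responses at every Temp/Time
# combination. The three-column (Temp, Time, Response) layout matches the data frame that
# addt.fit() assembles internally; the numbers are made up.
if (FALSE) {
  demo.addt <- data.frame(Temp=rep(c(50, 60), each=4),
                          Time=rep(c(100, 100, 200, 200), 2),
                          Response=c(95, 97, 90, 88, 92, 91, 80, 83))
  addt.mean.summary(dat=demo.addt)
}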
################################################################################
addt.confint.ti.mle=function(obj,conflevel)
{
if(is.null(obj$ML.obj)) {
stop("this function can only use for maximum likelihood approach")
}
#browser()
failure.threshold=obj$failure.threshold
initial.val=obj$initial.val
time.rti=obj$time.rti
obj=obj$ML.obj
Sigma.part=obj$vcov[c(2:4),c(2:4)]
beta0.hat=obj$coef[2]
beta1.hat=obj$coef[3]
pp=failure.threshold/100
gamma.hat=exp(obj$coef[4])
gamma.term=(1/gamma.hat)*log((1-pp)/pp)
ti.hat=true.ti.compute(pars=obj$coef,failure.threshold=failure.threshold,initial.val=initial.val,time.rti=time.rti)$rti
beta0.log10=beta0.hat/log(10)
beta1.log10=beta1.hat/log(10)
gamma.log10=gamma.hat*log(10)
gamma.term.log10=log((1-pp)/pp)/gamma.log10
den=log10(time.rti)-beta0.log10-gamma.term.log10
partial.b0=beta1.log10/(den^2)
partial.b1=1/den
partial.gamma=-(beta1.log10/(den^2))*(gamma.term.log10/gamma.log10)
Sigma.part.trans=diag(c(1/log(10),1/log(10),log(10)*gamma.hat))%*%Sigma.part%*%diag(c(1/log(10),1/log(10),log(10)*gamma.hat))
Sigma.ti=t(c(partial.b0,partial.b1,partial.gamma))%*%Sigma.part.trans%*%c(partial.b0,partial.b1,partial.gamma)
CI=c(ti.hat-qnorm(0.5+conflevel/2)*sqrt(Sigma.ti),ti.hat+qnorm(0.5+conflevel/2)*sqrt(Sigma.ti))
#cat(100*(1-conflevel), "% CI is ", "(", round(CI[1], 3), ",", round(CI[2], 3), ") \n", sep="")
CI[1]=ifelse(CI[1]<0, 0, CI[1])
res=c(ti.hat, sqrt(Sigma.ti), CI)
names(res)=c("est.", "s.e.", "lower", "upper")
return(res)
}
################################################################################
addt.predint.ybar.mle=function(obj,conflevel,num.fut.obs=5,temp,tt)
{
if(is.null(obj$ML.obj)) {
stop("this function can only use for maximum likelihood approach")
}
obj=obj$ML.obj
alpha.hat=exp(obj$coef[1])
beta0.hat=obj$coef[2]
beta1.hat=obj$coef[3]
gamma.hat=exp(obj$coef[4])
sigma.sq.hat=exp(obj$coef[5])^2+exp(obj$coef[6])^2
rho.hat=exp(obj$coef[6])^2/sigma.sq.hat
Sigma.betaf=diag(c(alpha.hat,1,1,gamma.hat))%*%obj$vcov[c(1:4),c(1:4)]%*%diag(c(alpha.hat,1,1,gamma.hat))
ybar.hat=kinetics.fun(tt=tt, temp=temp, alpha=alpha.hat, beta0=beta0.hat, beta1=beta1.hat, gamma=gamma.hat)
partial.vec=first.dev.kinetics.fun(tt=tt, temp=temp, alpha=alpha.hat, beta0=beta0.hat, beta1=beta1.hat, gamma=gamma.hat)
Sigma.ybar=sigma.sq.hat*(rho.hat+(1-rho.hat)/num.fut.obs)+
t(partial.vec)%*%Sigma.betaf%*%partial.vec
return(c(ybar.hat-qnorm(0.5+conflevel/2)*sqrt(Sigma.ybar),ybar.hat+qnorm(0.5+conflevel/2)*sqrt(Sigma.ybar)))
}
################################################################################
first.dev.kinetics.fun=function(tt, temp, alpha, beta0, beta1, gamma){
x=1/(temp+273.16)
partial.beta0 = alpha*gamma*(tt^gamma)*
(1+(tt^gamma)*exp(-(beta0+beta1*x)*gamma))^(-2)*exp(-(beta0+beta1*x)*gamma)
partial.beta1 = alpha*x*gamma*(tt^gamma)*
(1+(tt^gamma)*exp(-(beta0+beta1*x)*gamma))^(-2)*exp(-(beta0+beta1*x)*gamma)
partial.gamma = -alpha*(tt^gamma)*
(1+(tt^gamma)*exp(-(beta0+beta1*x)*gamma))^(-2)*exp(-(beta0+beta1*x)*gamma)*
(log(tt)-(beta0+beta1*x))
partial.alpha=(1+(tt^gamma)*exp(-(beta0+beta1*x)*gamma))^(-1)
return(c(partial.alpha, partial.beta0, partial.beta1, partial.gamma))
}
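# Hypothetical numerical check (illustrative addition, not part of the original source):
# compare the analytic gradient of kinetics.fun() with central finite differences.
# The parameter values are assumptions used only for illustration.
if(FALSE)
{
pars.demo=c(alpha=100, beta0=2, beta1=2500, gamma=1.5)
f.demo=function(p){kinetics.fun(tt=1000, temp=60, alpha=p[1], beta0=p[2], beta1=p[3], gamma=p[4])}
num.grad=sapply(1:4, function(i){
  h=1e-6*max(1,abs(pars.demo[i]))
  e=replace(rep(0,4),i,h)
  (f.demo(pars.demo+e)-f.demo(pars.demo-e))/(2*h)
})
cbind(analytic=first.dev.kinetics.fun(tt=1000, temp=60, alpha=100, beta0=2, beta1=2500, gamma=1.5),
      numeric=num.grad)
}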
################################################################################
# initial.val is specified if no records are available at time 0
addt.data.normalization=function(dat, initial.val)
{
temps=unique(dat[,"Temp"])
nn=length(temps)
if(!any(dat[,"Time"]==0))
{
tmp1=cbind(Temp=temps, Time=0, Response=100)
dat[,"Response"]=dat[,"Response"]/initial.val*100
dat=rbind(tmp1,dat)
dat=dat[order(dat[,"Temp"],dat[,"Time"]),]
rownames(dat)=NULL
res=dat
}
if(any(dat[,"Time"]==0))
{
initial.val=dat[dat[,"Temp"]==min(temps) & dat[,"Time"]==0,"Response"]
res=NULL
for(i in 1:nn)
{
xtmp=dat[dat[,"Temp"]==temps[i],]
if(any(xtmp[,"Time"]==0))
{
xtmp[,"Response"]=xtmp[,"Response"]/xtmp[xtmp[,"Time"]==0,"Response"]*100
}else{
xtmp[,"Response"]=xtmp[,"Response"]/initial.val*100
xtmp=rbind(xtmp,c(temps[i],0,100))
}
res=rbind(res,xtmp)
}
res=res[order(res[,"Temp"],res[,"Time"]),]
rownames(res)=NULL
}
return(res)
}
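# Hypothetical usage sketch (illustrative addition, not part of the original source):
# without time-0 records every response is rescaled by the assumed initial value and
# a 100% baseline row is added for each temperature.
if(FALSE)
{
dat.demo=data.frame(Temp=rep(c(50,60),each=2),
                    Time=rep(c(500,1000),2),
                    Response=c(95,90,92,84))
addt.data.normalization(dat.demo, initial.val=100)
}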
################################################################################
polynomial.interpolation=function(dat,initial.val=100,failure.threshold=80,plot.all=T,plot=T)
{
dat=addt.mean.summary(dat=dat)
dat=addt.data.normalization(dat=dat, initial.val=initial.val)
temps=unique(dat[,"Temp"])
nn=length(temps)
tres=rep(NA,nn)
if(plot)
{
if(plot.all)
{
plot(0,0,type="n",xlim=c(0,max(dat[,"Time"])),ylim=c(0,150),ylab="Response (Relative %)",xlab="Time (hours)")
abline(h=failure.threshold)
}else{
par(mfrow=c(ceiling(nn/round(sqrt(nn))),round(sqrt(nn))))
}
}
for(i in 1:nn)
{
idx=(dat[,"Temp"]==temps[i])
if(sum(idx)>=3)
{
yy=dat[idx,"Response"]
xx=dat[idx,"Time"]
#browser()
if(sum(idx)==3 & min(yy)<failure.threshold)
{
afit=lm(yy~I(xx)+I(xx^2))
coefs=afit$coef
tt=seq(0,max(xx),,1000)
yyhat=coefs[1]+coefs[2]*tt+coefs[3]*tt^2
ctmp=polyroot(c(coefs[1]-failure.threshold,coefs[2:3]))
#print(ctmp)
ctmp1=Re(ctmp[abs(Im(ctmp))<1e-5])
ctmp2=ctmp1[ctmp1>0 & ctmp1<=max(xx)]
if(length(ctmp2)>0)
{
tres[i]=min(ctmp2)
}else{
ff=approx(yyhat, tt, failure.threshold)
tres[i]=min(ff$y)
}
#browser()
if(plot)
{
if(!plot.all)
{
plot(tt,yyhat,type="l",ylim=c(0,100),xlim=c(0,max(xx)),xlab="Time",ylab="Response (Relative %)")
abline(h=failure.threshold)
points(xx,yy)
}else{
lines(tt,yyhat,lwd=2,col=i)
points(xx,yy,col=i,pch=i,lwd=2)
}
points(tres[i],failure.threshold,pch=13,col=2,lwd=2)
}
}
if(sum(idx)>3 & min(yy)<failure.threshold)
{
afit=lm(yy~I(xx)+I(xx^2)+I(xx^3))
coefs=afit$coef
tt=seq(0,max(xx),,1000)
yyhat=coefs[1]+coefs[2]*tt+coefs[3]*tt^2+coefs[4]*tt^3
#ff=approx(yyhat, tt, failure.threshold)
#tres[i]=min(ff$y)
#browser()
ctmp=polyroot(c(coefs[1]-failure.threshold,coefs[2:4]))
#print(ctmp)
ctmp1=Re(ctmp[abs(Im(ctmp))<1e-5])
ctmp2=ctmp1[ctmp1>0 & ctmp1<=max(xx)]
if(length(ctmp2)>0)
{
tres[i]=min(ctmp2)
}else{
ff=approx(yyhat, tt, failure.threshold)
tres[i]=min(ff$y)
}
if(plot)
{
if(!plot.all)
{
plot(tt,yyhat,type="l",ylim=c(0,100),xlim=c(0,max(xx)),xlab="Time",ylab="Response (Relative %)")
abline(h=failure.threshold)
points(xx,yy)
}else{
lines(tt,yyhat,lwd=2,col=i)
points(xx,yy,col=i,pch=i,lwd=2)
}
points(tres[i],failure.threshold,pch=13,col=2,lwd=2)
}
}
}
}
res=cbind(temps,tres)
colnames(res)=c("Temp","Time")
if(plot.all)
{
legend(0.7*max(dat[,"Time"]),150, paste("T_",res[,"Temp"],"=",round(res[,"Time"]),sep="") ,pch=1:nn,col=1:nn,lty=1,lwd=2,bty="n")
}
return(res)
}
################################################################################
addt.mle.initial.val=function(dat=dat, time.rti=100000, initial.val=100, failure.threshold=70, failure.threshold1=50)
{
#browser()
base.factor=log10(exp(1))
dK=273.16
yy=dat[,"Response"]
tt=dat[,"Time"]
temp=dat[,"Temp"]
#initial value for alpha
alpha=mean(yy[tt==0])
#initial value for beta0, beta1 and gamma
tmp.sfit1=lsa.fit(dat=dat,time.rti=time.rti,initial.val=initial.val,failure.threshold=failure.threshold)
tmp.sfit2=lsa.fit(dat=dat,time.rti=time.rti,initial.val=initial.val,failure.threshold=failure.threshold1)
pp1=failure.threshold/100
pp2=failure.threshold1/100
coef1=tmp.sfit1$coef0
coef2=tmp.sfit2$coef0
#initial value for beta1
beta1=coef1[2]
sbeta0=log10(time.rti)-beta1/(tmp.sfit2$rti0+dK)
cc1=log((1-pp1)/pp1)
cc2=log((1-pp2)/pp2)
kk1=coef1[1]/base.factor
kk2=sbeta0/base.factor
kk3=(kk1-kk2)/(cc1-cc2)
gamma=1/kk3
if(gamma<0)
{
gamma=1
}
beta0=kk1-kk3*cc1
beta1=beta1/11605/base.factor
beta0=beta0+beta1*11605/(min(temp)+dK)
aa=kinetics.fun1(tt=tt, temp=temp, alpha=alpha, beta0=beta0, beta1=beta1, gamma=gamma)
des=yy-aa
sigma=sqrt(mean(des^2))
start.val0=c(log(alpha), beta0, beta1, log(gamma), log(sigma))
names(start.val0)=NULL
#browser()
tmp.fit=lifetime.mle(dat=dat, minusloglik=minus.loglik.kinetics.no.cor, starts=start.val0, method = "Nelder-Mead", control=list(maxit=10000))
#cat("-log likelihood without correlation:", tmp.fit$min, "\n")
tcoef=tmp.fit$coef
alpha=exp(tcoef[1])
beta0=tcoef[2]
beta1=tcoef[3]
gamma=exp(tcoef[4])
aa1=kinetics.fun1(tt=tt, temp=temp, alpha=alpha, beta0=beta0, beta1=beta1, gamma=gamma)
des1=yy-aa1
xtmp=tapply(des1,paste(temp,tt),"mean")
sigma1=sd(xtmp)
res=c(tcoef,log(sigma1))
return(res)
}
################################################################################
minus.loglik.kinetics.no.cor=function(dat,pars)
{
#print(pars)
yy=dat[,"Response"]
tt=dat[,"Time"]
temp=dat[,"Temp"]
alpha=exp(pars[1])
beta0=pars[2]
beta1=pars[3]
gamma=exp(pars[4])
sigma=exp(pars[5])
sigma1=0#exp(pars[6]) #batch level
####
#yy=yy/100
#A=A/100
#######
aa=kinetics.fun1(tt=tt, temp=temp, alpha=alpha, beta0=beta0, beta1=beta1, gamma=gamma)
time.rti=yy-aa
#plot(tt,aa)
#browser()
temp.time=paste(dat[,"Temp"],"C",dat[,"Time"],"H",sep="")
cc=unique(temp.time)
nn=length(cc)
###
res=0
for(i in 1:nn)
{
#browser()
idx=(temp.time==cc[i])
pp=sum(idx)
SS=diag(sigma^2,pp)+sigma1^2*matrix(1,pp,pp)*as.numeric(pp>1)
SS.inv=solve(SS)
tdd=time.rti[idx]
tres=-0.5*pp*log(2*pi)-0.5*log(det(SS))-0.5*t(tdd)%*%SS.inv%*%tdd
#print(tres)
res=res+tres
#print(round(SS.inv,3))
#browser()
}
res=as.vector(res)
res=(-1)*res
#print(res)
return(res)
}
################################################################################
minus.loglik.kinetics=function(dat,pars)
{
#print(pars)
yy=dat[,"Response"]
tt=dat[,"Time"]
temp=dat[,"Temp"]
alpha=exp(pars[1])
beta0=pars[2]
beta1=pars[3]
gamma=exp(pars[4])
sigma=exp(pars[5])
sigma1=exp(pars[6]) #batch level
####
#yy=yy/100
#A=A/100
#######
aa=kinetics.fun1(tt=tt, temp=temp, alpha=alpha, beta0=beta0, beta1=beta1, gamma=gamma)
time.rti=yy-aa
#plot(tt,aa)
#browser()
temp.time=paste(dat[,"Temp"],"C",dat[,"Time"],"H",sep="")
cc=unique(temp.time)
nn=length(cc)
###
res=0
for(i in 1:nn)
{
#browser()
idx=(temp.time==cc[i])
pp=sum(idx)
SS=diag(sigma^2,pp)+sigma1^2*matrix(1,pp,pp)*as.numeric(pp>1)
SS.inv=solve(SS)
tdd=time.rti[idx]
tres=-0.5*pp*log(2*pi)-0.5*log(det(SS))-0.5*t(tdd)%*%SS.inv%*%tdd
#print(tres)
res=res+tres
#print(round(SS.inv,3))
#browser()
}
res=as.vector(res)
res=(-1)*res
#print(res)
return(res)
}
################################################################################
mle.transform=function(obj)
{
#browser()
dat=obj$dat
coefs=obj$coef
vcovs=obj$vcov
temp=dat[,"Temp"]
x=11605/(temp+273.16)
mx=min(x)
mat=diag(length(coefs))
mat[2,3]=-mx
mat[3,3]=11605
coefs=mat%*%coefs
coefs=as.vector(coefs)
vcovs=mat%*%vcovs%*%t(mat)
coefs->obj$coef
vcovs->obj$vcov
return(obj)
}
################################################################################
kinetics.fun=function(tt, temp, alpha, beta0, beta1, gamma)
{
#browser()
x=1/(temp+273.16)
mu=beta0+beta1*x
nu=1/gamma
zz=(log(tt)-mu)/nu
res=alpha/(1+exp(zz))
return(res)
}
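# Hypothetical usage sketch (illustrative addition, not part of the original source):
# evaluate the parametric kinetics mean curve over time at two temperatures.
# The parameter values below are assumptions, not fitted estimates.
if(FALSE)
{
tt.demo=seq(1,5000,length.out=200)
plot(tt.demo, kinetics.fun(tt=tt.demo, temp=80, alpha=100, beta0=2, beta1=2500, gamma=1.5),
     type="l", ylim=c(0,100), xlab="Time", ylab="Mean response")
lines(tt.demo, kinetics.fun(tt=tt.demo, temp=50, alpha=100, beta0=2, beta1=2500, gamma=1.5), col=2)
}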
################################################################################
#used for MLE
kinetics.fun1=function(tt, temp, alpha, beta0, beta1, gamma)
{
#browser()
x=11605/(temp+273.16)
mx=min(x)
mu=beta0+beta1*(x-mx)
nu=1/gamma
zz=(log(tt)-mu)/nu
res=alpha/(1+exp(zz))
return(res)
}
######################
# Semi-parametric model
######################
############################################################################
monotone.bspline.fit.knotselection.output=function(test.dat, n.knots.vec=1:5, degree=3, plot.loglike.fun=TRUE, use.aic=FALSE, use.aicc=TRUE, Boundary.knots=NULL, deg.vec=2:4, cov.fun.type){
#deg.vec=2:4
#browser()
aicc.vec=lld.vec=edf.vec=NULL
knot.list=NULL
nknots.vec=NULL
knotseq.vec=NULL
for(m in 1:length(deg.vec)){
tmp=monotone.bspline.fit.knotselection(test.dat=test.dat, degree=deg.vec[m], Boundary.knots=Boundary.knots, n.knots.vec=n.knots.vec, plot.loglike.fun=plot.loglike.fun, use.aic=use.aic, use.aicc=use.aicc, cov.fun.type=cov.fun.type)
aicc.vec=c(aicc.vec, tmp$aicc)
knot.list=c(knot.list, list(knots=tmp$knots))
}
for(k in 1:length(knot.list)){
nknots.vec[k]=length(knot.list[[k]])
knotseq.vec[k]=paste(round(knot.list[[k]], 2), collapse=", ")
tmp=bspline.fit.addt(test.dat=test.dat, n.knots=NULL, knots=knot.list[[k]], degree=deg.vec[k], plot.loglike.fun=T, Boundary.knots=Boundary.knots,
cov.fun.type=cov.fun.type)
lld.vec[k]=tmp$lld
edf.vec[k]=tmp$fit.monotone.bspline$edf
}
res=data.frame(2:4, lld.vec, edf.vec, aicc.vec, nknots.vec, knotseq.vec)
names(res)=c("Order", "Loglik", "edf", "AICC", "NumKnots", "Knots")
#print(xtable(res), include.rownames=FALSE)
return(invisible(res[which.min(res$AICC),]))
}
############################################################################
monotone.bspline.fit.nocor.knotselection.output=function(test.dat, n.knots.vec=1:5, degree=3, plot.loglike.fun=TRUE, use.aic=FALSE, use.aicc=TRUE, Boundary.knots=NULL, deg.vec=2:4, cov.fun.type=NULL){
#deg.vec=2:4
#browser()
aicc.vec=lld.vec=edf.vec=NULL
knot.list=NULL
nknots.vec=NULL
knotseq.vec=NULL
for(m in 1:length(deg.vec)){
tmp=monotone.bspline.fit.nocor.knotselection(test.dat=test.dat, degree=deg.vec[m], Boundary.knots=Boundary.knots, n.knots.vec=n.knots.vec, plot.loglike.fun=plot.loglike.fun, use.aic=use.aic, use.aicc=use.aicc, cov.fun.type=cov.fun.type)
aicc.vec=c(aicc.vec, tmp$aicc)
knot.list=c(knot.list, list(knots=tmp$knots))
}
for(k in 1:length(knot.list)){
nknots.vec[k]=length(knot.list[[k]])
knotseq.vec[k]=paste(round(knot.list[[k]], 2), collapse=", ")
tmp=bspline.fit.addt.nocor(test.dat=test.dat, n.knots=NULL, knots=knot.list[[k]], degree=deg.vec[k], plot.loglike.fun=T, Boundary.knots=Boundary.knots)
lld.vec[k]=tmp$lld
edf.vec[k]=tmp$fit.monotone.bspline$edf
}
res=data.frame(2:4, lld.vec, edf.vec, aicc.vec, nknots.vec, knotseq.vec)
names(res)=c("Order", "Loglik", "edf", "AICC", "NumKnots", "Knots")
#print(xtable(res), include.rownames=FALSE)
return(invisible(res[which.min(res$AICC),]))
}
###################################################################################
### fit monotone b-spline to the data
monotone.bspline.fit.knotselection=function(test.dat, n.knots.vec=1:5, degree=3, plot.loglike.fun=TRUE, use.aic=FALSE, use.aicc=TRUE, Boundary.knots=NULL, cov.fun.type){
names(test.dat)=c("temp", "time", "Converted.Value")
## first step: find the number of knots that gives the minimum aic
betahat.vec=aic=aicc=lld=rep(0, length(n.knots.vec))
tmp.cat=function(syb, nsyb)
{
for(cat.count in 1:nsyb)
{
cat(syb)
}
}
cat("|")
tmp.cat(">", sum(n.knots.vec))
cat("|\n|")
for(nk in 1:length(n.knots.vec))
{
tmp.cat(">", nk)
bspline.fit.addt.obj=bspline.fit.addt(test.dat=test.dat, n.knots=n.knots.vec[nk], knots=NULL, degree=degree, plot.loglike.fun=plot.loglike.fun, Boundary.knots=Boundary.knots, cov.fun.type=cov.fun.type)
betahat.vec[nk]=bspline.fit.addt.obj$betahat
lld[nk]=bspline.fit.addt.obj$lld
aic[nk]=bspline.fit.addt.obj$aic
aicc[nk]=bspline.fit.addt.obj$aicc
}
cat("|\n")
#browser()
#########################################################
# use aic
if(use.aic){
nknotsbest=n.knots.vec[which.min(aic)]
xt.mat=addt.bsplines.xmat.obj.fun(dat=test.dat, beta=betahat.vec[which.min(aic)], n.knots=nknotsbest, degree=degree, plot.splines=F, Boundary.knots=Boundary.knots)
knots.nknotsbest=xt.mat$knots
## stepwise knot deletion
aic.dele=rep(0, length(knots.nknotsbest))
for(j in 1:length(knots.nknotsbest)){
bspline.fit.addt.obj=bspline.fit.addt(test.dat=test.dat, n.knots=NULL, knots=knots.nknotsbest[-j], degree=degree, plot.loglike.fun=plot.loglike.fun, Boundary.knots=Boundary.knots, cov.fun.type=cov.fun.type)
aic.dele[j]=bspline.fit.addt.obj$aic
}
# stepwise knot addition
}
#########################################################
# use aicc
if(use.aicc){
nknotsbest=n.knots.vec[which.min(aicc)]
xt.mat=addt.bsplines.xmat.obj.fun(dat=test.dat, beta=betahat.vec[which.min(aicc)], n.knots=nknotsbest, degree=degree, plot.splines=F, Boundary.knots=Boundary.knots)
aicc.cur=min(aicc)
aicc.diff=1
knots.nknotsbest=xt.mat$knots
  # if the number of knots equals 1, don't perform knot deletion
if(nknotsbest>1){
## stepwise knot deletion
while(aicc.diff>0){
aicc.dele=rep(0, length(knots.nknotsbest))
for(j in 1:length(knots.nknotsbest)){
bspline.fit.addt.obj=try(bspline.fit.addt(test.dat=test.dat, n.knots=NULL, knots=knots.nknotsbest[-j], degree=degree, plot.loglike.fun=plot.loglike.fun, Boundary.knots=Boundary.knots, cov.fun.type=cov.fun.type), silent=T)
if(!is.null(bspline.fit.addt.obj)){
aicc.dele[j]=bspline.fit.addt.obj$aicc
}
}
aicc.diff=aicc.cur-min(aicc.dele)
if(aicc.diff>0){
knots.nknotsbest=knots.nknotsbest[-which.min(aicc.dele)]
aicc.cur=min(aicc.dele)
}
}
# stepwise knot addition
}
}
return(list(knots=knots.nknotsbest, aicc=aicc.cur))
}
###################################################################################
### fit monotone b-spline to the data, no correlation
monotone.bspline.fit.nocor.knotselection=function(test.dat, n.knots.vec=1:5, degree=3, plot.loglike.fun=TRUE, use.aic=FALSE, use.aicc=TRUE, Boundary.knots=NULL, cov.fun.type){
names(test.dat)=c("temp", "time", "Converted.Value")
## first step: find the number of knots that gives the minimum aic
betahat.vec=aic=aicc=lld=rep(0, length(n.knots.vec))
tmp.cat=function(syb, nsyb)
{
for(cat.count in 1:nsyb)
{
cat(syb)
}
}
cat("|")
tmp.cat(">", sum(n.knots.vec))
cat("|\n|")
for(nk in 1:length(n.knots.vec))
{
tmp.cat(">", nk)
bspline.fit.addt.obj=bspline.fit.addt.nocor(test.dat=test.dat, n.knots=n.knots.vec[nk], knots=NULL, degree=degree, plot.loglike.fun=plot.loglike.fun, Boundary.knots=Boundary.knots)
betahat.vec[nk]=bspline.fit.addt.obj$betahat
lld[nk]=bspline.fit.addt.obj$lld
#aic[nk]=bspline.fit.addt.obj$aic
aicc[nk]=bspline.fit.addt.obj$aicc
}
cat("|\n")
#browser()
#########################################################
# use aicc
if(use.aicc){
nknotsbest=n.knots.vec[which.min(aicc)]
xt.mat=addt.bsplines.xmat.obj.fun(dat=test.dat, beta=betahat.vec[which.min(aicc)], n.knots=nknotsbest, degree=degree, plot.splines=F, Boundary.knots=Boundary.knots)
aicc.cur=min(aicc)
aicc.diff=1
knots.nknotsbest=xt.mat$knots
  # if the number of knots equals 1, don't perform knot deletion
if(nknotsbest>1){
## stepwise knot deletion
while(aicc.diff>0){
aicc.dele=rep(0, length(knots.nknotsbest))
for(j in 1:length(knots.nknotsbest)){
bspline.fit.addt.obj=bspline.fit.addt.nocor(test.dat=test.dat, n.knots=NULL, knots=knots.nknotsbest[-j], degree=degree, plot.loglike.fun=plot.loglike.fun, Boundary.knots=Boundary.knots)
aicc.dele[j]=bspline.fit.addt.obj$aicc
}
aicc.diff=aicc.cur-min(aicc.dele)
if(aicc.diff>0){
knots.nknotsbest=knots.nknotsbest[-which.min(aicc.dele)]
aicc.cur=min(aicc.dele)
}
}
# stepwise knot addition
}
}
return(list(knots=knots.nknotsbest, aicc=aicc.cur))
}
##############################################################################
bspline.fit.addt=function(test.dat, n.knots, knots, degree, plot.loglike.fun=T, Boundary.knots=NULL, rho.method="other", cov.fun.type){
names(test.dat)=c("temp", "time", "Converted.Value")
#browser()
aa=seq(0.01, 1.93,len=50)
#aa=seq(0.01, 3, len=100)
#aa=seq(0.2, 1, len=60)
#aa=seq(0.01, 0.5, len=50)
raa=raa.lme=rep(NA, length(aa))
if(is.null(n.knots)){ncoef=length(knots)+degree+1}else{ncoef=n.knots+degree+1}
coef.mat.lme=matrix(NA, length(aa), ncoef+2)
for(i in 1:length(aa))
{
#print(i)
xt.mat=addt.bsplines.xmat.obj.fun(dat=test.dat, beta=aa[i], n.knots=n.knots, knots=knots, degree=degree, plot.splines=F, Boundary.knots=Boundary.knots)
fit.dat.obj=xmat.obj.to.xmat(dat=test.dat,xmat.obj=xt.mat, intercept=FALSE)
y=fit.dat.obj$dat[,"Y"]
X=as.matrix(fit.dat.obj$dat[,3:(xt.mat$dfs+2)])
ID=fit.dat.obj$dat[,"Batch"]
#browser()
# treat the results from B-spline as initial values
#fit.gls <- try(gls(y~X-1, correlation=corCompSymm(form = ~ 1 | ID)), silent=T)
# treat the results from B-spline as initial values
fit.lme <- try( lme(y~X-1, random = ~ 1|ID), silent=T)
# treat the results from B-spline as initial values
lme.flag=(attr(fit.lme,"class")=="try-error")
pp=NULL
if(!lme.flag){pp=as.vector(attr(fit.lme$apVar,"Pars"))}
if(is.null(pp))
{
#res=list(conv=F)
#return(res)
ss=0.019
rho=0.2
}else{
ts1=exp(pp[1]) #reStruct.Batch
ts0=exp(pp[2]) #lSigma
ss=sqrt(ts0^2+ts1^2)
rho=ts1^2/(ts0^2+ts1^2)
}
if(!lme.flag){
raa.lme[i]=loglik.compute(mm=length(unique(ID)), dat=fit.dat.obj$dat, ids=unique(ID), ss=ss, rho=rho,
yhat=as.numeric(X%*%fixef(fit.lme)))$loglik
coef.mat.lme[i,]=c(fixef(fit.lme),ss,rho)
#if(class(fit.gls)!="try-error"){
#ss=fit.gls$sigma
#rho=coef(summary(fit.gls)$modelStruct$corStruct, unconstrained=FALSE)
#rho=ifelse(rho<0, 0.1, rho)
#browser()
#if(i==22 &cov.fun.type=="cov.fun.REMLc"){browser()}
fit.monotone.bspline=try(bspline.lme.addt.cone(dat.obj=fit.dat.obj,beta.vec0=fixef(fit.lme),theta0=c(ss, rho), cov.fun.type=cov.fun.type, rho.method=rho.method), silent=T)
#print(fit.monotone.bspline$coef["sigma"])
#print(fit.monotone.bspline$coef["rho"])
if(all(class(fit.monotone.bspline)!="try-error")){
if(fit.monotone.bspline$conv==TRUE){
if(fit.monotone.bspline$iter>=20){
#print("greater than 20")
fit.monotone.bspline=try(bspline.lme.addt.cone(dat.obj=fit.dat.obj,beta.vec0=fixef(fit.lme),theta0=c(ss, rho), cov.fun.type=cov.fun.type,rho.method=rho.method, adjust.stepsize=T), silent=T)
#save(test.dat, file="test.dat")
}
raa[i]=fit.monotone.bspline$loglik
}
}
}
#print(i)
}
#max.id=which.max(raa)
#print(raa)
#browser()
#pineapple
#if(plot.loglike.fun){
# if(is.null(n.knots)){
# plot(aa,raa,type="l", xlab=expression(beta), ylab="log likelihood", main=paste("monotone b-spline, knots:",paste(round(knots, 2), collapse = ",")))
# abline(v=aa[which.max(raa)], lty=2, col=2)
# } else{
# plot(aa,raa,type="l", xlab=expression(beta), ylab="log likelihood", main=paste("monotone b-spline, number of interior knots:", n.knots))
# abline(v=aa[which.max(raa)], lty=2, col=2)
# }
#}
# check whether it is a global maximum point for beta
#if((all(diff(raa)[1:(max.id-1)]>0)) & (all(diff(raa)[max.id:(length(raa)-1)]<0))){
# betahat=aa[which.max(raa)]
#} else{
# stop("cannot find the betahat")
#}
if(all(is.na(raa))){
return(list(conv=F))
}
betahat=aa[which.max(raa)]
coef.lme=c(aa[which.max(raa.lme)], coef.mat.lme[which.max(raa.lme),])
xt.mat=addt.bsplines.xmat.obj.fun(dat=test.dat, beta=betahat, n.knots=n.knots, knots=knots, degree=degree, plot.splines=F, Boundary.knots=Boundary.knots)
fit.dat.obj=xmat.obj.to.xmat(dat=test.dat,xmat.obj=xt.mat, intercept=FALSE)
y=fit.dat.obj$dat[,"Y"]
X=as.matrix(fit.dat.obj$dat[,3:(xt.mat$dfs+2)])
ID=fit.dat.obj$dat[,"Batch"]
fit.lme <- try( lme(y~X-1, random = ~ 1|ID), silent=T)
# treat the results from B-spline as initial values
lme.flag=(attr(fit.lme,"class")=="try-error")
if(!lme.flag){pp=as.vector(attr(fit.lme$apVar,"Pars"))}
if(is.null(pp))
{
#res=list(conv=F)
#return(res)
ss=0.019
rho=0.2
}else{
ts1=exp(pp[1]) #reStruct.Batch
ts0=exp(pp[2]) #lSigma
ss=sqrt(ts0^2+ts1^2)
rho=ts1^2/(ts0^2+ts1^2)
}
# treat the results from B-spline as initial values
#fit.gls <- try( gls(y~X-1, correlation=corCompSymm(form = ~ 1 | ID)), silent=T)
#if(class(fit.gls)!="try-error"){
# ss=fit.gls$sigma
# rho=coef(summary(fit.gls)$modelStruct$corStruct, unconstrained=FALSE)
if(!lme.flag){
fit.monotone.bspline=try(bspline.lme.addt.cone(dat.obj=fit.dat.obj,beta.vec0=fixef(fit.lme),theta0=c(ss, rho), cov.fun.type=cov.fun.type, rho.method=rho.method), silent=T)
if(all(class(fit.monotone.bspline)!="try-error")){
if(fit.monotone.bspline$conv==TRUE){
if(fit.monotone.bspline$iter>=20){
#print("greater than 20")
fit.monotone.bspline=try(bspline.lme.addt.cone(dat.obj=fit.dat.obj,beta.vec0=fixef(fit.lme),theta0=c(ss, rho), cov.fun.type=cov.fun.type,rho.method=rho.method, adjust.stepsize=T), silent=T)
#save(test.dat, file="test.dat")
}
}
}
lld=fit.monotone.bspline$loglik
n=dim(test.dat)[1]
tmp=as.numeric(fit.monotone.bspline$dat$dat[,"Y"]-fit.monotone.bspline$yhat)
aic=log(as.numeric(t(tmp)%*%solve(fit.monotone.bspline$Sigma)%*%tmp))+2*(2+fit.monotone.bspline$edf)/n
aicc=-2*lld+2*(3+fit.monotone.bspline$edf)
spline.inf=list(Boundary.knots=xt.mat$Boundary.knots, n.knots=n.knots, knots=knots, degree=degree)
return(list(betahat=betahat, lld=lld, aic=aic, aicc=aicc, fit.monotone.bspline=fit.monotone.bspline, spline.inf=spline.inf, coef.lme=coef.lme, conv=T))
}
}
##############################################################################
bspline.fit.addt.nocor=function(test.dat, n.knots, knots, degree, plot.loglike.fun=T, Boundary.knots=NULL, cov.fun.type, aa=seq(0.01, 1.93,len=50)){
names(test.dat)=c("temp", "time", "Converted.Value")
#browser()
  # note: this fixed grid overrides the 'aa' argument supplied in the function signature
  aa=seq(0.01, 1.93,len=100)
#aa=seq(0.01, 3, len=100)
#aa=seq(0.2, 1, len=60)
#aa=seq(0.01, 0.5, len=50)
raa=raa.lm=rep(NA, length(aa))
if(is.null(n.knots)){ncoef=length(knots)+degree+1}else{ncoef=n.knots+degree+1}
coef.mat.lm=matrix(NA, length(aa), ncoef+1)
for(i in 1:length(aa))
{
#print(i)
xt.mat=addt.bsplines.xmat.obj.fun(dat=test.dat, beta=aa[i], n.knots=n.knots, knots=knots, degree=degree, plot.splines=F, Boundary.knots=Boundary.knots)
fit.dat.obj=xmat.obj.to.xmat(dat=test.dat,xmat.obj=xt.mat, intercept=FALSE)
y=fit.dat.obj$dat[,"Y"]
X=as.matrix(fit.dat.obj$dat[,3:(xt.mat$dfs+2)])
ID=fit.dat.obj$dat[,"Batch"]
#browser()
# treat the results from B-spline as initial values
#fit.gls <- try(gls(y~X-1, correlation=corCompSymm(form = ~ 1 | ID)), silent=T)
# treat the results from B-spline as initial values
fit.lm <- try( lm(y~X-1), silent=T)
# treat the results from B-spline as initial values
lm.flag=(attr(fit.lm,"class")=="try-error")
if(lm.flag)
{
#res=list(conv=F)
#return(res)
}else{
raa.lm[i]=logLik(fit.lm)
coef.mat.lm[i,]=c(coef(fit.lm), summary(fit.lm)$sigma)
#if(i==22 &cov.fun.type=="cov.fun.REMLc"){browser()}
fit.monotone.bspline=try(bspline.lme.addt.cone.nocor(dat.obj=fit.dat.obj,beta.vec0=coef(fit.lm),theta0=summary(fit.lm)$sigma), silent=T)
#print(fit.monotone.bspline$coef["sigma"])
#print(fit.monotone.bspline$coef["rho"])
if(class(fit.monotone.bspline)!="try-error"){
raa[i]=fit.monotone.bspline$loglik
}
}
#print(i)
}
#max.id=which.max(raa)
#print(raa)
#browser()
#pineapple
#if(plot.loglike.fun){
# if(is.null(n.knots)){
# plot(aa,raa,type="l", xlab=expression(beta), ylab="log likelihood", main=paste("monotone b-spline, knots:",paste(round(knots, 2), collapse = ",")))
# abline(v=aa[which.max(raa)], lty=2, col=2)
# } else{
# plot(aa,raa,type="l", xlab=expression(beta), ylab="log likelihood", main=paste("monotone b-spline, number of interior knots:", n.knots))
# abline(v=aa[which.max(raa)], lty=2, col=2)
# }
#}
# check whether it is a global maximum point for beta
#if((all(diff(raa)[1:(max.id-1)]>0)) & (all(diff(raa)[max.id:(length(raa)-1)]<0))){
# betahat=aa[which.max(raa)]
#} else{
# stop("cannot find the betahat")
#}
if(all(is.na(raa))){
return(list(conv=F))
}
betahat=aa[which.max(raa)]
coef.lm=c(aa[which.max(raa.lm)], coef.mat.lm[which.max(raa.lm),])
xt.mat=addt.bsplines.xmat.obj.fun(dat=test.dat, beta=betahat, n.knots=n.knots, knots=knots, degree=degree, plot.splines=F, Boundary.knots=Boundary.knots)
fit.dat.obj=xmat.obj.to.xmat(dat=test.dat,xmat.obj=xt.mat, intercept=FALSE)
y=fit.dat.obj$dat[,"Y"]
X=as.matrix(fit.dat.obj$dat[,3:(xt.mat$dfs+2)])
ID=fit.dat.obj$dat[,"Batch"]
# treat the results from B-spline as initial values
fit.lm <- try( lm(y~X-1), silent=T)
# treat the results from B-spline as initial values
lm.flag=(attr(fit.lm,"class")=="try-error")
if(lm.flag)
{
return(NULL)
}else{
#if(i==22 &cov.fun.type=="cov.fun.REMLc"){browser()}
fit.monotone.bspline=try(bspline.lme.addt.cone.nocor(dat.obj=fit.dat.obj,beta.vec0=coef(fit.lm),theta0=summary(fit.lm)$sigma), silent=T)
if(class(fit.monotone.bspline)!="try-error"){
lld=fit.monotone.bspline$loglik
tmp=as.numeric(fit.monotone.bspline$dat$dat[,"Y"]-fit.monotone.bspline$yhat)
#aic=log(as.numeric(t(tmp)%*%solve(fit.monotone.bspline$Sigma)%*%tmp))+2*(2+fit.monotone.bspline$edf)/n
aicc=-2*lld+2*(2+fit.monotone.bspline$edf)
spline.inf=list(Boundary.knots=xt.mat$Boundary.knots, n.knots=n.knots, knots=knots, degree=degree)
return(list(betahat=betahat, lld=lld, aicc=aicc, fit.monotone.bspline=fit.monotone.bspline, spline.inf=spline.inf, coef.lm=coef.lm, conv=T))
}
}
}
##############################################################################
################################################################################
bspline.lme.addt.cone=function(dat.obj,theta0,beta.vec0, control.beta=0.001, control.theta=0.001, monotone=T,
cov.fun.type="lme", rho.method="other", adjust.stepsize=F)
{
dat=dat.obj$dat
lam=dat.obj$lam
ncoef=length(lam)
nn=dim(dat)[1]
ids=unique(dat[,1])
mm=length(ids)
ss=theta0[1]
rho=theta0[2]
ss.vec0=c(ss,rho)
beta.vec=beta.vec0
#print(beta.vec0)
ys=rep(0,nn)
xmats=matrix(0,nrow=nn,ncol=ncoef)
mat.inv=matrix(0,nrow=nn,ncol=nn)
X=as.matrix(dat[, 3:(2+ncoef)])
y=dat[, "Y"]
convbeta=convtheta=1
iter=1
#compute log likelihood
llk.obj0=loglik.compute(mm=mm, dat=dat, ids=ids, ss=ss, rho=rho, yhat=X%*%beta.vec0)
while((convbeta>control.beta | convtheta>control.theta) & iter<20)
{
iter=iter+1
## construct covariance matrix
Sigma=llk.obj0$Sigma
#browser()
Sigma.inv=solve(Sigma)
#browser()
# treat sigma matrix as known, define transformed X and y
Ltilde=as.matrix(t(chol(Sigma.inv)))
Ltildeinv=solve(Ltilde)
ytilde=Ltildeinv%*%y
Xtilde=Ltildeinv%*%X
    # use cone projection to solve the quadratic objective function with constraints
umat=chol(t(Xtilde)%*%Xtilde)
uinv=solve(umat)
A=cbind(diag(rep(1, ncoef-1)), rep(0, ncoef-1))+cbind(rep(0, ncoef-1), diag(rep(-1, ncoef-1)))
bmata=A%*%uinv
bmat=matrix(0, ncoef, ncoef)
uvec=runif(ncoef)
bmat[1,]=uvec-t(bmata)%*%solve(bmata%*%t(bmata))%*%bmata%*%uvec
bmat[2:ncoef,]=bmata
edges=t(solve(bmat))
ysend=t(uinv)%*%t(Xtilde)%*%ytilde
# use package coneproj
coef=coneB(ysend,edges[2:ncoef,],matrix(t(edges[1,]),ncol=1))
beta.vec=uinv%*%t(edges)%*%coef$coefs
# find the effective degree of freedom
sm=1e-8
index=coef$coefs>sm
index[1]=TRUE
gmat=edges[index,]
if(length(gmat)/ncoef==1){gmat=matrix(gmat,ncol=ncoef)}
pcmat=Xtilde%*%uinv%*%t(gmat)%*%solve(gmat%*%t(gmat))%*%gmat%*%t(uinv)%*%t(Xtilde)
edfc=sum(diag(pcmat))
#
yhat=X%*%beta.vec
ww1=y-yhat
#browser()
# estimate covariance parameters
if(cov.fun.type=="lme"){
tdat=data.frame(Batch=dat[,c("Batch")],ww=ww1)
tfit=try(lme(ww~-1,data=tdat,random=~1|Batch),silent=T)
#browser()
#tfit=try(gls(ww~1,data=tdat,correlation=corCompSymm(form = ~ 1 | Batch)),silent=T)
lme.flag=(attr(tfit,"class")=="try-error")
if(lme.flag)
{
res=list(conv=F)
return(res)
}
pp=as.vector(attr(tfit$apVar,"Pars"))
if(is.null(pp))
{
pp=c(-12, log(tfit$sigma))
#res=list(conv=F)
#return(res)
}
ts1=exp(pp[1]) #reStruct.Batch
ts0=exp(pp[2]) #lSigma
ss=sqrt(ts0^2+ts1^2)
rho=ts1^2/(ts0^2+ts1^2)
ss.vec=c(ss,rho)
#fitted=yhat+tfit$coef$fixed+as.vector(fitted(tfit))
fitted=yhat+as.vector(fitted(tfit))
llk.obj=loglik.compute(mm=mm, dat=dat, ids=ids, ss=ss, rho=rho, yhat=yhat)
#fitted=yhat
}
if(cov.fun.type=="cov.fun.gls"){
tdat=data.frame(Batch=dat[,c("Batch")],ww=ww1)
fit.gls=try(gls(ww~1,data=tdat,correlation=corCompSymm(form = ~ 1 | Batch)),silent=T)
ss=fit.gls$sigma
rho=coef(summary(fit.gls)$modelStruct$corStruct, unconstrained=FALSE)
ss.vec=c(ss, rho)
llk.obj=loglik.compute(mm=mm, dat=dat, ids=ids, ss=ss, rho=rho, yhat=yhat)
}
if(cov.fun.type=="cov.fun.REML"){
#browser()
ID=dat.obj$dat[,"Batch"]
rho=glsnew(y~X-1, correlation=corCompSymm(form = ~ 1 | ID), coef.ini=as.numeric(round(beta.vec, 8)),
sigma.ini=ss)
#print(rho)
if(rho<0){
#browser()
rho=exp(try(nlminb(start=log(0.5), control=list(iter.max=20), objective=cov.fun.REML.rho, mm=mm, dat=dat, ids=ids, X_L=X_L, ww1=ww1, ss=ss)$par, silent=T))
}
for(j in 1:mm)
{
ww=sum((dat[,1]==ids[j])*1)
RR=diag(ww)+rho*as.numeric(ww>1)-rho*diag(ww)*as.numeric(ww>1)
tmp.ww1=as.numeric(ww1)[dat[,1]==ids[j]]
if(j==1){qudsum=t(tmp.ww1)%*%solve(RR)%*%tmp.ww1} else{
qudsum=t(tmp.ww1)%*%solve(RR)%*%tmp.ww1+qudsum
}
}
ss=sqrt(as.numeric(qudsum)/(nrow(X)-ncol(X)))
llk.obj=loglik.compute(mm=mm, dat=dat, ids=ids, ss=ss, rho=rho, yhat=yhat)
ss.vec=c(ss, rho)
}
if(cov.fun.type=="cov.fun.REMLc"){
#browser()
X_L=NULL
for(ll in unique(round(beta.vec, 8))){
tmp=X[,round(beta.vec, 8)==ll]
if(sum(round(beta.vec, 8)==ll)!=1){tmp=apply(tmp, 1, sum)}
X_L=cbind(X_L, tmp)
}
#browser()
colnames(X_L)=NULL
ID=dat.obj$dat[,"Batch"]
if(rho.method=="glsnew"){
rho=glsnew(y~X_L-1, correlation=corCompSymm(form = ~ 1 | ID), coef.ini=as.numeric(unique(round(beta.vec, 8))),
sigma.ini=ss)
#print(rho)
if(rho<0){
#browser()
#if(iter==4){browser()}
rho=exp(try(nlminb(start=qlogis(0.2), control=list(iter.max=20), objective=cov.fun.REML.rho, mm=mm, dat=dat, ids=ids, X_L=X_L, ww1=ww1, ss=ss)$par, silent=T))
#print(rho)
}
}else{
rho=plogis(try(nlminb(start=qlogis(0.5), control=list(iter.max=20), objective=cov.fun.REML.rho, mm=mm, dat=dat, ids=ids, X_L=X_L, ww1=ww1, ss=ss)$par, silent=T))
}
#browser()
# compute sigma
for(j in 1:mm)
{
ww=sum((dat[,1]==ids[j])*1)
RR=diag(ww)+rho*as.numeric(ww>1)-rho*diag(ww)*as.numeric(ww>1)
tmp.ww1=as.numeric(ww1)[dat[,1]==ids[j]]
if(j==1){qudsum=t(tmp.ww1)%*%solve(RR)%*%tmp.ww1} else{
qudsum=t(tmp.ww1)%*%solve(RR)%*%tmp.ww1+qudsum
}
}
#browser()
#print(rho)
ss=sqrt(as.numeric(qudsum)/(nrow(X_L)-ncol(X_L)))
#browser()
llk.obj=loglik.compute(mm=mm, dat=dat, ids=ids, ss=ss, rho=rho, yhat=yhat)
#print(llk.obj$loglik)
if(adjust.stepsize==T){
iter2=1
while((llk.obj$loglik<llk.obj0$loglik) & iter2<50){
iter2=iter2+1
ss=(ss+ss.vec0[1])/2
rho=(rho+ss.vec0[2])/2
llk.obj=loglik.compute(mm=mm, dat=dat, ids=ids, ss=ss, rho=rho, yhat=yhat)
}
}
ss.vec=c(ss, rho)
#print(ss.vec)
#ss.vec=REML(pars=c(ss.vec_L[1], ss.vec_L[2]), mm=mm, dat=dat, ids=ids, X_L=X_L, ww1=ww1)$par
}
convbeta=max(abs(beta.vec0-beta.vec))
convtheta=max(abs(ss.vec0-ss.vec))
beta.vec0=beta.vec
ss.vec0=ss.vec
llk.obj0=llk.obj
#print(round(beta.vec0, 4))
#print(round(ss.vec0, 4))
#print(paste("diff in beta", formatC(convbeta, digits=4, format="fg", flag="#")))
#print(paste("diff in theta", formatC(convtheta, digits=4, format="fg", flag="#")))
}
if(cov.fun.type=="cov.fun.gls"|cov.fun.type=="cov.fun.REML"|cov.fun.type=="cov.fun.REMLc"){
fitted=NULL
error=NULL
ts0=sqrt(ss.vec[1]^2*(1-ss.vec[2]))
ts1=sqrt(ss.vec[1]^2*(ss.vec[2]))
Sigma=llk.obj$Sigma
loglik=llk.obj$loglik
}else{
error=dat[,"Y"]-fitted
loglik=llk.obj$loglik
}
#std.error=error/ss
coef=c(beta.vec,ss.vec, ts0, ts1)
names(coef)=c(colnames(dat)[3:(ncoef+2)],"sigma","rho", "sigma_e", "sigma_u")
#coef=c(beta.vec,ss.vec)
#names(coef)=c(colnames(dat)[3:(ncoef+2)],"sigma","rho")
res=list(dat=dat.obj,fitted=fitted,yhat=yhat,coef=coef,
error=error,loglik=loglik,edf=edfc,Sigma=Sigma,conv=TRUE, iter=iter)
return(res)
}
################################################################################
bspline.lme.addt.cone.nocor=function(dat.obj,theta0,beta.vec0, control.beta=0.001, control.theta=0.001, monotone=T)
{
dat=dat.obj$dat
lam=dat.obj$lam
ncoef=length(lam)
nn=dim(dat)[1]
ids=unique(dat[,1])
mm=length(ids)
beta.vec=beta.vec0
sigma=theta=theta0
#print(beta.vec0)
ys=rep(0,nn)
xmats=matrix(0,nrow=nn,ncol=ncoef)
mat.inv=matrix(0,nrow=nn,ncol=nn)
X=as.matrix(dat[, 3:(2+ncoef)])
y=dat[, "Y"]
n=length(y)
convbeta=convtheta=1
iter=1
#compute log likelihood
while((convbeta>control.beta | convtheta>control.theta) & iter<20)
{
iter=iter+1
## construct covariance matrix
Sigma=sigma^2*diag(rep(1, n))
#browser()
Sigma.inv=sigma^(-2)*diag(rep(1, n))
#browser()
# treat sigma matrix as known, define transformed X and y
Ltilde=as.matrix(t(chol(Sigma.inv)))
Ltildeinv=solve(Ltilde)
ytilde=Ltildeinv%*%y
Xtilde=Ltildeinv%*%X
    # use cone projection to solve the quadratic objective function with constraints
umat=chol(t(Xtilde)%*%Xtilde)
uinv=solve(umat)
A=cbind(diag(rep(1, ncoef-1)), rep(0, ncoef-1))+cbind(rep(0, ncoef-1), diag(rep(-1, ncoef-1)))
bmata=A%*%uinv
bmat=matrix(0, ncoef, ncoef)
uvec=runif(ncoef)
bmat[1,]=uvec-t(bmata)%*%solve(bmata%*%t(bmata))%*%bmata%*%uvec
bmat[2:ncoef,]=bmata
edges=t(solve(bmat))
ysend=t(uinv)%*%t(Xtilde)%*%ytilde
# use package coneproj
coef=coneB(ysend,edges[2:ncoef,],matrix(t(edges[1,]),ncol=1))
beta.vec=uinv%*%t(edges)%*%coef$coefs
# find the effective degree of freedom
sm=1e-8
index=coef$coefs>sm
index[1]=TRUE
gmat=edges[index,]
if(length(gmat)/ncoef==1){gmat=matrix(gmat,ncol=ncoef)}
pcmat=Xtilde%*%uinv%*%t(gmat)%*%solve(gmat%*%t(gmat))%*%gmat%*%t(uinv)%*%t(Xtilde)
edfc=sum(diag(pcmat))
#
yhat=X%*%beta.vec
ww1=y-yhat
#browser()
theta=sigma=sqrt(sum(ww1^2)/(n-edfc))
convbeta=max(abs(beta.vec0-beta.vec))
convtheta=max(abs(theta-theta0))
beta.vec0=beta.vec
theta0=theta
#print(paste("diff in beta", formatC(convbeta, digits=4, format="fg", flag="#")))
#print(paste("diff in theta", formatC(convtheta, digits=4, format="fg", flag="#")))
}
loglik=sum(dnorm(ww1, mean=0, sd=sigma, log=TRUE))
#std.error=error/ss
coef=c(beta.vec, sigma)
names(coef)=c(colnames(dat)[3:(ncoef+2)],"sigma")
res=list(dat=dat.obj,fitted=fitted,yhat=yhat,coef=coef,
loglik=loglik,edf=edfc, conv=TRUE, iter=iter)
return(res)
}
################################################################################
addt.bsplines.xmat.obj.fun=function(dat,beta=0,n.knots=5,degree=3,plot.splines=F, knots=NULL, eq.alloc, Boundary.knots=NULL)
{
time=dat[,"time"]
temp=dat[,"temp"]
max.temp=max(temp)
dK=273.16
temp.K.inv=11605/(temp+dK)-11605/(max.temp+dK)
ss=exp(beta*temp.K.inv)
time.ss=time/ss
#print(time.ss)
#browser()
if(is.null(knots)){
knots=quantile(time.ss, probs=(1:n.knots)/(n.knots+1))
}
#knots=(1:n.knots)*max(time.ss)/(n.knots+1)
#knots=quantile(time.ss, probs=c(0.1, 1:(n.knots-1)/n.knots))
if(is.null(Boundary.knots)){
Boundary.knots=range(time.ss)
}
#browser()
#spline.mat=bs(time.ss, knots=knots, degree = degree, Boundary.knots=Boundary.knots, intercept=TRUE)
spline.mat=newbs.matfun(time.ss, knots=knots, degree = degree, Boundary.knots=Boundary.knots)
#spline.mat=ns(time.ss, df=n.knots+degree)
#spline.mat=(-1)*MIC.splines.basis(time.ss, df = n.knots+degree, knots = NULL,
# boundary.knots=NULL,type="Is",degree = degree,delta=1,eq.alloc=T)$mat
#spline.mat=bs(time.ss, df=n.knots+degree, degree = degree)
if(plot.splines)
{
#fig.paper(file="uv.splines.basis")
#matplot(time.ss[order(time.ss)],spline.mat[order(time.ss),],col=1,type="l",xlab="",ylab="",las=1,cex.axis=1.5)
matplot(time.ss[order(time.ss)],spline.mat[order(time.ss),],col=1,type="l",xlab="",ylab="",las=1)
#dev.off()
}
a1=dim(spline.mat)[2]
dfs=c(a1)
res=list(dat=dat,dfs=dfs, spline.mat=spline.mat, time.ss=time.ss, knots=knots, degree=degree, Boundary.knots=Boundary.knots)
return(invisible(res))
}
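# Hypothetical usage sketch (illustrative addition, not part of the original source):
# build the B-spline design matrix on the beta-scaled time axis; the toy data frame
# and beta value are assumptions for illustration.
if(FALSE)
{
toy.dat=data.frame(temp=rep(c(50,70),each=4),
                   time=rep(c(100,500,1000,2000),2),
                   Converted.Value=runif(8))
xm.demo=addt.bsplines.xmat.obj.fun(dat=toy.dat, beta=0.5, n.knots=2, degree=3)
dim(xm.demo$spline.mat)   # one row per observation, n.knots+degree+1 columns
xm.demo$knots             # interior knots placed at quantiles of the scaled times
}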
################################################################################
newbs.matfun=function(time.ss, knots, degree, Boundary.knots){
spline.mat=matrix(0, length(time.ss), degree+length(knots)+1)
for(mm in 1:length(time.ss)){
spline.mat[mm,]=newbs(x=time.ss[mm], degree=degree, inner.knots=knots, Boundary.knots=Boundary.knots)
}
#pineapple
return(spline.mat)
}
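# Hypothetical usage sketch (illustrative addition, not part of the original source):
# visualize the B-spline basis functions returned row-wise by newbs.matfun().
if(FALSE)
{
tt.demo=seq(0,4,length.out=101)
B.demo=newbs.matfun(tt.demo, knots=c(1,2,3), degree=3, Boundary.knots=c(0,4))
matplot(tt.demo, B.demo, type="l", ylab="basis value")   # one curve per basis function
}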
################################################################################
xmat.obj.to.xmat=function(dat,xmat.obj, intercept=TRUE)
{
dfs=xmat.obj$dfs
ids=paste(dat[,"temp"],dat[,"time"],sep="")
if(intercept==TRUE){
xmat=data.frame(ids, dat[,"Converted.Value"],1,xmat.obj$spline.mat)
#browser()
colnames(xmat)=c("Batch", "Y", paste("Time",0:(dfs[1]),sep=""))
dat=as.data.frame(xmat)
lam=c(0,rep(1,dfs[1]))
} else {
xmat=data.frame(ids, dat[,"Converted.Value"], xmat.obj$spline.mat)
#browser()
colnames(xmat)=c("Batch", "Y", paste("Time", 1:(dfs[1]),sep=""))
dat=as.data.frame(xmat)
lam=c(0,rep(1,dfs[1]-1))
}
res=list(dat=dat,lam=lam)
return(res)
}
##############################################################################
newbs=function(x, degree, inner.knots, Boundary.knots)
{
  Boundary.knots=sort(Boundary.knots)
  knots=c(rep(Boundary.knots[1], (degree+1)), sort(inner.knots),
          rep(Boundary.knots[2], (degree+1)))
  np=degree+length(inner.knots)+1
  s=rep(0, np)
  if(x==Boundary.knots[2]){
    s[np]=1
  }else{
    for(i in 1:np)
    {
      s[i]=basis(x, degree, i, knots)
    }
  }
  return(s)
}
##############################################################################
basis=function(x, degree, i, knots)
{
  if(degree==0){
    if((x<knots[i+1])&(x>=knots[i])){ y=1 }else{ y=0 }
  }else{
    if((knots[degree+i]-knots[i])==0){ temp1=0 }else{
      temp1=(x-knots[i])/(knots[degree+i]-knots[i])
    }
    if((knots[i+degree+1]-knots[i+1])==0){ temp2=0 }else{
      temp2=(knots[i+degree+1]-x)/(knots[i+degree+1]-knots[i+1])
    }
    y=temp1*basis(x, (degree-1), i, knots)+temp2*basis(x, (degree-1), (i+1), knots)
  }
  return(y)
}
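# Hypothetical sanity check (illustrative addition, not part of the original source):
# the B-spline basis values returned by newbs() sum to one inside the boundary knots.
if(FALSE)
{
bvals.demo=newbs(x=2.5, degree=3, inner.knots=c(1,2,3), Boundary.knots=c(0,4))
sum(bvals.demo)   # equals 1 up to floating point error
}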
################################################################################
#compute log likelihood
loglik.compute=function(mm, dat, ids, ss, rho, yhat){
loglik=0
for(j in 1:mm)
{
id.x=(dat[,1]==ids[j])
tmp1=dat[id.x, "Y"]-yhat[id.x]
ww=length(tmp1)
#browser()
SS=diag(ww)*ss^2+rho*ss^2*as.numeric(ww>1)-rho*ss^2*diag(ww)*as.numeric(ww>1)
SS.inv=solve(SS)
ll=-(ww/2)*log(2*pi)-.5*log(det(SS))-.5*as.vector(t(tmp1)%*%SS.inv%*%tmp1)
#print(ll)
if(j==1){ Sigma=SS } else{ Sigma=bdiag(Sigma, SS)}
loglik=loglik+ll
}
return(list(Sigma=Sigma, loglik=loglik))
}
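# Hypothetical usage sketch (illustrative addition, not part of the original source):
# marginal log-likelihood under the compound-symmetry error model; the toy batches,
# residuals, and correlation are assumptions. bdiag() comes from the Matrix package.
if(FALSE)
{
set.seed(1)
toy.dat=data.frame(Batch=rep(c("a","b"),each=3), Y=rnorm(6))
out.demo=loglik.compute(mm=2, dat=toy.dat, ids=c("a","b"), ss=1, rho=0.3, yhat=rep(0,6))
out.demo$loglik   # scalar log-likelihood
out.demo$Sigma    # 6 x 6 block-diagonal covariance matrix
}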
################################################################################
cov.fun.REML=function(pars, mm, dat, ids, X, ww1){
ss=exp(pars[1])
rho=pars[2]
for(j in 1:mm)
{
ww=sum((dat[,1]==ids[j])*1)
SS=diag(ww)*ss^2+rho*ss^2*as.numeric(ww>1)-rho*ss^2*diag(ww)*as.numeric(ww>1)
if(j==1){ Sigma=SS } else{ Sigma=bdiag(Sigma, SS)}
#cat("j=",j,"OK","\n")
}
Sigma.inv=solve(Sigma)
det.sigma=determinant(Sigma)
det.XinvX=determinant(t(X)%*%Sigma.inv%*%X)
return(as.numeric(det.sigma$modulus*det.sigma$sign+det.XinvX$modulus*det.XinvX$sign+as.numeric(t(ww1)%*%Sigma.inv%*%ww1)))
}
################################################################################
cov.fun.REMLc=function(pars, X_L, mm, dat, ids, ww1){
ss=exp(pars[1])
rho=pars[2]
for(j in 1:mm)
{
ww=sum((dat[,1]==ids[j])*1)
SS=diag(ww)*ss^2+rho*ss^2*as.numeric(ww>1)-rho*ss^2*diag(ww)*as.numeric(ww>1)
if(j==1){ Sigma=SS } else{ Sigma=bdiag(Sigma, SS)}
#cat("j=",j,"OK","\n")
}
Sigma.inv=solve(Sigma)
det.sigma=determinant(Sigma)
det.XinvX=determinant(t(X_L)%*%Sigma.inv%*%X_L)
return(as.numeric(det.sigma$modulus*det.sigma$sign+det.XinvX$modulus*det.XinvX$sign+as.numeric(t(ww1)%*%Sigma.inv%*%ww1)))
}
################################################################################
cov.fun.REML.rho=function(pars, X_L, mm, dat, ids, ww1, ss){
rho=plogis(pars)
for(j in 1:mm)
{
ww=sum((dat[,1]==ids[j])*1)
SS=diag(ww)*ss^2+rho*ss^2*as.numeric(ww>1)-rho*ss^2*diag(ww)*as.numeric(ww>1)
if(j==1){ Sigma=SS } else{ Sigma=bdiag(Sigma, SS)}
#cat("j=",j,"OK","\n")
}
Sigma.inv=solve(Sigma)
det.sigma=determinant(Sigma)
det.XinvX=determinant(t(X_L)%*%Sigma.inv%*%X_L)
return(as.numeric(det.sigma$modulus*det.sigma$sign+det.XinvX$modulus*det.XinvX$sign+as.numeric(t(ww1)%*%Sigma.inv%*%ww1)))
}
############################################################################
glsnew=function (model, data = sys.frame(sys.parent()), correlation = NULL,
weights = NULL, subset, method = c("REML", "ML"), na.action = na.fail,
control = list(), verbose = FALSE, coef.ini, sigma.ini)
{
Call <- match.call()
controlvals <- glsControl()
if (!missing(control)) {
controlvals[names(control)] <- control
}
if (!inherits(model, "formula") || length(model) != 3L) {
stop("\nmodel must be a formula of the form \"resp ~ pred\"")
}
method <- match.arg(method)
REML <- method == "REML"
if (!is.null(correlation)) {
groups <- getGroupsFormula(correlation)
}
else groups <- NULL
glsSt <- glsStruct(corStruct = correlation, varStruct = varFunc(weights))
model <- terms(model, data = data)
mfArgs <- list(formula = asOneFormula(formula(glsSt), model,
groups), data = data, na.action = na.action)
if (!missing(subset)) {
mfArgs[["subset"]] <- asOneSidedFormula(Call[["subset"]])[[2L]]
}
mfArgs$drop.unused.levels <- TRUE
dataMod <- do.call("model.frame", mfArgs)
origOrder <- row.names(dataMod)
if (!is.null(groups)) {
groups <- eval(parse(text = paste("~1", deparse(groups[[2L]]),
sep = "|")))
grps <- getGroups(dataMod, groups, level = length(getGroupsFormula(groups,
asList = TRUE)))
ord <- order(grps)
grps <- grps[ord]
dataMod <- dataMod[ord, , drop = FALSE]
revOrder <- match(origOrder, row.names(dataMod))
}
else grps <- NULL
X <- model.frame(model, dataMod)
contr <- lapply(X, function(el) if (inherits(el, "factor"))
contrasts(el))
contr <- contr[!unlist(lapply(contr, is.null))]
X <- model.matrix(model, X)
if (ncol(X) == 0L)
stop("no coefficients to fit")
y <- eval(model[[2L]], dataMod)
N <- nrow(X)
p <- ncol(X)
parAssign <- attr(X, "assign")
fTerms <- terms(as.formula(model), data = data)
namTerms <- attr(fTerms, "term.labels")
if (attr(fTerms, "intercept") > 0) {
namTerms <- c("(Intercept)", namTerms)
}
namTerms <- factor(parAssign, labels = namTerms)
parAssign <- split(order(parAssign), namTerms)
attr(glsSt, "conLin") <- list(Xy = array(c(X, y), c(N, ncol(X) +
1L), list(row.names(dataMod), c(colnames(X), deparse(model[[2]])))),
dims = list(N = N, p = p, REML = as.integer(REML)), logLik = 0)
glsEstControl <- controlvals["singular.ok"]
#browser()
glsSt <- Initialize(glsSt, dataMod, glsEstControl)
attr(glsSt, "glsFit")[["beta"]]=coef.ini
attr(glsSt, "glsFit")[["sigma"]]=sigma.ini
parMap <- attr(glsSt, "pmap")
numIter <- numIter0 <- 0L # was commented out
if (length(coef(glsSt))) {
optRes <- if (controlvals$opt == "nlminb") {
nlminb(c(coef(glsSt)), function(glsPars) -logLik(glsSt,
glsPars), control = list(trace = controlvals$msVerbose,
iter.max = controlvals$msMaxIter))
}
else {
cat("numIter=", numIter, "\n")
optim(c(coef(glsSt)), function(glsPars) -logLik(glsSt, glsPars), method = controlvals$optimMethod, control = list(trace = controlvals$msVerbose, maxit = controlvals$msMaxIter, reltol = if(numIter ==0L) controlvals$msTol else 100 * .Machine$double.eps))
}
coef(glsSt) <- optRes$par
}
else {
optRes <- list(convergence = 0)
}
return(coef(glsSt, unconstrained=FALSE))
}
############################################################################
AdhesiveBondB.data.read=function()
{
tmp=read.csv(file="AdhesiveBondB.csv",header=T)
tmp=tmp[tmp[,4]=="Exact",]
tmp=tmp[,-4]
tmp=tmp[!(tmp[,1]%in%c(60,70) & tmp[,2]==0),]
tmp=tmp[order(tmp[,1],tmp[,2]),]
dat=data.frame(TempC=tmp[,1],TimeH=tmp[,2]*24*7,Response=tmp[,3])
return(dat)
}
############################################################################
############################################################################
# TI
TI.bspline.nocor=function(dat, model.fit.obj, dd=100000, failure.threshold = 0.5){
# browser()
#browser()
names(dat)[1:3]=c("temp", "time", "response")
time=dat[,"time"]
temp=dat[,"temp"]
max.temp=max(temp)
dK=273.16
xmax=1/(max.temp+dK)
beta=model.fit.obj$betahat
Boundary.knots=model.fit.obj$spline.inf$Boundary.knots
knots=model.fit.obj$spline.inf$knots
degree=model.fit.obj$spline.inf$degree
ncoef=length(knots)+degree+1
time.ss.vec=seq(min(Boundary.knots), max(Boundary.knots), len=200)
#time.ss.vec=seq(0, max(Boundary.knots), len=200)
spline.mat=newbs.matfun(time.ss.vec, knots=knots, degree = degree, Boundary.knots=Boundary.knots)
yests=as.numeric(spline.mat%*%model.fit.obj$fit.monotone.bspline$coef[1:ncoef])
tt.max=approx(yests, time.ss.vec, xout=failure.threshold*yests[1])$y
TI=11605/(log(dd/tt.max)/beta+11605/(max.temp+dK))-dK
beta0=log10(tt.max)-beta*(11605)*xmax/log(10)
beta1=11605*beta/log(10)
#return(c("TI"=TI, "beta1" = beta1 , "beta0" = beta0))
TI.values <- c(c("TI"=TI, "beta1" = beta1 , "beta0" = beta0))
return("TI"=TI.values)
}
| /scratch/gouwar.j/cran-all/cranData/ADDT/R/ADDT-package.R |
/scratch/gouwar.j/cran-all/cranData/ADER/R/ADER-internal.R |
|
#################################################################
#
# File: ad.test.r
# Purpose: Implements the Anderson Darling GoF test
#
# Created: 20090625
# Author: Carlos J. Gil Bellosta
#
# Modifications:
#
#################################################################
ad.test <- function( x, distr.fun, ... )
{
DNAME <- deparse( substitute( x ) )
x <- sort( x )
if( missing( distr.fun ) && ( x[1] < 0 || x[ length( x ) ] > 1 ) )
stop( paste( "Data ", DNAME, " is not in the [0,1] range." ) )
if( ! missing( distr.fun ) ){
x <- distr.fun( x, ... )
DNAME <- paste( DNAME, " and ", deparse( substitute( distr.fun ) ) )
}
STATISTIC <- ad.test.statistic( x )
names( STATISTIC ) <- "AD"
PVAL <- 1 - ad.test.pvalue( STATISTIC, length( x ) )
METHOD <- "Anderson-Darling GoF Test"
ALTERNATIVE <- "NA"
RVAL <- list(statistic = STATISTIC, p.value = PVAL, alternative = ALTERNATIVE, method = METHOD, data.name = DNAME)
class(RVAL) <- "htest"
return(RVAL)
}
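# Hypothetical usage sketch (illustrative addition, not part of the original source):
# test a sample already on [0,1] directly, or pass the null CDF (plus any parameters)
# so the data are transformed before the statistic is computed.
if( FALSE ){
    set.seed( 1 )
    ad.test( runif( 100 ) )                     # data assumed Uniform(0,1)
    ad.test( rnorm( 100 ), pnorm )              # standard normal null
    ad.test( rnorm( 100, mean = 2 ), pnorm, 2 ) # extra arguments passed to distr.fun
}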
| /scratch/gouwar.j/cran-all/cranData/ADGofTest/R/ad.test.R |
#################################################################
#
# File: ad.test.pvalue.r
# Purpose: Gets the p-value for an Anderson Darling GoF test
#
# Created: 20090625
# Author: Carlos J. Gil Bellosta
#
# Modifications:
#
#################################################################
ad.test.pvalue <- function( x, n )
{
if( x < 2 )
x <- exp(-1.2337141/x)/sqrt(x)*(2.00012+(.247105- (.0649821-(.0347962-(.011672-.00168691*x)*x)*x)*x)*x)
else
x <- exp(-exp(1.0776-(2.30695-(.43424-(.082433-(.008056 -.0003146*x)*x)*x)*x)*x))
if( x > 0.8 )
return( x + (-130.2137+(745.2337-(1705.091-(1950.646-(1116.360-255.7844*x)*x)*x)*x)*x)/n )
z <- 0.01265 + 0.1757 / n
if( x < z ){
v <- x / z
v <- sqrt(v)*(1.-v)*(49*v-102)
return ( x + v * (.0037/(n*n)+.00078/n+.00006)/n )
}
v <- (x-z) / (0.8-z)
v <- -0.00022633+(6.54034-(14.6538-(14.458-(8.259-1.91864*v)*v)*v)*v)*v
x + v * (.04213+.01365/n)/n
}
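# Hypothetical usage sketch (illustrative addition, not part of the original source):
# ad.test.pvalue() returns the approximate null CDF of the statistic, so ad.test()
# reports 1 minus this value as the p-value.
if( FALSE ){
    1 - ad.test.pvalue( 2.492, n = 100 )
}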
| /scratch/gouwar.j/cran-all/cranData/ADGofTest/R/ad.test.pvalue.R |
#################################################################
#
# File: ad.test.statistic.r
# Purpose: Calculates the statistic for the Anderson Darling GoF test
#
# Created: 20090625
# Author: Carlos J. Gil Bellosta
#
# Modifications:
#
#################################################################
ad.test.statistic <- function( x )
{
tmp <- x * ( 1 - rev( x ) )
tmp <- ( 2 * seq(x) - 1 ) * log( tmp )
tmp <- - mean( tmp ) - length( x )
}
| /scratch/gouwar.j/cran-all/cranData/ADGofTest/R/ad.test.statistic.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @keywords internal
#' @noRd
multipleinversion <- function(A, rho, L, R, lambda2) {
.Call('_ADMM_multipleinversion', PACKAGE = 'ADMM', A, rho, L, R, lambda2)
}
admm_tv <- function(b, xinit, lambda, reltol, abstol, maxiter, rho, alpha) {
.Call('_ADMM_admm_tv', PACKAGE = 'ADMM', b, xinit, lambda, reltol, abstol, maxiter, rho, alpha)
}
admm_bp <- function(A, b, xinit, reltol, abstol, maxiter, rho, alpha) {
.Call('_ADMM_admm_bp', PACKAGE = 'ADMM', A, b, xinit, reltol, abstol, maxiter, rho, alpha)
}
admm_enet <- function(A, b, lambda, alpha, reltol, abstol, maxiter, rho) {
.Call('_ADMM_admm_enet', PACKAGE = 'ADMM', A, b, lambda, alpha, reltol, abstol, maxiter, rho)
}
admm_genlasso <- function(A, b, D, lambda, reltol, abstol, maxiter, rho) {
.Call('_ADMM_admm_genlasso', PACKAGE = 'ADMM', A, b, D, lambda, reltol, abstol, maxiter, rho)
}
admm_lad <- function(A, b, xinit, reltol, abstol, maxiter, rho, alpha) {
.Call('_ADMM_admm_lad', PACKAGE = 'ADMM', A, b, xinit, reltol, abstol, maxiter, rho, alpha)
}
admm_lasso <- function(A, b, lambda, xinit, reltol, abstol, maxiter, rho, alpha) {
.Call('_ADMM_admm_lasso', PACKAGE = 'ADMM', A, b, lambda, xinit, reltol, abstol, maxiter, rho, alpha)
}
admm_rpca <- function(M, tol, maxiter, mu, lambda) {
.Call('_ADMM_admm_rpca', PACKAGE = 'ADMM', M, tol, maxiter, mu, lambda)
}
admm_spca <- function(Sigma, reltol, abstol, maxiter, mu, rho) {
.Call('_ADMM_admm_spca', PACKAGE = 'ADMM', Sigma, reltol, abstol, maxiter, mu, rho)
}
admm_sdp <- function(C, listA, b, mymu, myrho, mygamma, maxiter, abstol, printer) {
.Call('_ADMM_admm_sdp', PACKAGE = 'ADMM', C, listA, b, mymu, myrho, mygamma, maxiter, abstol, printer)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/RcppExports.R |
#' ADMM : Algorithms using Alternating Direction Method of Multipliers
#'
#' The introduction of the Alternating Direction Method of Multipliers (ADMM) has been a breakthrough in
#' solving complex and non-convex optimization problems in a reasonably stable and scalable fashion.
#' Our package aims at providing handy tools for fast computation on well-known problems using the method.
#' For interested users/readers, please visit Prof. Stephen Boyd's \href{https://stanford.edu/~boyd/papers/admm_distr_stats.html}{website}
#' entirely devoted to the topic.
#'
#' @docType package
#' @name ADMM
#' @aliases ADMM-package
#' @import Rdpack
#' @import Matrix
#' @importFrom Rcpp evalCpp
#' @importFrom stats rnorm
#' @importFrom foreach "%dopar%" foreach registerDoSEQ
#' @importFrom parallel detectCores stopCluster makeCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom utils packageVersion
#' @useDynLib ADMM
NULL
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm-package.R |
#' Basis Pursuit
#'
#' For an underdetermined system, Basis Pursuit
#' aims to find a sparse solution that solves
#' \deqn{\textrm{min}_x ~ \|x\|_1 \quad \textrm{s.t} \quad Ax=b}
#' which is a relaxed version of strict non-zero support finding problem.
#' The implementation is borrowed from Stephen Boyd's
#' \href{https://web.stanford.edu/~boyd/papers/admm/basis_pursuit/basis_pursuit.html}{MATLAB code}.
#'
#' @param A an \eqn{(m \times n)} regressor matrix
#' @param b a length-\eqn{m} response vector
#' @param xinit a length-\eqn{n} vector for initial value
#' @param rho an augmented Lagrangian parameter
#' @param alpha an overrelaxation parameter in [1,2]
#' @param abstol absolute tolerance stopping criterion
#' @param reltol relative tolerance stopping criterion
#' @param maxiter maximum number of iterations
#'
#' @return a named list containing \describe{
#' \item{x}{a length-\eqn{n} solution vector}
#' \item{history}{dataframe recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' When you run the algorithm, output returns not only the solution, but also the iteration history recording
#' following fields over iterates,
#' \describe{
#' \item{objval}{object (cost) function value}
#' \item{r_norm}{norm of primal residual}
#' \item{s_norm}{norm of dual residual}
#' \item{eps_pri}{feasibility tolerance for primal feasibility condition}
#' \item{eps_dual}{feasibility tolerance for dual feasibility condition}
#' }
#' In accordance with the paper, iteration stops when both \code{r_norm} and \code{s_norm} values
#' become smaller than \code{eps_pri} and \code{eps_dual}, respectively.
#'
#' @examples
#' ## generate sample data
#' n = 30
#' m = 10
#'
#' A = matrix(rnorm(n*m), nrow=m) # design matrix
#' x = c(stats::rnorm(3),rep(0,n-3)) # coefficient
#' x = base::sample(x)
#' b = as.vector(A%*%x) # response
#'
#' ## run example
#' output = admm.bp(A, b)
#' niter = length(output$history$s_norm)
#' history = output$history
#'
#' ## report convergence plot
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(1:niter, history$objval, "b", main="cost function")
#' plot(1:niter, history$r_norm, "b", main="primal residual")
#' plot(1:niter, history$s_norm, "b", main="dual residual")
#' par(opar)
#'
#' @export
admm.bp <- function(A, b, xinit=NA,
rho=1.0, alpha=1.0,
abstol=1e-4, reltol=1e-2, maxiter=1000){
## PREPROCESSING
# data validity
if (!check_data_matrix(A)){
stop("* ADMM.BP : input 'A' is invalid data matrix.") }
if (!check_data_vector(b)){
stop("* ADMM.BP : input 'b' is invalid data vector") }
b = as.vector(b)
# data size
if (nrow(A)!=length(b)){
stop("* ADMM.BP : two inputs 'A' and 'b' have non-matching dimension.")}
# initial value
  if (!any(is.na(xinit))){
if ((!check_data_vector(xinit))||(length(xinit)!=ncol(A))){
stop("* ADMM.BP : input 'xinit' is invalid.")
}
xinit = as.vector(xinit)
} else {
xinit = as.vector(rep(0,ncol(A)))
}
# other parameters
if (!check_param_constant_multiple(c(abstol, reltol))){
stop("* ADMM.BP : tolerance level is invalid.")
}
if (!check_param_integer(maxiter, 2)){
stop("* ADMM.BP : 'maxiter' should be a positive integer.")
}
maxiter = as.integer(maxiter)
if (!check_param_constant(rho,0)){
stop("* ADMM.BP : 'rho' should be a positive real number.")
}
if (!check_param_constant(alpha,0)){
stop("* ADMM.BP : 'alpha' should be a positive real number.")
}
if ((alpha<1)||(alpha>2)){
warning("* ADMM.BP : 'alpha' value is suggested to be in [1,2].")
}
## MAIN COMPUTATION & RESULT RETURN
result = admm_bp(A,b,xinit,reltol,abstol,maxiter,rho,alpha)
## RESULT RETURN
kk = result$k
output = list()
output$x = result$x
output$history = data.frame(objval=result$objval[1:kk],
r_norm=result$r_norm[1:kk],
s_norm=result$s_norm[1:kk],
eps_pri=result$eps_pri[1:kk],
eps_dual=result$eps_dual[1:kk]
)
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.bp.R |
#' Elastic Net Regularization
#'
#' Elastic Net regularization combines the \eqn{\ell_2} stability and
#' \eqn{\ell_1} sparsity penalties, simultaneously solving the following,
#' \deqn{\textrm{min}_x ~ \frac{1}{2}\|Ax-b\|_2^2 + \lambda_1 \|x\|_1 + \lambda_2 \|x\|_2^2}
#' with nonnegative regularization parameters \eqn{\lambda_1} and \eqn{\lambda_2}. Note that if both lambda values are 0,
#' it reduces to the least-squares solution.
#'
#' @param A an \eqn{(m\times n)} regressor matrix
#' @param b a length-\eqn{m} response vector
#' @param lambda1 a regularization parameter for \eqn{\ell_1} term
#' @param lambda2 a regularization parameter for \eqn{\ell_2} term
#' @param rho an augmented Lagrangian parameter
#' @param abstol absolute tolerance stopping criterion
#' @param reltol relative tolerance stopping criterion
#' @param maxiter maximum number of iterations
#'
#' @return a named list containing \describe{
#' \item{x}{a length-\eqn{n} solution vector}
#' \item{history}{dataframe recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' When you run the algorithm, the output returns not only the solution, but also the iteration history recording
#' the following fields over the iterates,
#' \describe{
#' \item{objval}{objective (cost) function value}
#' \item{r_norm}{norm of primal residual}
#' \item{s_norm}{norm of dual residual}
#' \item{eps_pri}{feasibility tolerance for primal feasibility condition}
#' \item{eps_dual}{feasibility tolerance for dual feasibility condition}
#' }
#' In accordance with the paper, iteration stops when both \code{r_norm} and \code{s_norm} values
#' become smaller than \code{eps_pri} and \code{eps_dual}, respectively.
#'
#' @examples
#' ## generate underdetermined design matrix
#' m = 50
#' n = 100
#' p = 0.1    # percentage of non-zero elements
#'
#' x0 = matrix(Matrix::rsparsematrix(n,1,p))
#' A = matrix(rnorm(m*n),nrow=m)
#' for (i in 1:ncol(A)){
#' A[,i] = A[,i]/sqrt(sum(A[,i]*A[,i]))
#' }
#' b = A%*%x0 + sqrt(0.001)*matrix(rnorm(m))
#'
#' ## run example with both regularization values = 1
#' output = admm.enet(A, b, lambda1=1, lambda2=1)
#' niter = length(output$history$s_norm)
#' history = output$history
#'
#' ## report convergence plot
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(1:niter, history$objval, "b", main="cost function")
#' plot(1:niter, history$r_norm, "b", main="primal residual")
#' plot(1:niter, history$s_norm, "b", main="dual residual")
#' par(opar)
#'
#' @references
#' \insertRef{zou_regularization_2005a}{ADMM}
#'
#' @seealso \code{\link{admm.lasso}}
#' @author Xiaozhi Zhu
#' @export
admm.enet <- function(A, b,
lambda1=1.0, lambda2=1.0, rho=1.0,
abstol=1e-4, reltol=1e-2, maxiter=1000){
#-----------------------------------------------------------
## PREPROCESSING
# data validity
if (!check_data_matrix(A)){
stop("* ADMM.ENET : input 'A' is invalid data matrix.") }
if (!check_data_vector(b)){
stop("* ADMM.ENET : input 'b' is invalid data vector") }
b = as.vector(b)
# data size
if (nrow(A)!=length(b)){
stop("* ADMM.ENET : two inputs 'A' and 'b' have non-matching dimension.")}
# other parameters
if (!check_param_constant_multiple(c(abstol, reltol))){
stop("* ADMM.ENET : tolerance level is invalid.")
}
if (!check_param_integer(maxiter, 2)){
stop("* ADMM.ENET : 'maxiter' should be a positive integer.")
}
maxiter = as.integer(maxiter)
rho = as.double(rho)
if (!check_param_constant(rho,0)){
stop("* ADMM.ENET : 'rho' should be a positive real number.")
}
# adjust for Xiaozhi's code
meps = (.Machine$double.eps)
negsmall = -meps
lambda1 = as.double(lambda1)
lambda2 = as.double(lambda2)
if (!check_param_constant(lambda1, negsmall)){
stop("* ADMM.ENET : 'lambda1' is invalid.")
}
if (!check_param_constant(lambda2, negsmall)){
stop("* ADMM.ENET : 'lambda2' is invalid.")
}
if ((lambda1<meps)&&(lambda2<meps)){
message("* ADMM.ENET : since both regularization parameters are effectively zero, a least-squares solution is returned.")
xsol = as.vector(aux_pinv(A)%*%matrix(b))
output = list()
output$x = xsol
return(output)
}
lambda = (2*lambda2 + lambda1)
alpha = (lambda1/lambda)
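  # note : (lambda1, lambda2) is re-parameterized into a single (lambda, alpha) pair so that
  #        lambda*( alpha*|x|_1 + ((1-alpha)/2)*|x|_2^2 ) = lambda1*|x|_1 + lambda2*|x|_2^2,
  #        which is (presumably) the parameterization expected by the compiled 'admm_enet' routine below.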
#-----------------------------------------------------------
## MAIN COMPUTATION
result = admm_enet(A,b,lambda,alpha,reltol,abstol,maxiter,rho)
#-----------------------------------------------------------
## RESULT RETURN
kk = result$k
output = list()
output$x = result$x
output$history = data.frame(objval=result$objval[1:kk],
r_norm=result$r_norm[1:kk],
s_norm=result$s_norm[1:kk],
eps_pri=result$eps_pri[1:kk],
eps_dual=result$eps_dual[1:kk]
)
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.enet.R |
#' Generalized LASSO
#'
#' Generalized LASSO solves the following problem,
#' \deqn{\textrm{min}_x ~ \frac{1}{2}\|Ax-b\|_2^2 + \lambda \|Dx\|_1}
#' where the choice of regularization matrix \eqn{D} leads to different problem formulations.
#'
#' @param A an \eqn{(m\times n)} regressor matrix
#' @param b a length-\eqn{m} response vector
#' @param D a regularization matrix of \eqn{n} columns
#' @param lambda a regularization parameter
#' @param rho an augmented Lagrangian parameter
#' @param alpha an overrelaxation parameter in [1,2]
#' @param abstol absolute tolerance stopping criterion
#' @param reltol relative tolerance stopping criterion
#' @param maxiter maximum number of iterations
#'
#' @return a named list containing \describe{
#' \item{x}{a length-\eqn{n} solution vector}
#' \item{history}{dataframe recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' When you run the algorithm, the output returns not only the solution, but also the iteration history recording
#' the following fields over the iterates,
#' \describe{
#' \item{objval}{objective (cost) function value}
#' \item{r_norm}{norm of primal residual}
#' \item{s_norm}{norm of dual residual}
#' \item{eps_pri}{feasibility tolerance for primal feasibility condition}
#' \item{eps_dual}{feasibility tolerance for dual feasibility condition}
#' }
#' In accordance with the paper, iteration stops when both \code{r_norm} and \code{s_norm} values
#' become smaller than \code{eps_pri} and \code{eps_dual}, respectively.
#'
#'
#' @examples
#' ## generate sample data
#' m = 100
#' n = 200
#' p = 0.1   # percentage of non-zero elements
#'
#' x0 = matrix(Matrix::rsparsematrix(n,1,p))
#' A = matrix(rnorm(m*n),nrow=m)
#' for (i in 1:ncol(A)){
#' A[,i] = A[,i]/sqrt(sum(A[,i]*A[,i]))
#' }
#' b = A%*%x0 + sqrt(0.001)*matrix(rnorm(m))
#' D = diag(n);
#'
#' ## set regularization lambda value
#' regval = 0.1*Matrix::norm(t(A)%*%b, 'I')
#'
#' ## solve LASSO via reducing from Generalized LASSO
#' output = admm.genlasso(A,b,D,lambda=regval) # set D as identity matrix
#' niter = length(output$history$s_norm)
#' history = output$history
#'
#' ## report convergence plot
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(1:niter, history$objval, "b", main="cost function")
#' plot(1:niter, history$r_norm, "b", main="primal residual")
#' plot(1:niter, history$s_norm, "b", main="dual residual")
#' par(opar)
#'
#' @references
#' \insertRef{tibshirani_solution_2011}{ADMM}
#'
#' \insertRef{zhu_augmented_2017}{ADMM}
#'
#' @author Xiaozhi Zhu
#' @export
admm.genlasso <- function(A, b, D=diag(ncol(A)), lambda=1.0, rho=1.0, alpha=1.0,
abstol=1e-4, reltol=1e-2, maxiter=1000){
#-----------------------------------------------------------
## PREPROCESSING
# 1. data validity
if (!check_data_matrix(A)){
stop("* ADMM.GENLASSO : input 'A' is invalid data matrix.") }
if (!check_data_vector(b)){
stop("* ADMM.GENLASSO : input 'b' is invalid data vector") }
b = as.vector(b)
# 2. data size
if (nrow(A)!=length(b)){
stop("* ADMM.GENLASSO : two inputs 'A' and 'b' have non-matching dimension.")}
# 3. D : regularization matrix
if (!check_data_matrix(D)){
stop("* ADMM.GENLASSO : input 'D' is invalid regularization matrix.")
}
if (ncol(A)!=ncol(D)){
stop("* ADMM.GENLASSO : input 'D' has invalid size.")
}
# 4. other parameters
if (!check_param_constant_multiple(c(abstol, reltol))){
stop("* ADMM.GENLASSO : tolerance level is invalid.")
}
if (!check_param_integer(maxiter, 2)){
stop("* ADMM.GENLASSO : 'maxiter' should be a positive integer.")
}
maxiter = as.integer(maxiter)
rho = as.double(rho)
if (!check_param_constant(rho,0)){
stop("* ADMM.GENLASSO : 'rho' should be a positive real number.")
}
#-----------------------------------------------------------
## MAIN COMPUTATION
# 1. lambda=0 case; pseudoinverse
meps = (.Machine$double.eps)
negsmall = -meps
lambda = as.double(lambda)
if (!check_param_constant(lambda, negsmall)){
stop("* ADMM.GENLASSO : 'lambda' is invalid; should be a nonnegative real number.")
}
if (lambda<meps){
message("* ADMM.GENLASSO : since both regularization parameters are effectively zero, a least-squares solution is returned.")
xsol = as.vector(aux_pinv(A)%*%matrix(b))
output = list()
output$x = xsol
return(output)
}
# 2. main computation : Xiaozhi's work
result = admm_genlasso(A,b,D,lambda,reltol,abstol,maxiter,rho)
#-----------------------------------------------------------
## RESULT RETURN
kk = result$k
output = list()
output$x = result$x
output$history = data.frame(objval=result$objval[1:kk],
r_norm=result$r_norm[1:kk],
s_norm=result$s_norm[1:kk],
eps_pri=result$eps_pri[1:kk],
eps_dual=result$eps_dual[1:kk])
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.genlasso.R |
#' Least Absolute Deviations
#'
#' Least Absolute Deviations (LAD) is an alternative to traditional Least Squares that uses the cost function
#' \deqn{\textrm{min}_x ~ \|Ax-b\|_1}
#' i.e., the \eqn{\ell_1} norm instead of the squared loss, for robust estimation of the coefficients.
#'
#' @param A an \eqn{(m\times n)} regressor matrix
#' @param b a length-\eqn{m} response vector
#' @param xinit a length-\eqn{n} vector for initial value
#' @param rho an augmented Lagrangian parameter
#' @param alpha an overrelaxation parameter in [1,2]
#' @param abstol absolute tolerance stopping criterion
#' @param reltol relative tolerance stopping criterion
#' @param maxiter maximum number of iterations
#'
#' @return a named list containing \describe{
#' \item{x}{a length-\eqn{n} solution vector}
#' \item{history}{dataframe recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' When you run the algorithm, the output returns not only the solution, but also the iteration history recording
#' the following fields over the iterates,
#' \describe{
#' \item{objval}{objective (cost) function value}
#' \item{r_norm}{norm of primal residual}
#' \item{s_norm}{norm of dual residual}
#' \item{eps_pri}{feasibility tolerance for primal feasibility condition}
#' \item{eps_dual}{feasibility tolerance for dual feasibility condition}
#' }
#' In accordance with the paper, iteration stops when both \code{r_norm} and \code{s_norm} values
#' become smaller than \code{eps_pri} and \code{eps_dual}, respectively.
#'
#'
#' @examples
#' \donttest{
#' ## generate data
#' m = 1000
#' n = 100
#' A = matrix(rnorm(m*n),nrow=m)
#' x = 10*matrix(rnorm(n))
#' b = A%*%x
#'
#' ## add impulsive noise to 10% of positions
#' idx = sample(1:m, round(m/10))
#' b[idx] = b[idx] + 100*rnorm(length(idx))
#'
#' ## run the code
#' output = admm.lad(A,b)
#' niter = length(output$history$s_norm)
#' history = output$history
#'
#' ## report convergence plot
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(1:niter, history$objval, "b", main="cost function")
#' plot(1:niter, history$r_norm, "b", main="primal residual")
#' plot(1:niter, history$s_norm, "b", main="dual residual")
#' par(opar)
#' }
#'
#' @export
admm.lad <- function(A, b, xinit=NA,
rho=1.0, alpha=1.0,
abstol=1e-4, reltol=1e-2, maxiter=1000){
## PREPROCESSING
# data validity
if (!check_data_matrix(A)){
stop("* ADMM.LAD : input 'A' is invalid data matrix.") }
if (!check_data_vector(b)){
stop("* ADMM.LAD : input 'b' is invalid data vector") }
b = as.vector(b)
# data size
if (nrow(A)!=length(b)){
stop("* ADMM.LAD : two inputs 'A' and 'b' have non-matching dimension.")}
# initial value
  if (!all(is.na(xinit))){
if ((!check_data_vector(xinit))||(length(xinit)!=ncol(A))){
stop("* ADMM.LAD : input 'xinit' is invalid.")
}
xinit = as.vector(xinit)
} else {
xinit = as.vector(rep(0,ncol(A)))
}
# other parameters
if (!check_param_constant_multiple(c(abstol, reltol))){
stop("* ADMM.LAD : tolerance level is invalid.")
}
if (!check_param_integer(maxiter, 2)){
stop("* ADMM.LAD : 'maxiter' should be a positive integer.")
}
maxiter = as.integer(maxiter)
if (!check_param_constant(rho,0)){
stop("* ADMM.LAD : 'rho' should be a positive real number.")
}
if (!check_param_constant(alpha,0)){
stop("* ADMM.LAD : 'alpha' should be a positive real number.")
}
if ((alpha<1)||(alpha>2)){
warning("* ADMM.LAD : 'alpha' value is suggested to be in [1,2].")
}
## MAIN COMPUTATION & RESULT RETURN
result = admm_lad(A,b,xinit,reltol,abstol,maxiter,rho,alpha)
## RESULT RETURN
kk = result$k
output = list()
output$x = result$x
output$history = data.frame(objval=result$objval[1:kk],
r_norm=result$r_norm[1:kk],
s_norm=result$s_norm[1:kk],
eps_pri=result$eps_pri[1:kk],
eps_dual=result$eps_dual[1:kk]
)
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.lad.R |
#' Least Absolute Shrinkage and Selection Operator
#'
#' LASSO, or L1-regularized regression, is an optimization problem to solve
#' \deqn{\textrm{min}_x ~ \frac{1}{2}\|Ax-b\|_2^2 + \lambda \|x\|_1}
#' for sparsifying the coefficient vector \eqn{x}.
#' The implementation is borrowed from Stephen Boyd's
#' \href{https://stanford.edu/~boyd/papers/admm/lasso/lasso.html}{MATLAB code}.
#'
#' @param A an \eqn{(m\times n)} regressor matrix
#' @param b a length-\eqn{m} response vector
#' @param lambda a regularization parameter
#' @param rho an augmented Lagrangian parameter
#' @param alpha an overrelaxation parameter in [1,2]
#' @param abstol absolute tolerance stopping criterion
#' @param reltol relative tolerance stopping criterion
#' @param maxiter maximum number of iterations
#'
#' @return a named list containing \describe{
#' \item{x}{a length-\eqn{n} solution vector}
#' \item{history}{dataframe recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' When you run the algorithm, the output returns not only the solution, but also the iteration history recording
#' the following fields over the iterates,
#' \describe{
#' \item{objval}{objective (cost) function value}
#' \item{r_norm}{norm of primal residual}
#' \item{s_norm}{norm of dual residual}
#' \item{eps_pri}{feasibility tolerance for primal feasibility condition}
#' \item{eps_dual}{feasibility tolerance for dual feasibility condition}
#' }
#' In accordance with the paper, iteration stops when both \code{r_norm} and \code{s_norm} values
#' become smaller than \code{eps_pri} and \code{eps_dual}, respectively.
#'
#' @examples
#' \donttest{
#' ## generate sample data
#' m = 50
#' n = 100
#' p = 0.1   # percentage of non-zero elements
#'
#' x0 = matrix(Matrix::rsparsematrix(n,1,p))
#' A = matrix(rnorm(m*n),nrow=m)
#' for (i in 1:ncol(A)){
#' A[,i] = A[,i]/sqrt(sum(A[,i]*A[,i]))
#' }
#' b = A%*%x0 + sqrt(0.001)*matrix(rnorm(m))
#'
#' ## set regularization lambda value
#' lambda = 0.1*base::norm(t(A)%*%b, "F")
#'
#' ## run example
#' output = admm.lasso(A, b, lambda)
#' niter = length(output$history$s_norm)
#' history = output$history
#'
#' ## report convergence plot
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' plot(1:niter, history$objval, "b", main="cost function")
#' plot(1:niter, history$r_norm, "b", main="primal residual")
#' plot(1:niter, history$s_norm, "b", main="dual residual")
#' par(opar)
#' }
#'
#' @references
#' \insertRef{tibshirani_regression_1996a}{ADMM}
#'
#' @export
admm.lasso <- function(A, b, lambda=1.0, rho=1.0, alpha=1.0,
abstol=1e-4, reltol=1e-2, maxiter=1000){
## PREPROCESSING
# data validity
if (!check_data_matrix(A)){
stop("* ADMM.LASSO : input 'A' is invalid data matrix.") }
if (!check_data_vector(b)){
stop("* ADMM.LASSO : input 'b' is invalid data vector") }
b = as.vector(b)
# data size
if (nrow(A)!=length(b)){
stop("* ADMM.LASSO : two inputs 'A' and 'b' have non-matching dimension.")}
# initial value
xinit = as.vector(rnorm(ncol(A))/10)
# if (!is.na(xinit)){
# if ((!check_data_vector(xinit))||(length(xinit)!=ncol(A))){
# stop("* ADMM.LASSO : input 'xinit' is invalid.")
# }
# xinit = as.vector(xinit)
# } else {
# xinit = as.vector(rep(0,ncol(A)))
# }
# other parameters
meps = (.Machine$double.eps)
negsmall = -meps
if (!check_param_constant(lambda,negsmall)){
stop("* ADMM.LASSO : reg. parameter 'lambda' is invalid.")
}
if (lambda < meps){
message("* ADMM.LASSO : since 'lambda' is effectively zero, a least-squares solution is returned.")
xsol = as.vector(aux_pinv(A)%*%matrix(b))
output = list()
output$x = xsol
return(output)
}
if (!check_param_constant_multiple(c(abstol, reltol))){
stop("* ADMM.LASSO : tolerance level is invalid.")
}
if (!check_param_integer(maxiter, 2)){
stop("* ADMM.LASSO : 'maxiter' should be a positive integer.")
}
maxiter = as.integer(maxiter)
if (!check_param_constant(rho,0)){
stop("* ADMM.LASSO : 'rho' should be a positive real number.")
}
if (!check_param_constant(alpha,0)){
stop("* ADMM.LASSO : 'alpha' should be a positive real number.")
}
if ((alpha<1)||(alpha>2)){
warning("* ADMM.LASSO : 'alpha' value is suggested to be in [1,2].")
}
## MAIN COMPUTATION & RESULT RETURN
result = admm_lasso(A,b,lambda,xinit,reltol,abstol,maxiter,rho,alpha)
## RESULT RETURN
kk = result$k
output = list()
output$x = result$x
output$history = data.frame(objval=result$objval[1:kk],
r_norm=result$r_norm[1:kk],
s_norm=result$s_norm[1:kk],
eps_pri=result$eps_pri[1:kk],
eps_dual=result$eps_dual[1:kk]
)
return(output)
}
#
# lcost <- function(x){diff = as.vector(A%*%x-b); return((sum(diff*diff)/2)+lambda*sum(abs(x)))}
#
# 1. https://stackoverflow.com/questions/2247111/evaluating-variable-within-r-loop
# multiply <- function(i) {
# force(i)
# function(x) x * i
# }
# funcs <- list()
# for(i in 1:21){
# funcName <- paste( 'func', i, sep = '' )
# funcs[[funcName]] = multiply(i)
# }
#
# 2. https://stackoverflow.com/questions/15627701/r-scope-force-variable-substitution-in-function-without-local-environment?noredirect=1&lq=1
#
# 3. https://stackoverflow.com/questions/32100372/calling-functions-from-a-list-recursively?noredirect=1&lq=1
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.lasso.R |
#' Robust Principal Component Analysis
#'
#' Given a data matrix \eqn{M}, it finds a decomposition
#' \deqn{\textrm{min}~\|L\|_*+\lambda \|S\|_1\quad \textrm{s.t.}\quad L+S=M}
#' where \eqn{\|L\|_*} represents a nuclear norm for a matrix \eqn{L} and
#' \eqn{\|S\|_1 = \sum |S_{i,j}|}, and \eqn{\lambda} a balancing/regularization
#' parameter. The choice of such norms leads to impose \emph{low-rank} property for \eqn{L} and
#' \emph{sparsity} on \eqn{S}.
#'
#'
#' @param M an \eqn{(m\times n)} data matrix
#' @param lambda a regularization parameter
#' @param mu an augmented Lagrangian parameter
#' @param tol relative tolerance stopping criterion
#' @param maxiter maximum number of iterations
#'
#' @return a named list containing \describe{
#' \item{L}{an \eqn{(m\times n)} low-rank matrix}
#' \item{S}{an \eqn{(m\times n)} sparse matrix}
#' \item{history}{dataframe recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' For the RPCA implementation, we chose a very simple stopping criterion
#' \deqn{\|M-(L_k+S_k)\|_F \le \textrm{tol} \cdot \|M\|_F}
#' for each iteration step \eqn{k}. So for this method, we only provide a vector of relative errors,
#' \describe{
#' \item{error}{relative error computed}
#' }
#'
#' @examples
#' ## generate data matrix from standard normal
#' X = matrix(rnorm(20*5),nrow=5)
#'
#' ## try different regularization values
#' out1 = admm.rpca(X, lambda=0.01)
#' out2 = admm.rpca(X, lambda=0.1)
#' out3 = admm.rpca(X, lambda=1)
#'
#' ## visualize sparsity
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(1,3))
#' image(out1$S, main="lambda=0.01")
#' image(out2$S, main="lambda=0.1")
#' image(out3$S, main="lambda=1")
#' par(opar)
#'
#' @references
#' \insertRef{candes_robust_2011a}{ADMM}
#'
#' @export
admm.rpca <- function(M, lambda=1/sqrt(max(nrow(M),ncol(M))), mu=1.0, tol=1e-7, maxiter=1000){
# -----------------------------------------------------------------
## PREPROCESSING
# 1. data M
if (!check_data_matrix(M)){
stop("* ADMM.RPCA : input 'M' is invalid data matrix.")
}
# 2. lambda and mu
lambda = as.double(lambda)
mu = as.double(mu)
if (!check_param_constant(mu,0)){
stop("* ADMM.RPCA : 'mu' should be a positive real number.")
}
if (!check_param_constant(lambda,0)){
stop("* ADMM.RPCA : 'lambda' should be a positive real number.")
}
# 3. tol and maxiter
tol = as.double(tol)
maxiter = as.integer(maxiter)
if (!check_param_constant(tol,0)){
stop("* ADMM.RPCA : 'tol' should be a positive real number.")
}
if (!check_param_integer(maxiter, 2)){
stop("* ADMM.RPCA : 'maxiter' should be a positive integer.")
}
# -----------------------------------------------------------------
## MAIN ITERATION
# 1. run CPP computation
runcpp = admm_rpca(M, tol, maxiter, mu, lambda)
# 2. separate out the results
kk = runcpp$k
# -----------------------------------------------------------------
## RETURN THE OUTPUT
output = list()
output$L = runcpp$L
output$S = runcpp$S
output$history = data.frame(error=runcpp$errors[1:kk])
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.rpca.R |
#' Semidefinite Programming
#'
#' We solve the following standard semidefinite programming (SDP) problem
#' \deqn{\textrm{min}_X ~ \textrm{tr}(CX)}
#' \deqn{\textrm{s.t.} A(X)=b, ~ X \geq 0 }
#' with \eqn{A(X)_i = \textrm{tr}(A_i^\top X) = b_i} for \eqn{i=1,\ldots,m}, where \eqn{X \geq 0} stands for positive semidefiniteness of the matrix \eqn{X}. In the standard form,
#' the matrices \eqn{C, A_1,A_2,\ldots,A_m} are symmetric and the solution \eqn{X} would be symmetric and positive semidefinite. This function implements alternating direction augmented Lagrangian methods.
#'
#' @param C an \eqn{(n\times n)} symmetric matrix for cost.
#' @param A a length-\eqn{m} list of \eqn{(n\times n)} symmetric matrices for constraint.
#' @param b a length-\eqn{m} vector for equality condition.
#' @param mu penalty parameter; positive real number.
#' @param rho step size for updating in \eqn{(0, \frac{1+\sqrt{5}}{2})}.
#' @param abstol absolute tolerance stopping criterion.
#' @param maxiter maximum number of iterations.
#' @param print.progress a logical; \code{TRUE} to show the progress, \code{FALSE} to go silent.
#'
#' @return a named list containing \describe{
#' \item{x}{a length-\eqn{n} solution vector}
#' \item{history}{dataframe recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' When you run the algorithm, the output returns not only the solution, but also the iteration history recording
#' the following fields over the iterates,
#' \describe{
#' \item{objval}{objective (cost) function value}
#' \item{eps_pri}{feasibility tolerance for primal feasibility condition}
#' \item{eps_dual}{feasibility tolerance for dual feasibility condition}
#' \item{gap}{gap between primal and dual cost function.}
#' }
#' We use the stopping criterion which breaks the iteration when all \code{eps_pri},\code{eps_dual}, and \code{gap}
#' become smaller than \code{abstol}.
#'
#' @examples
#' ## a toy example
#' # generate parameters
#' C = matrix(c(1,2,3,2,9,0,3,0,7),nrow=3,byrow=TRUE)
#' A1 = matrix(c(1,0,1,0,3,7,1,7,5),nrow=3,byrow=TRUE)
#' A2 = matrix(c(0,2,8,2,6,0,8,0,4),nrow=3,byrow=TRUE)
#'
#' A = list(A1, A2)
#' b = c(11, 19)
#'
#' # run the algorithm
#' run = admm.sdp(C,A,b)
#' hst = run$history
#'
#' # visualize
#' opar <- par(no.readonly=TRUE)
#' par(mfrow=c(2,2))
#' plot(hst$objval, type="b", cex=0.25, main="objective value")
#' plot(hst$eps_pri, type="b", cex=0.25, main="primal feasibility")
#' plot(hst$eps_dual, type="b", cex=0.25, main="dual feasibility")
#' plot(hst$gap, type="b", cex=0.25, main="primal-dual gap")
#' par(opar)
#'
#' \dontrun{
#' ## comparison with CVXR's result
#' require(CVXR)
#'
#' # problems definition
#' X = Variable(3,3,PSD=TRUE)
#' myobj = Minimize(sum_entries(C*X)) # objective
#' mycon = list( # constraint
#' sum_entries(A[[1]]*X) == b[1],
#' sum_entries(A[[2]]*X) == b[2]
#' )
#' myp = Problem(myobj, mycon) # problem
#'
#' # run and visualize
#' res = solve(myp)
#' Xsol = res$getValue(X)
#'
#' opar = par(no.readonly=TRUE)
#' par(mfrow=c(1,2), pty="s")
#' image(run$X, axes=FALSE, main="ADMM result")
#' image(Xsol, axes=FALSE, main="CVXR result")
#' par(opar)
#' }
#'
#' @references
#' \insertRef{wen_alternating_2010a}{ADMM}
#'
#' @author Kisung You
#' @export
admm.sdp <- function(C, A, b, mu=1.0, rho=1, abstol=1e-10, maxiter=496, print.progress=FALSE){
#----------------------------------------------------------------------
## PREPROCESSING
if ((!is.matrix(C))||(!base::isSymmetric(C))){
stop("* admm.sdp : 'C' should be a symmetric matrix.")
}
n = base::nrow(C)
if (!is.list(A)){
stop("* admm.sdp : 'A' should be a list.")
}
m = length(A)
if ((!is.vector(b))||(length(b)!=m)){
stop("* admm.sdp : 'b' should be a vector having same length as 'A'.")
}
Anrow = unique(unlist(lapply(A, base::nrow)))
Ancol = unique(unlist(lapply(A, base::ncol)))
cond1 = all(unlist(lapply(A, base::isSymmetric))==TRUE)
cond2 = FALSE
cond3 = FALSE
if (length(Anrow)==1){ if (Anrow==n){ cond2 = TRUE } }
if (length(Ancol)==1){ if (Ancol==n){ cond3 = TRUE } }
if (!(cond1&&cond2&&cond3)){
stop("* admm.sdp : 'A' should be a list of symmetric matrices having same size as 'C'.")
}
mymu = ifelse(mu > 0, as.double(mu), stop("* admm.sdp : 'mu' should be a positive real number."))
myrho = ifelse(((rho > 0)&&(rho < (1+sqrt(5))/2)), as.double(rho), stop("* admm.sdp : 'rho' should be a number in (0,(1+sqrt(5))/2)."))
myiter = round(maxiter)
mytol = as.double(abstol)
mygamma = 0.9
myprint = as.logical(print.progress)
#----------------------------------------------------------------------
## Run
result = admm_sdp(C, A, b, mymu, myrho, mygamma, myiter, mytol, myprint)
#----------------------------------------------------------------------
## Wrap and Report
output = list()
output$X = result$X
output$history = data.frame(objval=result$objval,
eps_pri=result$eps_pri,
eps_dual=result$eps_dual,
gap=result$gap)
return(output)
}
# pack <- "ADMM"
# path <- find.package(pack)
# system(paste(shQuote(file.path(R.home("bin"), "R")),
# "CMD", "Rd2pdf", shQuote(path)))
# C = matrix(c(1,2,3,2,9,0,3,0,7),nrow=3,byrow=TRUE)
# A1 = matrix(c(1,0,1,0,3,7,1,7,5),nrow=3,byrow=TRUE)
# A2 = matrix(c(0,2,8,2,6,0,8,0,4),nrow=3,byrow=TRUE)
#
# A = list(A1, A2)
# b = c(11, 19)
#
# output = admm.sdp(C,A,b)
#
# library(CVXR)
#
# X = Variable(3,3,PSD=TRUE)
# myobj = Minimize(sum_entries(C*X))
# mycon = list(
# sum_entries(A[[1]]*X) == b[1],
# sum_entries(A[[2]]*X) == b[2]
# )
# myp = Problem(myobj, mycon)
# res = solve(myp)
# Xsol = res$getValue(X)
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.sdp.R |
#' Sparse PCA
#'
#' @description Sparse Principal Component Analysis aims at finding a sparse vector by solving
#' \deqn{\textrm{max}_x~x^T\Sigma x \quad \textrm{s.t.} \quad \|x\|_2\le 1,~\|x\|_0\le K}
#' where \eqn{\|x\|_0} is the number of non-zero elements in a vector \eqn{x}. A convex relaxation
#' of this problem was proposed to solve the following problem,
#' \deqn{\textrm{max}_X~<\Sigma,X> ~\textrm{s.t.} \quad Tr(X)=1,~\|X\|_0 \le K^2, ~X\ge 0,~\textrm{rank}(X)=1}
#' where \eqn{X=xx^T} is a \eqn{(p\times p)} matrix that is outer product of a vector \eqn{x} by itself,
#' and \eqn{X\ge 0} means the matrix \eqn{X} is positive semidefinite.
#' With the rank condition dropped, it can be restated as
#' \deqn{\textrm{max}_X~ <\Sigma,X>-\rho\|X\|_1 \quad \textrm{s.t.}\quad Tr(X)=1,X\ge 0.}
#' After acquiring each principal component vector, an iterative step based on the Schur complement deflation method
#' is applied to regress out the impact of previously computed projection vectors. It should be noted that
#' the resulting sparse basis vectors may \emph{not be orthonormal}.
#'
#' @param Sigma a \eqn{(p\times p)} (sample) covariance matrix.
#' @param numpc number of principal components to be extracted.
#' @param mu an augmented Lagrangian parameter.
#' @param rho a regularization parameter for sparsity.
#' @param abstol absolute tolerance stopping criterion.
#' @param reltol relative tolerance stopping criterion.
#' @param maxiter maximum number of iterations.
#'
#' @return a named list containing \describe{
#' \item{basis}{a \eqn{(p\times numpc)} matrix whose columns are sparse principal components.}
#' \item{history}{a length-\code{numpc} list of dataframes recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' For the SPCA implementation, the main computation is performed sequentially for each projection vector. The \code{history}
#' field is a list of length \code{numpc}, where each element is a data frame containing the iteration history recording
#' the following fields over the iterates,
#' \describe{
#' \item{r_norm}{norm of primal residual}
#' \item{s_norm}{norm of dual residual}
#' \item{eps_pri}{feasibility tolerance for primal feasibility condition}
#' \item{eps_dual}{feasibility tolerance for dual feasibility condition}
#' }
#' In accordance with the paper, iteration stops when both \code{r_norm} and \code{s_norm} values
#' become smaller than \code{eps_pri} and \code{eps_dual}, respectively.
#'
#' @examples
#' ## generate a random matrix and compute its sample covariance
#' X = matrix(rnorm(1000*5),nrow=1000)
#' covX = stats::cov(X)
#'
#' ## compute 3 sparse basis
#' output = admm.spca(covX, 3)
#'
#' @references
#' \insertRef{ma_alternating_2013a}{ADMM}
#'
#' @export
admm.spca <- function(Sigma, numpc, mu=1.0, rho=1.0, abstol=1e-4, reltol=1e-2, maxiter=1000){
# -----------------------------------------------------------------
## PREPROCESSING
# 1. data
if ((!check_data_matrix(Sigma))||(!isSymmetric(Sigma))){
stop("* ADMM.SPCA : input 'Sigma' is invalid data matrix.") }
p = nrow(Sigma)
# 2. numpc
numpc = as.integer(numpc)
if ((numpc<1)||(numpc>=p)||(is.na(numpc))||(is.infinite(numpc))||(!is.numeric(numpc))){
stop("* ADMM.SPCA : 'numpc' should be an integer in [1,nrow(Sigma)).")
}
# 3. mu, rho, abstol, reltol, maxiter
mu = as.double(mu)
rho = as.double(rho)
if (!check_param_constant(rho,0)){
stop("* ADMM.SPCA : 'rho' should be a positive real number.")
}
if (!check_param_constant(mu,0)){
stop("* ADMM.SPCA : 'mu' should be a positive real number.")
}
if (!check_param_constant_multiple(c(abstol, reltol))){
stop("* ADMM.SPCA : tolerance level is invalid.")
}
if (!check_param_integer(maxiter, 2)){
stop("* ADMM.SPCA : 'maxiter' should be a positive integer.")
}
# -----------------------------------------------------------------
## MAIN ITERATION
basis = array(0,c(p,numpc))
history = list()
for (i in 1:numpc){
# 1. run cpp part
runcpp = admm_spca(Sigma, reltol, abstol, maxiter, mu, rho)
# 2. separate outputs
tmpX = runcpp$X
tmpk = runcpp$k
tmphist = data.frame(r_norm=runcpp$r_norm[1:tmpk],
s_norm=runcpp$s_norm[1:tmpk],
eps_pri=runcpp$eps_pri[1:tmpk],
eps_dual=runcpp$eps_dual[1:tmpk])
history[[i]] = tmphist
# 3. rank-1 vector extraction
solvec = admm_spca_rk1vec(tmpX)
basis[,i] = solvec
# 4. update
Sigma = admm_spca_deflation(Sigma, solvec)
}
# -----------------------------------------------------------------
## RETURN OUTPUT
output = list()
output$basis = basis
output$history = history
return(output)
}
# Schur complement deflation ----------------------------------------------
#' @keywords internal
#' @noRd
admm_spca_deflation <- function(Sig, vec){
p = length(vec)
term1 = (Sig%*%outer(vec,vec)%*%Sig)
term2 = sum((as.vector(Sig%*%matrix(vec,nrow=p)))*vec)
output = Sig - term1/term2
return(output)
}
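# note (illustrative) : the deflated matrix annihilates the extracted direction, i.e.
# admm_spca_deflation(Sig, vec) %*% vec is numerically the zero vector, so each subsequent
# sparse component is estimated on the variation remaining after removing 'vec'.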
# Rank-1 extraction -------------------------------------------------------
#' @keywords internal
#' @noRd
admm_spca_rk1vec <- function(X){
y = as.vector(base::eigen(X)$vectors[,1])
return(y)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.spca.R |
#' Total Variation Minimization
#'
#' 1-dimensional total variation minimization - also known as
#' signal denoising - is to solve the following
#' \deqn{\textrm{min}_x ~ \frac{1}{2}\|x-b\|_2^2 + \lambda \sum_i |x_{i+1}-x_i|}
#' for a given signal \eqn{b}.
#' The implementation is borrowed from Stephen Boyd's
#' \href{https://stanford.edu/~boyd/papers/admm/total_variation/total_variation.html}{MATLAB code}.
#'
#' @param b a length-\eqn{m} response vector
#' @param lambda regularization parameter
#' @param xinit a length-\eqn{m} vector for initial value
#' @param rho an augmented Lagrangian parameter
#' @param alpha an overrelaxation parameter in \eqn{[1,2]}
#' @param abstol absolute tolerance stopping criterion
#' @param reltol relative tolerance stopping criterion
#' @param maxiter maximum number of iterations
#'
#' @return a named list containing \describe{
#' \item{x}{a length-\eqn{m} solution vector}
#' \item{history}{dataframe recording iteration numerics. See the section for more details.}
#' }
#'
#' @section Iteration History:
#' When you run the algorithm, the output returns not only the solution, but also the iteration history recording
#' the following fields over the iterates,
#' \describe{
#' \item{objval}{objective (cost) function value}
#' \item{r_norm}{norm of primal residual}
#' \item{s_norm}{norm of dual residual}
#' \item{eps_pri}{feasibility tolerance for primal feasibility condition}
#' \item{eps_dual}{feasibility tolerance for dual feasibility condition}
#' }
#' In accordance with the paper, iteration stops when both \code{r_norm} and \code{s_norm} values
#' become smaller than \code{eps_pri} and \code{eps_dual}, respectively.
#'
#' @examples
#' ## generate sample data
#' x1 = as.vector(sin(1:100)+0.1*rnorm(100))
#' x2 = as.vector(cos(1:100)+0.1*rnorm(100)+5)
#' x3 = as.vector(sin(1:100)+0.1*rnorm(100)+2.5)
#' xsignal = c(x1,x2,x3)
#'
#' ## run example
#' output = admm.tv(xsignal)
#'
#' ## visualize
#' opar <- par(no.readonly=TRUE)
#' plot(1:300, xsignal, type="l", main="TV Regularization")
#' lines(1:300, output$x, col="red", lwd=2)
#' par(opar)
#'
#' @export
admm.tv <- function(b, lambda=1.0, xinit=NA,
rho=1.0, alpha=1.0, abstol=1e-4, reltol=1e-2, maxiter=1000){
## PREPROCESSING
# data validity
if (!check_data_vector(b)){
stop("* ADMM.TV : input 'b' is invalid data vector") }
b = as.vector(b)
# data size
# initial value
  if (!all(is.na(xinit))){
if ((!check_data_vector(xinit))||(length(xinit)!=length(b))){
stop("* ADMM.TV : input 'xinit' is invalid.")
}
xinit = as.vector(xinit)
} else {
xinit = as.vector(rep(0,length(b)))
}
# other parameters
if (!check_param_constant_multiple(c(abstol, reltol))){
stop("* ADMM.TV : tolerance level is invalid.")
}
if (!check_param_integer(maxiter, 2)){
stop("* ADMM.TV : 'maxiter' should be a positive integer.")
}
maxiter = as.integer(maxiter)
if (!check_param_constant(rho,0)){
stop("* ADMM.TV : 'rho' should be a positive real number.")
}
if (!check_param_constant(alpha,0)){
stop("* ADMM.TV : 'alpha' should be a positive real number.")
}
if ((alpha<1)||(alpha>2)){
warning("* ADMM.TV : 'alpha' value is suggested to be in [1,2].")
}
## MAIN COMPUTATION & RESULT RETURN
result = admm_tv(b, xinit, lambda, reltol, abstol, maxiter, rho, alpha)
## RESULT RETURN
klength = result$k
output = list()
output$x = result$x
output$history = data.frame(objval=result$objval[1:klength],
r_norm=result$r_norm[1:klength],
s_norm=result$s_norm[1:klength],
eps_pri=result$eps_pri[1:klength],
eps_dual=result$eps_dual[1:klength]
)
return(output)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/admm.tv.R |
# CHECKERS ----------------------------------------------------------------
#' @keywords internal
#' @noRd
check_data_matrix <- function(A){
cond1 = (is.matrix(A)) # matrix
cond2 = (!(any(is.infinite(A))||any(is.na(A))))
if (cond1&&cond2){
return(TRUE)
} else {
return(FALSE)
}
}
#' @keywords internal
#' @noRd
check_data_vector <- function(b){
cond1 = ((is.vector(b))||((is.matrix(b))&&
(length(b)==nrow(b))||(length(b)==ncol(b))))
cond2 = (!(any(is.infinite(b))||any(is.na(b))))
if (cond1&&cond2){
return(TRUE)
} else {
return(FALSE)
}
}
#' @keywords internal
#' @noRd
check_param_constant <- function(num, lowerbound=0){
cond1 = (length(num)==1)
cond2 = ((!is.infinite(num))&&(!is.na(num)))
cond3 = (num > lowerbound)
if (cond1&&cond2&&cond3){
return(TRUE)
} else {
return(FALSE)
}
}
#' @keywords internal
#' @noRd
check_param_constant_multiple <- function(numvec, lowerbound=0){
for (i in 1:length(numvec)){
if (!check_param_constant(numvec[i], lowerbound)){
return(FALSE)
}
}
return(TRUE)
}
#' @keywords internal
#' @noRd
check_param_integer <- function(num, lowerbound=0){
cond1 = (length(num)==1)
cond2 = ((!is.infinite(num))&&(!is.na(num)))
cond3 = (num > lowerbound)
cond4 = (abs(num-round(num)) < sqrt(.Machine$double.eps))
if (cond1&&cond2&&cond3&&cond4){
return(TRUE)
} else {
return(FALSE)
}
}
# AUXILIARY COMPUTATIONS --------------------------------------------------
# -----------------------------------------------------------------------
# 1. Regularized LU decomposition
#' @keywords internal
#' @noRd
boyd_factor <- function(A, rho){
m = nrow(A)
n = ncol(A)
if (m>=n){ # if skinny matrix
U = (chol(t(A)%*%A + rho*diag(n)))
} else {
U = (chol(diag(m)+(1/rho)*(A%*%t(A))))
}
output = list()
output$L = t(U)
output$U = U
  return(output)
}
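# minimal usage sketch (assumed downstream use; kept as a comment on purpose):
# for a skinny A, the cached factor solves (t(A)%*%A + rho*diag(ncol(A))) %*% x = q
# via two triangular solves,
#   fac = boyd_factor(A, rho)
#   x   = backsolve(fac$U, forwardsolve(fac$L, q))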
# -----------------------------------------------------------------------
# 2. PseudoInverse using SVD and NumPy Scheme
# https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Singular_value_decomposition_(SVD)
#' @keywords internal
#' @noRd
aux_pinv <- function(A){
svdA = base::svd(A)
tolerance = (.Machine$double.eps)*max(c(nrow(A),ncol(A)))*as.double(max(svdA$d))
idxcut = which(svdA$d <= tolerance)
invDvec = (1/svdA$d)
invDvec[idxcut] = 0
output = (svdA$v%*%diag(invDvec)%*%t(svdA$u))
return(output)
}
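# quick sanity sketch (illustrative only, not executed at load time):
#   set.seed(1)
#   A  = matrix(rnorm(20), nrow=5)
#   Ap = aux_pinv(A)
#   max(abs(A%*%Ap%*%A - A))   # Moore-Penrose condition; expected to be numerically ~0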
# -----------------------------------------------------------------------
# 3. updatd for genenet
# inversion : use half of the cores
#' @keywords internal
#' @noRd
aux_genetinversion <- function(A,rho,L,R,lambda2,parallel=FALSE,nCore=ceiling(detectCores()/2)){
# -----------------------------------------------------------------------
  #   input checks are skipped here : this internal helper assumes its arguments were already validated
# -----------------------------------------------------------------------
# case 1 : lambda2 is a single value
if (length(lambda2)==1){
mat = aux_pinv((t(A)%*%A)+rho*(t(L)%*%L)+2*lambda2*(t(R)%*%R))
return(mat)
} else {
# -----------------------------------------------------------------------
# case 2 : lambda2 is a vector of lambda values
if (parallel==TRUE){
nCore = max(1, as.integer(nCore))
nlambda = length(lambda2)
p = ncol(A)
cl = makeCluster(nCore)
registerDoParallel(cl)
iteach = NULL
output = foreach (iteach=1:nlambda, .combine = cbind) %dopar% {
aux_pinv((t(A)%*%A)+rho*(t(L)%*%L)+2*lambda2[iteach]*(t(R)%*%R))
}
stopCluster(cl)
dim(output) = c(p,p,nlambda)
return(output)
} else { # this is CPP part
output = multipleinversion(A,rho,L,R,lambda2)
return(output)
}
}
}
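# note : the parallel branch above relies on the 'parallel', 'doParallel', and 'foreach' packages
# (detectCores, makeCluster/registerDoParallel, and %dopar%, respectively), which are presumably
# imported elsewhere in the package; the sequential branch defers to the compiled 'multipleinversion' routine.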
# test1 : parallel (of R) : 1830803 vs 3986 : Sequential is Better
# microbenchmark("job1"={output1=aux_genetinversion(A,rho,L,R,lambda2,parallel=TRUE,nCore=1)},
# "job2"={output2=aux_genetinversion(A,rho,L,R,lambda2,parallel=FALSE)}, times=10)
# test2 : number of Cores : not necessarily better to use more cores
#
# result : simply use RCPP VERSION : it is much faster
# -----------------------------------------------------------------------
# 4. Laplacian L to R matrix : L = R^T * R
#' @keywords internal
#' @noRd
aux_laplacian2R <- function(L,size="auto"){
if (!isSymmetric(L)){
stop("we need symmetric matrix anyway.")
}
# possibly the rank
rL = as.integer(Matrix::rankMatrix(L))
if (size=="auto"){
size = rL
} else {
size = as.integer(size)
if (size>rL){
message("* laplacian... hmm... auto adjust!")
size=rL
}
}
# use top eigenpairs
eigL = base::eigen(L)
V = eigL$vectors[,1:size]
# for (i in 1:size){
# vecV = as.vector(V[,i])
# normV = sqrt(sum(vecV*vecV))
# V[,i] = V[,i]/normV
# }
Dhalf = as.vector(sqrt(eigL$values[1:size]))
R = (diag(Dhalf)%*%t(V))
return(R)
}
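# quick sanity sketch (illustrative only, not executed at load time):
#   L = matrix(c(2,-1,-1, -1,1,0, -1,0,1), nrow=3)   # Laplacian of a 3-node star graph (rank 2)
#   R = aux_laplacian2R(L)
#   max(abs(t(R)%*%R - L))   # expected to be numerically ~0 since L = t(R)%*%R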
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/auxiliary.R |
.pkgenv <- new.env(parent = emptyenv())
.onAttach <- function(...){
## Retrieve Year Information
date <- date()
x <- regexpr("[0-9]{4}", date)
this.year <- substr(date, x[1], x[1] + attr(x, "match.length") - 1)
# Retrieve Current Version
this.version = utils::packageVersion("ADMM")
## Print on Screen
packageStartupMessage("** ----------------------------------------------------------------- **")
packageStartupMessage("** ADMM")
packageStartupMessage("** - Algorithms using Alternating Direction Method of Multipliers")
packageStartupMessage("**")
packageStartupMessage("** Version : ",this.version," (",this.year,")",sep="")
packageStartupMessage("** Maintainer : Kisung You (kisungyou@outlook.com)")
packageStartupMessage("**")
packageStartupMessage("** Please share any bugs or suggestions to the maintainer.")
packageStartupMessage("** ----------------------------------------------------------------- **")
}
.onUnload <- function(libpath) {
library.dynam.unload("ADMM", libpath)
}
| /scratch/gouwar.j/cran-all/cranData/ADMM/R/zzz.R |
## Matt Galloway
#' @title Penalized precision matrix estimation via ADMM
#'
#' @description Penalized precision matrix estimation using the ADMM algorithm.
#' Consider the case where \eqn{X_{1}, ..., X_{n}} are iid \eqn{N_{p}(\mu,
#' \Sigma)} and we are tasked with estimating the precision matrix,
#' denoted \eqn{\Omega \equiv \Sigma^{-1}}. This function solves the
#' following optimization problem:
#' \describe{
#' \item{Objective:}{
#' \eqn{\hat{\Omega}_{\lambda} = \arg\min_{\Omega \in S_{+}^{p}}
#' \left\{ Tr\left(S\Omega\right) - \log \det\left(\Omega \right) +
#' \lambda\left[\frac{1 - \alpha}{2}\left\| \Omega \right\|_{F}^{2} +
#' \alpha\left\| \Omega \right\|_{1} \right] \right\}}}
#' }
#' where \eqn{0 \leq \alpha \leq 1}, \eqn{\lambda > 0},
#' \eqn{\left\|\cdot \right\|_{F}^{2}} is the Frobenius norm and we define
#' \eqn{\left\|A \right\|_{1} = \sum_{i, j} \left| A_{ij} \right|}.
#' This elastic net penalty is identical to the penalty used in the popular penalized
#' regression package \code{glmnet}. Clearly, when \eqn{\alpha = 0} the elastic-net
#' reduces to a ridge-type penalty and when \eqn{\alpha = 1} it reduces to a
#' lasso-type penalty.
#'
#' @details For details on the implementation of 'ADMMsigma', see the website
#' \url{https://mgallow.github.io/ADMMsigma/articles/Details.html}.
#'
#' @param X option to provide a nxp data matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.
#' @param S option to provide a pxp sample covariance matrix (denominator n). If argument is \code{NULL} and \code{X} is provided instead then \code{S} will be computed automatically.
#' @param nlam number of \code{lam} tuning parameters for penalty term generated from \code{lam.min.ratio} and \code{lam.max} (automatically generated). Defaults to 10.
#' @param lam.min.ratio smallest \code{lam} value provided as a fraction of \code{lam.max}. The function will automatically generate \code{nlam} tuning parameters from \code{lam.min.ratio*lam.max} to \code{lam.max} in log10 scale. \code{lam.max} is calculated to be the smallest \code{lam} such that all off-diagonal entries in \code{Omega} are equal to zero (\code{alpha} = 1). Defaults to 1e-2.
#' @param lam option to provide positive tuning parameters for penalty term. This will cause \code{nlam} and \code{lam.min.ratio} to be disregarded. If a vector of parameters is provided, they should be in increasing order. Defaults to NULL.
#' @param alpha elastic net mixing parameter contained in [0, 1]. \code{0 = ridge, 1 = lasso}. If a vector of parameters is provided, they should be in increasing order. Defaults to grid of values \code{seq(0, 1, 0.2)}.
#' @param diagonal option to penalize the diagonal elements of the estimated precision matrix (\eqn{\Omega}). Defaults to \code{FALSE}.
#' @param path option to return the regularization path. This option should be used with extreme care if the dimension is large. If set to TRUE, cores must be set to 1 and errors and optimal tuning parameters will be based on the full sample. Defaults to FALSE.
#' @param rho initial step size for ADMM algorithm.
#' @param mu factor for primal and residual norms in the ADMM algorithm. This will be used to adjust the step size \code{rho} after each iteration.
#' @param tau.inc factor in which to increase step size \code{rho}
#' @param tau.dec factor in which to decrease step size \code{rho}
#' @param crit criterion for convergence (\code{ADMM} or \code{loglik}). If \code{crit = loglik} then iterations will stop when the relative change in log-likelihood is less than \code{tol.abs}. Default is \code{ADMM} and follows the procedure outlined in Boyd, et al.
#' @param tol.abs absolute convergence tolerance. Defaults to 1e-4.
#' @param tol.rel relative convergence tolerance. Defaults to 1e-4.
#' @param maxit maximum number of iterations. Defaults to 1e4.
#' @param adjmaxit adjusted maximum number of iterations. During cross validation this option allows the user to adjust the maximum number of iterations after the first \code{lam} tuning parameter has converged (for each \code{alpha}). This option is intended to be paired with \code{warm} starts and allows for 'one-step' estimators. Defaults to NULL.
#' @param K specify the number of folds for cross validation.
#' @param crit.cv cross validation criterion (\code{loglik}, \code{penloglik}, \code{AIC}, or \code{BIC}). Defaults to \code{loglik}.
#' @param start specify \code{warm} or \code{cold} start for cross validation. Default is \code{warm}.
#' @param cores option to run CV in parallel. Defaults to \code{cores = 1}.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#'
#' @return returns class object \code{ADMMsigma} which includes:
#' \item{Call}{function call.}
#' \item{Iterations}{number of iterations.}
#' \item{Tuning}{optimal tuning parameters (lam and alpha).}
#' \item{Lambdas}{grid of lambda values for CV.}
#' \item{Alphas}{grid of alpha values for CV.}
#' \item{maxit}{maximum number of iterations.}
#' \item{Omega}{estimated penalized precision matrix.}
#' \item{Sigma}{estimated covariance matrix from the penalized precision matrix (inverse of Omega).}
#' \item{Path}{array containing the solution path. Solutions will be ordered in ascending alpha values for each lambda.}
#' \item{Z}{final sparse update of estimated penalized precision matrix.}
#' \item{Y}{final dual update.}
#' \item{rho}{final step size.}
#' \item{Loglik}{penalized log-likelihood for Omega}
#' \item{MIN.error}{minimum average cross validation error (cv.crit) for optimal parameters.}
#' \item{AVG.error}{average cross validation error (cv.crit) across all folds.}
#' \item{CV.error}{cross validation errors (cv.crit).}
#'
#' @references
#' \itemize{
#' \item Boyd, Stephen, Neal Parikh, Eric Chu, Borja Peleato, Jonathan Eckstein, and others. 2011. 'Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers.' \emph{Foundations and Trends in Machine Learning} 3 (1). Now Publishers, Inc.: 1-122. \url{https://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf}
#' \item Hu, Yue, Chi, Eric C, amd Allen, Genevera I. 2016. 'ADMM Algorithmic Regularization Paths for Sparse Statistical Machine Learning.' \emph{Splitting Methods in Communication, Imaging, Science, and Engineering}. Springer: 433-459.
#' \item Zou, Hui and Hastie, Trevor. 2005. 'Regularization and Variable Selection via the Elastic Net.' \emph{Journal of the Royal Statistial Society: Series B (Statistical Methodology)} 67 (2). Wiley Online Library: 301-320.
#' \item Rothman, Adam. 2017. 'STAT 8931 notes on an algorithm to compute the Lasso-penalized Gaussian likelihood precision matrix estimator.'
#' }
#'
#' @author Matt Galloway \email{gall0441@@umn.edu}
#'
#' @seealso \code{\link{plot.ADMM}}, \code{\link{RIDGEsigma}}
#'
#' @export
#'
#' @examples
#' # generate data from a sparse matrix
#' # first compute covariance matrix
#' S = matrix(0.7, nrow = 5, ncol = 5)
#' for (i in 1:5){
#' for (j in 1:5){
#' S[i, j] = S[i, j]^abs(i - j)
#' }
#' }
#'
#' # generate 100 x 5 matrix with rows drawn from iid N_p(0, S)
#' set.seed(123)
#' Z = matrix(rnorm(100*5), nrow = 100, ncol = 5)
#' out = eigen(S, symmetric = TRUE)
#' S.sqrt = out$vectors %*% diag(out$values^0.5)
#' S.sqrt = S.sqrt %*% t(out$vectors)
#' X = Z %*% S.sqrt
#'
#' # elastic-net type penalty (use CV for optimal lambda and alpha)
#' ADMMsigma(X)
#'
#' # ridge penalty (use CV for optimal lambda)
#' ADMMsigma(X, alpha = 0)
#'
#' # lasso penalty (lam = 0.1)
#' ADMMsigma(X, lam = 0.1, alpha = 1)
# we define the ADMM covariance estimation function
ADMMsigma = function(X = NULL, S = NULL, nlam = 10, lam.min.ratio = 0.01,
lam = NULL, alpha = seq(0, 1, 0.2), diagonal = FALSE, path = FALSE,
rho = 2, mu = 10, tau.inc = 2, tau.dec = 2, crit = c("ADMM",
"loglik"), tol.abs = 1e-04, tol.rel = 1e-04, maxit = 10000,
adjmaxit = NULL, K = 5, crit.cv = c("loglik", "penloglik",
"AIC", "BIC"), start = c("warm", "cold"), cores = 1, trace = c("progress",
"print", "none")) {
# checks
if (is.null(X) && is.null(S)) {
stop("Must provide entry for X or S!")
}
if (!all(alpha >= 0 & alpha <= 1)) {
stop("alpha must be in [0,1]!")
}
if (!all(lam > 0)) {
stop("lam must be positive!")
}
if (!(all(c(rho, mu, tau.inc, tau.dec, tol.abs, tol.rel, maxit,
adjmaxit, K, cores) > 0))) {
stop("Entry must be positive!")
}
if (!(all(sapply(c(rho, mu, tau.inc, tau.dec, tol.abs, tol.rel,
maxit, adjmaxit, K, cores, nlam, lam.min.ratio), length) <=
1))) {
stop("Entry must be single value!")
}
    if (any(c(maxit, adjmaxit, K, cores)%%1 != 0)) {
stop("Entry must be an integer!")
}
if (cores < 1) {
stop("Number of cores must be positive!")
}
if (cores > 1 && path) {
cat("Parallelization not possible when producing solution path. Setting cores = 1...\n\n")
cores = 1
}
K = ifelse(path, 1, K)
if (cores > K) {
cat("Number of cores exceeds K... setting cores = K\n\n")
cores = K
}
if (is.null(adjmaxit)) {
adjmaxit = maxit
}
# match values
crit = match.arg(crit)
crit.cv = match.arg(crit.cv)
start = match.arg(start)
trace = match.arg(trace)
call = match.call()
alpha = sort(alpha)
MIN.error = AVG.error = CV.error = NULL
n = ifelse(is.null(X), nrow(S), nrow(X))
# compute sample covariance matrix, if necessary
if (is.null(S)) {
S = (nrow(X) - 1)/nrow(X) * cov(X)
}
# compute grid of lam values, if necessary
if (is.null(lam)) {
if (!((lam.min.ratio <= 1) && (lam.min.ratio > 0))) {
cat("lam.min.ratio must be in (0, 1]... setting to 1e-2!\n\n")
lam.min.ratio = 0.01
}
if (!((nlam > 0) && (nlam%%1 == 0))) {
cat("nlam must be a positive integer... setting to 10!\n\n")
nlam = 10
}
# calculate lam.max and lam.min
        lam.max = max(abs(S - diag(diag(S))))
lam.min = lam.min.ratio * lam.max
# calculate grid of lambda values
lam = 10^seq(log10(lam.min), log10(lam.max), length = nlam)
} else {
# sort lambda values
lam = sort(lam)
}
# perform cross validation, if necessary
init = diag(diag(S)^(-1))
zeros = matrix(0, nrow = nrow(S), ncol = ncol(S))
if ((length(lam) > 1 || length(alpha) > 1) & (!is.null(X) ||
path)) {
# run CV in parallel?
if (cores > 1) {
# execute CVP_ADMM
ADMM = CVP_ADMM(X = X, lam = lam, alpha = alpha, diagonal = diagonal,
rho = rho, mu = mu, tau.inc = tau.inc, tau.dec = tau.dec,
crit = crit, tol.abs = tol.abs, tol.rel = tol.rel,
maxit = maxit, adjmaxit = adjmaxit, K = K, crit.cv = crit.cv,
start = start, cores = cores, trace = trace)
MIN.error = ADMM$min.error
AVG.error = ADMM$avg.error
CV.error = ADMM$cv.error
} else {
# execute CV_ADMMc
if (is.null(X)) {
X = matrix(0)
}
ADMM = CV_ADMMc(X = X, S = S, lam = lam, alpha = alpha,
diagonal = diagonal, path = path, rho = rho, mu = mu,
tau_inc = tau.inc, tau_dec = tau.dec, crit = crit,
tol_abs = tol.abs, tol_rel = tol.rel, maxit = maxit,
adjmaxit = adjmaxit, K = K, crit_cv = crit.cv,
start = start, trace = trace)
MIN.error = ADMM$min.error
AVG.error = ADMM$avg.error
CV.error = ADMM$cv.error
Path = ADMM$path
}
# print warning if lam on boundary
if (((ADMM$lam == lam[1]) || ADMM$lam == lam[length(lam)]) &&
((length(lam) != 1) && (!path))) {
cat("\nOptimal tuning parameter on boundary...!")
}
# compute final estimate at best tuning parameters
ADMM = ADMMc(S = S, initOmega = init, initZ = init, initY = zeros,
lam = ADMM$lam, alpha = ADMM$alpha, diagonal = diagonal,
rho = rho, mu = mu, tau_inc = tau.inc, tau_dec = tau.dec,
crit = crit, tol_abs = tol.abs, tol_rel = tol.rel,
maxit = maxit)
} else {
# execute ADMM_sigmac
if (length(lam) > 1 || length(alpha) > 1) {
stop("Must set specify X, set path = TRUE, or provide single value for lam and alpha.")
}
ADMM = ADMMc(S = S, initOmega = init, initZ = init, initY = zeros,
lam = lam, alpha = alpha, diagonal = diagonal, rho = rho,
mu = mu, tau_inc = tau.inc, tau_dec = tau.dec, crit = crit,
tol_abs = tol.abs, tol_rel = tol.rel, maxit = maxit)
}
# option to penalize diagonal
if (diagonal) {
C = 1
} else {
C = 1 - diag(ncol(S))
}
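    # note: C acts as an elementwise mask on the penalty; with diagonal = FALSE the diagonal
    # entries of Omega are excluded from the elastic-net term in the penalized log-likelihood below.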
# compute penalized loglik
loglik = (-n/2) * (sum(ADMM$Omega * S) - determinant(ADMM$Omega,
logarithm = TRUE)$modulus[1] + ADMM$lam * ((1 - ADMM$alpha)/2 *
sum((C * ADMM$Omega)^2) + ADMM$alpha * sum(abs(C * ADMM$Omega))))
# return values
tuning = matrix(c(log10(ADMM$lam), ADMM$alpha), ncol = 2)
colnames(tuning) = c("log10(lam)", "alpha")
if (!path) {
Path = NULL
}
returns = list(Call = call, Iterations = ADMM$Iterations, Tuning = tuning,
Lambdas = lam, Alphas = alpha, maxit = maxit, Omega = ADMM$Omega,
Sigma = qr.solve(ADMM$Omega), Path = Path, Z = ADMM$Z,
Y = ADMM$Y, rho = ADMM$rho, Loglik = loglik, MIN.error = MIN.error,
AVG.error = AVG.error, CV.error = CV.error)
class(returns) = "ADMM"
return(returns)
}
##-----------------------------------------------------------------------------------
#' @title Print ADMM object
#' @description Prints ADMM object and suppresses output if needed.
#' @param x class object ADMM
#' @param ... additional arguments.
#' @keywords internal
#' @export
print.ADMM = function(x, ...) {
# print warning if maxit reached
if (x$maxit <= x$Iterations) {
cat("\nMaximum iterations reached...!")
}
# print call
cat("\nCall: ", paste(deparse(x$Call), sep = "\n", collapse = "\n"),
"\n", sep = "")
# print iterations
cat("\nIterations: ", paste(x$Iterations, sep = "\n", collapse = "\n"),
"\n", sep = "")
# print optimal tuning parameters
cat("\nTuning parameters:\n")
print.default(round(x$Tuning, 3), print.gap = 2L, quote = FALSE)
# print loglik
cat("\nLog-likelihood: ", paste(round(x$Loglik, 5), sep = "\n",
collapse = "\n"), "\n", sep = "")
# print Omega if dim <= 10
if (nrow(x$Z) <= 10) {
cat("\nOmega:\n")
print.default(round(x$Z, 5))
} else {
cat("\n(...output suppressed due to large dimension!)\n")
}
}
##-----------------------------------------------------------------------------------
#' @title Plot ADMM object
#' @description Produces a plot for the cross validation errors, if available.
#' @param x class object ADMM.
#' @param type produce either 'heatmap' or 'line' graph
#' @param footnote option to print footnote of optimal values. Defaults to TRUE.
#' @param ... additional arguments.
#' @export
#' @examples
#' # generate data from a sparse matrix
#' # first compute covariance matrix
#' S = matrix(0.7, nrow = 5, ncol = 5)
#' for (i in 1:5){
#' for (j in 1:5){
#' S[i, j] = S[i, j]^abs(i - j)
#' }
#' }
#'
#' # generate 100 x 5 matrix with rows drawn from iid N_p(0, S)
#' set.seed(123)
#' Z = matrix(rnorm(100*5), nrow = 100, ncol = 5)
#' out = eigen(S, symmetric = TRUE)
#' S.sqrt = out$vectors %*% diag(out$values^0.5)
#' S.sqrt = S.sqrt %*% t(out$vectors)
#' X = Z %*% S.sqrt
#'
#' # produce line graph for ADMMsigma
#' plot(ADMMsigma(X), type = 'line')
#'
#' # produce CV heat map for ADMMsigma
#' plot(ADMMsigma(X), type = 'heatmap')
plot.ADMM = function(x, type = c("line", "heatmap"), footnote = TRUE,
...) {
# check
type = match.arg(type)
Means = NULL
if (is.null(x$CV.error)) {
stop("No cross validation errors to plot!")
}
if (type == "line") {
# gather values to plot
cv = cbind(expand.grid(lam = x$Lambdas, alpha = x$Alphas),
Errors = as.data.frame.table(x$CV.error)$Freq)
if (length(x$Alphas) > 1) {
# produce line graph
graph = ggplot(summarise(group_by(cv, lam, alpha),
Means = mean(Errors)), aes(log10(lam), Means, color = as.factor(alpha))) +
theme_minimal() + geom_line() + labs(title = "Cross-Validation Errors",
color = "alpha", y = "Average Error") + geom_vline(xintercept = x$Tuning[1],
linetype = "dotted")
} else {
# produce line graph with boxplots
graph = ggplot(cv, aes(as.factor(log10(lam)), Errors)) +
geom_jitter(width = 0.2, color = "navy blue") +
geom_boxplot() + theme_minimal() + labs(title = "Cross-Validation Errors",
y = "Error", x = "log10(lam)")
}
} else {
# augment values for heat map (helps visually)
lam = x$Lambdas
cv = expand.grid(lam = lam, alpha = x$Alphas)
Errors = 1/(c(x$AVG.error) + abs(min(x$AVG.error)) + 1)
cv = cbind(cv, Errors)
# design color palette
bluetowhite <- c("#000E29", "white")
# produce ggplot heat map
graph = ggplot(cv, aes(alpha, log10(lam))) + geom_raster(aes(fill = Errors)) +
scale_fill_gradientn(colours = colorRampPalette(bluetowhite)(2),
guide = "none") + theme_minimal() + labs(title = "Heatmap of Cross-Validation Errors")
}
if (footnote) {
# produce with footnote
graph + labs(caption = paste("**Optimal: log10(lam) = ",
round(x$Tuning[1], 3), ", alpha = ", round(x$Tuning[2],
3), sep = ""))
} else {
# produce without footnote
graph
}
}
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/R/ADMMsigma.R |
## Matt Galloway
#' @title Parallel CV (uses CV_ADMMc)
#' @description Parallel implementation of cross validation.
#'
#' @param X nxp data matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.
#' @param lam positive tuning parameters for elastic net penalty. If a vector of parameters is provided, they should be in increasing order. Defaults to grid of values \code{10^seq(-2, 2, 0.2)}.
#' @param alpha elastic net mixing parameter contained in [0, 1]. \code{0 = ridge, 1 = lasso}. If a vector of parameters is provided, they should be in increasing order. Defaults to grid of values \code{seq(0, 1, 0.2)}.
#' @param diagonal option to penalize the diagonal elements of the estimated precision matrix (\eqn{\Omega}). Defaults to \code{FALSE}.
#' @param rho initial step size for ADMM algorithm.
#' @param mu factor for primal and residual norms in the ADMM algorithm. This will be used to adjust the step size \code{rho} after each iteration.
#' @param tau.inc factor in which to increase step size \code{rho}
#' @param tau.dec factor in which to decrease step size \code{rho}
#' @param crit criterion for convergence (\code{ADMM} or \code{loglik}). If \code{crit = loglik} then iterations will stop when the relative change in log-likelihood is less than \code{tol.abs}. Default is \code{ADMM} and follows the procedure outlined in Boyd, et al.
#' @param tol.abs absolute convergence tolerance. Defaults to 1e-4.
#' @param tol.rel relative convergence tolerance. Defaults to 1e-4.
#' @param maxit maximum number of iterations. Defaults to 1e3.
#' @param adjmaxit adjusted maximum number of iterations. During cross validation this option allows the user to adjust the maximum number of iterations after the first \code{lam} tuning parameter has converged (for each \code{alpha}). This option is intended to be paired with \code{warm} starts and allows for 'one-step' estimators. Defaults to NULL.
#' @param K specify the number of folds for cross validation.
#' @param crit.cv cross validation criterion (\code{loglik}, \code{penloglik}, \code{AIC}, or \code{BIC}). Defaults to \code{loglik}.
#' @param start specify \code{warm} or \code{cold} start for cross validation. Default is \code{warm}.
#' @param cores option to run CV in parallel. Defaults to \code{cores = 1}.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#'
#' @return returns list of returns which includes:
#' \item{lam}{optimal tuning parameter.}
#' \item{alpha}{optimal tuning parameter.}
#' \item{min.error}{minimum average cross validation error (cv.crit) for optimal parameters.}
#' \item{avg.error}{average cross validation error (cv.crit) across all folds.}
#' \item{cv.error}{cross validation errors (cv.crit).}
#'
#' @keywords internal
# we define the CV_ADMMc function
CVP_ADMM = function(X = NULL, lam = 10^seq(-2, 2, 0.2), alpha = seq(0,
1, 0.2), diagonal = FALSE, rho = 2, mu = 10, tau.inc = 2, tau.dec = 2,
crit = c("ADMM", "loglik"), tol.abs = 1e-04, tol.rel = 1e-04,
maxit = 1000, adjmaxit = NULL, K = 5, crit.cv = c("loglik",
"penloglik", "AIC", "BIC"), start = c("warm", "cold"),
cores = 1, trace = c("progress", "print", "none")) {
# match values
crit = match.arg(crit)
crit.cv = match.arg(crit.cv)
start = match.arg(start)
trace = match.arg(trace)
lam = sort(lam)
alpha = sort(alpha)
# make cluster and register cluster
num_cores = detectCores()
if (cores > num_cores) {
cat("\nOnly detected", paste(num_cores, "cores...", sep = " "))
}
if (cores > K) {
cat("\nNumber of cores exceeds K... setting cores = K")
cores = K
}
cluster = makeCluster(cores)
registerDoParallel(cluster)
# use cluster for each fold in CV
n = nrow(X)
ind = sample(n)
k = NULL
CV = foreach(k = 1:K, .packages = "ADMMsigma", .inorder = FALSE) %dopar%
{
leave.out = ind[(1 + floor((k - 1) * n/K)):floor(k *
n/K)]
# training set
X.train = X[-leave.out, , drop = FALSE]
X_bar = apply(X.train, 2, mean)
X.train = scale(X.train, center = X_bar, scale = FALSE)
# validation set
X.valid = X[leave.out, , drop = FALSE]
X.valid = scale(X.valid, center = X_bar, scale = FALSE)
# sample covariances
S.train = crossprod(X.train)/(dim(X.train)[1])
S.valid = crossprod(X.valid)/(dim(X.valid)[1])
# run foreach loop on CVP_ADMMc
CVP_ADMMc(n = nrow(X.valid), S_train = S.train, S_valid = S.valid,
lam = lam, alpha = alpha, diagonal = diagonal,
rho = rho, mu = mu, tau_inc = tau.inc, tau_dec = tau.dec,
crit = crit, tol_abs = tol.abs, tol_rel = tol.rel,
maxit = maxit, adjmaxit = adjmaxit, crit_cv = crit.cv,
start = start, trace = trace)
}
# determine optimal tuning parameters
CV = array(as.numeric(unlist(CV)), dim = c(length(lam), length(alpha),
K))
AVG = apply(CV, c(1, 2), mean)
best = which(AVG == min(AVG), arr.ind = TRUE)
error = min(AVG)
best_lam = lam[best[1]]
best_alpha = alpha[best[2]]
# stop cluster
stopCluster(cluster)
# return best lam and alpha values
return(list(lam = best_lam, alpha = best_alpha, min.error = error,
avg.error = AVG, cv.error = CV))
}
##-----------------------------------------------------------------------------------
#' @title Parallel Ridge CV (uses CVP_RIDGEc)
#' @description Parallel implementation of cross validation for RIDGEsigma.
#'
#' @param X nxp data matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.
#' @param lam positive tuning parameters for ridge penalty. If a vector of parameters is provided, they should be in increasing order. Defaults to grid of values \code{10^seq(-2, 2, 0.1)}.
#' @param K specify the number of folds for cross validation.
#' @param cores option to run CV in parallel. Defaults to \code{cores = 1}.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#'
#' @return returns list of returns which includes:
#' \item{lam}{optimal tuning parameter.}
#' \item{min.error}{minimum average cross validation error for optimal parameters.}
#' \item{avg.error}{average cross validation error across all folds.}
#' \item{cv.error}{cross validation errors (negative validation likelihood).}
#'
#' @keywords internal
# we define the CVP_RIDGE function
CVP_RIDGE = function(X = NULL, lam = 10^seq(-2, 2, 0.1), K = 5,
cores = 1, trace = c("none", "progress", "print")) {
# make cluster and register cluster
num_cores = detectCores()
if (cores > num_cores) {
cat("\nOnly detected", paste(num_cores, "cores...", sep = " "))
}
if (cores > K) {
cat("\nNumber of cores exceeds K... setting cores = K")
cores = K
}
cluster = makeCluster(cores)
registerDoParallel(cluster)
# use cluster for each fold in CV
n = dim(X)[1]
ind = sample(n)
lam = sort(lam)
k = NULL
CV = foreach(k = 1:K, .packages = "ADMMsigma", .combine = "cbind",
.inorder = FALSE) %dopar% {
leave.out = ind[(1 + floor((k - 1) * n/K)):floor(k * n/K)]
# training set
X.train = X[-leave.out, , drop = FALSE]
X_bar = apply(X.train, 2, mean)
X.train = scale(X.train, center = X_bar, scale = FALSE)
# validation set
X.valid = X[leave.out, , drop = FALSE]
X.valid = scale(X.valid, center = X_bar, scale = FALSE)
# sample covariances
S.train = crossprod(X.train)/(dim(X.train)[1])
S.valid = crossprod(X.valid)/(dim(X.valid)[1])
# run foreach loop on CVP_RIDGEc
CVP_RIDGEc(n = nrow(X.valid), S_train = S.train, S_valid = S.valid,
lam = lam, trace = trace)
}
# determine optimal tuning parameters
AVG = as.matrix(apply(CV, 1, mean))
best = which(AVG == min(AVG), arr.ind = TRUE)
error = min(AVG)
best_lam = lam[best[1]]
# stop cluster
stopCluster(cluster)
# return best lam and alpha values
return(list(lam = best_lam, min.error = error, avg.error = AVG,
cv.error = CV))
}
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/R/Parallel.R |
## Matt Galloway
#' @title Ridge penalized precision matrix estimation
#'
#' @description Ridge penalized matrix estimation via closed-form solution. If one is only interested in the ridge penalty, this function will be faster and provide a more precise estimate than using \code{ADMMsigma}. \cr
#' Consider the case where
#' \eqn{X_{1}, ..., X_{n}} are iid \eqn{N_{p}(\mu, \Sigma)}
#' and we are tasked with estimating the precision matrix,
#' denoted \eqn{\Omega \equiv \Sigma^{-1}}. This function solves the
#' following optimization problem:
#' \describe{
#' \item{Objective:}{
#' \eqn{\hat{\Omega}_{\lambda} = \arg\min_{\Omega \in S_{+}^{p}}
#' \left\{ Tr\left(S\Omega\right) - \log \det\left(\Omega \right) +
#' \frac{\lambda}{2}\left\| \Omega \right\|_{F}^{2} \right\}}}
#' }
#' where \eqn{\lambda > 0} and \eqn{\left\|\cdot \right\|_{F}^{2}} is the Frobenius
#' norm.
#'
#' @param X option to provide a nxp data matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.
#' @param S option to provide a pxp sample covariance matrix (denominator n). If argument is \code{NULL} and \code{X} is provided instead then \code{S} will be computed automatically.
#' @param lam positive tuning parameters for ridge penalty. If a vector of parameters is provided, they should be in increasing order. Defaults to grid of values \code{10^seq(-2, 2, 0.1)}.
#' @param path option to return the regularization path. This option should be used with extreme care if the dimension is large. If set to TRUE, cores will be set to 1 and errors and optimal tuning parameters will based on the full sample. Defaults to FALSE.
#' @param K specify the number of folds for cross validation.
#' @param cores option to run CV in parallel. Defaults to \code{cores = 1}.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#'
#' @return returns class object \code{RIDGEsigma} which includes:
#' \item{Lambda}{optimal tuning parameter.}
#' \item{Lambdas}{grid of lambda values for CV.}
#' \item{Omega}{estimated penalized precision matrix.}
#' \item{Sigma}{estimated covariance matrix from the penalized precision matrix (inverse of Omega).}
#' \item{Path}{array containing the solution path. Solutions are ordered dense to sparse.}
#' \item{Gradient}{gradient of optimization function (penalized gaussian likelihood).}
#' \item{MIN.error}{minimum average cross validation error (cv.crit) for optimal parameters.}
#' \item{AVG.error}{average cross validation error (cv.crit) across all folds.}
#' \item{CV.error}{cross validation errors (cv.crit).}
#'
#' @references
#' \itemize{
#' \item Rothman, Adam. 2017. 'STAT 8931 notes on an algorithm to compute the Lasso-penalized Gaussian likelihood precision matrix estimator.'
#' }
#'
#' @author Matt Galloway \email{gall0441@@umn.edu}
#'
#' @seealso \code{\link{plot.RIDGE}}, \code{\link{ADMMsigma}}
#'
#' @export
#'
#' @examples
#' # generate data from a sparse matrix
#' # first compute covariance matrix
#' S = matrix(0.7, nrow = 5, ncol = 5)
#' for (i in 1:5){
#' for (j in 1:5){
#' S[i, j] = S[i, j]^abs(i - j)
#' }
#' }
#'
#' # generate 100 x 5 matrix with rows drawn from iid N_p(0, S)
#' set.seed(123)
#' Z = matrix(rnorm(100*5), nrow = 100, ncol = 5)
#' out = eigen(S, symmetric = TRUE)
#' S.sqrt = out$vectors %*% diag(out$values^0.5)
#' S.sqrt = S.sqrt %*% t(out$vectors)
#' X = Z %*% S.sqrt
#'
#' # ridge penalty no ADMM
#' RIDGEsigma(X, lam = 10^seq(-5, 5, 0.5))
# we define the ADMM covariance estimation function
RIDGEsigma = function(X = NULL, S = NULL, lam = 10^seq(-2, 2, 0.1),
path = FALSE, K = 5, cores = 1, trace = c("none", "progress",
"print")) {
# checks
if (is.null(X) && is.null(S)) {
stop("Must provide entry for X or S!")
}
if (!all(lam > 0)) {
stop("lam must be positive!")
}
    if (any(c(K, cores)%%1 != 0)) {
stop("Entry must be an integer!")
}
if (cores < 1) {
stop("Number of cores must be positive!")
}
if (cores > 1 && path) {
cat("Parallelization not possible when producing solution path. Setting cores = 1...\n\n")
cores = 1
}
if (path) {
K = 1
}
# match values
call = match.call()
trace = match.arg(trace)
MIN.error = AVG.error = CV.error = NULL
Lambdas = lam = sort(lam)
n = ifelse(is.null(X), nrow(S), nrow(X))
# compute sample covariance matrix, if necessary
if (is.null(S)) {
S = (nrow(X) - 1)/nrow(X) * cov(X)
}
# perform cross validation, if necessary
if ((length(lam) > 1) & (!is.null(X) || path)) {
# run CV in parallel?
if (cores > 1) {
# execute ParallelCV
RIDGE = CVP_RIDGE(X = X, lam = lam, K = K, cores = cores,
trace = trace)
MIN.error = RIDGE$min.error
AVG.error = RIDGE$avg.error
CV.error = RIDGE$cv.error
lam = RIDGE$lam
} else {
# execute CV_RIDGEsigma
if (is.null(X)) {
X = matrix(0)
}
RIDGE = CV_RIDGEc(X = X, S = S, lam = lam, path = path,
K = K, trace = trace)
MIN.error = RIDGE$min.error
AVG.error = RIDGE$avg.error
CV.error = RIDGE$cv.error
lam = RIDGE$lam
Path = RIDGE$path
}
# print warning if lam on boundary
if ((lam == Lambdas[1]) || (lam == Lambdas[length(Lambdas)]) &&
(length(Lambdas) != 1)) {
cat("\nOptimal tuning parameter on boundary...!")
}
# compute final estimate at best tuning parameters
Omega = RIDGEc(S = S, lam = lam)
} else {
# execute RIDGEsigmac
if (length(lam) > 1) {
stop("Must specify X, set path = TRUE, or provide single value for lam.")
}
Omega = RIDGEc(S = S, lam = lam)
}
# compute gradient
grad = S - qr.solve(Omega) + lam * Omega
# compute penalized loglik
loglik = (-n/2) * (sum(Omega * S) - determinant(Omega, logarithm = TRUE)$modulus[1] +
lam * sum(Omega^2))
# return values
tuning = matrix(c(log10(lam), lam), ncol = 2)
colnames(tuning) = c("log10(lam)", "lam")
if (!path) {
Path = NULL
}
returns = list(Call = call, Lambda = tuning, Lambdas = Lambdas,
Omega = Omega, Sigma = qr.solve(Omega), Path = Path, Gradient = grad,
Loglik = loglik, MIN.error = MIN.error, AVG.error = AVG.error,
CV.error = CV.error)
class(returns) = "RIDGE"
return(returns)
}
##-----------------------------------------------------------------------------------
#' @title Print RIDGE object
#' @description Prints RIDGE object and suppresses output if needed.
#' @param x class object RIDGE.
#' @param ... additional arguments.
#' @keywords internal
#' @export
print.RIDGE = function(x, ...) {
# print call
cat("\nCall: ", paste(deparse(x$Call), sep = "\n", collapse = "\n"),
"\n", sep = "")
# print optimal tuning parameter
cat("\nTuning parameter:\n")
print.default(round(x$Lambda, 3), print.gap = 2L, quote = FALSE)
# print loglik
cat("\nLog-likelihood: ", paste(round(x$Loglik, 5), sep = "\n",
collapse = "\n"), "\n", sep = "")
# print Omega if dim <= 10
if (nrow(x$Omega) <= 10) {
cat("\nOmega:\n")
print.default(round(x$Omega, 5))
} else {
cat("\n(...output suppressed due to large dimension!)\n")
}
}
##-----------------------------------------------------------------------------------
#' @title Plot RIDGE object
#' @description Produces a heat plot for the cross validation errors, if available.
#' @param x class object RIDGE
#' @param type produce either 'heatmap' or 'line' graph
#' @param footnote option to print footnote of optimal values. Defaults to TRUE.
#' @param ... additional arguments.
#' @export
#' @examples
#' # generate data from a sparse matrix
#' # first compute covariance matrix
#' S = matrix(0.7, nrow = 5, ncol = 5)
#' for (i in 1:5){
#' for (j in 1:5){
#' S[i, j] = S[i, j]^abs(i - j)
#' }
#' }
#'
#' # generate 100 x 5 matrix with rows drawn from iid N_p(0, S)
#' set.seed(123)
#' Z = matrix(rnorm(100*5), nrow = 100, ncol = 5)
#' out = eigen(S, symmetric = TRUE)
#' S.sqrt = out$vectors %*% diag(out$values^0.5)
#' S.sqrt = S.sqrt %*% t(out$vectors)
#' X = Z %*% S.sqrt
#'
#' # produce CV heat map for RIDGEsigma
#' plot(RIDGEsigma(X, lam = 10^seq(-5, 5, 0.5)))
#'
#' # produce line graph for RIDGEsigma
#' plot(RIDGEsigma(X), type = 'line')
plot.RIDGE = function(x, type = c("heatmap", "line"), footnote = TRUE,
...) {
# check
type = match.arg(type)
Means = NULL
if (is.null(x$CV.error)) {
stop("No cross validation errors to plot!")
}
if (type == "line") {
# gather values to plot
cv = cbind(expand.grid(lam = x$Lambdas, alpha = 0), Errors = as.data.frame.table(x$CV.error)$Freq)
# produce line graph
graph = ggplot(summarise(group_by(cv, lam), Means = mean(Errors)),
aes(log10(lam), Means)) + geom_jitter(width = 0.2,
color = "navy blue") + theme_minimal() + geom_line(color = "red") +
labs(title = "Cross-Validation Errors", y = "Error")
} else {
# augment values for heat map (helps visually)
lam = x$Lambdas
cv = expand.grid(lam = lam, alpha = 0)
Errors = 1/(c(x$AVG.error) + abs(min(x$AVG.error)) + 1)
cv = cbind(cv, Errors)
# design color palette
bluetowhite <- c("#000E29", "white")
# produce ggplot heat map
graph = ggplot(cv, aes(alpha, log10(lam))) + geom_raster(aes(fill = Errors)) +
scale_fill_gradientn(colours = colorRampPalette(bluetowhite)(2),
guide = "none") + theme_minimal() + labs(title = "Heatmap of Cross-Validation Errors") +
theme(axis.title.x = element_blank(), axis.text.x = element_blank(),
axis.ticks.x = element_blank())
}
if (footnote) {
# produce with footnote
graph + labs(caption = paste("**Optimal: log10(lam) = ",
x$Lambda[1], sep = ""))
} else {
# produce without footnote
graph
}
}
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/R/RIDGEsigma.R |
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' @title K fold (c++)
#' @description creates vector of shuffled indices.
#' @param n number of elements.
#' @param K number of folds.
#' @keywords internal
#'
NULL
#' @title CV ADMM penalized precision matrix estimation (c++)
#' @description Cross validation function for ADMMsigma.
#'
#' @param X option to provide a nxp matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.
#' @param S option to provide a pxp sample covariance matrix (denominator n). If argument is \code{NULL} and \code{X} is provided instead then \code{S} will be computed automatically.
#' @param lam positive tuning parameters for elastic net penalty. If a vector of parameters is provided, they should be in increasing order.
#' @param alpha elastic net mixing parameter contained in [0, 1]. \code{0 = ridge, 1 = lasso}. If a vector of parameters is provided, they should be in increasing order.
#' @param diagonal option to penalize the diagonal elements of the estimated precision matrix (\eqn{\Omega}). Defaults to \code{FALSE}.
#' @param path option to return the regularization path. This option should be used with extreme care if the dimension is large. If set to TRUE, cores will be set to 1 and errors and optimal tuning parameters will based on the full sample. Defaults to FALSE.
#' @param rho initial step size for ADMM algorithm.
#' @param mu factor for primal and residual norms in the ADMM algorithm. This will be used to adjust the step size \code{rho} after each iteration.
#' @param tau_inc factor in which to increase step size \code{rho}
#' @param tau_dec factor in which to decrease step size \code{rho}
#' @param crit criterion for convergence (\code{ADMM} or \code{loglik}). If \code{crit = loglik} then iterations will stop when the relative change in log-likelihood is less than \code{tol.abs}. Default is \code{ADMM} and follows the procedure outlined in Boyd, et al.
#' @param tol_abs absolute convergence tolerance. Defaults to 1e-4.
#' @param tol_rel relative convergence tolerance. Defaults to 1e-4.
#' @param maxit maximum number of iterations. Defaults to 1e4.
#' @param adjmaxit adjusted maximum number of iterations. During cross validation this option allows the user to adjust the maximum number of iterations after the first \code{lam} tuning parameter has converged (for each \code{alpha}). This option is intended to be paired with \code{warm} starts and allows for "one-step" estimators. Defaults to 1e4.
#' @param K specify the number of folds for cross validation.
#' @param crit_cv cross validation criterion (\code{loglik}, \code{penloglik}, \code{AIC}, or \code{BIC}). Defaults to \code{loglik}.
#' @param start specify \code{warm} or \code{cold} start for cross validation. Default is \code{warm}.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#'
#' @return list of returns includes:
#' \item{lam}{optimal tuning parameter.}
#' \item{alpha}{optimal tuning parameter.}
#' \item{path}{array containing the solution path. Solutions will be ordered in ascending alpha values for each lambda.}
#' \item{min.error}{minimum average cross validation error (cv_crit) for optimal parameters.}
#' \item{avg.error}{average cross validation error (cv_crit) across all folds.}
#' \item{cv.error}{cross validation errors (cv_crit).}
#'
#' @keywords internal
#'
CV_ADMMc <- function(X, S, lam, alpha, diagonal = FALSE, path = FALSE, rho = 2, mu = 10, tau_inc = 2, tau_dec = 2, crit = "ADMM", tol_abs = 1e-4, tol_rel = 1e-4, maxit = 1e4L, adjmaxit = 1e4L, K = 5L, crit_cv = "loglik", start = "warm", trace = "progress") {
.Call('_ADMMsigma_CV_ADMMc', PACKAGE = 'ADMMsigma', X, S, lam, alpha, diagonal, path, rho, mu, tau_inc, tau_dec, crit, tol_abs, tol_rel, maxit, adjmaxit, K, crit_cv, start, trace)
}
#' @title CV ridge penalized precision matrix estimation (c++)
#' @description Cross validation function for RIDGEsigma.
#'
#' @param X option to provide a nxp matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.
#' @param S option to provide a pxp sample covariance matrix (denominator n). If argument is \code{NULL} and \code{X} is provided instead then \code{S} will be computed automatically.
#' @param lam positive tuning parameters for ridge penalty. If a vector of parameters is provided, they should be in increasing order. Defaults to grid of values \code{10^seq(-5, 5, 0.5)}.
#' @param path option to return the regularization path. This option should be used with extreme care if the dimension is large. If set to TRUE, cores will be set to 1 and errors and optimal tuning parameters will based on the full sample. Defaults to FALSE.
#' @param K specify the number of folds for cross validation.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#'
#' @return list of returns includes:
#' \item{lam}{optimal tuning parameter.}
#' \item{path}{array containing the solution path. Solutions are ordered dense to sparse.}
#' \item{min.error}{minimum average cross validation error for optimal parameters.}
#' \item{avg.error}{average cross validation error across all folds.}
#' \item{cv.error}{cross validation errors (negative validation likelihood).}
#'
#' @keywords internal
#'
CV_RIDGEc <- function(X, S, lam, path = FALSE, K = 3L, trace = "none") {
.Call('_ADMMsigma_CV_RIDGEc', PACKAGE = 'ADMMsigma', X, S, lam, path, K, trace)
}
#' @title CV (no folds) ADMM penalized precision matrix estimation (c++)
#' @description Cross validation (no folds) function for ADMMsigma. This function is to be used with CVP_ADMM.
#'
#' @param n sample size for X_valid (used to calculate crit_cv)
#' @param S_train pxp sample covariance matrix for training data (denominator n).
#' @param S_valid pxp sample covariance matrix for validation data (denominator n).
#' @param lam positive tuning parameters for elastic net penalty. If a vector of parameters is provided, they should be in increasing order.
#' @param alpha elastic net mixing parameter contained in [0, 1]. \code{0 = ridge, 1 = lasso}. If a vector of parameters is provided, they should be in increasing order.
#' @param diagonal option to penalize the diagonal elements of the estimated precision matrix (\eqn{\Omega}). Defaults to \code{FALSE}.
#' @param rho initial step size for ADMM algorithm.
#' @param mu factor for primal and residual norms in the ADMM algorithm. This will be used to adjust the step size \code{rho} after each iteration.
#' @param tau_inc factor in which to increase step size \code{rho}
#' @param tau_dec factor in which to decrease step size \code{rho}
#' @param crit criterion for convergence (\code{ADMM} or \code{loglik}). If \code{crit = loglik} then iterations will stop when the relative change in log-likelihood is less than \code{tol.abs}. Default is \code{ADMM} and follows the procedure outlined in Boyd, et al.
#' @param tol_abs absolute convergence tolerance. Defaults to 1e-4.
#' @param tol_rel relative convergence tolerance. Defaults to 1e-4.
#' @param maxit maximum number of iterations. Defaults to 1e4.
#' @param adjmaxit adjusted maximum number of iterations. During cross validation this option allows the user to adjust the maximum number of iterations after the first \code{lam} tuning parameter has converged (for each \code{alpha}). This option is intended to be paired with \code{warm} starts and allows for "one-step" estimators. Defaults to 1e4.
#' @param crit_cv cross validation criterion (\code{loglik}, \code{penloglik}, \code{AIC}, or \code{BIC}). Defaults to \code{loglik}.
#' @param start specify \code{warm} or \code{cold} start for cross validation. Default is \code{warm}.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#'
#' @return cross validation errors (cv_crit)
#'
#' @keywords internal
#'
CVP_ADMMc <- function(n, S_train, S_valid, lam, alpha, diagonal = FALSE, rho = 2, mu = 10, tau_inc = 2, tau_dec = 2, crit = "ADMM", tol_abs = 1e-4, tol_rel = 1e-4, maxit = 1e4L, adjmaxit = 1e4L, crit_cv = "loglik", start = "warm", trace = "progress") {
.Call('_ADMMsigma_CVP_ADMMc', PACKAGE = 'ADMMsigma', n, S_train, S_valid, lam, alpha, diagonal, rho, mu, tau_inc, tau_dec, crit, tol_abs, tol_rel, maxit, adjmaxit, crit_cv, start, trace)
}
#' @title CV (no folds) RIDGE penalized precision matrix estimation (c++)
#' @description Cross validation (no folds) function for RIDGEsigma. This function is to be used with CVP_RIDGE.
#'
#' @param n sample size for X_valid (used to calculate CV_error)
#' @param S_train pxp sample covariance matrix for training data (denominator n).
#' @param S_valid pxp sample covariance matrix for validation data (denominator n).
#' @param lam positive tuning parameters for ridge penalty. If a vector of parameters is provided, they should be in increasing order.
#' @param trace option to display progress of CV. Choose one of \code{progress} to print a progress bar, \code{print} to print completed tuning parameters, or \code{none}.
#'
#' @return cross validation errors (negative validation likelihood)
#'
#' @keywords internal
#'
CVP_RIDGEc <- function(n, S_train, S_valid, lam, trace = "none") {
.Call('_ADMMsigma_CVP_RIDGEc', PACKAGE = 'ADMMsigma', n, S_train, S_valid, lam, trace)
}
#' @title Ridge-penalized precision matrix estimation (c++)
#' @description Ridge penalized matrix estimation via closed-form solution. Augmented from Adam Rothman's STAT 8931 code.
#'
#' @param S sample covariance matrix (denominator n).
#' @param lam tuning parameter for ridge penalty.
#'
#' @return estimated Omega
#'
#' @export
#'
#' @keywords internal
#'
RIDGEc <- function(S, lam) {
.Call('_ADMMsigma_RIDGEc', PACKAGE = 'ADMMsigma', S, lam)
}
#' @title Penalized precision matrix estimation via ADMM (c++)
#'
#' @description Penalized precision matrix estimation using the ADMM algorithm
#'
#' @details For details on the implementation of 'ADMMsigma', see the vignette
#' \url{https://mgallow.github.io/ADMMsigma/}.
#'
#' @param S pxp sample covariance matrix (denominator n).
#' @param initOmega initialization matrix for Omega
#' @param initZ initialization matrix for Z
#' @param initY initialization matrix for Y
#' @param lam positive tuning parameter for elastic net penalty.
#' @param alpha elastic net mixing parameter contained in [0, 1]. \code{0 = ridge, 1 = lasso}. Defaults to alpha = 1.
#' @param diagonal option to penalize the diagonal elements of the estimated precision matrix (\eqn{\Omega}). Defaults to \code{FALSE}.
#' @param rho initial step size for ADMM algorithm.
#' @param mu factor for primal and residual norms in the ADMM algorithm. This will be used to adjust the step size \code{rho} after each iteration.
#' @param tau_inc factor in which to increase step size \code{rho}.
#' @param tau_dec factor in which to decrease step size \code{rho}.
#' @param crit criterion for convergence (\code{ADMM} or \code{loglik}). If \code{crit = loglik} then iterations will stop when the relative change in log-likelihood is less than \code{tol.abs}. Default is \code{ADMM} and follows the procedure outlined in Boyd, et al.
#' @param tol_abs absolute convergence tolerance. Defaults to 1e-4.
#' @param tol_rel relative convergence tolerance. Defaults to 1e-4.
#' @param maxit maximum number of iterations. Defaults to 1e4.
#'
#' @return returns list of returns which includes:
#' \item{Iterations}{number of iterations.}
#' \item{lam}{optimal tuning parameters.}
#' \item{alpha}{optimal tuning parameter.}
#' \item{Omega}{estimated penalized precision matrix.}
#' \item{Z2}{estimated Z matrix.}
#' \item{Y}{estimated Y matrix.}
#' \item{rho}{estimated rho.}
#'
#' @references
#' \itemize{
#' \item Boyd, Stephen, Neal Parikh, Eric Chu, Borja Peleato, Jonathan Eckstein, and others. 2011. 'Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers.' \emph{Foundations and Trends in Machine Learning} 3 (1). Now Publishers, Inc.: 1-122. \url{https://web.stanford.edu/~boyd/papers/pdf/admm_distr_stats.pdf}
#' \item Hu, Yue, Chi, Eric C, amd Allen, Genevera I. 2016. 'ADMM Algorithmic Regularization Paths for Sparse Statistical Machine Learning.' \emph{Splitting Methods in Communication, Imaging, Science, and Engineering}. Springer: 433-459.
#' \item Zou, Hui and Hastie, Trevor. 2005. "Regularization and Variable Selection via the Elastic Net." \emph{Journal of the Royal Statistical Society: Series B (Statistical Methodology)} 67 (2). Wiley Online Library: 301-320.
#' \item Rothman, Adam. 2017. 'STAT 8931 notes on an algorithm to compute the Lasso-penalized Gaussian likelihood precision matrix estimator.'
#' }
#'
#' @author Matt Galloway \email{gall0441@@umn.edu}
#'
#' @export
#'
#' @keywords internal
#'
ADMMc <- function(S, initOmega, initZ, initY, lam, alpha = 1, diagonal = FALSE, rho = 2, mu = 10, tau_inc = 2, tau_dec = 2, crit = "ADMM", tol_abs = 1e-4, tol_rel = 1e-4, maxit = 1e4L) {
.Call('_ADMMsigma_ADMMc', PACKAGE = 'ADMMsigma', S, initOmega, initZ, initY, lam, alpha, diagonal, rho, mu, tau_inc, tau_dec, crit, tol_abs, tol_rel, maxit)
}
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/R/RcppExports.R |
#' @useDynLib ADMMsigma
#' @importFrom Rcpp sourceCpp
#' @importFrom grDevices colorRampPalette
#' @importFrom stats cov
#' @importFrom parallel detectCores
#' @importFrom parallel makeCluster
#' @importFrom parallel stopCluster
#' @importFrom doParallel registerDoParallel
#' @importFrom dplyr summarise
#' @importFrom dplyr group_by
#' @import RcppProgress
#' @import foreach
#' @import ggplot2
NULL
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/R/misc.R |
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/inst/doc/Details.R |
---
title: "Precision Matrix Estimation via ADMM"
author: "Matt Galloway"
#date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
bibliography: lib.bib
vignette: >
%\VignetteIndexEntry{Precision Matrix Estimation via ADMM}
%\VignetteEngine{knitr::knitr}
%\usepackage[UTF-8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
## Introduction
Consider the case where we observe $n$ independent, identically distributed copies of the random variable ($X_{i}$) where $X_{i} \in \mathbb{R}^{p}$ is normally distributed with some mean, $\mu$, and some covariance matrix, $\Sigma$. That is, $X_{i} \sim N_{p}\left( \mu, \Sigma \right)$.
Because we assume independence, we know that the probability of observing these specific observations $X_{1}, ..., X_{n}$ is equal to
\begin{align*}
f(X_{1}, ..., X_{n}; \mu, \Sigma) &= \prod_{i = 1}^{n}(2\pi)^{-p/2}\left| \Sigma \right|^{-1/2}\exp\left[ -\frac{1}{2}\left( X_{i} - \mu \right)^{T}\Sigma^{-1}\left( X_{i} - \mu \right) \right] \\
&= (2\pi)^{-np/2}\left| \Sigma \right|^{-n/2}\mbox{etr}\left[ -\frac{1}{2}\sum_{i = 1}^{n}\left( X_{i} - \mu \right)\left( X_{i} - \mu \right)^{T}\Sigma^{-1} \right]
\end{align*}
where $\mbox{etr}\left( \cdot \right)$ denotes the exponential trace operator. It follows that the log-likelihood for $\mu$ and $\Sigma$ is equal to the following:
\[ l(\mu, \Sigma | X) = const. - \frac{n}{2}\log\left| \Sigma \right| - tr\left[ \frac{1}{2}\sum_{i = 1}^{n}\left(X_{i} - \mu \right)\left(X_{i} - \mu \right)^{T}\Sigma^{-1} \right] \]
If we are interested in estimating $\mu$, it is relatively straightforward to show that the maximum likelihood estimator (MLE) for $\mu$ is $\hat{\mu}_{MLE} = \sum_{i = 1}^{n}X_{i}/n$ which we typically denote as $\bar{X}$. However, in addition to $\mu$, many applications require the estimation of $\Sigma$ as well. We can also find a maximum likelihood estimator:
\begin{align*}
&\hat{\Sigma}_{MLE} = \arg\max_{\Sigma \in \mathbb{S}_{+}^{p}}\left\{ const. - \frac{n}{2}\log\left| \Sigma \right| - tr\left[ \frac{1}{2}\sum_{i = 1}^{n}\left(X_{i} - \mu \right)\left(X_{i} - \mu \right)^{T}\Sigma^{-1} \right] \right\} \\
&\nabla_{\Sigma}l(\mu, \Sigma | X) = -\frac{n}{2}\Sigma^{-1} + \frac{1}{2}\Sigma^{-1}\left[\sum_{i = 1}^{n}\left(X_{i} - \mu \right)\left(X_{i} - \mu \right)^{T}\right]\Sigma^{-1} \\
\Rightarrow &\hat{\Sigma}_{MLE} = \left[ \frac{1}{n}\sum_{i = 1}^{n}\left(X_{i} - \bar{X} \right)\left(X_{i} - \bar{X} \right)^{T} \right]
\end{align*}
By setting the gradient equal to zero and plugging in the MLE for $\mu$, we find that the MLE for $\Sigma$ is our usual sample estimator often denoted as $S$. It turns out that we could have just as easily computed the maximum likelihood estimator for the precision matrix $\Omega \equiv \Sigma^{-1}$ and taken its inverse:
\[ \hat{\Omega}_{MLE} = \arg\min_{\Omega \in S_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| \right\} \]
so that $\hat{\Omega}_{MLE} = S^{-1}$. Beyond the formatting convenience, computing estimates for $\Omega$ as opposed to $\Sigma$ often poses fewer computational challenges -- and accordingly, the literature has placed more emphasis on efficiently solving for $\Omega$ instead of $\Sigma$.
Notice that here we are *minimizing* the negative log-likelihood as opposed to maximizing the log-likelihood. Both procedures will result in the same estimate.
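
As a quick illustration of these estimators in R (a hedged sketch -- here `X` is assumed to be any n x p data matrix already in memory; it is not an object created in this vignette):

```{r, eval = FALSE}
# MLE of Sigma: sample covariance matrix with denominator n
n = nrow(X)
S = crossprod(scale(X, center = TRUE, scale = FALSE))/n

# MLE of Omega: inverse of S (requires n > p so that S is nonsingular)
Omega.mle = qr.solve(S)
```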
As in regression settings, we can construct a *penalized* log-likelihood estimator by adding a penalty term, $P\left(\Omega\right)$, to the log-likelihood so that
\[ \hat{\Omega} = \arg\min_{\Omega \in S_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega \right| + P\left( \Omega \right) \right\} \]
$P\left( \Omega \right)$ is often of the form $P\left(\Omega \right) = \lambda\|\Omega \|_{F}^{2}/2$ or $P\left(\Omega \right) = \lambda\|\Omega\|_{1}$ where $\lambda > 0$, $\left\|\cdot \right\|_{F}^{2}$ is the Frobenius norm and we define $\left\|A \right\|_{1} = \sum_{i, j} \left| A_{ij} \right|$. These penalties are the ridge and lasso, respectively. In the `ADMMsigma` package, we instead take
\[ P\left( \Omega \right) = \lambda\left[\frac{1 - \alpha}{2}\left\| \Omega \right\|_{F}^{2} + \alpha\left\| \Omega \right\|_{1} \right] \]
so that solving the full penalized log-likelihood for $\Omega$ results in solving
\[ \hat{\Omega} = \arg\min_{\Omega \in S_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega \right| + \lambda\left[\frac{1 - \alpha}{2}\left\| \Omega \right\|_{F}^{2} + \alpha\left\| \Omega \right\|_{1} \right] \right\} \]
where $0 \leq \alpha \leq 1$. This penalty, known as the *elastic-net* penalty, was explored by Hui Zou and Trevor Hastie [@zou2005regularization] and is identical to the penalty used in the popular penalized regression package `glmnet`. Clearly, when $\alpha = 0$ the elastic-net reduces to a ridge-type penalty and when $\alpha = 1$ it reduces to a lasso-type penalty. Having this flexibility and generalization allows us to perform cross validation across proposed $\alpha$ values in addition to proposed $\lambda$ values.
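
In `ADMMsigma`, this penalty corresponds to cross validating over a grid of $(\lambda, \alpha)$ pairs. A small illustrative call is shown below (assuming `X` is an n x p data matrix; the grids used here are coarser than the package defaults):

```{r, eval = FALSE}
library(ADMMsigma)

# cross validate over a small grid of lambda and alpha values
ADMMsigma(X, lam = 10^seq(-2, 2, 0.5), alpha = seq(0, 1, 0.5))
```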
We will explore how to solve for $\hat{\Omega}$ in the next section.
<br>\vspace{1cm}
## ADMM Algorithm
Many efficient methods have been proposed to solve for $\hat{\Omega}$ when $\alpha = 1$. The most popular method is the graphical lasso algorithm (glasso) introduced by @friedman2008sparse. However, no methods (to the best of my knowledge) have estimated $\Omega$ when $\alpha \in (0, 1)$. We will use the alternating direction method of multipliers (ADMM) algorithm to do so.
As the authors state in @boyd2011distributed, the "ADMM is an algorithm that is intended to blend the decomposability of dual ascent with the superior convergence properties of the method of multipliers." For our purposes, we will only focus on the ADMM algorithm but it is encouraged to read the original text from Boyd and others for a complete introduction to the other two methods.
In general, suppose we want to solve an optimization problem of the following form:
\begin{align*}
\mbox{minimize } f(x) + g(z) \\
\mbox{subject to } Ax + Bz = c
\end{align*}
where $x \in \mathbb{R}^{n}, z \in \mathbb{R}^{m}, A \in \mathbb{R}^{p \times n}, B \in \mathbb{R}^{p \times m}$, $c \in \mathbb{R}^{p}$, and $f$ and $g$ are assumed to be convex functions (following @boyd2011distributed, the estimation procedure will be introduced in vector form though we could just as easily take $x$ and $z$ to be matrices). In addition to penalized precision matrix estimation, optimization problems like this arise naturally in several statistics and machine learning applications -- particularly regularization methods. For instance, we could take $f$ to be the squared error loss, $g$ to be the $l_{2}$-norm, $c$ to be equal to zero and $A$ and $B$ to be identity matrices to solve the ridge regression optimization problem. In all cases, our goal is to find $x^{*} \in \mathbb{R}^{n}$ and $z^{*} \in \mathbb{R}^{m}$ that achieves the infimum $p^{*}$:
\[ p^{*} = inf\left\{ f(x) + g(z) | Ax + Bz = c \right\} \]
To do so, the ADMM algorithm uses the *augmented lagrangian*
\[ L_{\rho}(x, z, y) = f(x) + g(z) + y^{T}(Ax + Bz - c) + \frac{\rho}{2}\left\| Ax + Bz - c \right\|_{2}^{2} \]
where $y \in \mathbb{R}^{p}$ is the lagrange multiplier and $\rho > 0$ is a scalar. Clearly any minimizer, $p^{*}$, under the augmented lagrangian is equivalent to that of the lagrangian since any feasible point $(x, z)$ satisfies the constraint $\rho\left\| Ax + Bz - c \right\|_{2}^{2}/2 = 0$.
The algorithm consists of the following repeated iterations:
\begin{align}
x^{k + 1} &= \arg\min_{x \in \mathbb{R}^{n}}L_{\rho}(x, z^{k}, y^{k}) \\
z^{k + 1} &= \arg\min_{z \in \mathbb{R}^{m}}L_{\rho}(x^{k + 1}, z, y^{k}) \\
y^{k + 1} &= y^{k} + \rho(Ax^{k + 1} + Bz^{k + 1} - c)
\end{align}
In the context of precision matrix estimation, we can let $f$ be equal to the non-penalized likelihood, $g$ be equal to $P\left( \Omega \right)$, and use the constraint $\Omega$ equal to some $Z$ so that the lagrangian is
\[ L_{\rho}(\Omega, Z, \Lambda) = f\left(\Omega\right) + g\left(Z\right) + tr\left[\Lambda\left(\Omega - Z\right)\right] \]
and the augmented lagrangian is
\[ L_{\rho}(\Omega, Z, \Lambda) = f\left(\Omega\right) + g\left(Z\right) + tr\left[\Lambda\left(\Omega - Z\right)\right] + \frac{\rho}{2}\left\|\Omega - Z\right\|_{F}^{2} \]
The ADMM algorithm is now the following:
\begin{align}
\Omega^{k + 1} &= \arg\min_{\Omega \in \mathbb{S}_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| + tr\left[\Lambda^{k}\left(\Omega - Z^{k}\right)\right] + \frac{\rho}{2}\left\| \Omega - Z^{k} \right\|_{F}^{2} \right\} \\
Z^{k + 1} &= \arg\min_{Z \in \mathbb{S}^{p}}\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] + tr\left[\Lambda^{k}\left(\Omega^{k + 1} - Z\right)\right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z \right\|_{F}^{2} \right\} \\
\Lambda^{k + 1} &= \Lambda^{k} + \rho\left( \Omega^{k + 1} - Z^{k + 1} \right)
\end{align}
<br>\vspace{1cm}
### Algorithm
Set $k = 0$ and initialize $Z^{0}, \Lambda^{0}$, and $\rho$. Repeat steps 1-3 until convergence:
1. Decompose $S + \Lambda^{k} - \rho Z^{k} = VQV^{T}$ via spectral decomposition.
\[ \Omega^{k + 1} = \frac{1}{2\rho}V\left[ -Q + \left( Q^{2} + 4\rho I_{p} \right)^{1/2} \right]V^{T} \]
2. Elementwise soft-thresholding for all $i = 1,..., p$ and $j = 1,..., p$.
\begin{align*}
Z_{ij}^{k + 1} &= \frac{1}{\lambda(1 - \alpha) + \rho}\mbox{sign}\left(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k}\right)\left( \left| \rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} \right| - \lambda\alpha \right)_{+} \\
&= \frac{1}{\lambda(1 - \alpha) + \rho}\mbox{soft}\left(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k}, \lambda\alpha\right)
\end{align*}
3. Update $\Lambda^{k + 1}$.
\[ \Lambda^{k + 1} = \Lambda^{k} + \rho\left( \Omega^{k + 1} - Z^{k + 1} \right) \]
where $\mbox{soft}(a, b) = \mbox{sign}(a)(\left| a \right| - b)_{+}$.
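
For concreteness, the three steps translate directly into the following (non-optimized) R sketch of a single iteration. In the package itself these updates are carried out in C++ by `ADMMc()`; the helper name `admm.iterate` below is purely illustrative:

```{r, eval = FALSE}
# one ADMM iteration given S, the current Z and Lambda, step size rho,
# and tuning parameters lam (lambda) and alpha
admm.iterate = function(S, Z, Lambda, rho, lam, alpha) {

  # step 1: spectral decomposition of S + Lambda - rho*Z and Omega update
  decomp = eigen(S + Lambda - rho*Z, symmetric = TRUE)
  Q = decomp$values
  V = decomp$vectors
  Omega = V %*% diag((-Q + sqrt(Q^2 + 4*rho))/(2*rho)) %*% t(V)

  # step 2: elementwise soft-thresholding for the Z update
  A = rho*Omega + Lambda
  Z = sign(A)*pmax(abs(A) - lam*alpha, 0)/(lam*(1 - alpha) + rho)

  # step 3: dual update
  Lambda = Lambda + rho*(Omega - Z)

  list(Omega = Omega, Z = Z, Lambda = Lambda)
}
```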
<br>\vspace{1cm}
### Proof of (1):
\[ \Omega^{k + 1} = \arg\min_{\Omega \in \mathbb{S}_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| + tr\left[\Lambda^{k}\left(\Omega - Z^{k}\right)\right] + \frac{\rho}{2}\left\| \Omega - Z^{k} \right\|_{F}^{2} \right\} \]
\vspace{1cm}
\begin{align*}
\nabla_{\Omega}&\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| + tr\left[\Lambda^{k}\left(\Omega - Z^{k}\right)\right] + \frac{\rho}{2}\left\| \Omega - Z^{k} \right\|_{F}^{2} \right\} \\
&= S - \Omega^{-1} + \Lambda^{k} + \rho\left( \Omega - Z^{k} \right)
\end{align*}
Note that because all of the variables are symmetric, we can ignore the symmetric constraint when deriving the gradient. First set the gradient equal to zero and decompose $\Omega^{k + 1} = VDV^{T}$ where $D$ is a diagonal matrix with diagonal elements equal to the eigen values of $\Omega^{k + 1}$ and $V$ is the matrix with corresponding eigen vectors as columns:
\[ S + \Lambda^{k} - \rho Z^{k} = (\Omega^{k + 1})^{-1} - \rho \Omega^{k + 1} = VD^{-1}V^{T} - \rho VDV^{T} = V\left(D^{-1} - \rho D\right)V^{T} \]
This equivalence implies that
\[ \phi_{j}\left( S + \Lambda^{k} - \rho Z^{k} \right) = \frac{1}{\phi_{j}(\Omega^{k + 1})} - \rho\phi_{j}(\Omega^{k + 1}) \]
where $\phi_{j}(\cdot)$ is the $j$th eigen value.
\begin{align*}
&\Rightarrow \rho\phi_{j}^{2}(\Omega^{k + 1}) + \phi_{j}\left( S + \Lambda^{k} - \rho Z^{k} \right)\phi_{j}(\Omega^{k + 1}) - 1 = 0 \\
&\Rightarrow \phi_{j}(\Omega^{k + 1}) = \frac{-\phi_{j}(S + \Lambda^{k} - \rho Z^{k}) \pm \sqrt{\phi_{j}^{2}(S + \Lambda^{k} - \rho Z^{k}) + 4\rho}}{2\rho}
\end{align*}
In summary, if we decompose $S + \Lambda^{k} - \rho Z^{k} = VQV^{T}$ then
\[ \Omega^{k + 1} = \frac{1}{2\rho}V\left[ -Q + (Q^{2} + 4\rho I_{p})^{1/2}\right] V^{T} \]
<br>\vspace{1cm}
### Proof of (2)
\[ Z^{k + 1} = \arg\min_{Z \in \mathbb{S}^{p}}\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] + tr\left[\Lambda^{k}\left(\Omega^{k + 1} - Z\right)\right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z \right\|_{F}^{2} \right\} \]
\vspace{1cm}
\begin{align*}
\partial&\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] + tr\left[\Lambda^{k}\left(\Omega^{k + 1} - Z\right)\right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z \right\|_{F}^{2} \right\} \\
&= \partial\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] \right\} + \nabla_{\Omega}\left\{ tr\left[\Lambda^{k}\left(\Omega^{k + 1} - Z\right)\right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z \right\|_{F}^{2} \right\} \\
&= \lambda(1 - \alpha)Z + \mbox{sign}(Z)\lambda\alpha - \Lambda^{k} - \rho\left( \Omega^{k + 1} - Z \right)
\end{align*}
where sign is the elementwise sign operator. By setting the gradient/sub-differential equal to zero, we arrive at the following equivalence:
\[ Z_{ij}^{k + 1} = \frac{1}{\lambda(1 - \alpha) + \rho}\left( \rho \Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} - \mbox{sign}(Z_{ij}^{k + 1})\lambda\alpha \right) \]
for all $i = 1,..., p$ and $j = 1,..., p$. We observe two scenarios:
- If $Z_{ij}^{k + 1} > 0$ then
\[ \rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} > \lambda\alpha \]
- If $Z_{ij}^{k + 1} < 0$ then
\[ \rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} < -\lambda\alpha \]
This implies that $\mbox{sign}(Z_{ij}) = \mbox{sign}(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k})$. Putting all the pieces together, we arrive at
\begin{align*}
Z_{ij}^{k + 1} &= \frac{1}{\lambda(1 - \alpha) + \rho}\mbox{sign}\left(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k}\right)\left( \left| \rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} \right| - \lambda\alpha \right)_{+} \\
&= \frac{1}{\lambda(1 - \alpha) + \rho}\mbox{soft}\left(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k}, \lambda\alpha\right)
\end{align*}
where soft is the soft-thresholding function.
<br>\vspace{1cm}
## Scaled-Form ADMM
There is another popular, alternate form of the ADMM algorithm used by scaling the dual variable ($\Lambda^{k}$). Let us define $R^{k} = \Omega - Z^{k}$ and $U^{k} = \Lambda^{k}/\rho$.
\begin{align*}
tr\left[ \Lambda^{k}\left( \Omega - Z^{k} \right) \right] + \frac{\rho}{2}\left\| \Omega - Z^{k} \right\|_{F}^{2} &= tr\left[ \Lambda^{k}R^{k} \right] + \frac{\rho}{2}\left\| R^{k} \right\|_{F}^{2} \\
&= \frac{\rho}{2}\left\| R^{k} + \Lambda^{k}/\rho \right\|_{F}^{2} - \frac{\rho}{2}\left\| \Lambda^{k}/\rho \right\|_{F}^{2} \\
&= \frac{\rho}{2}\left\| R^{k} + U^{k} \right\|_{F}^{2} - \frac{\rho}{2}\left\| U^{k} \right\|_{F}^{2}
\end{align*}
Therefore, a scaled-form can now be written as
\begin{align}
\Omega^{k + 1} &= \arg\min_{\Omega \in \mathbb{S}_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| + \frac{\rho}{2}\left\| \Omega - Z^{k} + U^{k} \right\|_{F}^{2} \right\} \\
Z^{k + 1} &= \arg\min_{Z \in \mathbb{S}^{p}}\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z + U^{k} \right\|_{F}^{2} \right\} \\
U^{k + 1} &= U^{k} + \Omega^{k + 1} - Z^{k + 1}
\end{align}
And more generally (in vector form),
\begin{align}
x^{k + 1} &= \arg\min_{x}\left\{ f(x) + \frac{\rho}{2}\left\| Ax + Bz^{k} - c + u^{k} \right\|_{2}^{2} \right\} \\
z^{k + 1} &= \arg\min_{z}\left\{ g(z) + \frac{\rho}{2}\left\| Ax^{k + 1} + Bz - c + u^{k} \right\|_{2}^{2} \right\} \\
u^{k + 1} &= u^{k} + Ax^{k + 1} + Bz^{k + 1} - c
\end{align}
Note that there are limitations to using this method. Because the dual variable is scaled by $\rho$ (the step size), this form limits one to using a constant step size without making further adjustments to $U^{k}$. It has been shown in the literature that a dynamic step size (like the one used in `ADMMsigma`) can significantly reduce the number of iterations required for convergence.
<br>\vspace{1cm}
## Stopping Criterion
In discussing the optimality conditions and stopping criterion, we will follow the steps outlined in @boyd2011distributed and cater them to precision matrix estimation.
Below we have three optimality conditions:
1. Primal:
\[ \Omega^{k + 1} - Z^{k + 1} = 0 \]
2. Dual:
\[ 0 \in \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k + 1} \]
\[ 0 \in \partial g\left(Z^{k + 1}\right) - \Lambda^{k + 1} \]
The first dual optimality condition is a result of taking the sub-differential of the lagrangian (non-augmented) with respect to $\Omega^{k + 1}$ (note that we must honor the symmetric constraint of $\Omega^{k + 1}$) and the second is a result of taking the sub-differential of the lagrangian with respect to $Z^{k + 1}$ (no symmetric constraint).
We will define the left-hand side of the first condition as the primal residual $r^{k + 1} = \Omega^{k + 1} - Z^{k + 1}$. At convergence, optimality conditions require that $r^{k + 1} \approx 0$. The second residual we will define is the dual residual $s^{k + 1} = \rho\left( Z^{k + 1} - Z^{k} \right)$. This residual is derived from the following:
Because $\Omega^{k + 1}$ is the argument that minimizes $L_{p}\left( \Omega, Z^{k}, \Lambda^{k} \right)$,
\begin{align*}
0 &\in \partial \left\{ f\left(\Omega^{k + 1}\right) + tr\left[ \Lambda^{k}\left( \Omega^{k + 1} - Z^{k} \right) \right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z^{k} \right\|_{F}^{2} \right\} \\
&= \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k} + \rho\left(\Omega^{k + 1} - Z^{k}\right) \\
&= \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k} + \rho\left(\Omega^{k + 1} + Z^{k + 1} - Z^{k + 1} - Z^{k}\right) \\
&= \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k} + \rho\left(\Omega^{k + 1} - Z^{k + 1}\right) + \rho\left(Z^{k + 1} - Z^{k}\right) \\
&= \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k + 1} + \rho\left(Z^{k + 1} - Z^{k}\right) \\
\Rightarrow 0 &\in \rho\left( Z^{k + 1} - Z^{k} \right)
\end{align*}
Like the primal residual, at convergence optimality conditions require that $s^{k + 1} \approx 0$. Note that the second dual optimality condition is always satisfied:
\begin{align*}
0 &\in \partial \left\{ g\left(Z^{k + 1}\right) + tr\left[ \Lambda^{k}\left( \Omega^{k + 1} - Z^{k + 1} \right) \right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z^{k + 1} \right\|_{F}^{2} \right\} \\
&= \partial g\left(Z^{k + 1}\right) - \Lambda^{k} - \rho\left(\Omega^{k + 1} - Z^{k + 1}\right) \\
&= \partial g\left(Z^{k + 1}\right) - \Lambda^{k + 1} \\
\end{align*}
One possible stopping criterion is to set $\epsilon^{rel} = \epsilon^{abs} = 10^{-3}$ and stop the algorithm when $\left\| r^{k + 1} \right\|_{F} \leq \epsilon^{pri}$ and $\left\| s^{k + 1} \right\|_{F} \leq \epsilon^{dual}$ where
\begin{align*}
\epsilon^{pri} &= p\epsilon^{abs} + \epsilon^{rel}\max\left\{ \left\| \Omega^{k + 1} \right\|_{F}, \left\| Z^{k + 1} \right\|_{F} \right\} \\
\epsilon^{dual} &= p\epsilon^{abs} + \epsilon^{rel}\left\| \Lambda^{k + 1} \right\|_{F}
\end{align*}
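
In R notation, the check performed at the end of each iteration can be sketched as follows (again a hedged sketch; `Z.old` denotes $Z^{k}$, `Lambda` denotes $\Lambda^{k + 1}$, and `p` is the dimension):

```{r, eval = FALSE}
eps.abs = 1e-3
eps.rel = 1e-3

# primal and dual residuals
r = Omega - Z
s = rho*(Z - Z.old)

# tolerances
eps.pri = p*eps.abs + eps.rel*max(norm(Omega, "F"), norm(Z, "F"))
eps.dual = p*eps.abs + eps.rel*norm(Lambda, "F")

# stop when both residuals fall below their tolerances
converged = (norm(r, "F") <= eps.pri) && (norm(s, "F") <= eps.dual)
```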
<br><br>\newpage
## References
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/inst/doc/Details.Rmd |
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ---- message = FALSE, echo = TRUE, eval = FALSE-------------------------
#
# # oracle precision matrix
# Omega = matrix(0.9, ncol = 100, nrow = 100)
#  diag(Omega) = 1
#
# # generate covariance matrix
# S = qr.solve(Omega)
#
# # generate data
# Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
# out = eigen(S, symmetric = TRUE)
# S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
# X = Z %*% S.sqrt
#
## ---- message = FALSE, echo = TRUE, eval = FALSE-------------------------
#
# # oracle precision matrix
# Omega = matrix(0.9, ncol = 10, nrow = 10)
#  diag(Omega) = 1
#
# # generate covariance matrix
# S = qr.solve(Omega)
#
# # generate data
# Z = matrix(rnorm(10*1000), nrow = 1000, ncol = 10)
# out = eigen(S, symmetric = TRUE)
# S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
# X = Z %*% S.sqrt
#
## ---- message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE-----------
#
# # generate eigen values
#  eigen = c(rep(1000, 5), rep(1, 100 - 5))
#
# # randomly generate orthogonal basis (via QR)
# Q = matrix(rnorm(100*100), nrow = 100, ncol = 100) %>% qr %>% qr.Q
#
# # generate covariance matrix
# S = Q %*% diag(eigen) %*% t(Q)
#
# # generate data
# Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
# out = eigen(S, symmetric = TRUE)
# S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
# X = Z %*% S.sqrt
#
## ---- message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE-----------
#
# # generate eigen values
#  eigen = c(rep(1000, 5), rep(1, 10 - 5))
#
# # randomly generate orthogonal basis (via QR)
# Q = matrix(rnorm(10*10), nrow = 10, ncol = 10) %>% qr %>% qr.Q
#
# # generate covariance matrix
# S = Q %*% diag(eigen) %*% t(Q)
#
# # generate data
# Z = matrix(rnorm(10*50), nrow = 50, ncol = 10)
# out = eigen(S, symmetric = TRUE)
# S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
# X = Z %*% S.sqrt
#
## ---- message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE-----------
#
# # generate covariance matrix
# # (can confirm inverse is tri-diagonal)
# S = matrix(0, nrow = 100, ncol = 100)
# for (i in 1:100){
# for (j in 1:100){
# S[i, j] = 0.7^abs(i - j)
# }
# }
#
# # generate data
#  Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
# out = eigen(S, symmetric = TRUE)
# S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
# X = Z %*% S.sqrt
#
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/inst/doc/Simulations.R |
---
title: "Simulations"
author: "Matt Galloway"
#date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Simulations}
%\VignetteEngine{knitr::knitr}
%\usepackage[UTF-8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
In the simulations below, we generate data from a number of different oracle precision matrices with various structures. For each data-generating procedure, the `ADMMsigma()` function was run using 5-fold cross validation. After 20 replications, the cross validation errors were totalled and the optimal tuning parameters were selected (results below the top figure). These results are compared with the Kullback Leibler (KL) losses between the estimates and the oracle precision matrix (bottom figure).
We can see below that our cross validation procedure consistently chooses tuning parameters that are close to the optimal parameters with respect to the oracle.
<br>\newpage
### Compound Symmetric: P = 100, N = 50
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# oracle precision matrix
Omega = matrix(0.9, ncol = 100, nrow = 100)
diag(Omega) = 1
# generate covariance matrix
S = qr.solve(Omega)
# generate data
Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/compound_N50_P100.png)
<br>\newpage
### Compound Symmetric: P = 10, N = 1000
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# oracle precision matrix
Omega = matrix(0.9, ncol = 10, nrow = 10)
diag(Omega) = 1
# generate covariance matrix
S = qr.solve(Omega)
# generate data
Z = matrix(rnorm(10*1000), nrow = 1000, ncol = 10)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/compound_N1000_P10.png)
<br>\newpage
### Dense: P = 100, N = 50
```{r, message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE}
# generate eigen values
eigen = c(rep(1000, 5), rep(1, 100 - 5))
# randomly generate orthogonal basis (via QR)
Q = matrix(rnorm(100*100), nrow = 100, ncol = 100) %>% qr %>% qr.Q
# generate covariance matrix
S = Q %*% diag(eigen) %*% t(Q)
# generate data
Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/repsKLdenseQR_N50_P100.png)
<br>\newpage
### Dense: P = 10, N = 50
```{r, message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE}
# generate eigen values
eigen = c(rep(1000, 5), rep(1, 10 - 5))
# randomly generate orthogonal basis (via QR)
Q = matrix(rnorm(10*10), nrow = 10, ncol = 10) %>% qr %>% qr.Q
# generate covariance matrix
S = Q %*% diag(eigen) %*% t(Q)
# generate data
Z = matrix(rnorm(10*50), nrow = 50, ncol = 10)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/repsKLdense_N50_P10.png)
<br>\newpage
### Tridiagonal: P = 100, N = 50
```{r, message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE}
# generate covariance matrix
# (can confirm inverse is tri-diagonal)
S = matrix(0, nrow = 100, ncol = 100)
for (i in 1:100){
for (j in 1:100){
S[i, j] = 0.7^abs(i - j)
}
}
# generate data
Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/repsKLtridiag_N50_P100.png)
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/inst/doc/Simulations.Rmd |
## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE, cache = TRUE)
## ---- message = FALSE, echo = TRUE---------------------------------------
library(ADMMsigma)
# generate data from a sparse matrix
# first compute covariance matrix
S = matrix(0.7, nrow = 5, ncol = 5)
for (i in 1:5){
for (j in 1:5){
S[i, j] = S[i, j]^abs(i - j)
}
}
# generate 100 x 5 matrix with rows drawn from iid N_p(0, S)
set.seed(123)
Z = matrix(rnorm(100*5), nrow = 100, ncol = 5)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
# snap shot of data
head(X)
## ---- message = FALSE, echo = TRUE---------------------------------------
# print oracle covariance matrix
S
# print inverse covariance matrix (omega)
round(qr.solve(S), 5)
## ---- message = FALSE, echo = TRUE---------------------------------------
# print the inverse of the sample covariance matrix (a potentially poor precision estimate)
round(qr.solve(cov(X)*(nrow(X) - 1)/nrow(X)), 5)
## ---- message = FALSE, echo = TRUE---------------------------------------
# elastic-net type penalty (set tolerance to 1e-8)
ADMMsigma(X, tol.abs = 1e-8, tol.rel = 1e-8)
## ---- message = FALSE, echo = TRUE---------------------------------------
# lasso penalty (default tolerance)
ADMMsigma(X, alpha = 1)
## ---- message = FALSE, echo = TRUE---------------------------------------
# elastic-net penalty (alpha = 0.5)
ADMMsigma(X, alpha = 0.5)
## ---- message = FALSE, echo = TRUE---------------------------------------
# ridge penalty
ADMMsigma(X, alpha = 0)
# ridge penalty (using closed-form solution)
RIDGEsigma(X, lam = 10^seq(-8, 8, 0.01))
## ---- message = FALSE, echo = TRUE---------------------------------------
# produce CV heat map for ADMMsigma
ADMM = ADMMsigma(X, tol.abs = 1e-8, tol.rel = 1e-8)
plot(ADMM, type = "heatmap")
## ---- message = FALSE, echo = TRUE---------------------------------------
# produce line graph for CV errors for ADMMsigma
plot(ADMM, type = "line")
## ---- message = FALSE, echo = TRUE---------------------------------------
# produce CV heat map for RIDGEsigma
RIDGE = RIDGEsigma(X, lam = 10^seq(-8, 8, 0.01))
plot(RIDGE, type = "heatmap")
# produce line graph for CV errors for RIDGEsigma
plot(RIDGE, type = "line")
## ---- message = FALSE, echo = TRUE---------------------------------------
# AIC
plot(ADMMsigma(X, crit.cv = "AIC"))
# BIC
plot(ADMMsigma(X, crit.cv = "BIC"))
## ---- message = FALSE, echo = TRUE---------------------------------------
# keep all estimates using path
ADMM = ADMMsigma(X, path = TRUE)
# print only first three objects
ADMM$Path[,,1:3]
## ---- message = FALSE, echo = TRUE, eval = FALSE-------------------------
#
# # reduce number of lam to 5
# ADMM = ADMMsigma(X, nlam = 5)
#
## ---- message = FALSE, echo = TRUE, eval = FALSE-------------------------
#
# # reduce number of folds to 3
# ADMM = ADMMsigma(X, K = 3)
#
## ---- message = FALSE, echo = TRUE, eval = FALSE-------------------------
#
# # relax convergence criteria
# ADMM = ADMMsigma(X, tol.abs = 1e-3, tol.rel = 1e-3)
#
## ---- message = FALSE, echo = TRUE, eval = FALSE-------------------------
#
# # adjust maximum number of iterations
# ADMM = ADMMsigma(X, maxit = 1e3)
#
## ---- message = FALSE, echo = TRUE, eval = FALSE-------------------------
#
# # adjust adjmaxit
# ADMM = ADMMsigma(X, maxit = 1e4, adjmaxit = 2)
#
## ---- message = FALSE, echo = TRUE, eval = FALSE-------------------------
#
# # parallel CV
# ADMM = ADMMsigma(X, cores = 3)
#
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/inst/doc/Tutorial.R |
---
title: " ADMMsigma Tutorial"
#author: "Matt Galloway"
#date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{ADMMsigma Tutorial}
%\VignetteEngine{knitr::knitr}
%\usepackage[UTF-8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE, cache = TRUE)
```
## Introduction
In many statistical applications, estimating the covariance for a set of random variables is a critical task. The covariance is useful because it characterizes the *relationship* between variables. For instance, suppose we have three variables $X, Y, \mbox{ and } Z$ and their covariance matrix is of the form
\[ \Sigma_{xyz} = \begin{pmatrix}
1 & 0 & 0.5 \\
0 & 1 & 0 \\
0.5 & 0 & 1
\end{pmatrix} \]
We can gather valuable information from this matrix. First of all, we know that each of the variables has an equal variance of 1. Second, we know that variables $X$ and $Y$ are likely independent because the covariance between the two is equal to 0. This implies that any information in $X$ is useless in trying to gather information about $Y$. Lastly, we know that variables $X$ and $Z$ are moderately, positively correlated because their covariance is 0.5.
Unfortunately, estimating $\Sigma$ well is often computationally expensive and, in a few settings, extremely challenging. For this reason, emphasis in the literature and elsewhere has been placed on estimating the inverse of $\Sigma$ which we like to denote as $\Omega \equiv \Sigma^{-1}$.
`ADMMsigma` is designed to estimate a robust $\Omega$ efficiently while also allowing for flexibility and rapid experimentation for the end user.
We will illustrate this with a short simulation.
<br>\vspace{0.5cm}
## Simulation
Let us generate some data.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
library(ADMMsigma)
# generate data from a sparse matrix
# first compute covariance matrix
S = matrix(0.7, nrow = 5, ncol = 5)
for (i in 1:5){
for (j in 1:5){
S[i, j] = S[i, j]^abs(i - j)
}
}
# generate 100 x 5 matrix with rows drawn from iid N_p(0, S)
set.seed(123)
Z = matrix(rnorm(100*5), nrow = 100, ncol = 5)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
# snap shot of data
head(X)
```
<br>\vspace{0.5cm}
We have generated 100 samples (5 variables) from a normal distribution with mean equal to zero and an oracle covariance matrix $S$.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# print oracle covariance matrix
S
# print inverse covariance matrix (omega)
round(qr.solve(S), 5)
```
<br>\vspace{0.5cm}
It turns out that this particular oracle covariance matrix (tapered matrix) has an inverse - or precision matrix - that is sparse (tri-diagonal). That is, the precision matrix has many zeros.
In this particular setting, we could estimate $\Omega$ by taking the inverse of the sample covariance matrix $\hat{S} = \sum_{i = 1}^{n}(X_{i} - \bar{X})(X_{i} - \bar{X})^{T}/n$:
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# print the inverse of the sample covariance matrix (a potentially poor precision estimate)
round(qr.solve(cov(X)*(nrow(X) - 1)/nrow(X)), 5)
```
<br>\vspace{0.5cm}
However, because $\Omega$ is sparse, this estimator will likely perform very poorly. Notice the number of zeros in our oracle precision matrix compared to the inverse of the sample covariance matrix. Instead, we will use `ADMMsigma` to estimate $\Omega$.
By default, `ADMMsigma` will construct $\Omega$ using an elastic-net penalty and choose the optimal `lam` and `alpha` tuning parameters.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# elastic-net type penalty (set tolerance to 1e-8)
ADMMsigma(X, tol.abs = 1e-8, tol.rel = 1e-8)
```
<br>\vspace{0.5cm}
We can see that the optimal `alpha` value selected was 1. This selection corresponds with a lasso penalty -- a special case of the elastic-net penalty. Further, a lasso penalty embeds an assumption in the estimate (call it $\hat{\Omega}$) that the true $\Omega$ is sparse. Thus the package has automatically selected the penalty that most-appropriately matches the *true* data-generating precision matrix.
We can also explicitly assume sparsity in our estimate by restricting the class of penalties to the lasso. We do this by setting `alpha = 1` in our function:
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# lasso penalty (default tolerance)
ADMMsigma(X, alpha = 1)
```
<br>\vspace{0.5cm}
We might also want to restrict `alpha = 0.5`:
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# elastic-net penalty (alpha = 0.5)
ADMMsigma(X, alpha = 0.5)
```
<br>\newpage
Or maybe we want to assume that $\Omega$ is *not* sparse but has entries close to zero. In this case, a ridge penalty would be most appropriate. We can estimate an $\Omega$ of this form by setting `alpha = 0` in the `ADMMsigma` function or using the `RIDGEsigma` function. `RIDGEsigma` uses a closed-form solution rather than an algorithm to compute its estimate -- and for this reason should be preferred in most cases (less computationally intensive).
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# ridge penalty
ADMMsigma(X, alpha = 0)
# ridge penalty (using closed-form solution)
RIDGEsigma(X, lam = 10^seq(-8, 8, 0.01))
```
<br>\newpage
`ADMMsigma` also has the capability to provide plots for the cross validation errors. This allows the user to analyze and select the appropriate tuning parameters.
In the heatmap plot below, the more bright (white) areas of the heat map correspond to a better tuning parameter selection.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# produce CV heat map for ADMMsigma
ADMM = ADMMsigma(X, tol.abs = 1e-8, tol.rel = 1e-8)
plot(ADMM, type = "heatmap")
```
<br>\vspace{0.5cm}
We can also produce a line graph of the cross validation errors:
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# produce line graph for CV errors for ADMMsigma
plot(ADMM, type = "line")
```
<br>\vspace{0.5cm}
And similarly for `RIDGEsigma`:
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# produce CV heat map for RIDGEsigma
RIDGE = RIDGEsigma(X, lam = 10^seq(-8, 8, 0.01))
plot(RIDGE, type = "heatmap")
# produce line graph for CV errors for RIDGEsigma
plot(RIDGE, type = "line")
```
<br>\vspace{0.5cm}
## More advanced options
`ADMMsigma` contains a number of different criteria for selecting the optimal tuning parameters during cross validation. The package default is to choose the tuning parameters that maximize the log-likelihood (`crit.cv = loglik`). Other options include `AIC` and `BIC`.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# AIC
plot(ADMMsigma(X, crit.cv = "AIC"))
# BIC
plot(ADMMsigma(X, crit.cv = "BIC"))
```
<br>\vspace{0.5cm}
This allows the user to select appropriate tuning parameters under various decision criteria. We also have the option to print *all* of the estimated precision matrices for each tuning parameter combination using the `path` option. This option should be used with *extreme* care when the dimension and sample size are large -- you may run into memory issues.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE}
# keep all estimates using path
ADMM = ADMMsigma(X, path = TRUE)
# print only first three objects
ADMM$Path[,,1:3]
```
<br>\vspace{0.5cm}
A huge issue in precision matrix estimation is the computational complexity when the sample size and dimension of our data are particularly large. There are a number of built-in options in `ADMMsigma` that can be used to improve computation speed:
- Reduce the number of `lam` values during cross validation. The default number is 10.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# reduce number of lam to 5
ADMM = ADMMsigma(X, nlam = 5)
```
<br>\vspace{0.5cm}
- Reduce the number of `K` folds during cross validation. The default number is 5.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# reduce number of folds to 3
ADMM = ADMMsigma(X, K = 3)
```
<br>\vspace{0.5cm}
- Relax the convergence criteria for the ADMM algorithm using the `tol.abs` and `tol.rel` options. The default for each is 1e-4.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# relax convergence criteria
ADMM = ADMMsigma(X, tol.abs = 1e-3, tol.rel = 1e-3)
```
<br>\vspace{0.5cm}
- Adjust the maximum number of iterations. The default is 1e4.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# adjust maximum number of iterations
ADMM = ADMMsigma(X, maxit = 1e3)
```
<br>\vspace{0.5cm}
- Adjust `adjmaxit`. This allows the user to adjust the maximum number of iterations *after* the first `lam` tuning parameter has fully converged during cross validation. This allows for *one-step estimators* and can greatly reduce the time required for the cross validation procedure while still choosing near-optimal tuning parameters.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# adjust adjmaxit
ADMM = ADMMsigma(X, maxit = 1e4, adjmaxit = 2)
```
<br>\vspace{0.5cm}
- We can also opt to run our cross validation procedure in parallel. The user should check how many cores are on their system before using this option.
<br>\vspace{0.5cm}
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# parallel CV
ADMM = ADMMsigma(X, cores = 3)
```
<br>\vspace{0.5cm}
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/inst/doc/Tutorial.Rmd |
---
title: "Precision Matrix Estimation via ADMM"
author: "Matt Galloway"
#date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
bibliography: lib.bib
vignette: >
%\VignetteIndexEntry{Precision Matrix Estimation via ADMM}
%\VignetteEngine{knitr::knitr}
%\usepackage[UTF-8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
## Introduction
Consider the case where we observe $n$ independent, identically distributed copies of the random variable ($X_{i}$) where $X_{i} \in \mathbb{R}^{p}$ is normally distributed with some mean, $\mu$, and some variance, $\Sigma$. That is, $X_{i} \sim N_{p}\left( \mu, \Sigma \right)$.
Because we assume independence, we know that the probability of observing these specific observations $X_{1}, ..., X_{n}$ is equal to
\begin{align*}
f(X_{1}, ..., X_{n}; \mu, \Sigma) &= \prod_{i = 1}^{n}(2\pi)^{-p/2}\left| \Sigma \right|^{-1/2}\exp\left[ -\frac{1}{2}\left( X_{i} - \mu \right)^{T}\Sigma^{-1}\left( X_{i} - \mu \right) \right] \\
&= (2\pi)^{-np/2}\left| \Sigma \right|^{-n/2}\mbox{etr}\left[ -\frac{1}{2}\sum_{i = 1}^{n}\left( X_{i} - \mu \right)\left( X_{i} - \mu \right)^{T}\Sigma^{-1} \right]
\end{align*}
where $\mbox{etr}\left( \cdot \right)$ denotes the exponential trace operator. It follows that the log-likelihood for $\mu$ and $\Sigma$ is equal to the following:
\[ l(\mu, \Sigma | X) = const. - \frac{n}{2}\log\left| \Sigma \right| - tr\left[ \frac{1}{2}\sum_{i = 1}^{n}\left(X_{i} - \mu \right)\left(X_{i} - \mu \right)^{T}\Sigma^{-1} \right] \]
If we are interested in estimating $\mu$, it is relatively straightforward to show that the maximum likelihood estimator (MLE) for $\mu$ is $\hat{\mu}_{MLE} = \sum_{i = 1}^{n}X_{i}/n$, which we typically denote as $\bar{X}$. However, in addition to $\mu$, many applications require the estimation of $\Sigma$ as well. We can also find a maximum likelihood estimator:
\begin{align*}
&\hat{\Sigma}_{MLE} = \arg\max_{\Sigma \in \mathbb{S}_{+}^{p}}\left\{ const. - \frac{n}{2}\log\left| \Sigma \right| - tr\left[ \frac{1}{2}\sum_{i = 1}^{n}\left(X_{i} - \mu \right)\left(X_{i} - \mu \right)^{T}\Sigma^{-1} \right] \right\} \\
&\nabla_{\Sigma}l(\mu, \Sigma | X) = -\frac{n}{2}\Sigma^{-1} + \frac{1}{2}\sum_{i = 1}^{n}\left(X_{i} - \mu \right)\left(X_{i} - \mu \right)^{T}\Sigma^{-2} \\
\Rightarrow &\hat{\Sigma}_{MLE} = \left[ \frac{1}{n}\sum_{i = 1}^{n}\left(X_{i} - \bar{X} \right)\left(X_{i} - \bar{X} \right)^{T} \right]
\end{align*}
By setting the gradient equal to zero and plugging in the MLE for $\mu$, we find that the MLE for $\Sigma$ is our usual sample estimator often denoted as $S$. It turns out that we could have just as easily computed the maximum likelihood estimator for the precision matrix $\Omega \equiv \Sigma^{-1}$ and taken its inverse:
\[ \hat{\Omega}_{MLE} = \arg\min_{\Omega \in S_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| \right\} \]
so that $\hat{\Omega}_{MLE} = S^{-1}$. Beyond the formatting convenience, computing estimates for $\Omega$ as opposed to $\Sigma$ often poses less computational challenges -- and accordingly, the literature has placed more emphasis on efficiently solving for $\Omega$ instead of $\Sigma$.
Notice that here we are *minimizing* the negative log-likelihood as opposed to maximizing the log-likelihood. Both procedures will result in the same estimate.
As in regression settings, we can construct a *penalized* log-likelihood estimator by adding a penalty term, $P\left(\Omega\right)$, to the log-likelihood so that
\[ \hat{\Omega} = \arg\min_{\Omega \in S_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega \right| + P\left( \Omega \right) \right\} \]
$P\left( \Omega \right)$ is often of the form $P\left(\Omega \right) = \lambda\|\Omega \|_{F}^{2}/2$ or $P\left(\Omega \right) = \lambda\|\Omega\|_{1}$ where $\lambda > 0$, $\left\|\cdot \right\|_{F}^{2}$ is the Frobenius norm and we define $\left\|A \right\|_{1} = \sum_{i, j} \left| A_{ij} \right|$. These penalties are the ridge and lasso, respectively. In the `ADMMsigma` package, we instead take
\[ P\left( \Omega \right) = \lambda\left[\frac{1 - \alpha}{2}\left\| \Omega \right\|_{F}^{2} + \alpha\left\| \Omega \right\|_{1} \right] \]
so that solving the full penalized log-likelihood for $\Omega$ results in solving
\[ \hat{\Omega} = \arg\min_{\Omega \in S_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega \right| + \lambda\left[\frac{1 - \alpha}{2}\left\| \Omega \right\|_{F}^{2} + \alpha\left\| \Omega \right\|_{1} \right] \right\} \]
where $0 \leq \alpha \leq 1$. This penalty, known as the *elastic-net* penalty, was explored by Hui Zou and Trevor Hastie [@zou2005regularization] and is identical to the penalty used in the popular penalized regression package `glmnet`. Clearly, when $\alpha = 0$ the elastic-net reduces to a ridge-type penalty and when $\alpha = 1$ it reduces to a lasso-type penalty. Having this flexibility and generalization allows us to perform cross validation across proposed $\alpha$ values in addition to proposed $\lambda$ values.
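As a concrete illustration (using only arguments that appear in the tutorial vignette, and assuming that a vector of candidate $\alpha$ values may be supplied just as the default grid is -- see `?ADMMsigma` for the authoritative interface), such a joint cross validation might be requested as follows:
```{r, eval = FALSE}
# hedged usage sketch: cross validate jointly over lam and alpha grids
# (X is an n x p data matrix; argument behavior assumed, not verified here)
library(ADMMsigma)
ADMM = ADMMsigma(X, nlam = 20, alpha = seq(0, 1, 0.2), K = 5)
```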
We will explore how to solve for $\hat{\Omega}$ in the next section.
<br>\vspace{1cm}
## ADMM Algorithm
Many efficient methods have been proposed to solve for $\hat{\Omega}$ when $\alpha = 1$. The most popular method is the graphical lasso algorithm (glasso) introduced by @friedman2008sparse. However, no methods (to the best of my knowledge) have estimated $\Omega$ when $\alpha \in (0, 1)$. We will use the alternating direction method of multipliers (ADMM) algorithm to do so.
As the authors state in @boyd2011distributed, the "ADMM is an algorithm that is intended to blend the decomposability of dual ascent with the superior convergence properties of the method of multipliers." For our purposes, we will only focus on the ADMM algorithm but it is encouraged to read the original text from Boyd and others for a complete introduction to the other two methods.
In general, suppose we want to solve an optimization problem of the following form:
\begin{align*}
\mbox{minimize } f(x) + g(z) \\
\mbox{subject to } Ax + Bz = c
\end{align*}
where $x \in \mathbb{R}^{n}, z \in \mathbb{R}^{m}, A \in \mathbb{R}^{p \times n}, B \in \mathbb{R}^{p \times m}$, $c \in \mathbb{R}^{p}$, and $f$ and $g$ are assumed to be convex functions (following @boyd2011distributed, the estimation procedure will be introduced in vector form though we could just as easily take $x$ and $z$ to be matrices). In addition to penalized precision matrix estimation, optimization problems like this arise naturally in several statistics and machine learning applications -- particularly regularization methods. For instance, we could take $f$ to be the squared error loss, $g$ to be the $l_{2}$-norm, $c$ to be equal to zero and $A$ and $B$ to be identity matrices to solve the ridge regression optimization problem. In all cases, our goal is to find $x^{*} \in \mathbb{R}^{n}$ and $z^{*} \in \mathbb{R}^{m}$ that achieves the infimum $p^{*}$:
\[ p^{*} = inf\left\{ f(x) + g(z) | Ax + Bz = c \right\} \]
To do so, the ADMM algorithm uses the *augmented lagrangian*
\[ L_{\rho}(x, z, y) = f(x) + g(z) + y^{T}(Ax + Bz - c) + \frac{\rho}{2}\left\| Ax + Bz - c \right\|_{2}^{2} \]
where $y \in \mathbb{R}^{p}$ is the lagrange multiplier and $\rho > 0$ is a scalar. Clearly any minimizer, $p^{*}$, under the augmented lagrangian is equivalent to that of the lagrangian since any feasible point $(x, z)$ satisfies the constraint $\rho\left\| Ax + Bz - c \right\|_{2}^{2}/2 = 0$.
The algorithm consists of the following repeated iterations:
\begin{align}
x^{k + 1} &= \arg\min_{x \in \mathbb{R}^{n}}L_{\rho}(x, z^{k}, y^{k}) \\
z^{k + 1} &= \arg\min_{z \in \mathbb{R}^{m}}L_{\rho}(x^{k + 1}, z, y^{k}) \\
y^{k + 1} &= y^{k} + \rho(Ax^{k + 1} + Bz^{k + 1} - c)
\end{align}
In the context of precision matrix estimation, we can let $f$ be the non-penalized negative log-likelihood, $g$ be equal to $P\left( \Omega \right)$, and impose the constraint that $\Omega$ is equal to some $Z$ so that the lagrangian is
\[ L_{\rho}(\Omega, Z, \Lambda) = f\left(\Omega\right) + g\left(Z\right) + tr\left[\Lambda\left(\Omega - Z\right)\right] \]
and the augmented lagrangian is
\[ L_{\rho}(\Omega, Z, \Lambda) = f\left(\Omega\right) + g\left(Z\right) + tr\left[\Lambda\left(\Omega - Z\right)\right] + \frac{\rho}{2}\left\|\Omega - Z\right\|_{F}^{2} \]
The ADMM algorithm is now the following:
\begin{align}
\Omega^{k + 1} &= \arg\min_{\Omega \in \mathbb{S}_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| + tr\left[\Lambda^{k}\left(\Omega - Z^{k}\right)\right] + \frac{\rho}{2}\left\| \Omega - Z^{k} \right\|_{F}^{2} \right\} \\
Z^{k + 1} &= \arg\min_{Z \in \mathbb{S}^{p}}\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] + tr\left[\Lambda^{k}\left(\Omega^{k + 1} - Z\right)\right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z \right\|_{F}^{2} \right\} \\
\Lambda^{k + 1} &= \Lambda^{k} + \rho\left( \Omega^{k + 1} - Z^{k + 1} \right)
\end{align}
<br>\vspace{1cm}
### Algorithm
Set $k = 0$ and initialize $Z^{0}, \Lambda^{0}$, and $\rho$. Repeat steps 1-3 until convergence:
1. Decompose $S + \Lambda^{k} - \rho Z^{k} = VQV^{T}$ via spectral decomposition.
\[ \Omega^{k + 1} = \frac{1}{2\rho}V\left[ -Q + \left( Q^{2} + 4\rho I_{p} \right)^{1/2} \right]V^{T} \]
2. Elementwise soft-thresholding for all $i = 1,..., p$ and $j = 1,..., p$.
\begin{align*}
Z_{ij}^{k + 1} &= \frac{1}{\lambda(1 - \alpha) + \rho}\mbox{sign}\left(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k}\right)\left( \left| \rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} \right| - \lambda\alpha \right)_{+} \\
&= \frac{1}{\lambda(1 - \alpha) + \rho}\mbox{soft}\left(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k}, \lambda\alpha\right)
\end{align*}
3. Update $\Lambda^{k + 1}$.
\[ \Lambda^{k + 1} = \Lambda^{k} + \rho\left( \Omega^{k + 1} - Z^{k + 1} \right) \]
where $\mbox{soft}(a, b) = \mbox{sign}(a)(\left| a \right| - b)_{+}$.
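For readers who prefer code to pseudo-code, the chunk below is a minimal, un-optimized R sketch of exactly these three steps with a fixed step size $\rho$. It is *not* the package's internal (compiled) implementation -- the function name, defaults, and simplified stopping rule are for illustration only.
```{r, eval = FALSE}
# minimal R sketch of the algorithm above (illustrative only)
admm.sketch = function(S, lam, alpha, rho = 2, maxit = 1e4, tol = 1e-4) {
  p = ncol(S)
  Z = diag(p)
  Lambda = matrix(0, p, p)
  soft = function(a, b) sign(a) * pmax(abs(a) - b, 0)
  for (k in seq_len(maxit)) {
    # step 1: spectral decomposition of S + Lambda - rho * Z
    decomp = eigen(S + Lambda - rho * Z, symmetric = TRUE)
    q = decomp$values
    Omega = decomp$vectors %*% diag((-q + sqrt(q^2 + 4 * rho)) / (2 * rho)) %*%
      t(decomp$vectors)
    # step 2: elementwise soft-thresholding
    Z.old = Z
    Z = soft(rho * Omega + Lambda, lam * alpha) / (lam * (1 - alpha) + rho)
    # step 3: dual update
    Lambda = Lambda + rho * (Omega - Z)
    # stop when both residuals are small (simplified check)
    if (norm(Omega - Z, "F") < tol && norm(rho * (Z - Z.old), "F") < tol) break
  }
  Z
}
```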
<br>\vspace{1cm}
### Proof of (1):
\[ \Omega^{k + 1} = \arg\min_{\Omega \in \mathbb{S}_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| + tr\left[\Lambda^{k}\left(\Omega - Z^{k}\right)\right] + \frac{\rho}{2}\left\| \Omega - Z^{k} \right\|_{F}^{2} \right\} \]
\vspace{1cm}
\begin{align*}
\nabla_{\Omega}&\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| + tr\left[\Lambda^{k}\left(\Omega - Z^{k}\right)\right] + \frac{\rho}{2}\left\| \Omega - Z^{k} \right\|_{F}^{2} \right\} \\
&= S - \Omega^{-1} + \Lambda^{k} + \rho\left( \Omega - Z^{k} \right)
\end{align*}
Note that because all of the variables are symmetric, we can ignore the symmetric constraint when deriving the gradient. First set the gradient equal to zero and decompose $\Omega^{k + 1} = VDV^{T}$ where $D$ is a diagonal matrix with diagonal elements equal to the eigen values of $\Omega^{k + 1}$ and $V$ is the matrix with corresponding eigen vectors as columns:
\[ S + \Lambda^{k} - \rho Z^{k} = (\Omega^{k + 1})^{-1} - \rho \Omega^{k + 1} = VD^{-1}V^{T} - \rho VDV^{T} = V\left(D^{-1} - \rho D\right)V^{T} \]
This equivalence implies that
\[ \phi_{j}\left( S + \Lambda^{k} - \rho Z^{k} \right) = \frac{1}{\phi_{j}(\Omega^{k + 1})} - \rho\phi_{j}(\Omega^{k + 1}) \]
where $\phi_{j}(\cdot)$ is the $j$th eigen value.
\begin{align*}
&\Rightarrow \rho\phi_{j}^{2}(\Omega^{k + 1}) + \phi_{j}\left( S + \Lambda^{k} - \rho Z^{k} \right)\phi_{j}(\Omega^{k + 1}) - 1 = 0 \\
&\Rightarrow \phi_{j}(\Omega^{k + 1}) = \frac{-\phi_{j}(S + \Lambda^{k} - \rho Z^{k}) \pm \sqrt{\phi_{j}^{2}(S + \Lambda^{k} - \rho Z^{k}) + 4\rho}}{2\rho}
\end{align*}
In summary, if we decompose $S + \Lambda^{k} - \rho Z^{k} = VQV^{T}$ then
\[ \Omega^{k + 1} = \frac{1}{2\rho}V\left[ -Q + (Q^{2} + 4\rho I_{p})^{1/2}\right] V^{T} \]
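A quick numeric sanity check (with arbitrary toy inputs) confirms that the $\Omega^{k + 1}$ constructed this way satisfies the stationarity condition $S + \Lambda^{k} - \rho Z^{k} = \left(\Omega^{k + 1}\right)^{-1} - \rho\Omega^{k + 1}$:
```{r, eval = FALSE}
# verify the first-order condition for the spectral update (toy inputs)
set.seed(1)
p = 5; rho = 2
S = crossprod(matrix(rnorm(20 * p), 20, p)) / 20
Z = diag(p); Lambda = matrix(0, p, p)
decomp = eigen(S + Lambda - rho * Z, symmetric = TRUE)
q = decomp$values
Omega = decomp$vectors %*% diag((-q + sqrt(q^2 + 4 * rho)) / (2 * rho)) %*% t(decomp$vectors)
max(abs((S + Lambda - rho * Z) - (qr.solve(Omega) - rho * Omega))) # effectively zero
```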
<br>\vspace{1cm}
### Proof of (2)
\[ Z^{k + 1} = \arg\min_{Z \in \mathbb{S}^{p}}\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] + tr\left[\Lambda^{k}\left(\Omega^{k + 1} - Z\right)\right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z \right\|_{F}^{2} \right\} \]
\vspace{1cm}
\begin{align*}
\partial&\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] + tr\left[\Lambda^{k}\left(\Omega^{k + 1} - Z\right)\right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z \right\|_{F}^{2} \right\} \\
&= \partial\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] \right\} + \nabla_{\Omega}\left\{ tr\left[\Lambda^{k}\left(\Omega^{k + 1} - Z\right)\right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z \right\|_{F}^{2} \right\} \\
&= \lambda(1 - \alpha)Z + \mbox{sign}(Z)\lambda\alpha - \Lambda^{k} - \rho\left( \Omega^{k + 1} - Z \right)
\end{align*}
where sign is the elementwise sign operator. By setting the gradient/sub-differential equal to zero, we arrive at the following equivalence:
\[ Z_{ij}^{k + 1} = \frac{1}{\lambda(1 - \alpha) + \rho}\left( \rho \Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} - \mbox{sign}(Z_{ij}^{k + 1})\lambda\alpha \right) \]
for all $i = 1,..., p$ and $j = 1,..., p$. We observe two scenarios:
- If $Z_{ij}^{k + 1} > 0$ then
\[ \rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} > \lambda\alpha \]
- If $Z_{ij}^{k + 1} < 0$ then
\[ \rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} < -\lambda\alpha \]
This implies that $\mbox{sign}(Z_{ij}) = \mbox{sign}(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k})$. Putting all the pieces together, we arrive at
\begin{align*}
Z_{ij}^{k + 1} &= \frac{1}{\lambda(1 - \alpha) + \rho}\mbox{sign}\left(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k}\right)\left( \left| \rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k} \right| - \lambda\alpha \right)_{+} \\
&= \frac{1}{\lambda(1 - \alpha) + \rho}\mbox{soft}\left(\rho\Omega_{ij}^{k + 1} + \Lambda_{ij}^{k}, \lambda\alpha\right)
\end{align*}
where soft is the soft-thresholding function.
<br>\vspace{1cm}
## Scaled-Form ADMM
There is another popular, alternate form of the ADMM algorithm used by scaling the dual variable ($\Lambda^{k}$). Let us define $R^{k} = \Omega - Z^{k}$ and $U^{k} = \Lambda^{k}/\rho$.
\begin{align*}
tr\left[ \Lambda^{k}\left( \Omega - Z^{k} \right) \right] + \frac{\rho}{2}\left\| \Omega - Z^{k} \right\|_{F}^{2} &= tr\left[ \Lambda^{k}R^{k} \right] + \frac{\rho}{2}\left\| R^{k} \right\|_{F}^{2} \\
&= \frac{\rho}{2}\left\| R^{k} + \Lambda^{k}/\rho \right\|_{F}^{2} - \frac{\rho}{2}\left\| \Lambda^{k}/\rho \right\|_{F}^{2} \\
&= \frac{\rho}{2}\left\| R^{k} + U^{k} \right\|_{F}^{2} - \frac{\rho}{2}\left\| U^{k} \right\|_{F}^{2}
\end{align*}
Therefore, a scaled-form can now be written as
\begin{align}
\Omega^{k + 1} &= \arg\min_{\Omega \in \mathbb{S}_{+}^{p}}\left\{ tr\left(S\Omega\right) - \log\left|\Omega\right| + \frac{\rho}{2}\left\| \Omega - Z^{k} + U^{k} \right\|_{F}^{2} \right\} \\
Z^{k + 1} &= \arg\min_{Z \in \mathbb{S}^{p}}\left\{ \lambda\left[ \frac{1 - \alpha}{2}\left\| Z \right\|_{F}^{2} + \alpha\left\| Z \right\|_{1} \right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z + U^{k} \right\|_{F}^{2} \right\} \\
U^{k + 1} &= U^{k} + \Omega^{k + 1} - Z^{k + 1}
\end{align}
And more generally (in vector form),
\begin{align}
x^{k + 1} &= \arg\min_{x}\left\{ f(x) + \frac{\rho}{2}\left\| Ax + Bz^{k} - c + u^{k} \right\|_{2}^{2} \right\} \\
z^{k + 1} &= \arg\min_{z}\left\{ g(z) + \frac{\rho}{2}\left\| Ax^{k + 1} + Bz - c + u^{k} \right\|_{2}^{2} \right\} \\
u^{k + 1} &= u^{k} + Ax^{k + 1} + Bz^{k + 1} - c
\end{align}
Note that there are limitations to using this method. Because the dual variable is scaled by $\rho$ (the step size), this form limits one to using a constant step size without making further adjustments to $U^{k}$. It has been shown in the literature that a dynamic step size (like the one used in `ADMMsigma`) can significantly reduce the number of iterations required for convergence.
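A common way to implement such a dynamic step size is the residual balancing rule of @boyd2011distributed, sketched below. The exact rule and constants used internally by `ADMMsigma` may differ, so treat this only as an illustration of the idea.
```{r, eval = FALSE}
# residual balancing: keep the primal and dual residual norms within a
# factor mu of one another by rescaling rho (constants are illustrative)
update.rho = function(rho, r.norm, s.norm, mu = 10, tau = 2) {
  if (r.norm > mu * s.norm) {
    rho * tau # primal residual dominates: increase rho
  } else if (s.norm > mu * r.norm) {
    rho / tau # dual residual dominates: decrease rho
  } else {
    rho
  }
}
```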
<br>\vspace{1cm}
## Stopping Criterion
In discussing the optimality conditions and stopping criterion, we will follow the steps outlined in @boyd2011distributed and cater them to precision matrix estimation.
Below we have three optimality conditions:
1. Primal:
\[ \Omega^{k + 1} - Z^{k + 1} = 0 \]
2. Dual:
\[ 0 \in \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k + 1} \]
\[ 0 \in \partial g\left(Z^{k + 1}\right) - \Lambda^{k + 1} \]
The first dual optimality condition is a result of taking the sub-differential of the lagrangian (non-augmented) with respect to $\Omega^{k + 1}$ (note that we must honor the symmetric constraint of $\Omega^{k + 1}$) and the second is a result of taking the sub-differential of the lagrangian with respect to $Z^{k + 1}$ (no symmetric constraint).
We will define the left-hand side of the first condition as the primal residual $r^{k + 1} = \Omega^{k + 1} - Z^{k + 1}$. At convergence, optimality conditions require that $r^{k + 1} \approx 0$. The second residual we will define is the dual residual $s^{k + 1} = \rho\left( Z^{k + 1} - Z^{k} \right)$. This residual is derived from the following:
Because $\Omega^{k + 1}$ is the argument that minimizes $L_{p}\left( \Omega, Z^{k}, \Lambda^{k} \right)$,
\begin{align*}
0 &\in \partial \left\{ f\left(\Omega^{k + 1}\right) + tr\left[ \Lambda^{k}\left( \Omega^{k + 1} - Z^{k} \right) \right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z^{k} \right\|_{F}^{2} \right\} \\
&= \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k} + \rho\left(\Omega^{k + 1} - Z^{k}\right) \\
&= \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k} + \rho\left(\Omega^{k + 1} + Z^{k + 1} - Z^{k + 1} - Z^{k}\right) \\
&= \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k} + \rho\left(\Omega^{k + 1} - Z^{k + 1}\right) + \rho\left(Z^{k + 1} - Z^{k}\right) \\
&= \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k + 1} + \rho\left(Z^{k + 1} - Z^{k}\right) \\
\Rightarrow -\rho\left( Z^{k + 1} - Z^{k} \right) &\in \partial f\left(\Omega^{k + 1}\right) + \Lambda^{k + 1}
\end{align*}
That is, $s^{k + 1} = \rho\left( Z^{k + 1} - Z^{k} \right)$ measures how far the first dual optimality condition is from being satisfied. Like the primal residual, at convergence optimality conditions require that $s^{k + 1} \approx 0$. Note that the second dual optimality condition is always satisfied:
\begin{align*}
0 &\in \partial \left\{ g\left(Z^{k + 1}\right) + tr\left[ \Lambda^{k}\left( \Omega^{k + 1} - Z^{k + 1} \right) \right] + \frac{\rho}{2}\left\| \Omega^{k + 1} - Z^{k + 1} \right\|_{F}^{2} \right\} \\
&= \partial g\left(Z^{k + 1}\right) - \Lambda^{k} - \rho\left(\Omega^{k + 1} - Z^{k + 1}\right) \\
&= \partial g\left(Z^{k + 1}\right) - \Lambda^{k + 1} \\
\end{align*}
One possible stopping criterion (sketched in code below) is to set $\epsilon^{rel} = \epsilon^{abs} = 10^{-3}$ and stop the algorithm when $\left\| r^{k + 1} \right\|_{F} \leq \epsilon^{pri}$ and $\left\| s^{k + 1} \right\|_{F} \leq \epsilon^{dual}$ where
\begin{align*}
\epsilon^{pri} &= p\epsilon^{abs} + \epsilon^{rel}\max\left\{ \left\| \Omega^{k + 1} \right\|_{F}, \left\| Z^{k + 1} \right\|_{F} \right\} \\
\epsilon^{dual} &= p\epsilon^{abs} + \epsilon^{rel}\left\| \Lambda^{k + 1} \right\|_{F}
\end{align*}
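In code, that check might look like the following (variable names are illustrative and this is not the package's internal implementation):
```{r, eval = FALSE}
# illustrative version of the stopping check described above
admm.converged = function(Omega, Z, Z.old, Lambda, rho,
                          eps.abs = 1e-3, eps.rel = 1e-3) {
  p = ncol(Omega)
  eps.pri = p * eps.abs + eps.rel * max(norm(Omega, "F"), norm(Z, "F"))
  eps.dual = p * eps.abs + eps.rel * norm(Lambda, "F")
  r = norm(Omega - Z, "F") # primal residual
  s = norm(rho * (Z - Z.old), "F") # dual residual
  (r <= eps.pri) && (s <= eps.dual)
}
```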
<br><br>\newpage
## References
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/vignettes/Details.Rmd |
---
title: "Simulations"
author: "Matt Galloway"
#date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Simulations}
%\VignetteEngine{knitr::knitr}
%\usepackage[UTF-8]{inputenc}
---
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
In the simulations below, we generate data from a number of different oracle precision matrices with various structures. For each data-generating procedure, the `ADMMsigma()` function was run using 5-fold cross validation. After 20 replications, the cross validation errors were totalled and the optimal tuning parameters were selected (results below the top figure). These results are compared with the Kullback Leibler (KL) losses between the estimates and the oracle precision matrix (bottom figure).
We can see below that our cross validation procedure consistently chooses tuning parameters that are close to the optimal parameters with respect to the oracle.
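For reference, a KL-type loss of the form sketched below is commonly used for such comparisons; the exact loss used to produce the figures is not reproduced in this vignette, so treat the function (and its name) as illustrative only.
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# illustrative KL-type loss between an estimate 'Omega.hat' and the oracle
# precision matrix 'Omega' (the loss used for the figures may differ by a
# constant or scaling factor)
KL.loss = function(Omega.hat, Omega) {
  p = ncol(Omega)
  M = qr.solve(Omega) %*% Omega.hat # Sigma %*% Omega.hat
  sum(diag(M)) - as.numeric(determinant(M, logarithm = TRUE)$modulus) - p
}
```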
<br>\newpage
### Compound Symmetric: P = 100, N = 50
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# oracle precision matrix
Omega = matrix(0.9, ncol = 100, nrow = 100)
diag(Omega) = 1
# generate covariance matrix
S = qr.solve(Omega)
# generate data
Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/compound_N50_P100.png)
<br>\newpage
### Compound Symmetric: P = 10, N = 1000
```{r, message = FALSE, echo = TRUE, eval = FALSE}
# oracle precision matrix
Omega = matrix(0.9, ncol = 10, nrow = 10)
diag(Omega) = 1
# generate covariance matrix
S = qr.solve(Omega)
# generate data
Z = matrix(rnorm(10*1000), nrow = 1000, ncol = 10)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/compound_N1000_P10.png)
<br>\newpage
### Dense: P = 100, N = 50
```{r, message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE}
# load magrittr for the pipe operator (%>%) used below
library(magrittr)
# generate eigen values
eigen = c(rep(1000, 5), rep(1, 100 - 5))
# randomly generate orthogonal basis (via QR)
Q = matrix(rnorm(100*100), nrow = 100, ncol = 100) %>% qr %>% qr.Q
# generate covariance matrix
S = Q %*% diag(eigen) %*% t(Q)
# generate data
Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/repsKLdenseQR_N50_P100.png)
<br>\newpage
### Dense: P = 10, N = 50
```{r, message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE}
# load magrittr for the pipe operator (%>%) used below
library(magrittr)
# generate eigen values
eigen = c(rep(1000, 5), rep(1, 10 - 5))
# randomly generate orthogonal basis (via QR)
Q = matrix(rnorm(10*10), nrow = 10, ncol = 10) %>% qr %>% qr.Q
# generate covariance matrix
S = Q %*% diag(eigen) %*% t(Q)
# generate data
Z = matrix(rnorm(10*50), nrow = 50, ncol = 10)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/repsKLdense_N50_P10.png)
<br>\newpage
### Tridiagonal: P = 100, N = 50
```{r, message = FALSE, echo = TRUE, eval = FALSE, tidy = FALSE}
# generate covariance matrix
# (can confirm inverse is tri-diagonal)
S = matrix(0, nrow = 100, ncol = 100)
for (i in 1:100){
for (j in 1:100){
S[i, j] = 0.7^abs(i - j)
}
}
# generate data
Z = matrix(rnorm(100*50), nrow = 50, ncol = 100)
out = eigen(S, symmetric = TRUE)
S.sqrt = out$vectors %*% diag(out$values^0.5) %*% t(out$vectors)
X = Z %*% S.sqrt
```
<br>\vspace{0.5cm}
![](images/repsKLtridiag_N50_P100.png)
| /scratch/gouwar.j/cran-all/cranData/ADMMsigma/vignettes/Simulations.Rmd |