text
stringlengths 0
538k
|
---|
*! 1.2.1 NJC 1 March 2012
*! 1.2.0 NJC 15 September 2008
* 1.1.1 GML NJC 26 February 2002
* 1.1.0 GML NJC 25 February 2002
* 1.0.0 21 November 2001
* -distinct-: display the number of observations and the number of distinct
* values for each variable in varlist, or (with -joint-) for the joint
* combination of all the variables.
* Options:
*   missing    count missing values as categories (novarlist marksample)
*   abbrev(#)  abbreviate displayed variable names to # characters
*   joint      count distinct combinations of the whole varlist jointly
*   minimum(#) only display counts of at least #
*   maximum(#) only display counts of at most #
* Saved results: r(N) and r(ndistinct) — see NOTE(review) before -return-.
program distinct, rclass sortpreserve byable(recall)
version 8.0
syntax [varlist] [if] [in] [, MISSing Abbrev(int -1) Joint ///
MINimum(int 0) MAXimum(int -1) ]
* maximum(-1) is the "no upper limit" default; missing (.) compares
* greater than any number, so the r(sum) <= `maximum' filter always passes
if `maximum' == -1 local maximum .
* if bounds were supplied in reverse order, swap them and tell the user
if `minimum' > `maximum' {
local swap `minimum'
local minimum `maximum'
local maximum `swap'
di as txt "min(`maximum') max(`minimum') interpreted as min(`minimum') max(`maximum')"
}
if "`joint'" != "" {
* joint count over the whole varlist
di
di in text " Observations"
di in text " total distinct"
* with -missing-, missing values are kept as categories (novarlist);
* otherwise observations with any missing value are excluded (strok
* keeps string variables from being rejected)
if "`missing'" != "" marksample touse, novarlist
else marksample touse, strok
tempvar vals
* flag the first observation of each distinct varlist combination
* within the sample; the sum of the flags is the distinct count
bysort `touse' `varlist': gen byte `vals' = (_n == 1) * `touse'
su `vals' if `touse', meanonly
if r(sum) >= `minimum' & r(sum) <= `maximum' {
di as res %11.0g r(N) " " %9.0g r(sum)
}
}
else {
* one count per variable; first compute the display width for names:
* widest variable name, but at least 5 characters
if `abbrev' == -1 {
foreach v of local varlist {
local abbrev = max(`abbrev', length("`v'"))
}
}
local abbrev = max(`abbrev', 5)
local abbp2 = `abbrev' + 2
local abbp3 = `abbrev' + 3
di
di as txt _col(`abbp3') "{c |} Observations"
di as txt _col(`abbp3') "{c |} total distinct"
di as txt "{hline `abbp2'}{c +}{hline 22}"
foreach v of local varlist {
tempvar touse vals
mark `touse' `if' `in'
// markout separately for each variable in varlist
if "`missing'" == "" markout `touse' `v', strok
bys `touse' `v' : gen byte `vals' = (_n == 1) * `touse'
su `vals' if `touse', meanonly
if r(sum) >= `minimum' & r(sum) <= `maximum' {
di " " as txt %`abbrev's abbrev("`v'", `abbrev') ///
" {c |} " as res %9.0g r(N) " " %9.0g r(sum)
}
drop `touse' `vals'
}
}
* NOTE(review): these pick up r() from the last -su- executed above, i.e.
* the last variable in varlist when -joint- is not given — confirm intended
return scalar N = r(N)
return scalar ndistinct = r(sum)
end
|
** Downloaded from http://blogs.worldbank.org/impactevaluations/tools-of-the-trade-doing-stratified-randomization-with-uneven-numbers-in-some-strata
* Example code by Miriam Bruhn and David McKenzie
# delimit ;
*clear all; //EDIT: moved to calling routine
*cap log close;
*set mem 200m;
*use BlogStrataExample.dta, clear;
#delimit ;
* First Generate the Randomization Strata;
egen strata=group(variableA variableB variableC variableD);
* Count how many Strata there are and look at distribution per Strata;
distinct strata;
tab strata;
* ------------- *;
* Randomization *;
* ------------- *;
* First sort the data, assign the random number seed, generate a random number;
* for each unit, and then the rank of this number within each strata;
sort strata obsno;
by strata: gen obs=_N;
*set seed 467; //EDIT: moved to calling routine
gen random2=uniform();
by strata: egen rank2=rank(random2);
* Now start forming treatment groups - beginning with groups divisible by 6;
gen group="";
* multiples = number of complete blocks of 6 within each stratum,;
* multiples2 = number of units covered by those complete blocks.;
* NOTE(review): -recast int, force- is relied on here to make obs/6;
* integral - confirm its rounding behavior matches what is intended;
gen multiples=obs/6;
recast int multiples, force;
gen multiples2=multiples*6;
* since our biggest cellsize is 99 units, dividing this by 6 gives 16 as the;
* most we can have all 6 treatment and control groups ever get within a strata;
* within each consecutive block of 6 ranks, assign one unit to each of the;
* five treatments and one to control; units ranked beyond multiples2 are;
* "misfits" and are handled further below;
forvalues x=1/16{;
local obs=6*`x';
local rank1=6*`x'-5;
local rank2=6*`x'-4;
local rank3=6*`x'-3;
local rank4=6*`x'-2;
local rank5=6*`x'-1;
local rank6=6*`x';
replace group="Treatment 1" if obs>=`obs' & rank2==`rank1' & rank2<=multiples2;
replace group="Treatment 2" if obs>=`obs' & rank2==`rank2' & rank2<=multiples2;
replace group="Treatment 3" if obs>=`obs' & rank2==`rank3' & rank2<=multiples2;
replace group="Treatment 4" if obs>=`obs' & rank2==`rank4' & rank2<=multiples2;
replace group="Treatment 5" if obs>=`obs' & rank2==`rank5' & rank2<=multiples2;
replace group="Control" if obs>=`obs' & rank2==`rank6' & rank2<=multiples2;
};
* Now allocate the remainders or misfits;
* First generate count of how many remainders there are, and randomly rank them;
#delimit ;
gen residual=obs-multiples2;
* random4/rank4 give the misfits a random order within each stratum;
gen random4=uniform() if group=="";
by strata: egen rank4=rank(random4);
* here is the key step: generate a single random number per strata, to randomly;
* allocate which extra treatments or control conditions this strata gets;
#delimit ;
gen random5=uniform() if rank4==1;
by strata: egen mrandom5=max(random5);
* Now go through and allocate left over units;
#delimit ;
**Strata with 1 residual observation;
* the single misfit gets one of the 6 conditions with equal probability,;
* using its own draw random4 in bins of width 1/6;
replace group="Treatment 1" if residual==1 & random4<.16666667 & random4~=.;
replace group="Treatment 2" if residual==1 & random4>=.16666667 & random4<.33333333;
replace group="Treatment 3" if residual==1 & random4>=.33333333 & random4<.5;
replace group="Treatment 4" if residual==1 & random4>=.5 & random4<.66666667;
replace group="Treatment 5" if residual==1 & random4>=.66666667 & random4<.83333333;
* FIX: was random4>.83333333, which left random4 exactly equal to;
* .83333333 unassigned; >= makes the six bins partition [0,1);
replace group="Control" if residual==1 & random4>=.83333333 & random4~=.;
**Strata with 2 residual observations;
* the pair of misfits gets one of the 15 unordered pairs of conditions,;
* chosen by the stratum-level draw mrandom5 in bins of width 1/15;
replace group="Treatment 1" if residual==2 & rank4==1 & mrandom5<.06666667 & mrandom5~=.;
replace group="Treatment 2" if residual==2 & rank4==2 & mrandom5<.06666667 & mrandom5~=.;
replace group="Treatment 1" if residual==2 & rank4==1 & mrandom5>=.06666667 & mrandom5<.13333333;
replace group="Treatment 3" if residual==2 & rank4==2 & mrandom5>=.06666667 & mrandom5<.13333333;
replace group="Treatment 1" if residual==2 & rank4==1 & mrandom5>=.13333333 & mrandom5<.2;
replace group="Treatment 4" if residual==2 & rank4==2 & mrandom5>=.13333333 & mrandom5<.2;
replace group="Treatment 1" if residual==2 & rank4==1 & mrandom5>=.2 & mrandom5<.26666667;
replace group="Treatment 5" if residual==2 & rank4==2 & mrandom5>=.2 & mrandom5<.26666667;
replace group="Treatment 1" if residual==2 & rank4==1 & mrandom5>=.26666667 & mrandom5<.33333333;
replace group="Control" if residual==2 & rank4==2 & mrandom5>=.26666667 & mrandom5<.33333333;
replace group="Treatment 2" if residual==2 & rank4==1 & mrandom5>=.33333333 & mrandom5<.4;
replace group="Treatment 3" if residual==2 & rank4==2 & mrandom5>=.33333333 & mrandom5<.4;
replace group="Treatment 2" if residual==2 & rank4==1 & mrandom5>=.4 & mrandom5<.46666667;
replace group="Treatment 4" if residual==2 & rank4==2 & mrandom5>=.4 & mrandom5<.46666667;
replace group="Treatment 2" if residual==2 & rank4==1 & mrandom5>=.46666667 & mrandom5<.53333333;
replace group="Treatment 5" if residual==2 & rank4==2 & mrandom5>=.46666667 & mrandom5<.53333333;
replace group="Treatment 2" if residual==2 & rank4==1 & mrandom5>=.53333333 & mrandom5<.6;
replace group="Control" if residual==2 & rank4==2 & mrandom5>=.53333333 & mrandom5<.6;
replace group="Treatment 3" if residual==2 & rank4==1 & mrandom5>=.6 & mrandom5<.66666667;
replace group="Treatment 4" if residual==2 & rank4==2 & mrandom5>=.6 & mrandom5<.66666667;
replace group="Treatment 3" if residual==2 & rank4==1 & mrandom5>=.66666667 & mrandom5<.73333333;
replace group="Treatment 5" if residual==2 & rank4==2 & mrandom5>=.66666667 & mrandom5<.73333333;
replace group="Treatment 3" if residual==2 & rank4==1 & mrandom5>=.73333333 & mrandom5<.8;
replace group="Control" if residual==2 & rank4==2 & mrandom5>=.73333333 & mrandom5<.8;
replace group="Treatment 4" if residual==2 & rank4==1 & mrandom5>=.8 & mrandom5<.86666667;
replace group="Treatment 5" if residual==2 & rank4==2 & mrandom5>=.8 & mrandom5<.86666667;
replace group="Treatment 4" if residual==2 & rank4==1 & mrandom5>=.86666667 & mrandom5<.93333333;
replace group="Control" if residual==2 & rank4==2 & mrandom5>=.86666667 & mrandom5<.93333333;
replace group="Treatment 5" if residual==2 & rank4==1 & mrandom5>=.93333333 & mrandom5~=.;
replace group="Control" if residual==2 & rank4==2 & mrandom5>=.93333333 & mrandom5~=.;
**Strata with 3 residual observations;
* the 3 misfits get one of the 20 unordered triples of conditions, chosen;
* by the stratum-level draw mrandom5 in bins of width .05;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5<.05 & mrandom5~=.;
replace group="Treatment 2" if residual==3 & rank4==2 & mrandom5<.05 & mrandom5~=.;
* FIX: was mrandom5<.5 (typo); the first bin is mrandom5<.05 like the two;
* lines above - the old bound was only masked because later replaces;
* overwrite rank4==3 again for mrandom5 in [.05,.5);
replace group="Treatment 3" if residual==3 & rank4==3 & mrandom5<.05 & mrandom5~=.;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.05 & mrandom5<.1;
replace group="Treatment 2" if residual==3 & rank4==2 & mrandom5>=.05 & mrandom5<.1;
replace group="Treatment 4" if residual==3 & rank4==3 & mrandom5>=.05 & mrandom5<.1;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.1 & mrandom5<.15;
replace group="Treatment 2" if residual==3 & rank4==2 & mrandom5>=.1 & mrandom5<.15;
replace group="Treatment 5" if residual==3 & rank4==3 & mrandom5>=.1 & mrandom5<.15;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.15 & mrandom5<.2;
replace group="Treatment 2" if residual==3 & rank4==2 & mrandom5>=.15 & mrandom5<.2;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.15 & mrandom5<.2;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.2 & mrandom5<.25;
replace group="Treatment 3" if residual==3 & rank4==2 & mrandom5>=.2 & mrandom5<.25;
replace group="Treatment 4" if residual==3 & rank4==3 & mrandom5>=.2 & mrandom5<.25;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.25 & mrandom5<.3;
replace group="Treatment 3" if residual==3 & rank4==2 & mrandom5>=.25 & mrandom5<.3;
replace group="Treatment 5" if residual==3 & rank4==3 & mrandom5>=.25 & mrandom5<.3;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.3 & mrandom5<.35;
replace group="Treatment 3" if residual==3 & rank4==2 & mrandom5>=.3 & mrandom5<.35;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.3 & mrandom5<.35;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.35 & mrandom5<.4;
replace group="Treatment 4" if residual==3 & rank4==2 & mrandom5>=.35 & mrandom5<.4;
replace group="Treatment 5" if residual==3 & rank4==3 & mrandom5>=.35 & mrandom5<.4;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.4 & mrandom5<.45;
replace group="Treatment 4" if residual==3 & rank4==2 & mrandom5>=.4 & mrandom5<.45;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.4 & mrandom5<.45;
replace group="Treatment 2" if residual==3 & rank4==1 & mrandom5>=.45 & mrandom5<.5;
replace group="Treatment 3" if residual==3 & rank4==2 & mrandom5>=.45 & mrandom5<.5;
replace group="Treatment 4" if residual==3 & rank4==3 & mrandom5>=.45 & mrandom5<.5;
replace group="Treatment 2" if residual==3 & rank4==1 & mrandom5>=.5 & mrandom5<.55;
replace group="Treatment 3" if residual==3 & rank4==2 & mrandom5>=.5 & mrandom5<.55;
replace group="Treatment 5" if residual==3 & rank4==3 & mrandom5>=.5 & mrandom5<.55;
replace group="Treatment 2" if residual==3 & rank4==1 & mrandom5>=.55 & mrandom5<.6;
replace group="Treatment 3" if residual==3 & rank4==2 & mrandom5>=.55 & mrandom5<.6;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.55 & mrandom5<.6;
replace group="Treatment 2" if residual==3 & rank4==1 & mrandom5>=.6 & mrandom5<.65;
replace group="Treatment 4" if residual==3 & rank4==2 & mrandom5>=.6 & mrandom5<.65;
replace group="Treatment 5" if residual==3 & rank4==3 & mrandom5>=.6 & mrandom5<.65;
replace group="Treatment 2" if residual==3 & rank4==1 & mrandom5>=.65 & mrandom5<.7;
replace group="Treatment 4" if residual==3 & rank4==2 & mrandom5>=.65 & mrandom5<.7;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.65 & mrandom5<.7;
replace group="Treatment 3" if residual==3 & rank4==1 & mrandom5>=.7 & mrandom5<.75;
replace group="Treatment 4" if residual==3 & rank4==2 & mrandom5>=.7 & mrandom5<.75;
replace group="Treatment 5" if residual==3 & rank4==3 & mrandom5>=.7 & mrandom5<.75;
replace group="Treatment 3" if residual==3 & rank4==1 & mrandom5>=.75 & mrandom5<.8;
replace group="Treatment 4" if residual==3 & rank4==2 & mrandom5>=.75 & mrandom5<.8;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.75 & mrandom5<.8;
replace group="Treatment 3" if residual==3 & rank4==1 & mrandom5>=.8 & mrandom5<.85;
replace group="Treatment 5" if residual==3 & rank4==2 & mrandom5>=.8 & mrandom5<.85;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.8 & mrandom5<.85;
replace group="Treatment 4" if residual==3 & rank4==1 & mrandom5>=.85 & mrandom5<.9;
replace group="Treatment 5" if residual==3 & rank4==2 & mrandom5>=.85 & mrandom5<.9;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.85 & mrandom5<.9;
replace group="Treatment 1" if residual==3 & rank4==1 & mrandom5>=.9 & mrandom5<.95;
replace group="Treatment 5" if residual==3 & rank4==2 & mrandom5>=.9 & mrandom5<.95;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.9 & mrandom5<.95;
replace group="Treatment 2" if residual==3 & rank4==1 & mrandom5>=.95 & mrandom5~=.;
replace group="Treatment 5" if residual==3 & rank4==2 & mrandom5>=.95 & mrandom5~=.;
replace group="Control" if residual==3 & rank4==3 & mrandom5>=.95 & mrandom5~=.;
**Strata with 4 residual observations;
* the 4 misfits get one of the 15 unordered 4-subsets of the 6 conditions,;
* chosen by the stratum-level draw mrandom5 in bins of width 1/15;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5<.06666667 & mrandom5~=.;
replace group="Treatment 2" if residual==4 & rank4==2 & mrandom5<.06666667 & mrandom5~=.;
replace group="Treatment 3" if residual==4 & rank4==3 & mrandom5<.06666667 & mrandom5~=.;
replace group="Treatment 4" if residual==4 & rank4==4 & mrandom5<.06666667 & mrandom5~=.;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.06666667 & mrandom5<.13333333;
replace group="Treatment 2" if residual==4 & rank4==2 & mrandom5>=.06666667 & mrandom5<.13333333;
replace group="Treatment 3" if residual==4 & rank4==3 & mrandom5>=.06666667 & mrandom5<.13333333;
replace group="Treatment 5" if residual==4 & rank4==4 & mrandom5>=.06666667 & mrandom5<.13333333;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.13333333 & mrandom5<.2;
replace group="Treatment 2" if residual==4 & rank4==2 & mrandom5>=.13333333 & mrandom5<.2;
replace group="Treatment 3" if residual==4 & rank4==3 & mrandom5>=.13333333 & mrandom5<.2;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.13333333 & mrandom5<.2;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.2 & mrandom5<.26666667;
replace group="Treatment 2" if residual==4 & rank4==2 & mrandom5>=.2 & mrandom5<.26666667;
replace group="Treatment 3" if residual==4 & rank4==3 & mrandom5>=.2 & mrandom5<.26666667;
replace group="Treatment 5" if residual==4 & rank4==4 & mrandom5>=.2 & mrandom5<.26666667;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.26666667 & mrandom5<.33333333;
replace group="Treatment 2" if residual==4 & rank4==2 & mrandom5>=.26666667 & mrandom5<.33333333;
replace group="Treatment 4" if residual==4 & rank4==3 & mrandom5>=.26666667 & mrandom5<.33333333;
replace group="Treatment 5" if residual==4 & rank4==4 & mrandom5>=.26666667 & mrandom5<.33333333;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.33333333 & mrandom5<.4;
replace group="Treatment 2" if residual==4 & rank4==2 & mrandom5>=.33333333 & mrandom5<.4;
replace group="Treatment 4" if residual==4 & rank4==3 & mrandom5>=.33333333 & mrandom5<.4;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.33333333 & mrandom5<.4;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.4 & mrandom5<.46666667;
replace group="Treatment 3" if residual==4 & rank4==2 & mrandom5>=.4 & mrandom5<.46666667;
replace group="Treatment 4" if residual==4 & rank4==3 & mrandom5>=.4 & mrandom5<.46666667;
replace group="Treatment 5" if residual==4 & rank4==4 & mrandom5>=.4 & mrandom5<.46666667;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.46666667 & mrandom5<.53333333;
replace group="Treatment 3" if residual==4 & rank4==2 & mrandom5>=.46666667 & mrandom5<.53333333;
replace group="Treatment 4" if residual==4 & rank4==3 & mrandom5>=.46666667 & mrandom5<.53333333;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.46666667 & mrandom5<.53333333;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.53333333 & mrandom5<.6;
replace group="Treatment 4" if residual==4 & rank4==2 & mrandom5>=.53333333 & mrandom5<.6;
replace group="Treatment 5" if residual==4 & rank4==3 & mrandom5>=.53333333 & mrandom5<.6;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.53333333 & mrandom5<.6;
replace group="Treatment 1" if residual==4 & rank4==1 & mrandom5>=.6 & mrandom5<.66666667;
replace group="Treatment 2" if residual==4 & rank4==2 & mrandom5>=.6 & mrandom5<.66666667;
replace group="Treatment 5" if residual==4 & rank4==3 & mrandom5>=.6 & mrandom5<.66666667;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.6 & mrandom5<.66666667;
replace group="Treatment 2" if residual==4 & rank4==1 & mrandom5>=.66666667 & mrandom5<.73333333;
replace group="Treatment 3" if residual==4 & rank4==2 & mrandom5>=.66666667 & mrandom5<.73333333;
replace group="Treatment 4" if residual==4 & rank4==3 & mrandom5>=.66666667 & mrandom5<.73333333;
replace group="Treatment 5" if residual==4 & rank4==4 & mrandom5>=.66666667 & mrandom5<.73333333;
replace group="Treatment 2" if residual==4 & rank4==1 & mrandom5>=.73333333 & mrandom5<.8;
replace group="Treatment 3" if residual==4 & rank4==2 & mrandom5>=.73333333 & mrandom5<.8;
replace group="Treatment 4" if residual==4 & rank4==3 & mrandom5>=.73333333 & mrandom5<.8;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.73333333 & mrandom5<.8;
replace group="Treatment 2" if residual==4 & rank4==1 & mrandom5>=.8 & mrandom5<.86666667;
replace group="Treatment 3" if residual==4 & rank4==2 & mrandom5>=.8 & mrandom5<.86666667;
replace group="Treatment 5" if residual==4 & rank4==3 & mrandom5>=.8 & mrandom5<.86666667;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.8 & mrandom5<.86666667;
replace group="Treatment 3" if residual==4 & rank4==1 & mrandom5>=.86666667 & mrandom5<.93333333;
replace group="Treatment 4" if residual==4 & rank4==2 & mrandom5>=.86666667 & mrandom5<.93333333;
replace group="Treatment 5" if residual==4 & rank4==3 & mrandom5>=.86666667 & mrandom5<.93333333;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.86666667 & mrandom5<.93333333;
replace group="Treatment 2" if residual==4 & rank4==1 & mrandom5>=.93333333 & mrandom5~=.;
replace group="Treatment 4" if residual==4 & rank4==2 & mrandom5>=.93333333 & mrandom5~=.;
replace group="Treatment 5" if residual==4 & rank4==3 & mrandom5>=.93333333 & mrandom5~=.;
replace group="Control" if residual==4 & rank4==4 & mrandom5>=.93333333 & mrandom5~=.;
**Strata with 5 residual observations;
* the 5 misfits get one of the 6 possible 5-subsets of the conditions,;
* chosen by the stratum-level draw mrandom5 in bins of width 1/6;
replace group="Treatment 1" if residual==5 & rank4==1 & mrandom5<.16666667 & mrandom5~=.;
replace group="Treatment 2" if residual==5 & rank4==2 & mrandom5<.16666667 & mrandom5~=.;
replace group="Treatment 3" if residual==5 & rank4==3 & mrandom5<.16666667 & mrandom5~=.;
replace group="Treatment 4" if residual==5 & rank4==4 & mrandom5<.16666667 & mrandom5~=.;
replace group="Treatment 5" if residual==5 & rank4==5 & mrandom5<.16666667 & mrandom5~=.;
replace group="Treatment 1" if residual==5 & rank4==1 & mrandom5>=.16666667 & mrandom5<.33333333;
replace group="Treatment 2" if residual==5 & rank4==2 & mrandom5>=.16666667 & mrandom5<.33333333;
replace group="Treatment 3" if residual==5 & rank4==3 & mrandom5>=.16666667 & mrandom5<.33333333;
replace group="Treatment 4" if residual==5 & rank4==4 & mrandom5>=.16666667 & mrandom5<.33333333;
replace group="Control" if residual==5 & rank4==5 & mrandom5>=.16666667 & mrandom5<.33333333;
replace group="Treatment 1" if residual==5 & rank4==1 & mrandom5>=.33333333 & mrandom5<.5;
replace group="Treatment 2" if residual==5 & rank4==2 & mrandom5>=.33333333 & mrandom5<.5;
replace group="Treatment 3" if residual==5 & rank4==3 & mrandom5>=.33333333 & mrandom5<.5;
replace group="Treatment 5" if residual==5 & rank4==4 & mrandom5>=.33333333 & mrandom5<.5;
replace group="Control" if residual==5 & rank4==5 & mrandom5>=.33333333 & mrandom5<.5;
replace group="Treatment 1" if residual==5 & rank4==1 & mrandom5>=.5 & mrandom5<.66666667;
replace group="Treatment 2" if residual==5 & rank4==2 & mrandom5>=.5 & mrandom5<.66666667;
replace group="Treatment 4" if residual==5 & rank4==3 & mrandom5>=.5 & mrandom5<.66666667;
replace group="Treatment 5" if residual==5 & rank4==4 & mrandom5>=.5 & mrandom5<.66666667;
* FIX: upper bound was .6666666 (missing a digit), which left rank4==5;
* units with mrandom5 in [.6666666,.66666667) unassigned (group=="");
replace group="Control" if residual==5 & rank4==5 & mrandom5>=.5 & mrandom5<.66666667;
replace group="Treatment 1" if residual==5 & rank4==1 & mrandom5>=.66666667 & mrandom5<.83333333;
replace group="Treatment 3" if residual==5 & rank4==2 & mrandom5>=.66666667 & mrandom5<.83333333;
replace group="Treatment 4" if residual==5 & rank4==3 & mrandom5>=.66666667 & mrandom5<.83333333;
replace group="Treatment 5" if residual==5 & rank4==4 & mrandom5>=.66666667 & mrandom5<.83333333;
replace group="Control" if residual==5 & rank4==5 & mrandom5>=.66666667 & mrandom5<.83333333;
* FIX: the last bin used mrandom5>.83333333; use >= so the value;
* .83333333 itself is not left unassigned;
replace group="Treatment 2" if residual==5 & rank4==1 & mrandom5>=.83333333 & mrandom5~=.;
replace group="Treatment 3" if residual==5 & rank4==2 & mrandom5>=.83333333 & mrandom5~=.;
replace group="Treatment 4" if residual==5 & rank4==3 & mrandom5>=.83333333 & mrandom5~=.;
replace group="Treatment 5" if residual==5 & rank4==4 & mrandom5>=.83333333 & mrandom5~=.;
replace group="Control" if residual==5 & rank4==5 & mrandom5>=.83333333 & mrandom5~=.;
*** Let's look, did it give us good balance?;
* Overall numbers allocated to each group;
tab group;
* Number in each strata allocated to each group;
tab strata group;
//EDIT: cleanup;
* encode the string group as numeric treatment_mb and drop all;
* intermediate randomization variables, then compact storage types;
egen int treatment_mb = group(group);
drop obs random2 rank2 group multiples multiples2 residual random4 rank4 random5 mrandom5;
qui compress;
|
* Test harness: compare -assign_treatment- misfit-handling strategies
* against the Bruhn-McKenzie (mb) example allocation on the same data.
sysdir set PERSONAL "../../"
global S_ADO "PERSONAL;BASE;."
clear_all, closealllogs
log using "test.log", replace name(test)
version 13
local init_seed 467 //from exampleofstratifying
set seed `init_seed'
local ngroups 6
local strat_vars = "variableA variableB variableC variableD"
local ttypes = "obalance reduction full_obalance full missing"
*setup data
use BlogStrataExample.dta, clear
* coarser (intermediate) stratification used in the balance checks below
egen byte int_strat = group(variableC variableD)
qui do exampleofstratifying.do //does bm treatment
local all_ttypes = "`ttypes' mb"
*generate assignments
* same seed for every method so differences come from the method alone
foreach t in `ttypes'{
set seed `init_seed'
assign_treatment `strat_vars', generate(treatment_`t') num_treatments(`ngroups') handle_misfit(`t')
}
*All achieve cell-level balance (a must)
* cell_count_diff_per_t returns in r(max) the largest difference in cell
* counts across treatment arms; <=1 means balanced up to integer rounding
foreach t in `all_ttypes'{
*tab strata treatment_`t'
cell_count_diff_per_t strata treatment_`t'
_assert r(max)<=1, msg("t=`t' failed cell-level balance")
}
*Balance at intermediate levels
foreach t in `all_ttypes'{
*tab int_strat treatment_`t'
cell_count_diff_per_t int_strat treatment_`t'
if "`t'"!="missing" di "Intermediate diff=" r(max) " for t=`t'"
* NOTE(review): inlist() with a single value is just string equality -
* confirm whether more ttypes were meant to be asserted here
if inlist("`t'","missing") _assert r(max)<=1, msg("t=`t' failed intermediate-level balance")
}
*Balance overall (could be off by as much as num_cells)
foreach t in `all_ttypes'{
*tab treatment_`t'
cell_count_diff_per_t treatment_`t'
if "`t'"!="missing" di "Overall diff=" r(max) " for t=`t'"
if inlist("`t'","obalance","missing") _assert r(max)<=1, msg("t=`t' failed overall balance")
}
**************** Distribution tests ***********************
* Re-run the randomization num_reps times and record, for each method,
* the max cell-count difference overall (diff_*) and at the intermediate
* stratification level (diff_int_*); then compare distributions.
local ttypes2 = "obalance reduction full_obalance full"
local all_ttypes2 = "`ttypes2' mb"
local num_reps 1000
tempfile rfile
postfile rep_file int(diff_mb diff_obalance diff_reduction diff_full_obalance diff_full diff_int_mb diff_int_obalance diff_int_reduction diff_int_full_obalance diff_int_full) using `rfile', replace
forval i=1/`num_reps'{
capture noisily print_dots `i' `num_reps'
* reset to the raw variables, then redo both the mb example assignment
* and the package assignments with fresh randomness
keep obsno variable* int_strat
qui do exampleofstratifying.do
foreach t in `ttypes2'{
assign_treatment `strat_vars', generate(treatment_`t') num_treatments(`ngroups') handle_misfit(`t')
}
foreach t in `all_ttypes2'{
cell_count_diff_per_t treatment_`t'
local diff_`t' = r(max)
cell_count_diff_per_t int_strat treatment_`t'
local diff_int_`t' = r(max)
}
post rep_file (`diff_mb') (`diff_obalance') (`diff_reduction') (`diff_full_obalance') (`diff_full') ///
(`diff_int_mb') (`diff_int_obalance') (`diff_int_reduction') (`diff_int_full_obalance') (`diff_int_full')
}
postclose rep_file
*The MB and Full method effectively give the same distribution for this summary stat.
use `rfile', clear
*The 'obalance', 'reduction', and 'full_obalance' methods are the only ones that keep overall balance
*The 'reduction' is the only that does well at intermediate levels of stratification.
summ
*Look at the summary statistics to show that mb==full
* reshape to long and run a two-sample Kolmogorov-Smirnov test of
* H0: diff_mb and diff_full come from the same distribution
rename (diff_mb diff_full) (diff1 diff2)
keep diff1 diff2
gen int id = _n
reshape long diff, i(id) j(t)
ksmirnov diff , by(t)
local pval = r(p_cor)
di "P-value of test of H0 that the distributions are equal: `pval'"
* fail the test run if the distributions look different at the 10% level
assert `pval'>0.1
/* Old testing code for mata utils
impossible = (1,1 \ 2,2 \ 3,1 \ 4,2 \ 5,1)
possible = (1,1 \ 2,2 \ 3,1 \ 4,2)
full_obalance(2, impossible)
full_obalance(2, possible)
full_obalance(2, possible)
full_obalance(2, possible)
full_obalance(2, possible)
*/
log close test
|
*Notes:
* If you want the trace log from the optimizer from synth run it in batch mode and on Windows it
* creates a 'temp' file.
*Todo:
*On linux, -synth, nested- it eats up all the memory. This didn't seem to be reported by -memory-.
* Test harness for synth vs synth2: set up a sandboxed ado directory,
* install the packages on first run, and patch timers around the plugin
* calls so the speed tests below can separate plugin time from total time.
cap log close _all
log using "test.internal.log", replace name(test)
clear all
sysdir set PERSONAL "`c(pwd)'/ado"
sysdir set PLUS "`c(pwd)'/ado"
global S_ADO "PERSONAL;BASE"
net set ado PLUS
net set other PLUS
set more off
* first run only: install synth/synth2 and instrument their ado files
cap confirm file ado/s/synth.ado
if _rc!=0{
mkdir ado
ssc install synth, all replace
net install synth2, from(`c(pwd)'/../../s) all force
local lcl_repo "`c(pwd)'/../.."
net install b_file_ops, from(`lcl_repo'/b) all
* wrap the optimizer plugin call in timer 1 (see speed test below)
find_in_file using ado/s/synth.ado , regexp("qui plugin call synthopt") local(pluginline)
change_line using ado/s/synth.ado , ln(`pluginline') insert(" timer on 1")
change_line using ado/s/synth.ado , ln(`=`pluginline'+2') insert(" timer off 1")
find_in_file using ado/s/synth2.ado, regexp("cap plugin call synth2opt") local(pluginline)
change_line using ado/s/synth2.ado, ln(`pluginline') insert(" timer on 1")
change_line using ado/s/synth2.ado, ln(`=`pluginline'+2') insert(" timer off 1")
}
else{
*adoupdate synth synth2, update //TODO: re-enable when done with testing
}
mata: mata mlib index
version 13
local init_seed 1234567890
set seed `init_seed' //though I don't think I'm randomizing
* Test basic plugin behavior. Remember H has to be symmetric
* (disabled) solve a tiny QP directly through the synth2opt plugin and
* check the solution hits the expected corner of the box constraints
if 0{
scalar n = 3
scalar m = 2
mat c = (1 \ 0 \ 0)
mat H = (0,0,0 \ 0,1,1 \ 0,1,1)
mat A = (1,0,1 \ 0,1,1) //m.n
mat b = (1.5 \ 1)
mat l = J(n,1,-2)
mat u = J(n,1,3)
mat wsol = J(n,1,.)
cap program synth2opt, plugin
plugin call synth2opt , c H A b l u 10 0.005 20 12 wsol
assert wsol[1,1]+1.5<0.001 & wsol[2,1]+2<0.001 & wsol[3,1]-3<0.001
}
*Test basic equality between synth and synth2
* run the same specification through both commands and require the
* resulting W_weights to agree (column sums of the difference ~ 0)
if 1{
sysuse smoking, clear
tsset state year
profiler on
qui synth cigsale beer(1984(1)1988) lnincome retprice age15to24 cigsale(1988 1980 1975), trunit(3) trperiod(1989)
profiler off
mat w = e(W_weights)
profiler report
profiler clear
profiler on
qui synth2 cigsale beer(1984(1)1988) lnincome retprice age15to24 cigsale(1988 1980 1975), trunit(3) trperiod(1989)
profiler off
profiler report
profiler clear
mat w2 = e(W_weights)
mat diff = w-w2
mata: assert(colsum(st_matrix("diff"))[1,2]<0.01)
}
*Test correction of error that synth can give
* (disabled) regression test for a case where -synth- returns all-missing
* weights; synth2 should produce non-missing weights on the same input
if 0{
*The below command with -synth- on Windows gives all . for weights.
* The optimizer matches on football airport and then dies
use syntherror12.dta, clear
local curr_eval 152
*original error report command
local predictors "dmlnpoptot(1) dmlnpoptot(3) dmlnpoptot(5) dmlnpoptot(7) football airport"
local mspeperiod "1(1)7"
*more minimal command that still errors
local predictors "dmlnpoptot(1) football airport"
local mspeperiod "4 7"
synth2 dmlnpoptot `predictors', trunit(`curr_eval') mspeperiod(`mspeperiod') resultsperiod(8(1)15) trperiod(8) skipchecks
mat x = e(W_weights_unr)
assert x[1,2]!=.
}
*Speed test synth vs synth2
* Build an enlarged panel by stacking `N_waves' perturbed copies of the smoking
* data, then time synth2 vs synth on the same command. Timer 1 (patched into
* the ado-files above) isolates plugin time; timer 2 measures the whole call.
* Disabled (if 0) by default.
if 0{
sysuse smoking, clear
drop if lnincome==.
* Detach the value label so stacked copies don't all display original state names
label values state
tsset state year
* NOTE(review): -set matsize- is obsolete/ignored in modern Stata (15+)
set matsize 9000
tempfile smoke
save `smoke'
gen int wave=1
local N_waves=20
forval i=2/`N_waves'{
qui append using `smoke'
qui recode wave (.=`i')
*make them not colinear
qui replace cigsale=cigsale*1.01*`i' if wave==`i'
qui replace beer=beer*1.02*`i' if wave==`i'
qui replace lnincome=lnincome*1.03*`i' if wave==`i'
qui replace retprice=retprice*1.04*`i' if wave==`i'
qui replace age15to24=age15to24*1.05*`i' if wave==`i'
}
* One panel id per (state, wave) copy
egen long new_id = group(state wave)
tsset new_id year
local N=1
*even took out some averaging becausing synth is probably slower at that.
local synth_command "cigsale beer(1988) lnincome(1984) retprice(1984) age15to24(1984) cigsale(1984) cigsale(1985) cigsale(1986) cigsale(1987) cigsale(1988), trunit(3) trperiod(1989)"
timer on 2
forval i=1/`N'{
qui synth2 `synth_command'
}
timer off 2
di "timer1 is plugin, timer2 is total"
timer list
timer clear
timer on 2
forval i=1/`N'{
qui synth `synth_command'
}
timer off 2
timer list
timer clear
}
* Two-pass plugin exercise on a tiny synthetic donor pool:
* pass 1 solves the usual synth QP (match treated unit's covariates);
* pass 2 minimizes the spread (w'w) subject to exactly matching the data.
* Disabled (if 0) by default.
if 0{
cap noisily program synth2opt, plugin
mat co_data = (-1, 1 \ .1, 1 \ 1, 1 \ -.1, -1)'
local cono = colsof(co_data)
mat tr_data = (0,0)'
mat u = J(`cono',1,1)
mat l = J(`cono',1,0)
*Orig pass
mat V = I(2)
mat c1 = (-1 * ((tr_data)' * V * co_data))'
mat H1 = (co_data)' * V * co_data
mat A1 = J(1,`cono',1)
mat b1 = (1)
mat wsol = J(`cono',1,.)
plugin call synth2opt , c1 H1 A1 b1 l u 10 0.005 20 12 wsol
mat li wsol
*Spread pass
* Identity H and zero c => minimize ||w||^2; equality rows force sum(w)=1
* and exact covariate matching of the (zero) treated unit.
mat c2 = J(`cono',1,0)
mat H2 = I(`cono')
mat A2 = J(1,`cono',1) \ co_data[1,1...] \ co_data[2,1...]
mat b2 = 1 \ tr_data[1,1...] \ tr_data[2,1...]
mat wsol = J(`cono',1,.)
plugin call synth2opt , c2 H2 A2 b2 l u 10 0.005 20 12 wsol
mat li wsol
}
* Run synth2 with the spread option for every state as placebo-treated,
* collect e(spread_diff_m) per state, and plot its distribution.
* Disabled (if 0) by default.
if 0{
sysuse smoking, clear
drop if lnincome==.
tsset state year
qui levelsof state, local(state_ids)
cap mat drop spreads w_2
foreach state_id of local state_ids{
qui synth2 cigsale /*cigsale(1980) cigsale(1981) cigsale(1982) cigsale(1983)*/ cigsale(1984) ///
	cigsale(1985) cigsale(1986) cigsale(1987) cigsale(1988), ///
	trunit(`state_id') trperiod(1989) mspeperiod(1984(1)1988) spread spread_limit(.1)
* Accumulate (state id, spread) rows; nullmat handles the first iteration
mat spreads = nullmat(spreads) \ ( `state_id', `e(spread_diff_m)')
}
drop _all
* svmat creates spreads1/spreads2; rename to meaningful names
svmat spreads
rename s* (id spread)
sort spread
kdensity spread
}
* Re-run synth2 for the single state with the smallest spread found above.
* Disabled (if 0) by default.
if 0{
sysuse smoking, clear
drop if lnincome==.
tsset state year
local trunit 18 //from the min spread from above
synth2 cigsale cigsale(1983) cigsale(1984) cigsale(1985) cigsale(1986) cigsale(1987) ///
	cigsale(1988), trunit(`trunit') /*mspeperiod(1983(1)1988)*/ trperiod(1989) /*spread spread_limit(0.005)*/
}
*clear all //needed to copy over the plugin file (release the file lock)
* Close the named log opened before this chunk (log "test")
log close test
|
*! version 1.0 Brian Quistorff <bquistorff@gmail.com>
*! usepackage_simple.ado -- Stata module to download and install user packages necessary to run a do-file
*! include the net(loc) option if you want to install the package from non-SSC sources (the default)
*! A simplified version of usepackage.ado
*! Usage:
*! . usepackage_simple parallel
*! . usepackage_simple parallel, location(https://raw.githubusercontent.com/gvegayon/parallel/master/)
*This is not simple, as the output of neither -ado dir- nor -ado describe- can be used programmatically
*Don't use which as sometimes functions don't match to packages
*Return code is like that of findfile (0 if found, 601 otherwise)
program define findpackage_simple
	// Check whether user-package `pkg' is findable on the adopath by probing,
	// in turn, its ado-file, its conventional Mata library (l<pkg>.mlib),
	// and its help file (.hlp then .sthlp).
	// Returns 0 if any candidate file is found, 601 (findfile's code) otherwise.
	args pkg
	version 9.2
	qui cap findfile `pkg'.ado
	if _rc qui cap findfile l`pkg'.mlib
	if _rc qui cap findfile `pkg'.hlp
	if _rc qui cap findfile `pkg'.sthlp
	// Fix: propagate the result. A program that simply ends returns 0
	// regardless of the _rc left by the capture-d commands above, so the
	// caller's `if _rc==0` test was always true and missing packages were
	// never detected. -exit _rc- makes the documented contract hold.
	exit _rc
end
* Install each package named in `anything' unless it is already findable on
* the adopath. Without location() packages come from SSC; with location()
* they are fetched via -net install- from that URL/path.
program define usepackage_simple
syntax anything [, location(string) ]
version 9.2
foreach _f in `anything' {
**check to see if exists:
* Relies on findpackage_simple returning findfile's code (0 if found)
findpackage_simple `_f'
if _rc==0 {
continue
}
**doesnt exist, first try SSC:
if "`location'"=="" {
qui cap ssc install `_f'
* NOTE(review): "di in yellow as smcl" mixes the old -in- and new -as-
* display directives; confirm this renders as intended on target versions
if !_rc {
di in yellow as smcl `" Package {stata ado describe `_f': `_f'} installed from SSC"'
}
else{
di in red as smcl `" Package {stata ado describe `_f': `_f'} NOT installed from SSC"'
}
}
else{
qui cap net install `_f', from("`location'")
if !_rc {
di in yellow as smcl `" Package {stata ado describe `_f': `_f'} installed from net"'
}
else{
di in red as smcl `" Package {stata ado describe `_f': `_f'} NOT installed from net"'
}
}
}
*di in yellow `"Done"'
end
|
*! version 1.2 Brian Quistorff <bquistorff@gmail.com>
*! Auto wraps a long line to broken up ones (that graph commands turn into different lines)
*! Wrapped has the "" quotes to separate lines
*! Usage:
*! wrap_text , unwrappedtext(`longtext') wrapped_out_loc(wrapped)
*! twoway ..., note(`wrapped')
* With simple testing 100 chars is about the width of a note in twoway at "normal sizes".
* Wrap `unwrappedtext' into a sequence of quoted lines no longer than width()
* characters each (graph commands render each quoted piece on its own line).
* The result is returned into the caller's local named by wrapped_out_loc().
program wrap_text
version 12
syntax , unwrappedtext(string asis) wrapped_out_loc(string) [width(integer 100)]
*di `"input: `unwrappedtext'"'
*get rid of outer quotes of only one set
local unwrappedtext = trim(`"`unwrappedtext'"')
* Normalize to a quoted list so foreach below iterates over each existing line
if substr(`"`unwrappedtext'"',1,1)!=`"""' local unwrappedtext `""`unwrappedtext'""'
*di `"std: `unwrappedtext'"'
*if first char is not ", then wrap
foreach oline in `unwrappedtext'{
*di `"line: `oline'"'
local num_words : word count `oline'
* An empty line has zero words; emit it as-is so blank lines survive
if `num_words'==0 local wrappedtext `"`wrappedtext'`space'"`oline'""' //pass whitespace through
forval i = 1/`num_words' {
* ":piece" yields successive chunks of at most `width' chars, breaking
* at word boundaries; an empty piece means the text is exhausted
local line : piece `i' `width' of "`oline'"
if "`line'"==""{
continue, break
}
local wrappedtext `"`wrappedtext'`space'"`line'""'
local space " "
}
}
*di `"output: `wrappedtext'"'
* Hand the wrapped, quote-delimited text back to the caller's local
c_local `wrapped_out_loc' `"`wrappedtext'"'
end
|
*! version 0.3 bquistorff@gmail.com
*! Writes out a string to a file
*! LaTeX automatically adds an extra space after \input (cf http://tex.stackexchange.com/questions/18017/)
*! Use rm_final_space_tex if you don't want this
*! (e.g. you're inserting numbers and a symbol should immediately follow)
program writeout_txt
	// Write `towrite' to `filename', replacing any existing file. With
	// format(), `towrite' is treated as a numeric expression and formatted
	// via string(). With rm_final_space_tex, a trailing "%" is appended so
	// LaTeX \input does not add a spurious space after the value.
	version 11 //guess
	syntax anything(name=towrite equalok everything), filename(string) [rm_final_space_tex format(string)]
	local suffix ""
	if "`rm_final_space_tex'" != "" local suffix "%"
	if "`format'" != "" local towrite = string(`towrite', "`format'")
	tempname out
	file open `out' using `"`filename'"', write text replace
	file write `out' `"`towrite'`suffix'"'
	file close `out'
end
|
*===============================================================================
* PROGRAM: regressby.ado
* PURPOSE: Performs fast grouped univariate OLS regressions.
* The following commands are equivalent:
* regressby y x, by(byvars)
* statsby, by(byvars) clear: reg y x
* Except regressby will run 10-100x faster.
* Also computes standard errors in a variety of flavors: usual
* asymptotic standard errors, robust standard errors, and clustered
* standard errors.
* AUTHORS: Michael Stepner, Michael Droste, Wilbur Townsend
*===============================================================================
*-------------------------------------------------------------------------------
* Stata wrapper
*-------------------------------------------------------------------------------
* regressby: fast grouped OLS. Destroys the data in memory and replaces it
* with one row per by-group containing _b_*, _se_*, _cov_*_* and N, unless
* save() is specified (then the results are written to disk and the original
* data restored). vce() accepts robust or cluster <numeric clustervar>.
program define regressby
version 12.0
syntax varlist(min=2 numeric) [aweight], by(varlist) [vce(string) covs save(string)]
* Preserve dataset in case we crash
preserve
* Restrict sample with if/in conditions
* NOTE(review): -syntax- above declares no [if] [in], so marksample's `if'/`in'
* locals are empty here and this marks (nearly) the whole sample — confirm.
marksample touse, strok novarlist
qui drop if `touse'==0
* Parse VCE option, if specified
if `"`vce'"' != "" {
my_vce_parse , vce(`vce')
local vcetype "robust"
local clusterby "`r(clustervar)'"
* robust is set whenever vce() was given, then cleared again if clustering
if "`vcetype'"=="robust" local robust "robust"
if "`clusterby'"!="" local robust = ""
}
* Check to make sure save data file path is valid
* NOTE(review): `replace' and `savegraph' are never defined by -syntax- above
* (there is no replace option, and the option is save(), not savegraph()),
* so this whole check is dead code; presumably `"`save'"' was intended.
if ("`replace'"=="") & (`"`savegraph'"'!="") {
if regexm(`"`savegraph'"',"\.[a-zA-Z0-9]+$") confirm new file `"`save'"'
else confirm new file `"`save'.dta"'
}
* Error checking: can't specify both robust and clusterby
if "`robust'"!="" & "`clusterby'"!="" {
di as error "Error: can't specify both clustered and robust standard errors at once! Choose one."
exit
}
* Display type of standard error chosen
if "`robust'"=="" & "`clusterby'"=="" {
di "Running regressby with normal OLS standard errors."
}
if "`robust'"!="" {
di "Running regressby with robust standard errors."
}
if "`clusterby'"!="" {
di "Running regressby with cluster-robust standard errors (clustered by `clusterby')."
}
* Construct analytical weight variable
if ("`weight'"!="") {
local wt [`weight'`exp']
tempvar tmpwt
* `exp' already contains "= <expression>" as parsed by -syntax-
gen `tmpwt' `exp'
local weightby `tmpwt'
di "Using analytical weights, weight `exp'."
}
* Display weighting scheme, if applicable
* WLS via transformation: multiply y and X by sqrt(w) (the weight variable
* itself becomes the sqrt-weight "constant" column in the Mata code below)
if "`weightby'"!="" {
foreach v in `varlist' {
qui replace `v' = `v' * sqrt(`weightby')
}
qui replace `weightby' = sqrt(`weightby')
}
* Convert string by-vars to temporary numeric variables
foreach var of varlist `by' {
cap confirm numeric variable `var', exact
if _rc==0 { // numeric var
local bynumeric `bynumeric' `var'
}
else { // string var
tempvar `var'N
encode `var', gen(``var'N')
local bynumeric `bynumeric' ``var'N'
local bystr `bystr' `var' // list of string by-vars
}
}
* Sort using by-groups
* Mata code below requires the group variable to be in ascending order
sort `by' `clusterby'
* Generate a single by-variable counting by groups
tempvar grp
egen `grp'=group(`bynumeric')
qui drop if mi(`grp')
* Drop observations missing independent or dependent variables
* Also count number of variables here including constant, awkward and should be replaced
local num_x = 0
foreach v in `varlist'{
qui drop if mi(`v')
local num_x = `num_x' + 1
}
local num_x = `num_x' - 1
* NOTE(review): `nocons' is not a declared option, so this always increments
if "`nocons'"=="" local num_x = `num_x' + 1
* Drop observations missing weight, if weights are specified
if "`weightby'"!="" {
drop if `weightby'==.
}
* XX revisit this later to handle missing data
* Perform regressions on each by-group, store in dataset
mata: _regressby("`varlist'", "`grp'", "`bynumeric'","`clusterby'","`robust'","`weightby'")
* Convert string by-vars back to strings, from numeric
foreach var in `bystr' {
decode ``var'N', gen(`var')
}
order `by'
* XX find out if it is faster to compute R2 in Mata or Stata
* NOTE(review): `nocov' is not a declared option (syntax declares covs), so
* the _cov_* variables are never dropped — confirm intended behavior.
if "`nocov'"!="" {
cap drop _cov_*
}
* XX optionally save out to dta and just restore with a message
if "`save'"=="" {
restore, not
}
if "`save'"!="" {
save `save', replace
restore
}
end
*-------------------------------------------------------------------------------
* Mata program: _regressby3
* Inputs:
* - A y-var and x-var for an OLS regression
* - A group var, for which each value represents a distinct by-group.
* This var must be in ascending order.
* - A list of numeric by-variables, whose groups correspond to the group var.
* Outputs:
* - dataset of coefficients from OLS regression for each by-group
*-------------------------------------------------------------------------------
version 13.1
set matastrict on
mata:
// Core engine for -regressby-: runs one OLS regression per by-group over a
// dataset sorted by the group variable, then replaces the data in memory
// with one row per group holding the by-values, N, coefficients, standard
// errors and sampling covariances.
void _regressby(string scalar regvars, string scalar grpvar, string scalar byvars, string scalar clusterby, string scalar robust, string scalar weightby) {
	// Convert variable names to column indices
	real rowvector regcols, bycols, clustercol, weightcol
	real scalar ycol, xcol, grpcol
	// Fix: declare every remaining name used below. The file runs under
	// -set matastrict on-, so the originally undeclared Vs, X, obs, info,
	// i, j, covariates, covName and otherCovName were compile-time errors.
	real matrix Vs, X, info
	real scalar obs, i, j
	string rowvector covariates
	string scalar covName, otherCovName
	regcols = st_varindex(tokens(regvars))
	bycols = st_varindex(tokens(byvars))
	clustercol = st_varindex(tokens(clusterby))
	weightcol = st_varindex(tokens(weightby))
	grpcol = st_varindex(grpvar)
	// Fetch number of groups (data is sorted by group, so the last
	// observation carries the highest group number)
	real scalar numgrp, startobs, curgrp
	numgrp = _st_data(st_nobs(),grpcol)
	startobs = 1
	curgrp = _st_data(1,grpcol)
	// Preallocate matrices for output
	real matrix groups, coefs, ses, covs, nobs
	groups = J(numgrp, cols(bycols), .)
	coefs = J(numgrp, cols(regcols), .)
	Vs = J(numgrp, cols(regcols)^2, .)
	nobs = J(numgrp, 1, .)
	// Preallocate regression objects
	real matrix XX, Xy, XX_inv, V, Z, M, y, x, w
	real scalar N, k, cov, p, nc
	real vector beta, e, s2, cvar, xi, ei
	// -----------------------------------------------------------------------------
	// Iterate over groups
	// -----------------------------------------------------------------------------
	// Iterate over groups 1 to Ng-1: each time the group id changes, run the
	// regression on the rows [startobs, obs-1] belonging to the previous group
	for (obs=1; obs<=st_nobs()-1; obs++) {
		if (_st_data(obs,grpcol)!=curgrp) {
			st_view(M, (startobs,obs-1), regcols, 0)
			st_subview(y, M, ., 1)
			st_subview(X, M, ., (2\.))
			N = rows(X)
			// Augment x with either column of 1's or weights
			// (the wrapper pre-multiplied everything by sqrt(w), so the
			// weight column plays the role of the constant under WLS)
			// TODO -- noconstant option needs to be specified here and also accounted for in df
			if (weightby!="") {
				st_view(w, (startobs,obs-1), weightcol, 0)
				X = X,w
			}
			if (weightby=="") {
				X = X,J(N,1,1)
			}
			// Define matrix products
			XX = quadcross(X,X)
			Xy = quadcross(X,y)
			XX_inv = invsym(XX)
			// ------------ COMPUTE COEFFICIENTS --------------------
			beta = (XX_inv*Xy)'
			e = y - X*beta'
			p = cols(X)
			// k = rank of X'X (diag0cnt counts dropped/collinear columns)
			k = p - diag0cnt(XX_inv)
			// ------------ COMPUTE STANDARD ERRORS -----------------
			if (robust == "" & clusterby=="") {
				V = quadcross(e,e)/(N-k)*cholinv(XX)
			}
			if (robust != "") {
				V = (N/(N-k))*XX_inv*quadcross(X, e:^2, X)*XX_inv
			}
			if (clusterby != "") {
				st_view(cvar,(startobs,obs-1),clustercol,0)
				info = panelsetup(cvar, 1)
				nc = rows(info)
				// NOTE(review): Z is k x k but xi is p-column; these differ
				// when X has collinear columns — confirm J(p, p, 0) intended.
				Z = J(k, k, 0)
				if (nc>2) {
					for (i=1; i<=nc; i++) {
						xi = panelsubmatrix(X,i,info)
						ei = panelsubmatrix(e,i,info)
						Z = Z + xi'*(ei*ei')*xi
					}
					V = ((N-1)/(N-k))*(nc/(nc-1))*XX_inv*Z*XX_inv
				}
			}
			// ------------ STORE OUTPUT ----------------------------
			coefs[curgrp,.] = beta
			Vs[curgrp,.] = rowshape(V, 1)
			nobs[curgrp,1] = N
			groups[curgrp,.] = st_data(startobs,bycols)
			// ------------ WRAP UP BY ITERATING COUNTERS -----------
			curgrp = _st_data(obs,grpcol)
			startobs = obs
		}
	}
	// Iterate over last group manually
	obs=st_nobs()
	if (_st_data(obs,grpcol)==curgrp) { // last observation is not a group to itself
		// increment obs, since code is written as processing the observation that is 1 past the last in the group
		++obs
		// compute OLS coefs: beta = inv(X'X) * X'y. --> see Example 4 of -help mf_cross-
		st_view(M, (startobs,obs-1), regcols, 0)
		st_subview(y, M, ., 1)
		st_subview(X, M, ., (2\.))
		N = rows(X)
		// Augment X with either column of 1's (unweighted) or weights (weighted)
		// TODO -- noconstant option needs to be specified here and also accounted for in df
		if (weightby!="") {
			st_view(w, (startobs,obs-1), weightcol, 0)
			X = X,w
		}
		if (weightby=="") {
			X = X,J(N,1,1)
		}
		// Define matrix products
		XX = quadcross(X,X)
		Xy = quadcross(X,y)
		XX_inv = invsym(XX)
		beta = (XX_inv*Xy)'
		e = y - X*beta'
		p = cols(X)
		k = p - diag0cnt(XX_inv)
		// USUAL OLS STANDARD ERRORS
		if (robust == "" & clusterby == "") {
			V = quadcross(e,e)/(N-k)*cholinv(XX)
		}
		// ROBUST STANDARD ERRORS
		if (robust != "") {
			V = (N/(N-k))*XX_inv*quadcross(X, e:^2, X)*XX_inv
		}
		// CLUSTERED STANDARD ERRORS
		if (clusterby != "") {
			st_view(cvar,(startobs,obs-1),clustercol,0)
			info = panelsetup(cvar, 1)
			nc = rows(info)
			Z = J(k, k, 0)
			if (nc>2) {
				for (i=1; i<=nc; i++) {
					xi = panelsubmatrix(X,i,info)
					ei = panelsubmatrix(e,i,info)
					Z = Z + xi'*(ei*ei')*xi
				}
				V = ((N-1)/(N-k))*(nc/(nc-1))*XX_inv*Z*XX_inv
			}
		}
		// STORE REGRESSION OUTPUT
		coefs[curgrp,.] = beta
		Vs[curgrp,.] = rowshape(V, 1)
		nobs[curgrp,1] = N
		groups[curgrp,.] = st_data(startobs,bycols)
	}
	else {
		display("{error} last observation is in a singleton group")
		exit(2001)
	}
	// -----------------------------------------------------------------------------
	// Gather output and pass back into Stata
	// -----------------------------------------------------------------------------
	// Store group identifiers in dataset
	stata("qui keep in 1/"+strofreal(numgrp, "%18.0g"))
	stata("keep "+byvars)
	st_store(.,tokens(byvars),groups)
	// Store coefficients in dataset:
	// ... Number of observations,
	(void) st_addvar("long", "N")
	st_store(., ("N"), nobs)
	// ... And then looping over covariates,
	covariates = (cols(regcols)>1) ? tokens(regvars)[|2 \ .|], "cons" : ("cons")
	for (k=1; k<=length(covariates); k++) {
		covName = covariates[k]
		// ... Coefficients and standard errors,
		(void) st_addvar("float", "_b_"+covName)
		(void) st_addvar("float", "_se_"+covName)
		st_store(., "_b_"+covName, coefs[., k])
		// variance of coef k sits at flattened position (k,k)
		st_store(., "_se_"+covName, sqrt(Vs[., k + cols(regcols)*(k - 1)]))
		// ... And the sampling covariances.
		for (j=1; j<k; j++) {
			otherCovName = covariates[j]
			(void) st_addvar("float", "_cov_"+covName+"_"+otherCovName)
			st_store(., "_cov_"+covName+"_"+otherCovName, Vs[., k + cols(regcols) * (j - 1)])
		}
	}
}
end
*-------------------------------------------------------------------------------
* Auxiliary Stata programs for parsing vce (standard error options)
* Source: https://blog.stata.com/2015/12/08/programming-an-estimation-command-in-stata-using-a-subroutine-to-parse-a-complex-option/
*-------------------------------------------------------------------------------
* Validate vce() contents: either "robust" or "cluster <numeric varname>".
* On success returns r(clustervar) (empty for robust); otherwise errors out
* via my_vce_error.
program define my_vce_parse, rclass
syntax [, vce(string) ]
* One word => robust; two words => cluster <varname>; more is invalid
local case : word count `vce'
if `case' > 2 {
my_vce_error , typed(`vce')
}
* Re-parse the vce() contents as if they were options themselves; the
* trailing * collects the would-be cluster variable name into `options'
local 0 `", `vce'"'
syntax [, Robust CLuster * ]
if `case' == 2 {
* Two words are only valid as "cluster <varname>"
if "`robust'" == "robust" | "`cluster'" == "" {
my_vce_error , typed(`vce')
}
* Only an existing numeric variable is accepted as the cluster var
capture confirm numeric variable `options'
if _rc {
my_vce_error , typed(`vce')
}
local clustervar "`options'"
}
else { // case = 1
if "`robust'" == "" {
my_vce_error , typed(`vce')
}
}
return clear
return local clustervar "`clustervar'"
end
* Display an "invalid vce()" message echoing what the user typed, then abort.
program define my_vce_error
syntax , typed(string)
display `"{red}{bf:vce(`typed')} invalid"'
error 498
end
|
* Scheme demo driver: renders each figure under the default s2color scheme and
* the modern/modern_dark schemes, exporting PNGs to figs/.
* NOTE(review): hard-coded absolute path — only runs on the author's machine.
cd C:\Users\Mike\Documents\Github\stata-modern
cap mkdir figs
*-------------------------------------------------------------------------------
* Fig 1: Histograms
*-------------------------------------------------------------------------------
sysuse nlsw88, clear
set scheme s2color
histogram wage, title("Histogram") subtitle("Default scheme") name(fig1a, replace)
graph export "figs/fig1a.png", replace
set scheme modern
histogram wage, title("Histogram") subtitle("Modern color scheme") name(fig1b, replace)
graph export "figs/fig1b.png", replace
set scheme modern_dark
histogram wage, title("Histogram") subtitle("Modern_dark color scheme") name(fig1c, replace)
graph export "figs/fig1c.png", replace
*-------------------------------------------------------------------------------
* Fig 2: Scatter plots
*-------------------------------------------------------------------------------
sysuse nlsw88, clear
set scheme s2color
scatter wage tenure if union==1, title("Scatterplot") subtitle("Default color scheme") name(fig2a, replace)
graph export "figs/fig2a.png", replace
set scheme modern
scatter wage tenure if union==1, title("Scatterplot") subtitle("Modern color scheme") name(fig2b, replace)
graph export "figs/fig2b.png", replace
set scheme modern_dark
scatter wage tenure if union==1, title("Scatterplot") subtitle("Modern_dark color scheme") name(fig2c, replace)
graph export "figs/fig2c.png", replace
*-------------------------------------------------------------------------------
* Fig 3: Horizontal bar plot (population pyramid)
*-------------------------------------------------------------------------------
sysuse pop2000, clear
* Negate male counts so the sexes plot on opposite sides of zero; scale to millions
replace maletotal = -maletotal/1e+6
replace femtotal = femtotal/1e+6
set scheme s2color
twoway (bar maletotal agegrp, horizontal xvarlab(Males)) ///
	(bar femtotal agegrp, horizontal xvarlab(Females)) ///
	, ylabel(1(1)17, angle(horizontal) valuelabel labsize(*.8)) ///
	xtitle("Population in millions") ytitle("") ///
	xlabel(-10 "10" -7.5 "7.5" -5 "5" -2.5 "2.5" 2.5 5 7.5 10) ///
	legend(label(1 Males) label(2 Females)) ///
	title("Bar plot") subtitle("Default color scheme") name(fig3a, replace)
graph export "figs/fig3a.png", replace
set scheme modern
* Fix: subtitle previously said "Default color scheme" (copy-paste error);
* every other figure labels this panel "Modern color scheme".
twoway (bar maletotal agegrp, horizontal xvarlab(Males)) ///
	(bar femtotal agegrp, horizontal xvarlab(Females)) ///
	, ylabel(1(1)17, angle(horizontal) valuelabel labsize(*.8)) ///
	xtitle("Population in millions") ytitle("") ///
	xlabel(-10 "10" -7.5 "7.5" -5 "5" -2.5 "2.5" 2.5 5 7.5 10) ///
	legend(label(1 Males) label(2 Females)) ///
	title("Bar plot") subtitle("Modern color scheme") name(fig3b, replace)
graph export "figs/fig3b.png", replace
set scheme modern_dark
* Fix: subtitle previously said "Default color scheme" (copy-paste error).
twoway (bar maletotal agegrp, horizontal xvarlab(Males)) ///
	(bar femtotal agegrp, horizontal xvarlab(Females)) ///
	, ylabel(1(1)17, angle(horizontal) valuelabel labsize(*.8)) ///
	xtitle("Population in millions") ytitle("") ///
	xlabel(-10 "10" -7.5 "7.5" -5 "5" -2.5 "2.5" 2.5 5 7.5 10) ///
	legend(label(1 Males) label(2 Females)) ///
	title("Bar plot") subtitle("Modern_dark color scheme") name(fig3c, replace)
graph export "figs/fig3c.png", replace
*-------------------------------------------------------------------------------
* Fig 4: Binned scatter plots
* (requires the user-written -binscatter- package)
*-------------------------------------------------------------------------------
sysuse nlsw88, clear
set scheme s2color
binscatter wage tenure, by(union) linetype(connect) title("Binned scatterplot") subtitle("Default color scheme") name(fig4a, replace)
graph export "figs/fig4a.png", replace
set scheme modern
binscatter wage tenure, by(union) linetype(connect) title("Binned scatterplot") subtitle("Modern color scheme") name(fig4b, replace)
graph export "figs/fig4b.png", replace
set scheme modern_dark
binscatter wage tenure, by(union) linetype(connect) title("Binned scatterplot") subtitle("Modern_dark color scheme") name(fig4c, replace)
graph export "figs/fig4c.png", replace
*-------------------------------------------------------------------------------
* Fig 5: Line plots
*-------------------------------------------------------------------------------
sysuse nlsw88, clear
set scheme s2color
twoway (lfitci wage tenure if union==1), title("Line plot") subtitle("Default color scheme") name(fig5a, replace)
graph export "figs/fig5a.png", replace
set scheme modern
twoway (lfitci wage tenure if union==1), title("Line plot") subtitle("Modern color scheme") name(fig5b, replace)
graph export "figs/fig5b.png", replace
set scheme modern_dark
twoway (lfitci wage tenure if union==1), title("Line plot") subtitle("Modern_dark color scheme") name(fig5c, replace)
graph export "figs/fig5c.png", replace
|
capture program drop knit
program define knit
	// Render a .domd literate file: first execute it while logging output
	// with <code> markers, then rewrite that log into the final markdown.
	args infile
	_knit_with_code_tags "`infile'"
	_make_code_blocks "`infile'"
end
capture program drop _knit_with_code_tags
program define _knit_with_code_tags
	// Execute a .domd file line by line inside a text log. Runs of lines
	// indented with 4 spaces are treated as Stata code: each is echoed with
	// a ". " prompt and executed, and the run is bracketed by <code> /
	// </code> markers for _make_code_blocks to parse. Other lines are
	// echoed verbatim. Writes <in>.md1 (a raw Stata log).
	args in
	set more off
	file open f using "`in'", read
	local out = subinstr("`in'", ".domd", ".md1", 1)
	log using "`out'", text replace
	local in_code_block = 0
	file read f line
	while r(eof) == 0 {
		if substr("`line'", 1, 4) == "    " {
			if !`in_code_block' {
				display "<code>"
				local in_code_block = 1
			}
			else {
				display ""
			}
			display ". `=ltrim("`line'")'"
			`line'
		}
		else {
			if `in_code_block' {
				display "</code>"
				local in_code_block = 0
			}
			display "`line'"
		}
		file read f line
	}
	* Fix: if the file ends while still inside a code block, the closing
	* marker was never emitted, leaving _make_code_blocks with an
	* unterminated <code> section. Close it before ending the log.
	if `in_code_block' {
		display "</code>"
	}
	log close
	file close f
end
capture program drop _make_code_blocks
program define _make_code_blocks
	// Second pass: read the raw log <in>.md1 produced by _knit_with_code_tags
	// and write the final markdown <in>.md. Lines between <code>/</code>
	// markers are indented 4 spaces (markdown code); the Stata log header
	// (first 5 lines) and the closing-log footer are stripped.
	args in
	local out = subinstr("`in'", ".domd", ".md", 1)
	* "`out'1" is the intermediate .md1 log from the first pass
	file open f_in using "`out'1", read
	file open f_out using "`out'", write replace
	local in_code_block = 0
	local footer = 0
	file read f_in line
	local line_no = 1
	while r(eof) == 0 {
		* Log header occupies the first 5 lines; the footer starts at the
		* "name: <unnamed>" line emitted by -log close- and runs to EOF
		local header = `line_no' <= 5
		local footer = ("`line'" == "      name:  <unnamed>" & !`header') | `footer'
		if "`line'" == "<code>" {
			local in_code_block = 1
		}
		else if "`line'" == "</code>" {
			local in_code_block = 0
		}
		else {
			if `in_code_block' {
				file write f_out "    `line'" _n
			}
			else {
				if !`header' & !`footer' {
					file write f_out "`line'" _n
				}
			}
		}
		file read f_in line
		local line_no = `line_no' + 1
	}
	file close f_in
	file close f_out
end
* Top-level invocation: knit the article source in the working directory
knit "clustered-standard-errors.domd"
|
This is the generate folder in [Fan](https://fanwangecon.github.io/)'s [Stata4Econ](https://fanwangecon.github.io/Stata4Econ/) repository. Materials are gathered from various [projects](https://fanwangecon.github.io/research) in which Stata code is used. Please contact [FanWangEcon](https://fanwangecon.github.io/) with any issues or problems.
|
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/REconTools
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/Tex4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
The file works out a variety of within group operations.
1. spread and popolate within group value to all elements of the group
*/
///--- Start log
set more off
capture log close _all
cd "${root_log}"
global st_link "/gen/group/fs_group"
global curlogfile "~/Stata4Econ/${st_link}"
* NOTE(review): log name looks copy-pasted from the recode example
* (this file is the group-fill example); consider e.g. "stata_fs_group".
global st_logname "stata_recode_discrete_subset"
log using "${curlogfile}" , replace name($st_logname)
log on $st_logname
///-- Site Link: Fan's Project Reusable Stata Codes Table of Content
di "https://fanwangecon.github.io/"
di "https://fanwangecon.github.io/Stata4Econ/"
///-- File Title
global filetitle "Stata By Group Fill Missing Values by Nonmissing Values"
///--- Load Data
set more off
sysuse auto, clear
///////////////////////////////////////////////////////////////////////////////
///--- Fill Missing Values with NonMissing Min
///////////////////////////////////////////////////////////////////////////////
///--- there are 18 trunk categories
codebook trunk
* generate some random variable
gen var_one_val_in_group = uniform()
* keep one only value each group, all else null
* keep lowest weight length not null
bys trunk (weight length): replace var_one_val_in_group =. if _n != 1
* now populate this randomly selected value within each trunk group to all in group
* sort by var_test, the non-missing value shows up first
bys trunk (var_one_val_in_group): gen var_test_fill = var_one_val_in_group[1]
sort trunk price
list trunk price weight length var_one_val_in_group var_test_fill, sepby(trunk)
///--- End Log and to HTML
log close _all
* log2html is a user-written package; capture so missing package is non-fatal
capture noisily {
log2html "${curlogfile}", replace title($filetitle (<a href="https://github.com/FanWangEcon/Stata4Econ/blob/master${st_link}.do">DO</a>, more see: <a href="https://fanwangecon.github.io/">Fan</a> and <a href="https://fanwangecon.github.io/Stata4Econ">Stata4Econ</a>))
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 8.27
translator set Results2pdf pageheight 11.69
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/REconTools
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/Tex4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
1. given a discrete variable
2. recode the discrete variable to reduce the number of categories, generate larger category categorical
Note there are several ingredients to consider here:
1. current variable name
2. new variable name
3. new variable label
4. new value labels
5. new note
*/
///--- Start log
set more off
capture log close _all
cd "${root_log}"
global st_link "/gen/replace/fs_recode"
global curlogfile "~/Stata4Econ/${st_link}"
global st_logname "stata_recode_discrete_subset"
log using "${curlogfile}" , replace name($st_logname)
log on $st_logname
///-- Site Link: Fan's Project Reusable Stata Codes Table of Content
di "https://fanwangecon.github.io/"
di "https://fanwangecon.github.io/Stata4Econ/"
///-- File Title
global filetitle "Stata Recode a Discrete Variable with Alternative Labels and Values Subgroups: recode, inrange, inlist"
///--- Load Data
set more off
sysuse auto, clear
///////////////////////////////////////////////////////////////////////////////
///--- Recode Method 1: three equivalent ways to coarsen -turn- to 5 groups
///////////////////////////////////////////////////////////////////////////////
///--- Recode Method 1a: recode (value labels attached inline)
capture drop turn_m5
recode turn ///
	(min/35 = 1 "Turn <35") ///
	(36 = 2 "Turn = 36") ///
	(37 = 3 "Turn = 37") ///
	(38/45 = 4 "Turn 38 to 45") ///
	(46/max = 5 "Turn > 45") ///
	(else =. ) ///
	, gen(turn_m5)
tab turn_m5
///--- Recode Method 1b: egen cut (cutpoints are lower bounds, last is upper)
capture drop turn_m5_cut
egen turn_m5_cut = cut(turn), at(31, 36, 37, 38, 46, 51) label
tab turn_m5_cut
capture drop turn_m7_cut
egen turn_m7_cut = cut(turn), at(31(3)52) label
tab turn_m7_cut
///--- Recode Method 1c: inrange and inlist (clonevar keeps format/label base)
capture drop turn_m5_alt
clonevar turn_m5_alt = turn
label variable turn_m5_alt "Recode using inlist and inrange"
replace turn_m5_alt = 1 if inrange(turn, 31, 35)
replace turn_m5_alt = 2 if inlist(turn, 36)
replace turn_m5_alt = 3 if inlist(turn, 37)
replace turn_m5_alt = 4 if inrange(turn, 38, 45)
replace turn_m5_alt = 5 if inlist(turn, 46, 48, 51)
label define turn_m5_alt 1 "Turn <35" 2 "Turn = 36" 3 "Turn = 37" 4 "Turn 38 to 45" 5 "Turn > 45", modify
label values turn_m5_alt turn_m5_alt
tab turn_m5_alt
///--- compare: all three methods should produce identical groupings
tab turn_m5 turn_m5_cut
tab turn_m5 turn_m5_alt
tab turn_m5 turn_m7_cut
///////////////////////////////////////////////////////////////////////////////
///--- Recode Method 2a: Recode based on single variable,
/// slightly less typing, compose ingredients together
///////////////////////////////////////////////////////////////////////////////
/*
Define string using local strings to avoid some retyping.
try to make variable label not longer than width limit.
*/
//-- Set Variable Strings
* svr_* = variable names, slb_* = label/note strings used by the recode below
global svr_newv "trunk_new"
global svr_oldv "trunk"
global slb_labl "this is the new version of the trunk variable"
* Build the (long) note incrementally by appending to the same global
global slb_note "we reset this variable be grouping values 5 to 10, 11 to 13, 14 "
global slb_note "$slb_note to 18, 20 to 22, and 23 into subgroups. We did this "
global slb_note "$slb_note test things out for reseting variables"
#delimit;
global slb_valv "
(min/4 = 1 "trunk <5")
(5/10 = 2 "Turn = 36")
(11/13 = 3 "Turn = 37")
(14/18 = 4 "Turn 38 to 45")
(20/22 = 5 "Turn > 45")
(23 = 5 "Turn > 45")
(else =. )
";
#delimit cr
//-- recode
* generate the new variable from the globals assembled above
capture drop $svr_newv
recode $svr_oldv $slb_valv, gen($svr_newv)
label variable $svr_newv "$slb_labl"
notes $svr_newv: $slb_note
* summ: describe, notes, summary stats and cross-tab of old vs new
d $svr_oldv $svr_newv, f
notes $svr_oldv $svr_newv
summ $svr_oldv $svr_newv
tab $svr_oldv $svr_newv
tab $svr_newv
///////////////////////////////////////////////////////////////////////////////
///--- Recode Method 2b: same as method 2a, but do it for multiple variables loop loop
///////////////////////////////////////////////////////////////////////////////
/*
1. Define string using local strings to avoid some retyping.
2. Summarize outputs iteration by iteration, verbose or not
3. Summarize outputs at the end overall
4. if new and old variables have the same name, understand we want to use the
same name, will relabel generate a new variable with the same variable name
and keep old variable as old_abc, where abc is the current var name
*/
global svr_newv_all ""
foreach it_var of numlist 1 2 3 {
//-- Variable by Variable Naming Settings
* Each branch sets the globals consumed by the shared recode code below
if (`it_var' == 1) {
//-- Set Variable Strings
global svr_newv "price_2m"
global svr_oldv "price"
global slb_labl "price discretized 2 levels"
global slb_note "reset the price variable into two groups, original variable has"
global slb_note "$slb_note 74 observations with 74 unique values. "
//-- value resetting
#delimit;
global slb_valv "
	(min/6000 = 1 "price <= 6000")
	(6001/max = 2 "price > 6000")
	(else =. )
	";
#delimit cr
//-- states verbose show or not
global bl_verbose_print = 0
}
if (`it_var' == 2) {
//-- Set Variable Strings
global svr_newv "price_3m"
global svr_oldv "price"
global slb_labl "price discretized 3 levels"
global slb_note "reset the price variable into two groups, original variable has"
global slb_note "$slb_note 74 observations with 74 unique values. "
//-- value resetting
#delimit;
global slb_valv "
	(min/5500 = 1 "price <= 5500")
	(5501/8500 = 2 "5501 <= price <= 8500")
	(8501/max = 3 "8501 <= price")
	(else =. )
	";
#delimit cr
//-- states verbose show or not
global bl_verbose_print = 0
}
if (`it_var' == 3) {
//-- Set Variable Strings
* this is an example where I relabel and revalue names, but keep variable name
* auto keep an old version
global svr_newv "foreign"
global svr_oldv "foreign"
global slb_labl "is car domestic (relabled, previous 1 is foreign now 0)"
global slb_note "reseting the foreign variable previously 1 is foreign 0"
global slb_note "$slb_note is domestic, now 1 is domestic 0 is foreign"
//-- value resetting
#delimit;
global slb_valv "
	(1 = 0 "foreign car")
	(0 = 1 "domestic car")
	(else =. )
	";
#delimit cr
//-- states verbose show or not
global bl_verbose_print = 1
}
//-- recode
di "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
di "Generate the `it_var'th variable: Generates $svr_newv based on $svr_oldv"
di "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
* generate
global svr_oldv_use "${svr_oldv}"
* When recoding a variable onto its own name, clone the original first so a
* _prev_* copy is preserved and the recode can read from it
if ("$svr_newv" == "$svr_oldv") {
* allows for relabeling the same variable keeping name
global svr_oldv_use "_prev_${svr_oldv}"
clonevar _prev_${svr_oldv} = $svr_oldv
notes $svr_oldv_use: "this variable $svr_oldv_use is replaced by $svr_newv"
}
capture drop $svr_newv
recode $svr_oldv_use $slb_valv, gen($svr_newv)
label variable $svr_newv "$slb_labl"
notes $svr_newv: $slb_note
//-- summarize
d $svr_newv, f
summ $svr_oldv_use $svr_newv
tab $svr_newv
pwcorr $svr_oldv_use $svr_newv, sig
if ($bl_verbose_print) {
d $svr_oldv_use $svr_newv, f
notes $svr_oldv_use $svr_newv
tab $svr_oldv_use $svr_newv
label list $svr_newv
}
//-- Store all strings for easier later retrieval
global svr_newv_all `"$svr_newv_all $svr_newv"'
}
//-- joint summary of all generated variables
* `it_var' was the foreach loop macro above; it is local to the loop and is
* undefined once the loop ends, so the original message printed an empty
* count. Count the generated variables from the accumulated name list instead.
local it_newv_cnt : word count $svr_newv_all
di "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
di "We just finished Generating `it_newv_cnt' Variables, here is their joint summary"
di "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
d $svr_newv_all, f
summ $svr_newv_all
pwcorr $svr_newv_all, sig
///--- End Log and to HTML
* close all open logs, then convert the smcl log; each conversion step is
* wrapped in capture noisily so a missing tool does not abort the do-file
log close _all
capture noisily {
log2html "${curlogfile}", replace title($filetitle (<a href="https://github.com/FanWangEcon/Stata4Econ/blob/master${st_link}.do">DO</a>, more see: <a href="https://fanwangecon.github.io/">Fan</a> and <a href="https://fanwangecon.github.io/Stata4Econ">Stata4Econ</a>))
}
///--- to PDF
* configure the Results2pdf translator (A4-sized custom page), then export
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 8.27
translator set Results2pdf pageheight 11.69
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
* remove the intermediate smcl log once converted
capture noisily {
erase "${curlogfile}.smcl"
}
|
* Self-contained example: Stata matrix creation, cell/row/column assignment,
* row/column naming, and slicing by name via rownumb()/colnumb().
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
Stata matrix basic generation and matrix slicing
1. Generate Matrix
2. Replace single cell values from matrix
3. Replace subset of matrix by row or column array
4. Row and Column Names
5. Retrieve matrix row and column values
*/
///--- Start log
set more off
capture log close _all
cd "${root_log}"
global st_link "/matrix/define/basic"
global curlogfile "~/Stata4Econ/${st_link}"
global st_logname "matrix_select_subset"
log using "${curlogfile}" , replace name($st_logname)
log on $st_logname
///-- Site Link: Fan's Project Reusable Stata Codes Table of Content
di "https://fanwangecon.github.io/"
di "https://fanwangecon.github.io/Stata4Econ/"
///-- File Title
global filetitle "Stata Matrix Slicing, Select Subset of Matrix Values, Subset of Rows and Columns"
///--- Generate matrix with all 0
* J(r, c, v) builds an r-by-c matrix filled with v
scalar it_rowcnt = 4
scalar it_colcnt = 6
scalar bl_fillval = 0
matrix mt_bl_estd = J(it_rowcnt, it_colcnt, bl_fillval)
///--- Give Matrix Row and Column Names
matrix rownames mt_bl_estd = hhfe vilfe provfe morecontrols
matrix colnames mt_bl_estd = reg1 reg2 reg3 reg4 reg5 reg6
///--- Assign value to matrix cell single
* rownumb()/colnumb() translate a row/column name into its index
matrix mt_bl_estd[rownumb(mt_bl_estd, "hhfe"), colnumb(mt_bl_estd, "reg1")] = 1
matrix mt_bl_estd[2,2] = 3
///--- Assign value to 4th row, 3rd to 6th
* a row vector placed at [4,3] fills columns 3..6 of row 4
matrix mt_bl_estd[4,3] = (9,8,7,6)
///--- Assign value to 4th column, 2nd 3rd values
* a column vector placed at [2,4] fills rows 2..3 of column 4
matrix mt_bl_estd[2,4] = (-3\-44.3)
///--- Obtain value from matrix
scalar bl_hhfe_reg1 = mt_bl_estd[rownumb(mt_bl_estd, "hhfe"), colnumb(mt_bl_estd, "reg1")]
di bl_hhfe_reg1
di el(mt_bl_estd, rownumb(mt_bl_estd, "hhfe"), colnumb(mt_bl_estd, "reg1"))
///--- Select a column from matrix
* 1... means all rows from the first to the last
matrix mt_bl_estd_colreg1 = mt_bl_estd[1..., colnumb(mt_bl_estd, "reg1")]
matrix list mt_bl_estd_colreg1
///--- Get Row and Column Names
global st_colnames : colnames mt_bl_estd
di "${st_colnames}"
global st_rownames : rownames mt_bl_estd
di "${st_rownames}"
///--- Show Matrix
matrix list mt_bl_estd
///--- End Log and to HTML
log close _all
capture noisily {
log2html "${curlogfile}", replace title($filetitle (<a href="https://github.com/FanWangEcon/Stata4Econ/blob/master${st_link}.do">DO</a>, more see: <a href="https://fanwangecon.github.io/">Fan</a> and <a href="https://fanwangecon.github.io/Stata4Econ">Stata4Econ</a>))
}
///--- to PDF
capture noisily {
// translator query Results2pdf
translator set Results2pdf logo off
translator set Results2pdf fontsize 8
translator set Results2pdf pagesize letter
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
* Example: install the log2html SSC package and demonstrate it on a log.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
1. Install log2html
2. Test log2html
*/
///--- Install log2html
* replace: update/reinstall when already present (a bare ssc install errors
* out on an already-installed package); capture: do not abort the do-file
* if the SSC server is unreachable.
capture ssc install log2html, replace
///--- Start log
set more off
capture log close
cd "${root_log}"
global curlogfile "~\Stata4Econ\output\log\fs_log2html"
log using "${curlogfile}" , replace
log on
///--- log Contents
set more off
sysuse auto, clear
///--- Vars to Show
* three display groups defined as multi-line globals via #delimit ;
#delimit ;
global st_ids_etc "
make
foreign
";
global st_outcomes "
displacement
weight length
";
global st_inputs "
trunk headroom
price
turn
";
#delimit cr
///--- Describe Vars
d $st_ids_etc
d $st_outcomes
d $st_inputs
///--- Summ Vars
summ $st_ids_etc
summ $st_outcomes
summ $st_inputs
* list all three groups, grouped by foreign and sorted by price within group
bys foreign (price): list ///
$st_ids_etc ///
$st_outcomes ///
$st_inputs
///--- End Log and to HTML
log close
capture noisily {
log2html "${curlogfile}", replace
}
///--- to PDF
* two exports: the full smcl log via smcl2pdf, and the Results window via
* Results2pdf
capture noisily {
// translator query smcl2pdf
translator set smcl2pdf logo off
translator set smcl2pdf fontsize 8
translator set smcl2pdf pagesize custom
translator set smcl2pdf pagewidth 17
translator set smcl2pdf pageheight 17
translator set smcl2pdf lmargin 0.4
translator set smcl2pdf rmargin 0.4
translator set smcl2pdf tmargin 0.4
translator set smcl2pdf bmargin 0.4
translate "${curlogfile}.smcl" "${curlogfile}_smcllog.pdf", replace translator(smcl2pdf)
// translator query Results2pdf
translator set Results2pdf logo off
translator set Results2pdf fontsize 8
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 9
* 20 is max height
translator set Results2pdf pageheight 20
// A3 page dimensions override the width/height set just above
translator set Results2pdf pagewidth 11.69
translator set Results2pdf pageheight 16.53
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}_results.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
* Example: labeling Stata variables and retrieving labels/value labels.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/REconTools
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/Tex4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
Label variables and retrieve variable/value labels in Stata
*/
///--- Start log
set more off
capture log close _all
cd "${root_log}"
global st_link "/prog/basics/fs_label"
global curlogfile "~/Stata4Econ/${st_link}"
global st_logname "stata_fs_label"
log using "${curlogfile}" , replace name($st_logname)
log on $st_logname
///-- Site Link: Fan's Project Reusable Stata Codes Table of Content
di "https://fanwangecon.github.io/"
di "https://fanwangecon.github.io/Stata4Econ/"
///-- File Title
global filetitle "Labeling Stata Variables, and Get Label and all Value Labels from Variables"
///--- Load Data
set more off
sysuse auto, clear
///////////////////////////////////////////////////////////////////////////////
///--- Labeling
///////////////////////////////////////////////////////////////////////////////
* label text corrected: the data loaded above is Stata's auto dataset
* (sysuse auto), not R's mtcars dataset
label variable make "Make and Model from the auto dataset"
label define foreign_lab 0 "domestic made" 1 "foreign made", modify
label values foreign foreign_lab
///////////////////////////////////////////////////////////////////////////////
///--- Get Label Values
///////////////////////////////////////////////////////////////////////////////
///--- Variable Labels show
labelbook foreign_lab, d
///--- Get Variable Label and Values hard-coded
local st_var_label : variable label foreign
local st_foreign_val_0_lab : label foreign_lab 0
local st_foreign_val_1_lab : label foreign_lab 1
di "st_var_label:`st_var_label'"
di "st_foreign_val_0_lab:`st_foreign_val_0_lab'"
di "st_foreign_val_1_lab:`st_foreign_val_1_lab'"
///--- Get Variable Label and Values more Automated
/*
For automated value printing etc:
Given Variable Name:
1. get the label of the variable
2. get all value labels
3. get the number of observation each value of categorical
4. generate string based on these
*/
* 0. Var name
global st_var "foreign"
* 1. get variable label
local st_var_label : variable label ${st_var}
global st_var_label "`st_var_label'"
* 2. all values of foreign label
local st_var_val_lab_name: value label ${st_var}
levelsof ${st_var}, local(ls_var_levels) clean
di "`st_var_val_lab_name'"
di "`ls_var_levels'"
* 3. Number of Observations from Each category
tab ${st_var}, matcell(mt_obs)
* 4. all label values
* NOTE(review): assumes mt_obs rows (from tab) and ls_var_levels (from
* levelsof) are in the same ascending-value order -- confirm
global st_var_val_labs ""
local it_ctr = 0
foreach it_foreign_lvl of numlist `ls_var_levels' {
* look up the value label text for this level, then append
* "level=label [N=count]" to the accumulated display string
local foreign_lvl_lab : label `st_var_val_lab_name' `it_foreign_lvl'
di "`it_foreign_lvl':`foreign_lvl_lab'"
local it_ctr = `it_ctr' + 1
if (`it_ctr' > 1 ) {
global st_var_val_labs "${st_var_val_labs}, "
}
global it_cate_obs = el(mt_obs, `it_ctr', 1)
global st_var_val_labs "${st_var_val_labs}`it_foreign_lvl'=`foreign_lvl_lab' [N=${it_cate_obs}]"
}
* 5. final outputs
di "${st_var_label}"
di "For Outcome ${st_var_label}: ${st_var_val_labs}"
global slb_table_varinfo "${st_var_label} (${st_var_val_labs}, NA excluded from Regression)"
di "${slb_table_varinfo}"
///--- End Log and to HTML
* close logs and convert; each step is capture-wrapped so conversion
* failures do not abort the do-file
log close _all
capture noisily {
log2html "${curlogfile}", replace title($filetitle (<a href="https://github.com/FanWangEcon/Stata4Econ/blob/master${st_link}.do">DO</a>, more see: <a href="https://fanwangecon.github.io/">Fan</a> and <a href="https://fanwangecon.github.io/Stata4Econ">Stata4Econ</a>))
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 8.27
translator set Results2pdf pageheight 11.69
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
* Example: looping over the elements of a space-separated string list.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/REconTools
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/Tex4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
Generate loops in Stata
*/
///--- Start log
set more off
capture log close _all
cd "${root_log}"
global st_link "/prog/basics/fs_loop"
global curlogfile "~/Stata4Econ/${st_link}"
global st_logname "stata_fs_loop"
log using "${curlogfile}" , replace name($st_logname)
log on $st_logname
///-- Site Link: Fan's Project Reusable Stata Codes Table of Content
di "https://fanwangecon.github.io/"
di "https://fanwangecon.github.io/Stata4Econ/"
///-- File Title
global filetitle "Loop Over String and Numeric Vectors in Stata"
///////////////////////////////////////////////////////////////////////////////
///--- Loop over String
///////////////////////////////////////////////////////////////////////////////
* variable-name list defined as one multi-line global via #delimit ;
#delimit;
global ls_svr_outcome "
el_i_mand_talk_m2a el_i_mand_talk_m2b el_i_mand_talk_m2c
el_i_mand_write_m2a el_i_mand_write_m2b el_i_mand_write_m2c
el_i_mnew_m2a el_i_mnew_m2b
el_i_nnet_m2a el_i_nnet_m2b
";
#delimit cr
* Walk the space-separated global list, printing each element together with
* its 1-based position.
local it_pos = 0
foreach st_item in $ls_svr_outcome {
local ++it_pos
di "`it_pos'th item of string list: `st_item'"
}
///--- End Log and to HTML
* close logs and convert; capture-wrapped so failures do not abort
log close _all
capture noisily {
log2html "${curlogfile}", replace title($filetitle (<a href="https://github.com/FanWangEcon/Stata4Econ/blob/master${st_link}.do">DO</a>, more see: <a href="https://fanwangecon.github.io/">Fan</a> and <a href="https://fanwangecon.github.io/Stata4Econ">Stata4Econ</a>))
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 8.27
translator set Results2pdf pageheight 11.69
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
* Example: boolean on/off switches stored as locals, globals and scalars,
* used below to include/exclude individual regressions.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
1. boolean control in stata
*/
///--- Start log
set more off
capture log close
cd "${root_log}"
global curlogfile "~\Stata4Econ\prog\define\fs_boolean"
log using "${curlogfile}" , replace
log on
///--- Load Data
set more off
sysuse auto, clear
///--- Control
* the same on/off idea expressed three ways: local, global, scalar
local bl_includereg1 = 1
local bl_includereg2 = 0
global bl_includereg3 = 0
global bl_includereg4 = 1
scalar bl_includereg5 = 0
scalar bl_includereg6 = 1
///--- Define Multiple Variables as global in delimit
#delimit;
global vars_rhs "
mpg
ib1.rep78
headroom trunk
weight
";
#delimit cr
di `"$vars_rhs"'
///--- Define String with Quotes
#delimit;
global st_coef_label "
mpg "mpg variable"
1.rep78 "BASE GROUP CONSTANT = rep78 is 1"
2.rep78 "rep78 is 2"
3.rep78 "rep78 is 3"
4.rep78 "rep78 is 4"
5.rep78 "rep78 is 5"
headroom "headroom variable"
trunk "this is the trunk variable"
weight "and here the weight variable"
";
#delimit cr
di `"$st_coef_label"'
///--- Describe and Summarize
* NOTE(review): $rhs_vars_list is never defined in this section (the global
* defined above is vars_rhs), so the two commands below run on an empty
* varlist and describe/summarize ALL variables -- confirm intent. Note that
* $vars_rhs itself contains factor notation (ib1.rep78) that describe/summ
* would reject, so a straight substitution would error.
d $rhs_vars_list, f
summ $rhs_vars_list
///--- Run Regression
* each regression runs only when its switch is on; note the three forms of
* the test: `local', $global, and bare scalar name in the if expression
eststo clear
if (`bl_includereg1') {
eststo, title("reg1"): regress price $vars_rhs if foreign == 0
}
if (`bl_includereg2') {
eststo, title("reg2"): regress price $vars_rhs if foreign == 1
}
if ($bl_includereg3) {
eststo, title("reg3"): regress price $vars_rhs if foreign == 1
}
if ($bl_includereg4) {
eststo, title("reg4"): regress price $vars_rhs if foreign == 1
}
if (bl_includereg5) {
eststo, title("reg5"): regress price $vars_rhs if foreign == 1
}
if (bl_includereg6) {
eststo, title("reg6"): regress price $vars_rhs if foreign == 1
}
esttab, title("include reg 1 2 and 4 but not 3, and 6 but not 5.") ///
mtitle ///
coeflabels($st_coef_label) ///
varwidth(50)
///--- End Log and to HTML
log close
capture noisily {
log2html "${curlogfile}", replace
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 11.69
translator set Results2pdf pageheight 16.53
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
* Example: string search-and-replace with subinstr(), including removing
* quote characters and escaping LaTeX special characters.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/REconTools
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/Tex4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
1. define variables through delimit
2. define string with quotes
3. run regression, and use defined string as labels for rows in esttab
4. replace all occurrences of elements in strings
*/
///--- Start log
set more off
capture log close _all
cd "${root_log}"
global st_link "/prog/define/fs_strings"
global curlogfile "~/Stata4Econ/${st_link}"
global st_logname "stata_fs_strings"
log using "${curlogfile}" , replace name($st_logname)
log on $st_logname
///-- Site Link: Fan's Project Reusable Stata Codes Table of Content
di "https://fanwangecon.github.io/"
di "https://fanwangecon.github.io/Stata4Econ/"
///-- File Title
global filetitle "Stata string delimit, string with quotes, string regression labels, etc."
///////////////////////////////////////////////////////////////////////////////
///--- String Operations
///////////////////////////////////////////////////////////////////////////////
///--- Search and Replace Text in Substring
* remove the double-quote character from a string (compound quotes `"..."'
* let the literal contain embedded quotes)
di subinstr(`"dataVar1 " dataVar2"',`"""',"",.)
* Remove all quotes from a string with several quoted segments
di subinstr(`" "dataVar1 dataVar2 " "dataVar2 dataVar3" "',`"""',"",.)
* Replace & with \& (LaTeX escaping) in a long accumulated string
global scd ""
global scd "${scd} Conditions: PA=(& el\_i\_mand\_talk\_m2a != -999 & S\_han !=.);"
global scd "${scd} PB=(& el\_i\_mand\_talk\_m2a != -999 & S\_han == 0);"
global scd "${scd} PC=(& el\_i\_mand\_talk\_m2a != -999 & S\_han == 1);"
global scd "${scd} common=(S\_han !=. & AgeCloseYr\_i\_G1 <= 30 & H\_age <= 44"
global scd "${scd} & (vE\_schCloseYr\_full >= 1998 | vE\_schCloseYr\_full == 0)"
global scd "${scd} & (vE\_schCloseYr\_full >= 1998 | vE\_schCloseYr\_full == 0)"
global scd "${scd} & (vE\_schCloseYr\_full >= 1998 | vE\_schCloseYr\_full == 0)"
global scd "${scd} & (vE\_schCloseYr\_full >= 1998 | vE\_schCloseYr\_full == 0)"
global scd "${scd} & (vE\_schCloseYr\_full >= 1998 | vE\_schCloseYr\_full == 0)"
global scd "${scd} & (vE\_schCloseYr\_full >= 1998 | vE\_schCloseYr\_full == 0)"
global scd "${scd} & (vE\_schCloseYr\_full >= 1998 | vE\_schCloseYr\_full == 0)"
global scd = subinstr("${scd}","&","\&",.)
di "${scd}"
* Remove all underscores from a string
local tableRefName = "a_b_c"
local tableRefName = subinstr("`tableRefName'","_","",.)
di "`tableRefName'"
* replace pound sign with a space in the first word only
local instrCap = "_d1_l1#_d1_l2 _d2_l2#_d2_l4"
local cinstrCapF = subinstr(word("`instrCap'",1),"#"," ",.)
di "`cinstrCapF'"
///////////////////////////////////////////////////////////////////////////////
///--- String Definitions and Regressions
///////////////////////////////////////////////////////////////////////////////
///--- Load Data
set more off
sysuse auto, clear
///--- Define Multiple Variables as global in delimit
#delimit;
global vars_rhs "
mpg
ib1.rep78
headroom trunk
weight
";
#delimit cr
di `"$vars_rhs"'
///--- Define String with Quotes
#delimit;
global st_coef_label "
mpg "mpg variable"
1.rep78 "BASE GROUP CONSTANT = rep78 is 1"
2.rep78 "rep78 is 2"
3.rep78 "rep78 is 3"
4.rep78 "rep78 is 4"
5.rep78 "rep78 is 5"
headroom "headroom variable"
trunk "this is the trunk variable"
weight "and here the weight variable"
";
#delimit cr
di `"$st_coef_label"'
///--- Describe and Summarize
* NOTE(review): $rhs_vars_list is never defined in this section (the global
* defined above is vars_rhs), so these run with an empty varlist and cover
* ALL variables -- confirm intent.
d $rhs_vars_list, f
summ $rhs_vars_list
///--- Run Regression
eststo clear
eststo, title("reg1"): regress price $vars_rhs if foreign == 0
eststo, title("reg2"): regress price $vars_rhs if foreign == 1
esttab, title("regtest") ///
mtitle ///
coeflabels($st_coef_label) ///
varwidth(50)
///--- End Log and to HTML
log close _all
capture noisily {
log2html "${curlogfile}", replace title($filetitle (<a href="https://github.com/FanWangEcon/Stata4Econ/blob/master${st_link}.do">DO</a>, more see: <a href="https://fanwangecon.github.io/">Fan</a> and <a href="https://fanwangecon.github.io/Stata4Econ">Stata4Econ</a>))
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 8.27
translator set Results2pdf pageheight 11.69
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
* Example: set a random subset of observations to missing.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
1. drop a random subset of values
*/
///--- Start log
set more off
capture log close _all
cd "${root_log}"
global st_link "/rand/basic/fs_droprand"
global curlogfile "~/Stata4Econ/${st_link}"
global st_logname "drop_random_subset"
log using "${curlogfile}" , replace name($st_logname)
log on $st_logname
///-- Site Link: Fan's Project Reusable Stata Codes Table of Content
di "https://fanwangecon.github.io/"
di "https://fanwangecon.github.io/Stata4Econ/"
///-- File Title
global filetitle "Stata Drop a Random Subset of Observations"
///--- Load Data
set more off
sysuse auto, clear
///--- Generating Index for Dropping
* compare a deterministic index round((_n/_N)*frac) against a random index
* round(frac*runiform()); rows where the two coincide get set to missing
* below. uniform() is a deprecated synonym of runiform() -- modernized here
* (identical values for the same seed).
set seed 987
scalar it_drop_frac = 3
gen row_idx_it = round((_n/_N)*it_drop_frac)
gen row_idx_rand = round(it_drop_frac*runiform())
//--- drop when row_idx_it == row_idx_rand, if it_drop_frac set at 3
list make price mpg row_idx_it row_idx_rand, ab(20)
///--- Drop approximately 1/2 of make randomly
* generated-variable names fixed: mimssing -> missing (all later references
* use wildcards make*, mpg*, price*, so the rename is self-contained)
set seed 987
scalar it_drop_frac = 2
clonevar make_wth_missing = make
replace make_wth_missing = "" if round((_n/_N)*it_drop_frac) == round(it_drop_frac*runiform())
///--- Drop approximately 1/3 of mpg randomly
set seed 987
scalar it_drop_frac = 3
clonevar mpg_wth_missing = mpg
replace mpg_wth_missing =. if round((_n/_N)*it_drop_frac) == round(it_drop_frac*runiform())
///--- Drop approximately 1/5 of price randomly
set seed 987
scalar it_drop_frac = 5
clonevar price_wth_missing = price
replace price_wth_missing =. if round((_n/_N)*it_drop_frac) == round(it_drop_frac*runiform())
///--- Summarize
codebook make*
summ mpg* price*
list make* mpg* price*
///--- End Log and to HTML
* close logs and convert; capture-wrapped so failures do not abort
log close _all
capture noisily {
log2html "${curlogfile}", replace title($filetitle (<a href="https://github.com/FanWangEcon/Stata4Econ/blob/master${st_link}.do">DO</a>, more see: <a href="https://fanwangecon.github.io/">Fan</a> and <a href="https://fanwangecon.github.io/Stata4Econ">Stata4Econ</a>))
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 8.27
translator set Results2pdf pageheight 11.69
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
linear regression in Stata
|
* Example: extract post-estimation statistics (e.g. p-values) from the
* r(table) matrix returned by regress.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
1. Get statistics from regression, for example the p value
2. Show all subgroup coefficients in one regression
*/
///--- Start log
set more off
capture log close
cd "${root_log}"
global curlogfile "~\Stata4Econ\reglin\basic\fs_reg_get_stats"
log using "${curlogfile}" , replace
log on
///--- Load Data
set more off
sysuse auto, clear
tab rep78
tab foreign
///--- Regression
regress weight ib3.rep78 if foreign == 0
///--- Get r(table) Column Names
* columns of r(table) are the regressors; rows are statistics (b, se, ...)
global colnames : colnames r(table)
di "$colnames"
global rownames : rownames r(table)
di "$rownames"
///--- Regression Statistics as matrix
* copy r(table) to a named matrix so it can be subscripted below
matrix list r(table)
matrix rtable = r(table)
//-- Get All p values
matrix pval_row = rtable[rownumb(rtable, "pvalue"), 1...]
matrix list pval_row
//-- Get One Particular pValue
di colnumb(rtable, "5.rep78")
di rownumb(rtable, "pvalue")
global pval = rtable[rownumb(rtable, "pvalue"), colnumb(rtable, "5.rep78")]
di "$pval"
///--- End Log and to HTML
log close
capture noisily {
log2html "${curlogfile}", replace
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 8
translator set Results2pdf pagesize letter
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
* Example: run the same regression per subgroup, then show all subgroup
* coefficients in a single regression via factor-variable interactions.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
1. Same regression for two Subgroups
2. Show all subgroup coefficients in one regression
*/
///--- Start log
set more off
capture log close
cd "${root_log}"
global curlogfile "~\Stata4Econ\reglin\discrete\fs_reg_d_interact"
log using "${curlogfile}" , replace
log on
///--- Load Data
set more off
sysuse auto, clear
tab rep78
tab foreign
* 1. Same regression for two Subgroups
eststo clear
eststo, title(dom): regress weight ib3.rep78 if foreign == 0
eststo, title(foreign): regress weight ib3.rep78 if foreign == 1
esttab, mtitle title("Foreign or Domestic")
* 2. Show all subgroup coefficients in one regression
* build a complement indicator of foreign so both group dummies exist
capture drop domestic
recode foreign ///
(0 = 1 "domestic") ///
(1 = 0 "foreign") ///
(else = .) ///
, ///
gen(domestic)
tab domestic foreign
* using factor for binary: noc drops the constant so both group levels and
* their rep78 interactions are identified
eststo clear
eststo, title(both): quietly regress ///
weight ///
ib0.foreign ib0.domestic ///
ib3.rep78#ib0.foreign ///
ib3.rep78#ib0.domestic ///
, noc
esttab, mtitle title("Foreign or Domestic")
* Streamlined
eststo clear
regress ///
weight ///
ib0.foreign ib0.domestic ///
ib3.rep78#ib0.foreign ///
, noc
esttab, mtitle title("Foreign or Domestic")
* Streamlined 2
eststo clear
regress ///
weight ///
ib0.foreign ///
ib3.rep78#ib0.foreign
esttab, mtitle title("Foreign or Domestic")
* using cts for binary: c. treats the 0/1 indicators as continuous
eststo clear
eststo, title(both): quietly regress ///
weight ///
c.foreign c.domestic ///
ib3.rep78#c.foreign ///
ib3.rep78#c.domestic ///
, noc
esttab, mtitle title("Foreign or Domestic")
///--- End Log and to HTML
log close
capture noisily {
log2html "${curlogfile}", replace
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 8
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 9
translator set Results2pdf pageheight 20
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${curlogfile}.smcl"
}
|
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/REconTools
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/Tex4Econ/
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
When we run regressions, what change:
1. LHS
2. RHS
3. Conditioning
4. Regression Method
We hope to store regression results to an estimates table that has columns and panels. Each column in each panel represents the results from one regression. This is a fully generic regression function.
The way program works is that:
1. define default strings for each regression ingredient
2. for panel and column, if we want column and panel specific conditions, if they are defined, then they override generic
3. could define for each ingredient, col specific, panel specific, or column and panel specific regressions
The output here are:
- allpurpose_tab.html
- allpurpose_tab.rft
- allpurpose_tab_texbody.tex
the allpurpose_tab.tex can be converted to PDF.
There is a pre-written tex file:
- allpurpose.tex, this is not generated by the do file here, but pre-written
- that file has only one *input*, which is allpurpose_tab_texbody.tex
*/
///--- File Names
global st_file_root "~\Stata4Econ\reglin\multipanel\allpurpose\"
global st_log_file "${st_file_root}allpurpose"
global st_tab_rtf "${st_file_root}allpurpose_tab.rtf"
global st_tab_html "${st_file_root}allpurpose_tab.html"
global st_tab_tex "${st_file_root}allpurpose_tab_texbody.tex"
///--- Start log
capture log close
log using "${st_log_file}" , replace
log on
set trace off
set tracedepth 1
/////////////////////////////////////////////////
///--- A0. Load Data
/////////////////////////////////////////////////
set more off
set trace off
sysuse auto, clear
///--- Controls
global quiornot "qui"
* global quiornot "noi"
/////////////////////////////////////////////////
///--- A1. Core String Initiation
/////////////////////////////////////////////////
/*
A regression has:
1. reg method: stc_rgc
2. LHS: svr_lhs
3. RHS (to keep): svr_rhs (go to svr_kep)
4. RHS (controls not to show in table): svr_cov
5. Conditions: svr_cdn
6. reg options: stc_opt
*/
* rgc = regression, opt = option
global stc_rgc "reg"
global stc_opt ", robust"
* sca = what scalar statistics to obtain from reg
global stc_sca "r2 rank"
* cdn = conditioning
global sif_cdn "if price !=. & foreign !=."
* regression outcome
global svr_lhs "price"
* right and side and what to Display
* svr_rhs what we want to keep on table
* svr_cov controls to not show on table
* this keeping aspect is not automatic, to allow flexibility, can specify
* with svr_kep what should be kept, below it is keeping svr_rhs.
global svr_rhs "rep78"
global svr_cov "gear_ratio"
global svr_kep "${svr_rhs}"
/////////////////////////////////////////////////
///--- A2. Set Number of Rows and Columns
/////////////////////////////////////////////////
* column count, and panel count
* can specify any numbers here, code will run for any col and row count
* if both equal to 1, will only generate 1 panel with 1 column of regression
* if both very large, but do not specify column or panel specific variables or
* conditions, will just keep running identical regressions over and over.
global it_col_cnt = 7
global it_pan_cnt = 6
/////////////////////////////////////////////////
///--- A3. Labeling
/////////////////////////////////////////////////
* column title, panel title, and slb_pan_nte = panel notes
* these are defaults; sections B1/B2 define per-column / per-panel overrides
global slb_col "price"
global slb_pan "current panel results"
global slb_pan_nte "general notes"
* eso = esttab options
* note: ${stc_sca} expands NOW, so stc_sca must already be defined above
global slb_eso "label mtitles p stats(N ${stc_sca}) star(* 0.10 ** 0.05 *** 0.01)"
global slb_tex_eso "booktabs ${slb_eso}"
/////////////////////////////////////////////////
///--- B1. Column Specific Strings
/////////////////////////////////////////////////
* Column titling, some columns get column specific titles
* columns 1 and 2 define no slb_col_# global, so they fall back to slb_col
global slb_col_3 "wgt"
global slb_col_4 "areg"
global slb_col_5 "gear <= 3"
global slb_col_6 "reg"
global slb_col_7 "areg"
* change regression method for column 4
global stc_rgc_col_4 "areg"
global stc_opt_col_4 ", absorb(foreign)"
global stc_rgc_col_7 "areg"
global stc_opt_col_7 ", absorb(foreign)"
* this means the third column's lhs var will be weight
global svr_lhs_col_3 "weight"
* below changing condition for 5th and 3rd column, append to existing conditions
global sif_cdn_col_5 "& gear_ratio <= 3"
* compound quotes needed because the value itself contains double quotes
global sif_cdn_col_3 `"& trunk != 5 & ~strpos(make, "Ford")"'
* append these variables to column 4 and 5 estimations
global svr_rhs_col_4 "weight"
global svr_rhs_col_5 "turn"
/////////////////////////////////////////////////
///--- B2. Panel Specific Strings
/////////////////////////////////////////////////
* Panel titling, 1 2 3 get panel specific titles, other use base
global slb_pan_1 "Panel A, foreign == 0"
global slb_pan_2 "Panel B, foreign == 1"
global slb_pan_3 "Panel C, length >= 190"
* Panel Specific Notes
* compound quotes: each inner "..." becomes one addnotes() line in esttab
global slb_pan_nte_1 `""This panel only includes foreign == 0. Absorb no effects.""'
global slb_pan_nte_2 `""This panel then focuses only on foreign == 1""'
* appending a second note line to panel 2's note list
global slb_pan_nte_2 `"${slb_pan_nte_2} "Hi there, more notes next line""'
global slb_pan_nte_5 `""This panel is the 5th" "Yes it is the 5th, so what""'
* the 3rd panel and 6 panel lhs variable is mpg, note column override panel lhs
global svr_lhs_pan_3 "mpg"
global svr_lhs_pan_6 "mpg"
* panel specific conditioning, appending to column and base conditioning
global sif_cdn_pan_1 "& foreign == 0"
global sif_cdn_pan_2 "& foreign == 1"
global sif_cdn_pan_3 "& length >= 190"
* panel specific rhs variables, append to column and base
global svr_rhs_pan_1 "mpg headroom"
global svr_rhs_pan_4 "mpg"
* keeping
* NOTE(review): svr_rhs_col_1 is never defined above, so it expands to empty
* here -- presumably intentional placeholder; confirm. svr_rhs_col_4 ("weight")
* is not in svr_kep_pan_4, so weight is estimated but not shown in panel 4.
global svr_kep_pan_1 "${svr_rhs_pan_1} ${svr_rhs_col_1} ${svr_rhs_col_5}"
global svr_kep_pan_4 "${svr_rhs_pan_4} ${svr_rhs_col_1} ${svr_rhs_col_5}"
/////////////////////////////////////////////////
///--- B3. Panel and Column Specific Strings
/////////////////////////////////////////////////
* RHS for panel 5 and column 4 will have two more covariates
global svr_rhs_pan_5_col_4 "length turn"
global svr_kep_pan_4 "${svr_kep_pan_4} ${svr_rhs_pan_5_col_4}"
/////////////////////////////////////////////////
///--- C. Define Regression Strings
/////////////////////////////////////////////////
* Builds one regression command string, global srg_pan_P_col_C, per table cell.
foreach it_pan_ctr of numlist 1(1)$it_pan_cnt {
foreach it_col_ctr of numlist 1(1)$it_col_cnt {
///--- Counters
global it_col_ctr "`it_col_ctr'"
global it_pan_ctr "`it_pan_ctr'"
///--- Reset Strings to Default Always, _u = use
* if there are panel or column specific values, replace, either col or row specific
* generates: stc_rgc_u and stc_opt_u
global stc_rgc_u "${stc_rgc}"
global stc_opt_u "${stc_opt}"
global svr_lhs_u "${svr_lhs}"
global st_ls_rep "stc_rgc stc_opt svr_lhs"
foreach st_seg in $st_ls_rep {
global st_seg "`st_seg'"
* di `"${st_seg}_pan_${it_pan_ctr}: ${${st_seg}_pan_${it_pan_ctr}}"'
* di `"${st_seg}_col_${it_col_ctr}: ${${st_seg}_col_${it_col_ctr}}"'
* di `"${st_seg}_pan_${it_pan_ctr}_col_${it_col_ctr}: ${${st_seg}_pan_${it_pan_ctr}_col_${it_col_ctr}}"'
* Override precedence, most specific first: a panel-AND-column specific value
* beats a column specific value, which beats a panel specific value. Section
* B2 documents "column override panel lhs"; the original chain tested the
* panel override first, so a panel-specific value incorrectly shadowed a
* column-specific one, and the pan+col override could never fire.
if (`"${${st_seg}_pan_${it_pan_ctr}_col_${it_col_ctr}}"' != "") {
global ${st_seg}_u `"${${st_seg}_pan_${it_pan_ctr}_col_${it_col_ctr}}"'
}
else if (`"${${st_seg}_col_${it_col_ctr}}"' != "") {
global ${st_seg}_u `"${${st_seg}_col_${it_col_ctr}}"'
}
else if (`"${${st_seg}_pan_${it_pan_ctr}}"' != "") {
global ${st_seg}_u `"${${st_seg}_pan_${it_pan_ctr}}"'
}
* di `"${st_seg}_u: ${${st_seg}_u}"'
}
* if there are panel or column specific values, append (undefined ones expand empty)
global svr_rhs_u "${svr_rhs} ${svr_rhs_pan_${it_pan_ctr}} ${svr_rhs_col_${it_col_ctr}}"
global svr_cov_u "${svr_cov} ${svr_cov_pan_${it_pan_ctr}} ${svr_cov_col_${it_col_ctr}}"
global sif_cdn_u `"${sif_cdn} ${sif_cdn_pan_${it_pan_ctr}} ${sif_cdn_col_${it_col_ctr}}"'
///--- Compose Regression String
global srg_pan_${it_pan_ctr}_col_${it_col_ctr} `"${stc_rgc_u} ${svr_lhs_u} ${svr_rhs_u} ${svr_cov_u} ${sif_cdn_u} ${stc_opt_u}"'
///--- Display Regression String
di "PAN={$it_pan_ctr}, COL={$it_col_ctr}"
di `"${srg_pan_${it_pan_ctr}_col_${it_col_ctr}}"'
}
}
/////////////////////////////////////////////////
///--- D. Run Regressions
/////////////////////////////////////////////////
* Runs every srg_pan_P_col_C string, storing results as m1, m2, ... via eststo,
* and records per-panel model lists in globals smd_P_m for esttab later.
eststo clear
global it_reg_ctr = 0
///--- Loop over panels
foreach it_pan_ctr of numlist 1(1)$it_pan_cnt {
///--- Counters
global it_pan_ctr "`it_pan_ctr'"
///--- Model Store Name
global st_cur_sm_stor "smd_${it_pan_ctr}_m"
global ${st_cur_sm_stor} ""
///--- Loop over regression columns
foreach it_col_ctr of numlist 1(1)$it_col_cnt {
///--- Counters
global it_col_ctr "`it_col_ctr'"
global it_reg_ctr = ${it_reg_ctr} + 1
global st_cur_srg_name "srg_pan_${it_pan_ctr}_col_${it_col_ctr}"
///--- Regression String Name
di "PAN={$it_pan_ctr}, COL={$it_col_ctr}, ${st_cur_srg_name}"
di `"${${st_cur_srg_name}}"'
///--- Reset Strings to Default Always
global slb_col_u "${slb_col}"
global st_ls_rep "slb_col"
foreach st_seg in $st_ls_rep {
global st_seg "`st_seg'"
if ("${${st_seg}_${it_col_ctr}}" != "") {
global ${st_seg}_u `"${${st_seg}_${it_col_ctr}}"'
}
}
///--- Regress
* capture swallows estimation errors so a failed cell becomes an empty column
capture $quiornot {
eststo m${it_reg_ctr}, title("${slb_col_u}") : ${$st_cur_srg_name}
}
if _rc!=0 {
///--- This means this regression failed, proceed with empty col
* Generate a fake observation to create a new estimated model
* Then replace the observation N by setting it to 0, otherwise N = 1
capture drop aaa
gen aaa = 0 if _n == 1
eststo m${it_reg_ctr}, title("${slb_col_u}") : estpost tabstat aaa , statistics(n) c(s)
estadd scalar N = 0, replace
}
///--- Estadd Controls
* foreach st_scalar_name in $stc_sca {
* estadd local ${st_scalar_name} e(${st_scalar_name})
* }
///--- Track Regression Store
global $st_cur_sm_stor "${${st_cur_sm_stor}} m${it_reg_ctr}"
}
}
* note: outside the loop st_cur_sm_stor still names the LAST panel's store,
* so this displays only the final panel's model list
di "${${st_cur_sm_stor}}"
///--- Regression Panel String list
foreach it_pan_ctr of numlist 1(1)$it_pan_cnt {
global it_pan_ctr "`it_pan_ctr'"
global st_cur_sm_stor "smd_${it_pan_ctr}_m"
di "${st_cur_sm_stor}"
}
/////////////////////////////////////////////////
///--- E. Show Results
/////////////////////////////////////////////////
* One esttab pass per panel: display in the log, then append to the html, rtf
* and tex outputs (first panel replaces the files, later panels append).
foreach it_pan_ctr of numlist 1(1)$it_pan_cnt {
global it_pan_ctr "`it_pan_ctr'"
global slb_eso_u "${slb_eso}"
global slb_tex_eso_u "${slb_tex_eso}"
global slb_pan_u "${slb_pan}"
global slb_pan_nte_u "${slb_pan_nte}"
global st_ls_rep "slb_pan slb_pan_nte"
foreach st_seg in $st_ls_rep {
global st_seg "`st_seg'"
if (`"${${st_seg}_${it_pan_ctr}}"' != "") {
global ${st_seg}_u `"${${st_seg}_${it_pan_ctr}}"'
}
}
* keep list: base plus panel-specific additions (empty if undefined)
global svr_kep_u "${svr_kep} ${svr_kep_pan_${it_pan_ctr}}"
global st_esttab_opts_main `"addnotes(${slb_pan_nte_u}) title("${slb_pan_u}") keep(${svr_kep_u}) order(${svr_kep_u})"'
global st_esttab_opts_tex `"${st_esttab_opts_main} ${slb_tex_eso_u}"'
global st_esttab_opts_oth `"${st_esttab_opts_main} ${slb_eso_u}"'
di "MODELS: ${smd_${it_pan_ctr}_m}"
di `"st_esttab_opts_main: ${st_esttab_opts_main}"'
///--- output to log
esttab ${smd_${it_pan_ctr}_m}, ${st_esttab_opts_oth}
///--- save results to html, rtf, as well as tex
if ($it_pan_ctr == 1) {
global st_replace "replace"
}
else {
global st_replace "append"
}
esttab ${smd_${it_pan_ctr}_m} using "${st_tab_html}", ${st_esttab_opts_oth} $st_replace
esttab ${smd_${it_pan_ctr}_m} using "${st_tab_rtf}", ${st_esttab_opts_oth} $st_replace
esttab ${smd_${it_pan_ctr}_m} using "${st_tab_tex}", ${st_esttab_opts_tex} $st_replace
}
/////////////////////////////////////////////////
///--- F. Log to PDF etc
/////////////////////////////////////////////////
///--- End Log and to HTML
log close
* log2html is a user-written command (ssc install log2html); capture noisily
* keeps the script running if it is not installed
capture noisily {
log2html "${st_log_file}", replace
}
* convert the smcl log to pdf via Stata's built-in smcl2pdf translator
capture noisily {
// translator query smcl2pdf
translator set smcl2pdf logo off
translator set smcl2pdf fontsize 8
translator set smcl2pdf pagesize custom
translator set smcl2pdf pagewidth 9
translator set smcl2pdf pageheight 20
translator set smcl2pdf lmargin 0.4
translator set smcl2pdf rmargin 0.4
translator set smcl2pdf tmargin 0.4
translator set smcl2pdf bmargin 0.4
translate "${st_log_file}.smcl" "${st_log_file}.pdf", replace translator(smcl2pdf)
}
* remove the raw smcl log once the html/pdf copies exist
capture noisily {
erase "${st_log_file}.smcl"
}
|
// do "C:\Users\fan\Stata4Econ\reglin\multipanel\allpurpose\allpurpose_prog.do"
// content file of allpurpose.do
/////////////////////////////////////////////////
///--- C. Define Regression Strings
/////////////////////////////////////////////////
* Builds one regression command string, global srg_pan_P_col_C, per table cell.
foreach it_pan_ctr of numlist 1(1)$it_pan_cnt {
foreach it_col_ctr of numlist 1(1)$it_col_cnt {
///--- Counters
global it_col_ctr "`it_col_ctr'"
global it_pan_ctr "`it_pan_ctr'"
///--- Reset Strings to Default Always, _u = use
* if there are panel or column specific values, replace, either col or row specific
* generates: stc_rgc_u and stc_opt_u
global stc_rgc_u "${stc_rgc}"
global stc_opt_u "${stc_opt}"
global svr_lhs_u "${svr_lhs}"
global st_ls_rep "stc_rgc stc_opt svr_lhs"
foreach st_seg in $st_ls_rep {
global st_seg "`st_seg'"
* di `"${st_seg}_pan_${it_pan_ctr}: ${${st_seg}_pan_${it_pan_ctr}}"'
* di `"${st_seg}_col_${it_col_ctr}: ${${st_seg}_col_${it_col_ctr}}"'
* di `"${st_seg}_pan_${it_pan_ctr}_col_${it_col_ctr}: ${${st_seg}_pan_${it_pan_ctr}_col_${it_col_ctr}}"'
* Override precedence, most specific first: a panel-AND-column specific value
* beats a column specific value, which beats a panel specific value. Section
* B2 documents "column override panel lhs"; the original chain tested the
* panel override first, so a panel-specific value incorrectly shadowed a
* column-specific one, and the pan+col override could never fire.
if (`"${${st_seg}_pan_${it_pan_ctr}_col_${it_col_ctr}}"' != "") {
global ${st_seg}_u `"${${st_seg}_pan_${it_pan_ctr}_col_${it_col_ctr}}"'
}
else if (`"${${st_seg}_col_${it_col_ctr}}"' != "") {
global ${st_seg}_u `"${${st_seg}_col_${it_col_ctr}}"'
}
else if (`"${${st_seg}_pan_${it_pan_ctr}}"' != "") {
global ${st_seg}_u `"${${st_seg}_pan_${it_pan_ctr}}"'
}
* di `"${st_seg}_u: ${${st_seg}_u}"'
}
* if there are panel or column specific values, append (undefined ones expand empty)
global svr_rhs_u "${svr_rhs} ${svr_rhs_pan_${it_pan_ctr}} ${svr_rhs_col_${it_col_ctr}}"
global svr_cov_u "${svr_cov} ${svr_cov_pan_${it_pan_ctr}} ${svr_cov_col_${it_col_ctr}}"
global sif_cdn_u `"${sif_cdn} ${sif_cdn_pan_${it_pan_ctr}} ${sif_cdn_col_${it_col_ctr}}"'
///--- Compose Regression String
global srg_pan_${it_pan_ctr}_col_${it_col_ctr} `"${stc_rgc_u} ${svr_lhs_u} ${svr_rhs_u} ${svr_cov_u} ${sif_cdn_u} ${stc_opt_u}"'
///--- Display Regression String
di "PAN={$it_pan_ctr}, COL={$it_col_ctr}"
di `"${srg_pan_${it_pan_ctr}_col_${it_col_ctr}}"'
}
}
/////////////////////////////////////////////////
///--- D. Run Regressions
/////////////////////////////////////////////////
* NOTE(review): sections D and E below duplicate the earlier D and E blocks of
* this file verbatim (this copy belongs to the excerpted "content file of
* allpurpose.do"). See the comments on the first copy for details.
eststo clear
global it_reg_ctr = 0
///--- Loop over panels
foreach it_pan_ctr of numlist 1(1)$it_pan_cnt {
///--- Counters
global it_pan_ctr "`it_pan_ctr'"
///--- Model Store Name
global st_cur_sm_stor "smd_${it_pan_ctr}_m"
global ${st_cur_sm_stor} ""
///--- Loop over regression columns
foreach it_col_ctr of numlist 1(1)$it_col_cnt {
///--- Counters
global it_col_ctr "`it_col_ctr'"
global it_reg_ctr = ${it_reg_ctr} + 1
global st_cur_srg_name "srg_pan_${it_pan_ctr}_col_${it_col_ctr}"
///--- Regression String Name
di "PAN={$it_pan_ctr}, COL={$it_col_ctr}, ${st_cur_srg_name}"
di `"${${st_cur_srg_name}}"'
///--- Reset Strings to Default Always
global slb_col_u "${slb_col}"
global st_ls_rep "slb_col"
foreach st_seg in $st_ls_rep {
global st_seg "`st_seg'"
if ("${${st_seg}_${it_col_ctr}}" != "") {
global ${st_seg}_u `"${${st_seg}_${it_col_ctr}}"'
}
}
///--- Regress
capture $quiornot {
eststo m${it_reg_ctr}, title("${slb_col_u}") : ${$st_cur_srg_name}
}
if _rc!=0 {
///--- This means this regression failed, proceed with empty col
* Generate a fake observation to create a new estimated model
* Then replace the observation N by setting it to 0, otherwise N = 1
capture drop aaa
gen aaa = 0 if _n == 1
eststo m${it_reg_ctr}, title("${slb_col_u}") : estpost tabstat aaa , statistics(n) c(s)
estadd scalar N = 0, replace
}
///--- Estadd Controls
* foreach st_scalar_name in $stc_sca {
* estadd local ${st_scalar_name} e(${st_scalar_name})
* }
///--- Track Regression Store
global $st_cur_sm_stor "${${st_cur_sm_stor}} m${it_reg_ctr}"
}
}
di "${${st_cur_sm_stor}}"
///--- Regression Panel String list
foreach it_pan_ctr of numlist 1(1)$it_pan_cnt {
global it_pan_ctr "`it_pan_ctr'"
global st_cur_sm_stor "smd_${it_pan_ctr}_m"
di "${st_cur_sm_stor}"
}
/////////////////////////////////////////////////
///--- E. Show Results
/////////////////////////////////////////////////
foreach it_pan_ctr of numlist 1(1)$it_pan_cnt {
global it_pan_ctr "`it_pan_ctr'"
global slb_eso_u "${slb_eso}"
global slb_tex_eso_u "${slb_tex_eso}"
global slb_pan_u "${slb_pan}"
global slb_pan_nte_u "${slb_pan_nte}"
global st_ls_rep "slb_pan slb_pan_nte"
foreach st_seg in $st_ls_rep {
global st_seg "`st_seg'"
if (`"${${st_seg}_${it_pan_ctr}}"' != "") {
global ${st_seg}_u `"${${st_seg}_${it_pan_ctr}}"'
}
}
global svr_kep_u "${svr_kep} ${svr_kep_pan_${it_pan_ctr}}"
global st_esttab_opts_main `"addnotes(${slb_pan_nte_u}) title("${slb_pan_u}") keep(${svr_kep_u}) order(${svr_kep_u})"'
global st_esttab_opts_tex `"${st_esttab_opts_main} ${slb_tex_eso_u}"'
global st_esttab_opts_oth `"${st_esttab_opts_main} ${slb_eso_u}"'
di "MODELS: ${smd_${it_pan_ctr}_m}"
di `"st_esttab_opts_main: ${st_esttab_opts_main}"'
///--- output to log
esttab ${smd_${it_pan_ctr}_m}, ${st_esttab_opts_oth}
///--- save results to html, rtf, as well as tex
if ($it_pan_ctr == 1) {
global st_replace "replace"
}
else {
global st_replace "append"
}
esttab ${smd_${it_pan_ctr}_m} using "${st_tab_html}", ${st_esttab_opts_oth} $st_replace
esttab ${smd_${it_pan_ctr}_m} using "${st_tab_rtf}", ${st_esttab_opts_oth} $st_replace
esttab ${smd_${it_pan_ctr}_m} using "${st_tab_tex}", ${st_esttab_opts_tex} $st_replace
}
|
* Fresh session: clear screen, data, and all macros before this script runs
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
1. given a list of variables, and some conditioning statement
2. what is the subset of observations where these variables all have non-missing values
3. and satisfying the conditioning statements
*/
///--- Start log
set more off
capture log close _all
* NOTE(review): ${root_log} is not defined in this file -- presumably set in
* profile.do or an outer wrapper; confirm, otherwise cd fails here
cd "${root_log}"
global st_link "/summ/count/fs_nonmissing"
global curlogfile "~/Stata4Econ/${st_link}"
global st_logname "select_rows_nonmissing"
log using "${curlogfile}" , replace name($st_logname)
log on $st_logname
///-- Site Link: Fan's Project Reusable Stata Codes Table of Content
di "https://fanwangecon.github.io/"
di "https://fanwangecon.github.io/Stata4Econ/"
///-- File Title
global filetitle "Stata Select Rows where Multiple Variables are All Observed, Jointly Nonmissing"
///--- Load Data
set more off
sysuse auto, clear
///--- Generating Index for Dropping
* Inject missing values pseudo-randomly so the nonmissing-selection demo below
* has something to select against: set mpg (then price) to missing on the
* subset of rows where a deterministic row bucket matches a random bucket.
* runiform() is the current name for the deprecated uniform(); with set seed
* the draw sequence is identical, so results are unchanged.
set seed 987
scalar it_drop_frac = 3
* deterministic bucket 0..3 based on row position
gen row_idx_it = round((_n/_N)*it_drop_frac)
* random bucket 0..3 per row
gen row_idx_rand = round(it_drop_frac*runiform())
replace mpg =. if row_idx_it == row_idx_rand
* second pass with a different seed to knock out price on a different subset
set seed 123
scalar it_drop_frac = 3
replace row_idx_it = round((_n/_N)*it_drop_frac)
replace row_idx_rand = round(it_drop_frac*runiform())
replace price =. if row_idx_it == row_idx_rand
///--- list vars to include in a regression for example
global svr_list "mpg price length weight"
///--- Conditioning
global scd_bse "foreign !=."
global scd_one "& foreign == 1"
global scd_two "& gear_ratio <= 4"
* valid = count of nonmissing values among svr_list for each row satisfying the
* conditions (missing for rows failing the conditions)
egen valid = rownonmiss($svr_list) if $scd_bse $scd_one $scd_two
///--- Tabulate and list Results
tab valid
* rows where ALL svr_list variables are observed: valid equals the list length
list $svr_list if valid == wordcount("$svr_list")
///--- List including rows where not all values are observed but conditioning satisfied
tab valid
list $svr_list if valid !=.
///--- End Log and to HTML
log close _all
* log2html is user-written (ssc install log2html); capture keeps going without it
capture noisily {
log2html "${curlogfile}", replace title($filetitle (<a href="https://github.com/FanWangEcon/Stata4Econ/blob/master${st_link}.do">DO</a>, more see: <a href="https://fanwangecon.github.io/">Fan</a> and <a href="https://fanwangecon.github.io/Stata4Econ">Stata4Econ</a>))
}
///--- to PDF
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 8.27
translator set Results2pdf pageheight 11.69
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${curlogfile}.pdf", replace translator(Results2pdf)
}
* delete the raw smcl log once the html/pdf copies exist
capture noisily {
erase "${curlogfile}.smcl"
}
|
Multi-Panel Regressions
|
* Fresh session: clear screen, data, and all macros before this script runs
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
Regression Table where:
- shared regression outcome lhs variable
- for each panel, rhs variables differ
- for each column, conditioning differs, but rhs vars the same
*/
///--- File Names
* NOTE(review): Windows backslash paths; forward slashes would be cross-platform
global st_file_root "~\Stata4Econ\table\multipanel\tab_6col3pan\"
global st_log_file "${st_file_root}gen_reg"
global st_out_html "${st_file_root}tab_6col3pan.html"
global st_out_rtf "${st_file_root}tab_6col3pan.rtf"
global st_out_tex "${st_file_root}tab_6col3pan_texbody.tex"
///--- Start log
capture log close
log using "${st_log_file}" , replace
log on
set trace off
set tracedepth 1
/////////////////////////////////////////////////
///--- Load Data
/////////////////////////////////////////////////
set more off
sysuse auto, clear
tab rep78
tab foreign
/////////////////////////////////////////////////
///--- A1. Define Regression Variables
/////////////////////////////////////////////////
* shared regression outcome lhs variable
global svr_outcome "price"
* for each panel, rhs variables differ
global svr_rhs_panel_a "mpg ib1.rep78 displacement gear_ratio"
global svr_rhs_panel_b "headroom mpg trunk weight displacement gear_ratio"
global svr_rhs_panel_c "headroom turn length weight trunk"
* for each column, conditioning differs
global it_reg_n = 6
global sif_col_1 "weight <= 4700"
global sif_col_2 "weight <= 4500"
global sif_col_3 "weight <= 4300"
global sif_col_4 "weight <= 4100"
global sif_col_5 "weight <= 3900"
global sif_col_6 "weight <= 3700"
* esttad strings for conditioning what were included
* mt_bl_estd: 4 indicator rows x 6 regression columns; cell = 1 means the
* "Yes" label is shown for that control row in that column
scalar it_esttad_n = 4
matrix mt_bl_estd = J(it_esttad_n, $it_reg_n, 0)
* NOTE(review): rownames say 4500/4000/3500/3000 while the slb_estd labels
* below say 4700/4500/4300/4100 -- the names are internal keys only, but
* confirm the intended thresholds match the labels
matrix rownames mt_bl_estd = incdgr4500 incdgr4000 incdgr3500 incdgr3000
matrix colnames mt_bl_estd = reg1 reg2 reg3 reg4 reg5 reg6
* each assignment fills one 4x1 column starting at row 1
matrix mt_bl_estd[1, 1] = (1\1\1\1)
matrix mt_bl_estd[1, 2] = (1\1\1\1)
matrix mt_bl_estd[1, 3] = (0\1\1\1)
matrix mt_bl_estd[1, 4] = (0\1\1\1)
matrix mt_bl_estd[1, 5] = (0\0\1\1)
matrix mt_bl_estd[1, 6] = (0\0\1\1)
global st_estd_rownames : rownames mt_bl_estd
global slb_estd_1 "the weight <= 4700"
global slb_estd_2 "the weight <= 4500"
global slb_estd_3 "the weight <= 4300"
global slb_estd_4 "the weight <= 4100"
/////////////////////////////////////////////////
///--- A2. Define Regression Technical Strings
/////////////////////////////////////////////////
///--- Technical Controls
global stc_regc "regress"
global stc_opts ", noc"
/////////////////////////////////////////////////
///--- B1. Define Regressions Panel A
/////////////////////////////////////////////////
* For each of the it_reg_n columns, compose the panel A command string
* srg_panel_a_col_# = regress outcome rhs_panel_a if column-condition, noc
/*
di "$srg_panel_a_col_1"
di "$srg_panel_a_col_2"
di "$srg_panel_a_col_6"
*/
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_a_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_a if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_a_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- B2. Define Regressions Panel B
/////////////////////////////////////////////////
* Same composition for panel B's rhs list
/*
di "$srg_panel_b_col_1"
di "$srg_panel_b_col_2"
di "$srg_panel_b_col_6"
*/
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_b_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_b if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_b_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- B3. Define Regressions Panel C
/////////////////////////////////////////////////
* Same composition for panel C's rhs list
/*
di "$srg_panel_c_col_1"
di "$srg_panel_c_col_2"
di "$srg_panel_c_col_6"
*/
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_c_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_c if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_c_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- C. Run Regressions
/////////////////////////////////////////////////
* Run all 3 panels x 6 columns, eststo-storing models m1..m18 and tracking the
* per-panel model lists in smd_panel_a_m / smd_panel_b_m / smd_panel_c_m.
eststo clear
local it_reg_ctr = 0
foreach st_panel in panel_a panel_b panel_c {
global st_cur_sm_stor "smd_`st_panel'_m"
global ${st_cur_sm_stor} ""
foreach it_regre of numlist 1(1)$it_reg_n {
local it_reg_ctr = `it_reg_ctr' + 1
global st_cur_srg_name "srg_`st_panel'_col_`it_regre'"
di "st_panel:`st_panel', it_reg_ctr:`it_reg_ctr', st_cur_srg_name:${st_cur_srg_name}"
///--- Regression
eststo m`it_reg_ctr', title("${sif_col_`it_regre'}") : ${$st_cur_srg_name}
///--- Estadd Controls
* attach "Yes"/"No" control indicators per the mt_bl_estd matrix cell
foreach st_estd_name in $st_estd_rownames {
scalar bl_estad = el(mt_bl_estd, rownumb(mt_bl_estd, "`st_estd_name'"), `it_regre')
if (bl_estad) {
estadd local `st_estd_name' "Yes"
}
else {
estadd local `st_estd_name' "No"
}
}
///--- Track Regression Store
global $st_cur_sm_stor "${${st_cur_sm_stor}} m`it_reg_ctr'"
}
di "${${st_cur_sm_stor}}"
}
di "$smd_panel_a_m"
di "$smd_panel_b_m"
di "$smd_panel_c_m"
/////////////////////////////////////////////////
///--- D1. Labeling
/////////////////////////////////////////////////
///--- Title overall
* NOTE(review): titles mention schooling/villages but the data is sysuse auto;
* presumably copied from another project -- confirm before publishing.
global slb_title "Outcome: Attending School or Not"
global slb_title_inner "\textbf{Male}: Subregression for All Males"
global slb_label_tex "tab:sctp"
global slb_panel_a "Group A: Coefficients for Distance to Elementary School Variables"
global slb_panel_b "Group B: Coefficients for Elementary School Physical Quality Variables"
global slb_panel_c "Group C: More Coefficients"
global slb_bottom "Controls for each panel:"
* Define the star-level legend BEFORE composing slb_note: globals expand at
* definition time inside double quotes, so referencing ${slb_starLvl} before
* it exists baked an empty string into the note. Section D2 re-defines the
* same value, which is harmless.
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
global slb_note "${slb_starLvl}. Standard Errors clustered at village level. Each Column is a separate regression."
///--- Show which coefficients to keep
* Per-panel lists of coefficients to display (everything else is estimated but
* hidden); factor-variable levels like 2.rep78 name individual dummies.
#delimit;
global svr_coef_keep_panel_a "
mpg
2.rep78 3.rep78
4.rep78 5.rep78
";
global svr_coef_keep_panel_b "
headroom
mpg
trunk
weight
";
global svr_coef_keep_panel_c "
turn
";
#delimit cr
///--- Labeling for for Coefficients to Show
* LaTeX spacing prefixes applied to each coefficient label / footer line
global slb_1st_ele_spc "\vspace*{0mm}\hspace*{2mm}"
global slb_fot_lst_spc "\vspace*{0mm}\hspace*{2mm}"
global rcSpaceInit "\vspace*{-5mm}\hspace*{-8mm}"
* svr_starts_var_panel_X = first displayed coefficient of each panel; used by
* refcat() in section G1 to inject the panel title row above it
#delimit;
global svr_starts_var_panel_a "mpg";
global slb_coef_label_panel_a "
mpg "${slb_1st_ele_spc}miles per gallon"
2.rep78 "${slb_1st_ele_spc}rep78 is 2"
3.rep78 "${slb_1st_ele_spc}rep78 is 3"
4.rep78 "${slb_1st_ele_spc}rep78 is 4"
5.rep78 "${slb_1st_ele_spc}rep78 is 5"
";
#delimit cr
#delimit;
global svr_starts_var_panel_b "headroom";
global slb_coef_label_panel_b "
headroom "${slb_1st_ele_spc}headroom variable"
mpg "${slb_1st_ele_spc}miles per gallon"
trunk "${slb_1st_ele_spc}this is the trunk variable"
weight "${slb_1st_ele_spc}and here the weight variable"
";
#delimit cr
#delimit;
global svr_starts_var_panel_c "turn";
global slb_coef_label_panel_c "
turn "${slb_1st_ele_spc}variable is turn"
";
#delimit cr
/////////////////////////////////////////////////
///--- D2. Regression Display Controls
/////////////////////////////////////////////////
* Bottom statistics rows: N plus the Yes/No control-indicator rows from C
global slb_reg_stats "N ${st_estd_rownames}"
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
global slb_starComm "nostar"
* tex cells: coefficient with stars, standard error in footnotesize parentheses
global slb_sd_tex `"se(fmt(a2) par("\vspace*{-2mm}{\footnotesize (" ") }"))"'
global slb_cells_tex `"cells(b(star fmt(a2)) $slb_sd_tex)"'
global slb_esttab_opt_tex "${slb_cells_tex} booktabs label collabels(none) nomtitles nonumbers star(${slb_starLvl})"
* txt/html/rtf cells: plain parentheses around the standard error
global slb_sd_txt `"se(fmt(a2) par("(" ")"))"'
global slb_cells_txt `"cells(b(star fmt(a2)) $slb_sd_txt)"'
global slb_esttab_opt_txt "${slb_cells_txt} stats(${slb_reg_stats}) collabels(none) mtitle nonumbers varwidth(30) modelwidth(15) star(${slb_starLvl}) addnotes(${slb_note})"
* per-panel esttab fragments: title, keep/order list, coefficient labels
#delimit ;
global slb_panel_a_main "
title("${slb_panel_a}")
keep(${svr_coef_keep_panel_a}) order(${svr_coef_keep_panel_a})
coeflabels($slb_coef_label_panel_a)
";
global slb_panel_b_main "
title("${slb_panel_b}")
keep(${svr_coef_keep_panel_b}) order(${svr_coef_keep_panel_b})
coeflabels($slb_coef_label_panel_b)
";
global slb_panel_c_main "
title("${slb_panel_c}")
keep(${svr_coef_keep_panel_c}) order(${svr_coef_keep_panel_c})
coeflabels($slb_coef_label_panel_c)
";
#delimit cr
/////////////////////////////////////////////////
///--- E. Regression Shows
/////////////////////////////////////////////////
* Display each panel's table in the log with the text options
esttab ${smd_panel_a_m}, ${slb_panel_a_main} ${slb_esttab_opt_txt}
esttab ${smd_panel_b_m}, ${slb_panel_b_main} ${slb_esttab_opt_txt}
esttab ${smd_panel_c_m}, ${slb_panel_c_main} ${slb_esttab_opt_txt}
/////////////////////////////////////////////////
///--- F1. Define Latex Column Groups and Column Sub-Groups
/////////////////////////////////////////////////
///--- Column Groups
* colSeq marks the table-column index where each group starts (label column is
* column 1); gaps between consecutive entries become group spans in G2
global it_max_col = 8
global it_min_col = 2
global it_col_cnt = 6
global colSeq "2 4 6 8"
///--- Group 1, columns 1 and 2
* NOTE(review): group/column labels reference an age/schooling design, not the
* auto data used above -- presumably carried over from another table; confirm.
global labG1 "All Age 5 to 12"
global labC1 "{\small All Villages}"
global labC2 "{\small No Teachng Points}"
///--- Group 2, columns 3 and 4
global labG2 "Girls Age 5 to 12"
global labC3 "{\small All Villages}"
global labC4 "{\small No Teachng Points}"
///--- Group 3, columns 5 and 6
global labG3 "Boys Age 5 to 12"
global labC5 "{\small All Villages}"
global labC6 "{\small No Teachng Points}"
///--- Column Widths
* widths in cm: each coefficient column, and the left label column
global perCoefColWid = 1.85
global labColWid = 6
///--- Column Fractional Adjustment, 1 = 100%
global tableAdjustBoxWidth = 1.0
/////////////////////////////////////////////////
///--- F2. Tabling Calculations
/////////////////////////////////////////////////
///--- Width Calculation
* derived widths (cm) used by the multicolumn spans in sections G1/G2/G4
global totCoefColWid = ${perCoefColWid}*${it_col_cnt}
global totColCnt = ${it_col_cnt} + 1
global totColWid = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidFootnote = ${labColWid} + ${totCoefColWid} + ${perCoefColWid} + ${perCoefColWid}/2
global totColWidLegend = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidLegendthin = ${totCoefColWid} + ${perCoefColWid}
* echo each derived value once (the original printed totCoefColWid five times,
* an apparent copy-paste slip that hid the other computed widths)
di "it_col_cnt:$it_col_cnt"
di "totCoefColWid:$totCoefColWid"
di "totColCnt:$totColCnt"
di "totColWid:$totColWid"
di "totColWidFootnote:$totColWidFootnote"
di "totColWidLegend:$totColWidLegend"
di "totColWidLegendthin:$totColWidLegendthin"
* ampersand: one "&" per coefficient column, used to pad blank LaTeX rows
global ampersand ""
foreach curLoop of numlist 1(1)$it_col_cnt {
global ampersand "$ampersand &"
}
di "ampersand:$ampersand"
* alignCenter: LaTeX column spec -- label column plus one centered fixed-width
* column per coefficient column
global alignCenter "m{${labColWid}cm}"
local eB1 ">{\centering\arraybackslash}m{${perCoefColWid}cm}"
foreach curLoop of numlist 1(1)$it_col_cnt {
global alignCenter "$alignCenter `eB1'"
}
di "alignCenter:$alignCenter"
/////////////////////////////////////////////////
///--- G1. Tex Sectioning
/////////////////////////////////////////////////
* refcat() injects a full-width bold panel-title row immediately before the
* first displayed coefficient of each panel (svr_starts_var_panel_X from D1).
#delimit ;
global slb_titling_panel_a "
${svr_starts_var_panel_a} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${rcSpaceInit}\textbf{${slb_panel_a}}} \\"
";
global slb_refcat_panel_a `"refcat(${slb_titling_panel_a}, nolabel)"';
#delimit cr
#delimit ;
global slb_titling_panel_b "
${svr_starts_var_panel_b} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${rcSpaceInit}\textbf{${slb_panel_b}}} \\"
";
global slb_refcat_panel_b `"refcat(${slb_titling_panel_b}, nolabel)"';
#delimit cr
#delimit ;
global slb_titling_panel_c "
${svr_starts_var_panel_c} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${rcSpaceInit}\textbf{${slb_panel_c}}} \\"
";
global slb_refcat_panel_c `"refcat(${slb_titling_panel_c}, nolabel)"';
#delimit cr
* Bottom stats block: relabels the control-indicator rows; the first label
* smuggles in a midrule plus the bold "Controls for each panel:" header row.
#delimit ;
global slb_titling_bottom `"
stats(N $st_estd_rownames,
labels(Observations
"\midrule \multicolumn{${totColCnt}}{L{${totColWid}cm}}{${rcSpaceInit}\textbf{\textit{\normalsize ${slb_bottom}}}} \\ $ampersand \\ ${slb_fot_lst_spc}${slb_estd_1}"
"${slb_fot_lst_spc}${slb_estd_2}"
"${slb_fot_lst_spc}${slb_estd_3}"
"${slb_fot_lst_spc}${slb_estd_4}"))"';
#delimit cr
/////////////////////////////////////////////////
///--- G2. Tex Headline
/////////////////////////////////////////////////
* Builds the three-row LaTeX header: row1 = overall inner title spanning all
* coefficient columns, row2 = group titles (labG#) over the column spans
* derived from colSeq, row3 = per-column sub-titles (labC#). The loop below is
* order-dependent: `lastCol' carries across iterations.
///--- C.3.A. Initialize
global row1 "&"
global row1MidLine ""
global row2 ""
global row2MidLine ""
global row3 ""
///--- B. Row 2 and row 2 midline
* global colSeq "2 3 6"
global cmidrule ""
global colCtr = -1
foreach curCol of numlist $colSeq {
global colCtr = $colCtr + 1
global curCol1Min = `curCol' - 1
if ($colCtr == 0 ) {
global minCoefCol = "`curCol'"
}
if ($colCtr != 0 ) {
* span width = number of table columns between this boundary and the last
global gapCnt = (`curCol' - `lastCol')
global gapWidth = (`curCol' - `lastCol')*$perCoefColWid
di "curCol1Min:$curCol1Min, lastCol:`lastCol'"
di "$gapCnt"
di "\multicolumn{$gapCnt}{C{${gapWidth}cm}}{\small no Control}"
di "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global curRow2MidLine "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global row2MidLine "$row2MidLine $curRow2MidLine"
global curRow2 "\multicolumn{$gapCnt}{L{${gapWidth}cm}}{\small ${labG${colCtr}}}"
global row2 "$row2 & $curRow2"
}
local lastCol = `curCol'
}
///--- C. Row 3
* Initial & for label column
foreach curLoop of numlist 1(1)$it_col_cnt {
global curText "${labC`curLoop'}"
* fall back to "(#)" numbering when no labC# label was provided
global textUse "(`curLoop')"
if ("$curText" != "") {
global textUse "$curText"
}
global curRow3 "\multicolumn{1}{C{${perCoefColWid}cm}}{$textUse}"
global row3 "$row3 & $curRow3"
}
///--- D. Row 1 and midline:
* curCol1Min here retains its value from the LAST colSeq iteration above
global row1 "${row1} \multicolumn{${it_col_cnt}}{L{${totCoefColWid}cm}}{${slb_title_inner}}"
global row1MidLine "\cmidrule(l{5pt}r{5pt}){${minCoefCol}-${curCol1Min}}"
///--- C.3.E Print lines
di "$row1 \\"
di "$row1MidLine "
di "$row2 \\"
di "$row2MidLine"
di "$row3 \\"
///--- C.4 Together
#delimit ;
///--- 1. Section
* local section "
* \section{`fileTitle'}\vspace*{-6mm}
* ";
///--- 2. Align and Column Define
local centering "$alignCenter";
global headline "
$row1 \\
$row1MidLine
$row2 \\
$row2MidLine
$row3 \\
";
#delimit cr
/////////////////////////////////////////////////
///--- G4. Head
/////////////////////////////////////////////////
* Assembles the prehead/postfoot fragments that wrap the esttab body in a
* complete LaTeX table (adjustbox + tabular + caption + footnote row).
#delimit ;
global adjustBoxStart "\begin{adjustbox}{max width=${tableAdjustBoxWidth}\textwidth}";
global adjustBoxEnd "\end{adjustbox}";
global notewrap "
\addlinespace[-0.5em]
\multicolumn{${totColCnt}}{L{${totColWidFootnote}cm}}{\footnotesize\justify${slb_note}}\\
";
global startTable "\begin{table}[htbp]
\centering
\caption{${slb_title}\label{${slb_label_tex}}}${adjustBoxStart}\begin{tabular}{`centering'}
\toprule
";
global headlineAll "prehead(${startTable}${headline})";
global headlineAllNoHead "prehead(${startTable})";
global postAll "postfoot(\bottomrule ${notewrap} \end{tabular}${adjustBoxEnd}\end{table})";
#delimit cr
/////////////////////////////////////////////////
///--- H1. Output Results to HTML
/////////////////////////////////////////////////
// Three stacked panels per file: panel A uses replace, B and C append.
esttab ${smd_panel_a_m} using "${st_out_html}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
esttab ${smd_panel_b_m} using "${st_out_html}", ${slb_panel_b_main} ${slb_esttab_opt_txt} append
esttab ${smd_panel_c_m} using "${st_out_html}", ${slb_panel_c_main} ${slb_esttab_opt_txt} append
/////////////////////////////////////////////////
///--- H2. Output Results to RTF
/////////////////////////////////////////////////
esttab ${smd_panel_a_m} using "${st_out_rtf}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
esttab ${smd_panel_b_m} using "${st_out_rtf}", ${slb_panel_b_main} ${slb_esttab_opt_txt} append
esttab ${smd_panel_c_m} using "${st_out_rtf}", ${slb_panel_c_main} ${slb_esttab_opt_txt} append
/////////////////////////////////////////////////
///--- H3. Output Results to Tex
/////////////////////////////////////////////////
// Fragment mode: panel A carries the table opener + headline, panel C
// carries the refcat section rows, bottom stats block and table closer.
esttab $smd_panel_a_m using "${st_out_tex}", ///
${slb_panel_a_main} ///
${slb_refcat_panel_a} ///
${slb_esttab_opt_tex} ///
fragment $headlineAll postfoot("") replace
esttab $smd_panel_b_m using "${st_out_tex}", ///
${slb_panel_b_main} ///
${slb_refcat_panel_b} ///
${slb_esttab_opt_tex} ///
fragment prehead("") postfoot("") append
esttab $smd_panel_c_m using "${st_out_tex}", ///
${slb_panel_c_main} ///
${slb_refcat_panel_c} ///
${slb_esttab_opt_tex} ///
${slb_titling_bottom} ///
addnotes(${slb_note}) ///
fragment prehead("") $postAll append
/////////////////////////////////////////////////
///--- I. Out Logs
/////////////////////////////////////////////////
///--- End Log and to HTML
log close
///--- to PDF
// Best-effort: configure the Results2pdf translator (custom page size,
// narrow margins) and export the results window; capture noisily keeps
// failures non-fatal while still echoing any error.
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 11.69
translator set Results2pdf pageheight 16.53
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${st_log_file}.pdf", replace translator(Results2pdf)
}
// Best-effort cleanup of the smcl log once the pdf has been produced.
capture noisily {
erase "${st_log_file}.smcl"
}
* ---------------- end of do-file ----------------
// Reset Stata state (screen, data in memory, all macros) before the next
// stand-alone example script.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
Regression with a continuous variable and discrete variables; the discrete variables can interact with each other and with the continuous variable
*/
///--- File Names
// Root folder plus log/html/rtf/tex output paths for this example table.
global st_file_root "~\Stata4Econ\table\multipanel\tab_6col_cts_dis2inter\"
global st_log_file "${st_file_root}gen_reg"
global st_out_html "${st_file_root}tab_6col_cts_dis2inter.html"
global st_out_rtf "${st_file_root}tab_6col_cts_dis2inter.rtf"
global st_out_tex "${st_file_root}tab_6col_cts_dis2inter_texbody.tex"
///--- Start log
capture log close
log using "${st_log_file}" , replace
log on
set trace off
set tracedepth 1
/////////////////////////////////////////////////
///--- Load Data
/////////////////////////////////////////////////
set more off
// Long-form blood-pressure example data shipped with Stata
// (variables: patient, sex, agegrp, when, bp).
sysuse bplong, clear
tab sex
tab agegrp
tab when
tab sex when
tab sex agegrp
// Build labeled interaction group ids used as categorical regressors below.
egen sex_when = group(sex when), label
egen sex_agegrp = group(sex agegrp), label
egen when_agegrp = group(when agegrp), label
/////////////////////////////////////////////////
///--- A1. Define Regression Variables
/////////////////////////////////////////////////
* shared regression outcome lhs variable
global svr_outcome "bp"
* for each panel, rhs variables differ
global svr_rhs_panel_a "patient agegrp sex"
global svr_rhs_panel_b "patient ib1.agegrp ib1.sex_when"
global svr_rhs_panel_c "sex i.sex#c.patient io(1 3).sex_when io(1 4).sex_agegrp"
* for each column, conditioning differs
global it_reg_n = 6
global sif_col_1 "bp <= 185"
global sif_col_2 "bp <= 180"
global sif_col_3 "bp <= 175"
global sif_col_4 "bp <= 170"
global sif_col_5 "bp <= 165"
global sif_col_6 "bp <= 160"
* esttad strings for conditioning what were included
// mt_bl_estd flags, per regression column, whether that column's
// estimation sample still CONTAINS observations with bp >= 185/180/170/160
// (e.g. the "bp <= 175" sample contains bp >= 170 cases but none >= 180).
// These flags feed the Yes/No footer rows added via estadd in section C.
scalar it_esttad_n = 4
matrix mt_bl_estd = J(it_esttad_n, $it_reg_n, 0)
matrix rownames mt_bl_estd = bpge185 bpge180 bpge170 bpge160
matrix colnames mt_bl_estd = reg1 reg2 reg3 reg4 reg5 reg6
// Each assignment writes a 4x1 column vector into mt_bl_estd at row 1.
matrix mt_bl_estd[1, 1] = (1\1\1\1)
matrix mt_bl_estd[1, 2] = (0\1\1\1)
matrix mt_bl_estd[1, 3] = (0\0\1\1)
matrix mt_bl_estd[1, 4] = (0\0\1\1)
matrix mt_bl_estd[1, 5] = (0\0\0\1)
matrix mt_bl_estd[1, 6] = (0\0\0\1)
global st_estd_rownames : rownames mt_bl_estd
global slb_estd_1 "blood pressure >= 185"
global slb_estd_2 "blood pressure >= 180"
global slb_estd_3 "blood pressure >= 170"
global slb_estd_4 "blood pressure >= 160"
/////////////////////////////////////////////////
///--- A2. Define Regression Technical Strings
/////////////////////////////////////////////////
///--- Technical Controls
// Estimation command and shared options appended to every regression.
global stc_regc "regress"
global stc_opts ", vce(robust)"
/////////////////////////////////////////////////
///--- B1. Define Regressions Panel A
/////////////////////////////////////////////////
/*
di "$srg_panel_a_col_1"
di "$srg_panel_a_col_2"
di "$srg_panel_a_col_6"
*/
// Compose one full regression command string per column, stored in the
// globals srg_panel_a_col_1 ... srg_panel_a_col_6; executed in section C.
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_a_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_a if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_a_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- B2. Define Regressions Panel B
/////////////////////////////////////////////////
/*
di "$srg_panel_b_col_1"
di "$srg_panel_b_col_2"
di "$srg_panel_b_col_6"
*/
// Same as B1 but with the panel-B right-hand-side variable list.
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_b_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_b if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_b_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- B3. Define Regressions Panel C
/////////////////////////////////////////////////
/*
di "$srg_panel_c_col_1"
di "$srg_panel_c_col_2"
di "$srg_panel_c_col_6"
*/
// Same as B1 but with the panel-C (interacted) right-hand-side list.
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_c_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_c if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_c_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- C. Run Regressions
/////////////////////////////////////////////////
eststo clear
local it_reg_ctr = 0
// Loop over panels x columns: run each stored command, attach Yes/No
// sample-inclusion scalars via estadd, and collect the eststo names per
// panel into smd_panel_a_m / smd_panel_b_m / smd_panel_c_m.
foreach st_panel in panel_a panel_b panel_c {
global st_cur_sm_stor "smd_`st_panel'_m"
global ${st_cur_sm_stor} ""
foreach it_regre of numlist 1(1)$it_reg_n {
local it_reg_ctr = `it_reg_ctr' + 1
global st_cur_srg_name "srg_`st_panel'_col_`it_regre'"
di "st_panel:`st_panel', it_reg_ctr:`it_reg_ctr', st_cur_srg_name:${st_cur_srg_name}"
///--- Regression
// ${$st_cur_srg_name} double-expands: first to the name
// srg_<panel>_col_<k>, then to the stored regress command itself.
eststo m`it_reg_ctr', title("${sif_col_`it_regre'}") : ${$st_cur_srg_name}
///--- Estadd Controls
foreach st_estd_name in $st_estd_rownames {
scalar bl_estad = el(mt_bl_estd, rownumb(mt_bl_estd, "`st_estd_name'"), `it_regre')
if (bl_estad) {
estadd local `st_estd_name' "Yes"
}
else {
estadd local `st_estd_name' "No"
}
}
///--- Track Regression Store
global $st_cur_sm_stor "${${st_cur_sm_stor}} m`it_reg_ctr'"
}
di "${${st_cur_sm_stor}}"
}
di "$smd_panel_a_m"
di "$smd_panel_b_m"
di "$smd_panel_c_m"
/////////////////////////////////////////////////
///--- D1. Labeling
/////////////////////////////////////////////////
///--- Title overall
global slb_title "Outcome: Blood Pressure"
global slb_title_inner "\textbf{Categories}: Discrete Categories and BP"
global slb_label_tex "tab:scminter"
///--- Several RHS Continuous Variables
global slb_panel_a "Panel A: Continuous Right Hand Side Variables"
///--- Continuous Variables + Several Discrete Variables
global slb_panel_b "Panel B: Two Discrete Right Hand Side Variables"
global slb_panel_b_ga "Age Groups (Compare to 30-45)"
// ib1.sex_when omits group 1; group(sex when) with bplong's sex coding
// (0 = Male, 1 = Female) makes group 1 = Male Before, so the omitted
// comparison group is Male Before (previously mislabeled "Female Before";
// the coefficient label "3.sex_when x female before" confirms group 3 is
// Female Before).
global slb_panel_b_gb "Gender/Time Groups (Compare to Male Before)"
///--- Continuous Variables + Several Discrete Variables Interacted with More Discrete Variables
global slb_panel_c "Panel C: Two Discrete Interacted Variables"
global slb_panel_c_sa "Male Dummy Interactions:"
global slb_panel_c_sb "Female Dummy Interactions:"
global slb_panel_c_sa_ga "Time Groups (Compare to Before)"
global slb_panel_c_sa_gb "Age Groups (Compare to 30-45)"
global slb_panel_c_sb_ga "Time Groups (Compare to Before)"
global slb_panel_c_sb_gb "Age Groups (Compare to 30-45)"
///--- Notes
// Stata expands $-references when a global is DEFINED, so slb_starLvl must
// exist before slb_note references it; previously it was only defined
// later (section D2), leaving the star legend silently empty in the note.
// The D2 definition is identical, so repeating it here is harmless.
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
global slb_bottom "Controls for each panel:"
global slb_note "${slb_starLvl}. Robust standard errors. Each column is a separate regression."
///--- Show which coefficients to keep
// keep()/order() lists per panel; factor-variable entries must match the
// stored coefficient names exactly (e.g. 2.agegrp, 2.sex_when).
#delimit;
global svr_coef_keep_panel_a "
agegrp sex patient
";
global svr_coef_keep_panel_b "
patient
2.agegrp 3.agegrp
2.sex_when 3.sex_when 4.sex_when
";
global svr_coef_keep_panel_c "
sex
0.sex#c.patient
2.sex_when
2.sex_agegrp 3.sex_agegrp
1.sex#c.patient
4.sex_when
5.sex_agegrp 6.sex_agegrp
";
#delimit cr
///--- Labeling for Coefficients to Show
// LaTeX spacing snippets prepended to titles and coefficient rows.
global slb_title_spc "\vspace*{-5mm}\hspace*{-8mm}"
global slb_dis_tlt_spc "\vspace*{-5mm}\hspace*{-8mm}"
global slb_dis_ele_spc "\vspace*{0mm}\hspace*{5mm}"
global slb_1st_ele_spc "\vspace*{0mm}\hspace*{5mm}"
global slb_fot_lst_spc "\vspace*{0mm}\hspace*{2mm}"
// svr_starts_var_* anchor the refcat() section headings (G1a/G1b) to the
// first coefficient of each panel / sub-group.
#delimit;
global svr_starts_var_panel_a "agegrp";
global slb_coef_label_panel_a "
agegrp "${slb_1st_ele_spc}age group"
sex "${slb_1st_ele_spc}sex variable"
patient "${slb_1st_ele_spc}patient ID"
";
#delimit cr
#delimit;
global svr_starts_var_panel_b "patient";
global svr_starts_var_panel_b_ga "2.agegrp";
global svr_starts_var_panel_b_gb "2.sex_when";
global slb_coef_label_panel_b "
patient "${slb_1st_ele_spc}patient ID"
2.agegrp "${slb_dis_ele_spc} x (46-59 yrs)"
3.agegrp "${slb_dis_ele_spc} x (>60 years)"
2.sex_when "${slb_dis_ele_spc} x male after"
3.sex_when "${slb_dis_ele_spc} x female before"
4.sex_when "${slb_dis_ele_spc} x female after"
";
#delimit cr
#delimit;
global svr_starts_var_panel_c "sex";
global svr_starts_var_panel_c_sa "0.sex#c.patient";
global svr_starts_var_panel_c_sa_ga "2.sex_when";
global svr_starts_var_panel_c_sa_gb "2.sex_agegrp";
global svr_starts_var_panel_c_sb "1.sex#c.patient";
global svr_starts_var_panel_c_sb_ga "4.sex_when";
global svr_starts_var_panel_c_sb_gb "5.sex_agegrp";
global slb_coef_label_panel_c "
sex "${slb_1st_ele_spc}male dummy"
0.sex#c.patient "${slb_dis_ele_spc} male x patient ID"
2.sex_when "${slb_dis_ele_spc} x male x after"
2.sex_agegrp "${slb_dis_ele_spc} x male x (46-59 yrs)"
3.sex_agegrp "${slb_dis_ele_spc} x male x (>60 years)"
1.sex#c.patient "${slb_dis_ele_spc} male x patient ID"
4.sex_when "${slb_dis_ele_spc} x male x after"
5.sex_agegrp "${slb_dis_ele_spc} x female x (46-59 yrs)"
6.sex_agegrp "${slb_dis_ele_spc} x female x (>60 years)"
";
#delimit cr
/////////////////////////////////////////////////
///--- D2. Regression Display Controls
/////////////////////////////////////////////////
// Shared esttab option strings: footer stats list, star legend, and cell /
// standard-error formats for tex vs plain-text output.
global slb_reg_stats "N ${st_estd_rownames}"
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
global slb_starComm "nostar"
global slb_sd_tex `"se(fmt(a2) par("\vspace*{-2mm}{\footnotesize (" ") }"))"'
global slb_cells_tex `"cells(b(star fmt(a2)) $slb_sd_tex)"'
global slb_esttab_opt_tex "${slb_cells_tex} booktabs label collabels(none) nomtitles nonumbers star(${slb_starLvl})"
global slb_sd_txt `"se(fmt(a2) par("(" ")"))"'
global slb_cells_txt `"cells(b(star fmt(a2)) $slb_sd_txt)"'
global slb_esttab_opt_txt "${slb_cells_txt} stats(${slb_reg_stats}) collabels(none) mtitle nonumbers varwidth(30) modelwidth(15) star(${slb_starLvl}) addnotes(${slb_note})"
// Per-panel option bundles: title, coefficient selection/order, labels.
#delimit ;
global slb_panel_a_main "
title("${slb_panel_a}")
keep(${svr_coef_keep_panel_a}) order(${svr_coef_keep_panel_a})
coeflabels($slb_coef_label_panel_a)
";
global slb_panel_b_main "
title("${slb_panel_b}")
keep(${svr_coef_keep_panel_b}) order(${svr_coef_keep_panel_b})
coeflabels($slb_coef_label_panel_b)
";
global slb_panel_c_main "
title("${slb_panel_c}")
keep(${svr_coef_keep_panel_c}) order(${svr_coef_keep_panel_c})
coeflabels($slb_coef_label_panel_c)
";
#delimit cr
/////////////////////////////////////////////////
///--- E. Regression Shows
/////////////////////////////////////////////////
// Echo the three panels to the results window / log (no file output yet).
esttab ${smd_panel_a_m}, ${slb_panel_a_main} ${slb_esttab_opt_txt}
esttab ${smd_panel_b_m}, ${slb_panel_b_main} ${slb_esttab_opt_txt}
esttab ${smd_panel_c_m}, ${slb_panel_c_main} ${slb_esttab_opt_txt}
/////////////////////////////////////////////////
///--- F1. Define Latex Column Groups and Column Sub-Groups
/////////////////////////////////////////////////
///--- Column Groups
// colSeq lists the 1-based tabular column where each group starts, plus a
// final sentinel (8); consumed by the row-2 loop in section G2.
global it_max_col = 8
global it_min_col = 2
global it_col_cnt = 6
global colSeq "2 4 6 8"
// global st_cmidrule "\cmidrule(lr){2-3}\cmidrule(lr){4-5}\cmidrule(lr){6-7}"
global st_cmidrule "\cmidrule(lr){2-7}"
// NOTE(review): the captions below ("Age 5 to 12", "Villages") look like
// leftovers from a template table and do not describe the bplong
// blood-pressure sample -- confirm the intended captions.
///--- Group 1, columns 1 and 2
global labG1 "All Age 5 to 12"
global labC1 "{\small All Villages}"
global labC2 "{\small No Teaching Points}"
///--- Group 2, columns 3 and 4
global labG2 "Girls Age 5 to 12"
global labC3 "{\small All Villages}"
global labC4 "{\small No Teaching Points}"
///--- Group 3, columns 5 and 6
global labG3 "Boys Age 5 to 12"
global labC5 "{\small All Villages}"
global labC6 "{\small No Teaching Points}"
///--- Column Widths
// Width (cm) of each coefficient column and of the left label column.
global perCoefColWid = 1.85
global labColWid = 5
///--- Column Fractional Adjustment, 1 = 100%
global tableAdjustBoxWidth = 1.0
/////////////////////////////////////////////////
///--- F2. Tabling Calculations
/////////////////////////////////////////////////
///--- Width Calculation
// Derived widths (cm): coefficient-area width, total column count
// (coefficient columns + label column), and the overall widths used by the
// \multicolumn banner / footnote / legend rows.
global totCoefColWid = ${perCoefColWid}*${it_col_cnt}
global totColCnt = ${it_col_cnt} + 1
global totColWid = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidFootnote = ${labColWid} + ${totCoefColWid} + ${perCoefColWid} + ${perCoefColWid}/2
global totColWidLegend = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidLegendthin = ${totCoefColWid} + ${perCoefColWid}
// Echo each derived quantity once (previously the totCoefColWid line was
// copy-pasted five times, hiding the other widths from the log).
di "it_col_cnt:$it_col_cnt"
di "totCoefColWid:$totCoefColWid"
di "totColCnt:$totColCnt"
di "totColWid:$totColWid"
di "totColWidFootnote:$totColWidFootnote"
di "totColWidLegend:$totColWidLegend"
// One "&" per coefficient column; used for the blank spacer row in the
// bottom stats block (G1d).
global ampersand ""
foreach curLoop of numlist 1(1)$it_col_cnt {
global ampersand "$ampersand &"
}
di "ampersand:$ampersand"
// tabular column spec: label column + it_col_cnt centered coef columns.
global alignCenter "m{${labColWid}cm}"
local eB1 ">{\centering\arraybackslash}m{${perCoefColWid}cm}"
foreach curLoop of numlist 1(1)$it_col_cnt {
global alignCenter "$alignCenter `eB1'"
}
di "alignCenter:$alignCenter"
/////////////////////////////////////////////////
///--- G1a. Tex Sectioning panel A
/////////////////////////////////////////////////
// refcat() injects a bold panel-title row immediately before the first
// panel-A coefficient (svr_starts_var_panel_a).
#delimit ;
global slb_titling_panel_a "
${svr_starts_var_panel_a} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_a}}} \\"
";
global slb_refcat_panel_a `"refcat(${slb_titling_panel_a}, nolabel)"';
#delimit cr
/////////////////////////////////////////////////
///--- G1b. Tex Sectioning panel B
/////////////////////////////////////////////////
// If the panel title and the first sub-group heading anchor on the SAME
// coefficient, both rows must be attached to that one refcat anchor;
// otherwise each heading row gets its own anchor coefficient.
if ("${svr_starts_var_panel_b}" == "${svr_starts_var_panel_b_ga}") {
#delimit ;
global svr_starts_pb_andga "
${svr_starts_var_panel_b}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_b}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_ga}}} \\"
";
#delimit cr
}
else {
#delimit ;
global svr_starts_pb_andga "
${svr_starts_var_panel_b}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_b}}} \\"
${svr_starts_var_panel_b_ga}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_ga}}} \\"
";
#delimit cr
}
#delimit ;
global slb_titling_panel_b "
${svr_starts_pb_andga}
${svr_starts_var_panel_b_gb}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_gb}}} \\"
";
global slb_refcat_panel_b `"refcat(${slb_titling_panel_b}, nolabel)"';
#delimit cr
/////////////////////////////////////////////////
///--- G1c. Tex Sectioning panel C
/////////////////////////////////////////////////
// Panel C has three heading levels (panel title, sub-group, sub-sub-group);
// the three branches below merge heading rows onto shared refcat anchors
// when the anchor coefficients coincide.
if (("${svr_starts_var_panel_c}" == "${svr_starts_var_panel_c_sa}") & ("${svr_starts_var_panel_c_sa}" == "${svr_starts_var_panel_c_sa_ga}") ) {
///--- if main = sub headings = subsub heading
#delimit ;
global slb_titling_panel_c "
${svr_starts_var_panel_c} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_c}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sa}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_ga}}} \\"
${svr_starts_var_panel_c_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_gb}}} \\"
${svr_starts_var_panel_c_sb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sb}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_ga}}} \\"
${svr_starts_var_panel_c_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_gb}}} \\"
";
global slb_refcat_panel_c `"refcat(${slb_titling_panel_c}, nolabel)"';
#delimit cr
}
else if ("${svr_starts_var_panel_c_sa}" == "${svr_starts_var_panel_c_sa_ga}") {
///--- if main, sub headings differ, but subsub = sub heading
#delimit ;
global slb_titling_panel_c "
${svr_starts_var_panel_c} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_c}}} \\"
${svr_starts_var_panel_c_sa} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sa}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_ga}}} \\"
${svr_starts_var_panel_c_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_gb}}} \\"
${svr_starts_var_panel_c_sb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sb}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_ga}}} \\"
${svr_starts_var_panel_c_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_gb}}} \\"
";
global slb_refcat_panel_c `"refcat(${slb_titling_panel_c}, nolabel)"';
#delimit cr
}
else {
///--- if main, sub, subsub heading vars differ
#delimit ;
global slb_titling_panel_c "
${svr_starts_var_panel_c} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_c}}} \\"
${svr_starts_var_panel_c_sa} "${st_cmidrule}\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sa}}}} \\"
${svr_starts_var_panel_c_sa_ga} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_ga}}} \\"
${svr_starts_var_panel_c_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_gb}}} \\"
${svr_starts_var_panel_c_sb} "${st_cmidrule}\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sb}}}} \\"
${svr_starts_var_panel_c_sb_ga} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_ga}}} \\"
${svr_starts_var_panel_c_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_gb}}} \\"
";
global slb_refcat_panel_c `"refcat(${slb_titling_panel_c}, nolabel)"';
#delimit cr
}
/////////////////////////////////////////////////
///--- G1d. Bottom
/////////////////////////////////////////////////
// stats() labels: the Observations row is followed by a "Controls for each
// panel" banner row, a blank spacer row of ampersands, then one label row
// per sample-inclusion flag (slb_estd_1 ... slb_estd_4).
#delimit ;
global slb_titling_bottom `"
stats(N $st_estd_rownames,
labels(Observations
"\midrule \multicolumn{${totColCnt}}{L{${totColWid}cm}}{${slb_title_spc}\textbf{\textit{\normalsize ${slb_bottom}}}} \\ $ampersand \\ ${slb_fot_lst_spc}${slb_estd_1}"
"${slb_fot_lst_spc}${slb_estd_2}"
"${slb_fot_lst_spc}${slb_estd_3}"
"${slb_fot_lst_spc}${slb_estd_4}"))"';
#delimit cr
/////////////////////////////////////////////////
///--- G2. Tex Headline
/////////////////////////////////////////////////
///--- C.3.A. Initialize
global row1 "&"
global row1MidLine ""
global row2 ""
global row2MidLine ""
global row3 ""
///--- B. Row 2 and row 2 midline
* global colSeq "2 3 6"
// Walk colSeq: the first entry fixes the first coefficient column
// (minCoefCol); each subsequent gap between consecutive entries becomes
// one group cell (labG1, labG2, ...) with a cmidrule underneath.
global cmidrule ""
global colCtr = -1
foreach curCol of numlist $colSeq {
global colCtr = $colCtr + 1
global curCol1Min = `curCol' - 1
if ($colCtr == 0 ) {
global minCoefCol = "`curCol'"
}
if ($colCtr != 0 ) {
global gapCnt = (`curCol' - `lastCol')
global gapWidth = (`curCol' - `lastCol')*$perCoefColWid
di "curCol1Min:$curCol1Min, lastCol:`lastCol'"
di "$gapCnt"
di "\multicolumn{$gapCnt}{C{${gapWidth}cm}}{\small no Control}"
di "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global curRow2MidLine "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global row2MidLine "$row2MidLine $curRow2MidLine"
global curRow2 "\multicolumn{$gapCnt}{L{${gapWidth}cm}}{\small ${labG${colCtr}}}"
global row2 "$row2 & $curRow2"
}
local lastCol = `curCol'
}
///--- C. Row 3
* Initial & for label column
// One centered \multicolumn cell per regression column; falls back to the
// column number "(k)" when no labCk caption global is defined.
foreach curLoop of numlist 1(1)$it_col_cnt {
global curText "${labC`curLoop'}"
global textUse "(`curLoop')"
if ("$curText" != "") {
global textUse "$curText"
}
global curRow3 "\multicolumn{1}{C{${perCoefColWid}cm}}{$textUse}"
global row3 "$row3 & $curRow3"
}
///--- D. Row 1 and midline:
// Row 1 spans all coefficient columns with the inner title; the cmidrule
// runs from the first coefficient column to the last seen in the loop.
global row1 "${row1} \multicolumn{${it_col_cnt}}{L{${totCoefColWid}cm}}{${slb_title_inner}}"
global row1MidLine "\cmidrule(l{5pt}r{5pt}){${minCoefCol}-${curCol1Min}}"
///--- C.3.E Print lines
di "$row1 \\"
di "$row1MidLine "
di "$row2 \\"
di "$row2MidLine"
di "$row3 \\"
///--- C.4 Together
// Stack the three header rows (plus midrules) into the headline string
// consumed by prehead() below.
#delimit ;
///--- 1. Section
* local section "
* \section{`fileTitle'}\vspace*{-6mm}
* ";
///--- 2. Align and Column Define
local centering "$alignCenter";
global headline "
$row1 \\
$row1MidLine
$row2 \\
$row2MidLine
$row3 \\
";
#delimit cr
/////////////////////////////////////////////////
///--- G4. Head
/////////////////////////////////////////////////
// Assemble the LaTeX table shell: adjustbox wrapper, footnote row injected
// into postfoot(), the table/tabular opener, and the prehead()/postfoot()
// option strings handed to esttab in section H3.
#delimit ;
global adjustBoxStart "\begin{adjustbox}{max width=${tableAdjustBoxWidth}\textwidth}";
global adjustBoxEnd "\end{adjustbox}";
global notewrap "
\addlinespace[-0.5em]
\multicolumn{${totColCnt}}{L{${totColWidFootnote}cm}}{\footnotesize\justify${slb_note}}\\
";
global startTable "\begin{table}[htbp]
\centering
\caption{${slb_title}\label{${slb_label_tex}}}${adjustBoxStart}\begin{tabular}{`centering'}
\toprule
";
global headlineAll "prehead(${startTable}${headline})";
global headlineAllNoHead "prehead(${startTable})";
global postAll "postfoot(\bottomrule ${notewrap} \end{tabular}${adjustBoxEnd}\end{table})";
#delimit cr
/////////////////////////////////////////////////
///--- H1. Output Results to HTML
/////////////////////////////////////////////////
// Three stacked panels per file: panel A uses replace, B and C append.
esttab ${smd_panel_a_m} using "${st_out_html}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
esttab ${smd_panel_b_m} using "${st_out_html}", ${slb_panel_b_main} ${slb_esttab_opt_txt} append
esttab ${smd_panel_c_m} using "${st_out_html}", ${slb_panel_c_main} ${slb_esttab_opt_txt} append
/////////////////////////////////////////////////
///--- H2. Output Results to RTF
/////////////////////////////////////////////////
esttab ${smd_panel_a_m} using "${st_out_rtf}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
esttab ${smd_panel_b_m} using "${st_out_rtf}", ${slb_panel_b_main} ${slb_esttab_opt_txt} append
esttab ${smd_panel_c_m} using "${st_out_rtf}", ${slb_panel_c_main} ${slb_esttab_opt_txt} append
/////////////////////////////////////////////////
///--- H3. Output Results to Tex
/////////////////////////////////////////////////
// Fragment mode: panel A carries the table opener + headline; panel C
// carries refcat section rows, the bottom stats block and the table closer
// (the note itself comes in via ${notewrap} inside $postAll).
esttab $smd_panel_a_m using "${st_out_tex}", ///
${slb_panel_a_main} ///
${slb_refcat_panel_a} ///
${slb_esttab_opt_tex} ///
fragment $headlineAll postfoot("") replace
esttab $smd_panel_b_m using "${st_out_tex}", ///
${slb_panel_b_main} ///
${slb_refcat_panel_b} ///
${slb_esttab_opt_tex} ///
fragment prehead("") postfoot("") append
esttab $smd_panel_c_m using "${st_out_tex}", ///
${slb_panel_c_main} ///
${slb_refcat_panel_c} ///
${slb_esttab_opt_tex} ///
${slb_titling_bottom} ///
fragment prehead("") $postAll append
/////////////////////////////////////////////////
///--- I. Out Logs
/////////////////////////////////////////////////
///--- End Log and to HTML
log close
///--- to PDF
// Best-effort: configure the Results2pdf translator (custom page size,
// narrow margins) and export the results window; capture noisily keeps
// failures non-fatal while still echoing any error.
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 11.69
translator set Results2pdf pageheight 16.53
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${st_log_file}.pdf", replace translator(Results2pdf)
}
// Best-effort cleanup of the smcl log once the pdf has been produced.
capture noisily {
erase "${st_log_file}.smcl"
}
* ---------------- end of do-file ----------------
// Reset Stata state (screen, data in memory, all macros) before the next
// stand-alone example script.
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
Regression with a continuous variable and discrete variables; the discrete variables can interact with each other and with the continuous variable
*/
///--- File Names
// Root folder plus log/html/rtf/tex output paths for this example table.
global st_file_root "~\Stata4Econ\table\multipanel\tab_6col_cts_inter\"
global st_log_file "${st_file_root}gen_reg"
global st_out_html "${st_file_root}tab_6col_cts_inter.html"
global st_out_rtf "${st_file_root}tab_6col_cts_inter.rtf"
global st_out_tex "${st_file_root}tab_6col_cts_inter_texbody.tex"
///--- Start log
capture log close
log using "${st_log_file}" , replace
log on
set trace off
set tracedepth 1
/////////////////////////////////////////////////
///--- Load Data
/////////////////////////////////////////////////
set more off
// Long-form blood-pressure example data shipped with Stata
// (variables: patient, sex, agegrp, when, bp).
sysuse bplong, clear
tab sex
tab agegrp
tab when
tab sex when
tab sex agegrp
// Build labeled interaction group ids used as categorical regressors below.
egen sex_when = group(sex when), label
egen sex_agegrp = group(sex agegrp), label
egen when_agegrp = group(when agegrp), label
/////////////////////////////////////////////////
///--- A1. Define Regression Variables
/////////////////////////////////////////////////
* shared regression outcome lhs variable
global svr_outcome "when"
* for each panel, rhs variables differ
global svr_rhs_panel_a "sex c.patient c.bp"
global svr_rhs_panel_b "sex i.sex#c.patient i.sex#c.bp"
* for each column, conditioning differs
global it_reg_n = 6
global sif_col_1 "bp <= 185"
global sif_col_2 "bp <= 180"
global sif_col_3 "bp <= 175"
global sif_col_4 "bp <= 170"
global sif_col_5 "bp <= 165"
global sif_col_6 "bp <= 160"
* esttad strings for conditioning what were included
// mt_bl_estd flags, per regression column, whether that column's
// estimation sample still CONTAINS observations with bp >= 185/180/170/160
// (e.g. the "bp <= 175" sample contains bp >= 170 cases but none >= 180).
// These flags feed the Yes/No footer rows added via estadd in section C.
scalar it_esttad_n = 4
matrix mt_bl_estd = J(it_esttad_n, $it_reg_n, 0)
matrix rownames mt_bl_estd = bpge185 bpge180 bpge170 bpge160
matrix colnames mt_bl_estd = reg1 reg2 reg3 reg4 reg5 reg6
// Each assignment writes a 4x1 column vector into mt_bl_estd at row 1.
matrix mt_bl_estd[1, 1] = (1\1\1\1)
matrix mt_bl_estd[1, 2] = (0\1\1\1)
matrix mt_bl_estd[1, 3] = (0\0\1\1)
matrix mt_bl_estd[1, 4] = (0\0\1\1)
matrix mt_bl_estd[1, 5] = (0\0\0\1)
matrix mt_bl_estd[1, 6] = (0\0\0\1)
global st_estd_rownames : rownames mt_bl_estd
global slb_estd_1 "blood pressure >= 185"
global slb_estd_2 "blood pressure >= 180"
global slb_estd_3 "blood pressure >= 170"
global slb_estd_4 "blood pressure >= 160"
/////////////////////////////////////////////////
///--- A2. Define Regression Technical Strings
/////////////////////////////////////////////////
///--- Technical Controls
// Estimation command and shared options appended to every regression.
global stc_regc "regress"
global stc_opts ", vce(robust)"
/////////////////////////////////////////////////
///--- B1. Define Regressions Panel A
/////////////////////////////////////////////////
/*
di "$srg_panel_a_col_1"
di "$srg_panel_a_col_2"
di "$srg_panel_a_col_6"
*/
// Compose one full regression command string per column, stored in the
// globals srg_panel_a_col_1 ... srg_panel_a_col_6; executed in section C.
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_a_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_a if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_a_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- B2. Define Regressions Panel B
/////////////////////////////////////////////////
/*
di "$srg_panel_b_col_1"
di "$srg_panel_b_col_2"
di "$srg_panel_b_col_6"
*/
// Same as B1 but with the panel-B (interacted) right-hand-side list.
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_b_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_b if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_b_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- C. Run Regressions
/////////////////////////////////////////////////
eststo clear
local it_reg_ctr = 0
// Loop over panels x columns: run each stored command, attach Yes/No
// sample-inclusion scalars via estadd, and collect the eststo names per
// panel into smd_panel_a_m / smd_panel_b_m.
foreach st_panel in panel_a panel_b {
global st_cur_sm_stor "smd_`st_panel'_m"
global ${st_cur_sm_stor} ""
foreach it_regre of numlist 1(1)$it_reg_n {
local it_reg_ctr = `it_reg_ctr' + 1
global st_cur_srg_name "srg_`st_panel'_col_`it_regre'"
di "st_panel:`st_panel', it_reg_ctr:`it_reg_ctr', st_cur_srg_name:${st_cur_srg_name}"
///--- Regression
// ${$st_cur_srg_name} double-expands: first to the name
// srg_<panel>_col_<k>, then to the stored regress command itself.
eststo m`it_reg_ctr', title("${sif_col_`it_regre'}") : ${$st_cur_srg_name}
///--- Estadd Controls
foreach st_estd_name in $st_estd_rownames {
scalar bl_estad = el(mt_bl_estd, rownumb(mt_bl_estd, "`st_estd_name'"), `it_regre')
if (bl_estad) {
estadd local `st_estd_name' "Yes"
}
else {
estadd local `st_estd_name' "No"
}
}
///--- Track Regression Store
global $st_cur_sm_stor "${${st_cur_sm_stor}} m`it_reg_ctr'"
}
di "${${st_cur_sm_stor}}"
}
di "$smd_panel_a_m"
di "$smd_panel_b_m"
/////////////////////////////////////////////////
///--- D1. Labeling
/////////////////////////////////////////////////
///--- Title overall
global slb_title "Outcome: Before or After"
global slb_title_inner "\textbf{Continuous}: BP and patient are continuous"
global slb_label_tex "tab:sccts"
///--- Several RHS Continuous Variables
global slb_panel_a "Panel A: Continuous Vars and Discretes"
///--- Continuous Variables + Several Discrete Variables
global slb_panel_b "Panel B: Interact Cts Vars with Discrete"
global slb_panel_b_ga "Interact with Male:"
global slb_panel_b_gb "Interact with Female:"
///--- Notes
// Stata expands $-references when a global is DEFINED, so slb_starLvl must
// exist before slb_note references it; previously it was only defined
// later (section D2), leaving the star legend silently empty in the note.
// The D2 definition is identical, so repeating it here is harmless.
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
global slb_bottom "Controls for each panel:"
global slb_note "${slb_starLvl}. Robust standard errors. Each column is a separate regression."
///--- Show which coefficients to keep
// Coefficient names produced by "i.sex#c.patient" / "i.sex#c.bp" carry the
// c. prefix on the continuous part (e.g. 0.sex#c.patient, as also used by
// the coeflabels block below); without it the keep()/order() patterns
// cannot match the stored coefficient names.
#delimit;
global svr_coef_keep_panel_a "
sex patient bp
";
global svr_coef_keep_panel_b "
sex
0.sex#c.patient
0.sex#c.bp
1.sex#c.patient
1.sex#c.bp
";
#delimit cr
///--- Labeling for Coefficients to Show
// LaTeX spacing snippets prepended to titles and coefficient rows.
global slb_title_spc "\vspace*{-5mm}\hspace*{-8mm}"
global slb_dis_tlt_spc "\vspace*{-5mm}\hspace*{-8mm}"
global slb_dis_ele_spc "\vspace*{0mm}\hspace*{5mm}"
global slb_1st_ele_spc "\vspace*{0mm}\hspace*{5mm}"
global slb_fot_lst_spc "\vspace*{0mm}\hspace*{2mm}"
// svr_starts_var_* anchor the refcat() section headings to the first
// coefficient of each panel / sub-group.
#delimit;
global svr_starts_var_panel_a "sex";
global slb_coef_label_panel_a "
sex "${slb_1st_ele_spc}sex variable (discrete)"
patient "${slb_1st_ele_spc}patient ID (cts)"
bp "${slb_1st_ele_spc}blood pressure (cts)"
";
#delimit cr
// The 0.sex (male) and 1.sex (female) interaction rows share the same
// label text; the refcat sub-group headings distinguish them in the table.
#delimit;
global svr_starts_var_panel_b "sex";
global svr_starts_var_panel_b_ga "0.sex#c.patient";
global svr_starts_var_panel_b_gb "1.sex#c.patient";
global slb_coef_label_panel_b "
sex "${slb_1st_ele_spc}sex variable (discrete)"
0.sex#c.patient "${slb_1st_ele_spc} $\times$ patient ID"
0.sex#c.bp "${slb_1st_ele_spc} $\times$ blood pressure"
1.sex#c.patient "${slb_1st_ele_spc} $\times$ patient ID"
1.sex#c.bp "${slb_1st_ele_spc} $\times$ blood pressure"
";
#delimit cr
/////////////////////////////////////////////////
///--- D2. Regression Display Controls
/////////////////////////////////////////////////
// esttab option bundles: *_tex variants feed the LaTeX fragments (booktabs, SEs
// in \footnotesize parentheses), *_txt variants feed screen/HTML/RTF output.
global slb_reg_stats "N ${st_estd_rownames}"
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
global slb_starComm "nostar"
global slb_sd_tex `"se(fmt(a2) par("\vspace*{-2mm}{\footnotesize (" ") }"))"'
global slb_cells_tex `"cells(b(star fmt(a2)) $slb_sd_tex)"'
global slb_esttab_opt_tex "${slb_cells_tex} booktabs label collabels(none) nomtitles nonumbers star(${slb_starLvl})"
global slb_sd_txt `"se(fmt(a2) par("(" ")"))"'
global slb_cells_txt `"cells(b(star fmt(a2)) $slb_sd_txt)"'
global slb_esttab_opt_txt "${slb_cells_txt} stats(${slb_reg_stats}) collabels(none) mtitle nonumbers varwidth(30) modelwidth(15) star(${slb_starLvl}) addnotes(${slb_note})"
// Per-panel esttab fragments: panel title plus keep/order/label of coefficients.
#delimit ;
global slb_panel_a_main "
title("${slb_panel_a}")
keep(${svr_coef_keep_panel_a}) order(${svr_coef_keep_panel_a})
coeflabels($slb_coef_label_panel_a)
";
global slb_panel_b_main "
title("${slb_panel_b}")
keep(${svr_coef_keep_panel_b}) order(${svr_coef_keep_panel_b})
coeflabels($slb_coef_label_panel_b)
";
#delimit cr
/////////////////////////////////////////////////
///--- E. Regression Shows
/////////////////////////////////////////////////
// Echo both panels to the log/screen before any files are written.
esttab ${smd_panel_a_m}, ${slb_panel_a_main} ${slb_esttab_opt_txt}
esttab ${smd_panel_b_m}, ${slb_panel_b_main} ${slb_esttab_opt_txt}
/////////////////////////////////////////////////
///--- F1. Define Latex Column Groups and Column Sub-Groups
/////////////////////////////////////////////////
///--- Column Groups
// colSeq: 1-based LaTeX column where each column-group starts, plus the closing
// bound (8); consumed by the G2 header-building loop below.
global it_max_col = 8
global it_min_col = 2
global it_col_cnt = 6
global colSeq "2 4 6 8"
// global st_cmidrule "\cmidrule(lr){2-3}\cmidrule(lr){4-5}\cmidrule(lr){6-7}"
global st_cmidrule "\cmidrule(lr){2-7}"
///--- Group 1, columns 1 and 2
// Column-group (labG#) and per-column (labC#) header labels.
// (typo fix: "Teachng" -> "Teaching")
global labG1 "All Age 5 to 12"
global labC1 "{\small All Villages}"
global labC2 "{\small No Teaching Points}"
///--- Group 2, columns 3 and 4
global labG2 "Girls Age 5 to 12"
global labC3 "{\small All Villages}"
global labC4 "{\small No Teaching Points}"
///--- Group 3, columns 5 and 6
global labG3 "Boys Age 5 to 12"
global labC5 "{\small All Villages}"
global labC6 "{\small No Teaching Points}"
///--- Column Widths
// Widths in cm: each coefficient column, and the left row-label column.
global perCoefColWid = 2
global labColWid = 5
///--- Column Fractional Adjustment, 1 = 100%
global tableAdjustBoxWidth = 1.0
/////////////////////////////////////////////////
///--- F2. Tabling Calculations
/////////////////////////////////////////////////
///--- Width Calculation
// Derived totals reused by the \multicolumn{...}{L{...cm}} heading rows.
global totCoefColWid = ${perCoefColWid}*${it_col_cnt}
global totColCnt = ${it_col_cnt} + 1
global totColWid = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidFootnote = ${labColWid} + ${totCoefColWid} + ${perCoefColWid} + ${perCoefColWid}/2
global totColWidLegend = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidLegendthin = ${totCoefColWid} + ${perCoefColWid}
di "it_col_cnt:$it_col_cnt"
// NOTE(review): the same debug line is repeated five times; presumably the other
// width globals (totColWid, totColWidFootnote, ...) were meant to be echoed.
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
// Build "& & & & & &": one "&" per coefficient column (used as an empty row).
global ampersand ""
foreach curLoop of numlist 1(1)$it_col_cnt {
global ampersand "$ampersand &"
}
di "ampersand:$ampersand"
// tabular column spec: label column m{...} plus one centered m{...} per column.
global alignCenter "m{${labColWid}cm}"
local eB1 ">{\centering\arraybackslash}m{${perCoefColWid}cm}"
foreach curLoop of numlist 1(1)$it_col_cnt {
global alignCenter "$alignCenter `eB1'"
}
di "alignCenter:$alignCenter"
/////////////////////////////////////////////////
///--- G1a. Tex Sectioning panel A
/////////////////////////////////////////////////
// refcat() inserts a full-width bold panel heading row in front of panel A's
// first coefficient (svr_starts_var_panel_a).
#delimit ;
global slb_titling_panel_a "
${svr_starts_var_panel_a} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_a}}} \\"
";
global slb_refcat_panel_a `"refcat(${slb_titling_panel_a}, nolabel)"';
#delimit cr
/////////////////////////////////////////////////
///--- G1b. Tex Sectioning panel B
/////////////////////////////////////////////////
// refcat() can hang only one heading string per coefficient: when panel B's
// first coefficient is also subgroup (ga)'s first, merge both headings into it.
if ("${svr_starts_var_panel_b}" == "${svr_starts_var_panel_b_ga}") {
#delimit ;
global svr_starts_pb_andga "
${svr_starts_var_panel_b}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_b}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_ga}}} \\"
";
#delimit cr
}
else {
#delimit ;
global svr_starts_pb_andga "
${svr_starts_var_panel_b}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_b}}} \\"
${svr_starts_var_panel_b_ga}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_ga}}} \\"
";
#delimit cr
}
#delimit ;
global slb_titling_panel_b "
${svr_starts_pb_andga}
${svr_starts_var_panel_b_gb}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_gb}}} \\"
";
global slb_refcat_panel_b `"refcat(${slb_titling_panel_b}, nolabel)"';
#delimit cr
/////////////////////////////////////////////////
///--- G1d. Bottom
/////////////////////////////////////////////////
// stats() block for the table bottom: N plus one Yes/No row per estadd
// indicator; a bold "Controls..." banner is spliced into the first label.
#delimit ;
global slb_titling_bottom `"
stats(N $st_estd_rownames,
labels(Observations
"\midrule \multicolumn{${totColCnt}}{L{${totColWid}cm}}{${slb_title_spc}\textbf{\textit{\normalsize ${slb_bottom}}}} \\ $ampersand \\ ${slb_fot_lst_spc}${slb_estd_1}"
"${slb_fot_lst_spc}${slb_estd_2}"
"${slb_fot_lst_spc}${slb_estd_3}"
"${slb_fot_lst_spc}${slb_estd_4}"))"';
#delimit cr
/////////////////////////////////////////////////
///--- G2. Tex Headline
/////////////////////////////////////////////////
///--- C.3.A. Initialize
// row1: spanning title row; row2: column-group labels; row3: per-column labels;
// *MidLine: \cmidrule separators drawn under row1 / row2.
global row1 "&"
global row1MidLine ""
global row2 ""
global row2MidLine ""
global row3 ""
///--- B. Row 2 and row 2 midline
* global colSeq "2 3 6"
global cmidrule ""
global colCtr = -1
// Walk group boundaries in colSeq; each gap [lastCol, curCol) becomes one group
// label cell (labG#) plus a \cmidrule under it. The di lines are debug echoes.
foreach curCol of numlist $colSeq {
global colCtr = $colCtr + 1
global curCol1Min = `curCol' - 1
if ($colCtr == 0 ) {
global minCoefCol = "`curCol'"
}
if ($colCtr != 0 ) {
global gapCnt = (`curCol' - `lastCol')
global gapWidth = (`curCol' - `lastCol')*$perCoefColWid
di "curCol1Min:$curCol1Min, lastCol:`lastCol'"
di "$gapCnt"
di "\multicolumn{$gapCnt}{C{${gapWidth}cm}}{\small no Control}"
di "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global curRow2MidLine "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global row2MidLine "$row2MidLine $curRow2MidLine"
global curRow2 "\multicolumn{$gapCnt}{L{${gapWidth}cm}}{\small ${labG${colCtr}}}"
global row2 "$row2 & $curRow2"
}
local lastCol = `curCol'
}
///--- C. Row 3
* Initial & for label column
// One centered cell per coefficient column: labC# when set, else "(#)".
foreach curLoop of numlist 1(1)$it_col_cnt {
global curText "${labC`curLoop'}"
global textUse "(`curLoop')"
if ("$curText" != "") {
global textUse "$curText"
}
global curRow3 "\multicolumn{1}{C{${perCoefColWid}cm}}{$textUse}"
global row3 "$row3 & $curRow3"
}
///--- D. Row 1 and midline:
global row1 "${row1} \multicolumn{${it_col_cnt}}{L{${totCoefColWid}cm}}{${slb_title_inner}}"
global row1MidLine "\cmidrule(l{5pt}r{5pt}){${minCoefCol}-${curCol1Min}}"
///--- C.3.E Print lines
di "$row1 \\"
di "$row1MidLine "
di "$row2 \\"
di "$row2MidLine"
di "$row3 \\"
///--- C.4 Together
#delimit ;
///--- 1. Section
* local section "
* \section{`fileTitle'}\vspace*{-6mm}
* ";
///--- 2. Align and Column Define
local centering "$alignCenter";
global headline "
$row1 \\
$row1MidLine
$row2 \\
$row2MidLine
$row3 \\
";
#delimit cr
/////////////////////////////////////////////////
///--- G4. Head
/////////////////////////////////////////////////
// Assemble the esttab prehead()/postfoot() wrappers: table environment,
// caption, adjustbox scaler, the G2 header rows, and the footnote row.
// NOTE(review): startTable interpolates the *local* `centering' built in G2, so
// G4 must run in the same execution as G2 or the column spec comes out empty.
#delimit ;
global adjustBoxStart "\begin{adjustbox}{max width=${tableAdjustBoxWidth}\textwidth}";
global adjustBoxEnd "\end{adjustbox}";
global notewrap "
\addlinespace[-0.5em]
\multicolumn{${totColCnt}}{L{${totColWidFootnote}cm}}{\footnotesize\justify${slb_note}}\\
";
global startTable "\begin{table}[htbp]
\centering
\caption{${slb_title}\label{${slb_label_tex}}}${adjustBoxStart}\begin{tabular}{`centering'}
\toprule
";
global headlineAll "prehead(${startTable}${headline})";
global headlineAllNoHead "prehead(${startTable})";
global postAll "postfoot(\bottomrule ${notewrap} \end{tabular}${adjustBoxEnd}\end{table})";
#delimit cr
/////////////////////////////////////////////////
///--- H1. Output Results to HTML
/////////////////////////////////////////////////
esttab ${smd_panel_a_m} using "${st_out_html}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
esttab ${smd_panel_b_m} using "${st_out_html}", ${slb_panel_b_main} ${slb_esttab_opt_txt} append
/////////////////////////////////////////////////
///--- H2. Output Results to RTF
/////////////////////////////////////////////////
esttab ${smd_panel_a_m} using "${st_out_rtf}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
esttab ${smd_panel_b_m} using "${st_out_rtf}", ${slb_panel_b_main} ${slb_esttab_opt_txt} append
/////////////////////////////////////////////////
///--- H3. Output Results to Tex
/////////////////////////////////////////////////
// fragment mode: panel A opens the shared tabular (prehead, empty postfoot);
// panel B appends into it and closes the table (empty prehead, postAll).
esttab $smd_panel_a_m using "${st_out_tex}", ///
${slb_panel_a_main} ///
${slb_refcat_panel_a} ///
${slb_esttab_opt_tex} ///
fragment $headlineAll postfoot("") replace
esttab $smd_panel_b_m using "${st_out_tex}", ///
${slb_panel_b_main} ///
${slb_refcat_panel_b} ///
${slb_esttab_opt_tex} ///
${slb_titling_bottom} ///
fragment prehead("") $postAll append
/////////////////////////////////////////////////
///--- I. Out Logs
/////////////////////////////////////////////////
///--- End Log and to HTML
log close
///--- to PDF
// Translate the smcl log to PDF on a custom-size page, then delete the smcl;
// capture noisily keeps the script going if the translator is unavailable.
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 11.69
translator set Results2pdf pageheight 16.53
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${st_log_file}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${st_log_file}.smcl"
}
|
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
Regression with discrete variables, discrete variables could interact with each other
*/
///--- File Names
// NOTE(review): backslash paths under "~" assume Windows; forward slashes would
// be portable across the platforms Stata supports.
global st_file_root "~\Stata4Econ\table\multipanel\tab_6col_dis2inter\"
global st_log_file "${st_file_root}gen_reg"
global st_out_html "${st_file_root}tab_6col_dis2inter.html"
global st_out_rtf "${st_file_root}tab_6col_dis2inter.rtf"
global st_out_tex "${st_file_root}tab_6col_dis2inter_texbody.tex"
///--- Start log
capture log close
log using "${st_log_file}" , replace
log on
set trace off
set tracedepth 1
/////////////////////////////////////////////////
///--- Load Data
/////////////////////////////////////////////////
// Stata's bundled blood-pressure data; build labeled interaction group ids.
set more off
sysuse bplong, clear
tab sex
tab agegrp
tab when
tab sex when
tab sex agegrp
egen sex_when = group(sex when), label
egen sex_agegrp = group(sex agegrp), label
egen when_agegrp = group(when agegrp), label
/////////////////////////////////////////////////
///--- A1. Define Regression Variables
/////////////////////////////////////////////////
* shared regression outcome lhs variable
global svr_outcome "bp"
* for each panel, rhs variables differ
global svr_rhs_panel_a "agegrp sex"
global svr_rhs_panel_b "ib1.agegrp ib1.sex_when"
global svr_rhs_panel_c "sex io(1 3).sex_when io(1 4).sex_agegrp"
* for each column, conditioning differs
global it_reg_n = 6
global sif_col_1 "bp <= 185"
global sif_col_2 "bp <= 180"
global sif_col_3 "bp <= 175"
global sif_col_4 "bp <= 170"
global sif_col_5 "bp <= 165"
global sif_col_6 "bp <= 160"
* esttad strings for conditioning what were included
// mt_bl_estd: 4 indicator rows x 6 regressions; element (r, c) == 1 when the
// Yes/No row r should read "Yes" for regression c. Each `matrix mt_bl_estd[1, c]`
// assignment writes a whole 4x1 column starting at row 1.
scalar it_esttad_n = 4
matrix mt_bl_estd = J(it_esttad_n, $it_reg_n, 0)
matrix rownames mt_bl_estd = bpge185 bpge180 bpge170 bpge160
matrix colnames mt_bl_estd = reg1 reg2 reg3 reg4 reg5 reg6
matrix mt_bl_estd[1, 1] = (1\1\1\1)
matrix mt_bl_estd[1, 2] = (0\1\1\1)
matrix mt_bl_estd[1, 3] = (0\0\1\1)
matrix mt_bl_estd[1, 4] = (0\0\1\1)
matrix mt_bl_estd[1, 5] = (0\0\0\1)
matrix mt_bl_estd[1, 6] = (0\0\0\1)
global st_estd_rownames : rownames mt_bl_estd
// NOTE(review): the sample conditions above are "bp <= x" but the row names
// (bpge*) and the labels below say ">=" -- confirm which direction is intended.
global slb_estd_1 "blood pressure >= 185"
global slb_estd_2 "blood pressure >= 180"
global slb_estd_3 "blood pressure >= 170"
global slb_estd_4 "blood pressure >= 160"
/////////////////////////////////////////////////
///--- A2. Define Regression Technical Strings
/////////////////////////////////////////////////
///--- Technical Controls
// Estimation command and options shared by every column: OLS with robust SEs.
global stc_regc "regress"
global stc_opts ", vce(robust)"
/////////////////////////////////////////////////
///--- B1. Define Regressions Panel A
/////////////////////////////////////////////////
/*
di "$srg_panel_a_col_1"
di "$srg_panel_a_col_2"
di "$srg_panel_a_col_6"
*/
// Compose one full regression command string per column: cmd + lhs + panel rhs
// + column sample condition + options, stored as srg_<panel>_col_<j>.
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_a_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_a if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_a_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- B2. Define Regressions Panel B
/////////////////////////////////////////////////
/*
di "$srg_panel_b_col_1"
di "$srg_panel_b_col_2"
di "$srg_panel_b_col_6"
*/
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_b_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_b if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_b_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- B3. Define Regressions Panel C
/////////////////////////////////////////////////
/*
di "$srg_panel_c_col_1"
di "$srg_panel_c_col_2"
di "$srg_panel_c_col_6"
*/
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_c_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_c if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_c_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- C. Run Regressions
/////////////////////////////////////////////////
// Run all 3 panels x 6 columns: estimates stored as m1..m18 via eststo, model
// names per panel collected into smd_<panel>_m (consumed by every esttab call).
// ${$st_cur_srg_name} is a two-step macro expansion: name -> command string.
eststo clear
local it_reg_ctr = 0
foreach st_panel in panel_a panel_b panel_c {
global st_cur_sm_stor "smd_`st_panel'_m"
global ${st_cur_sm_stor} ""
foreach it_regre of numlist 1(1)$it_reg_n {
local it_reg_ctr = `it_reg_ctr' + 1
global st_cur_srg_name "srg_`st_panel'_col_`it_regre'"
di "st_panel:`st_panel', it_reg_ctr:`it_reg_ctr', st_cur_srg_name:${st_cur_srg_name}"
///--- Regression
eststo m`it_reg_ctr', title("${sif_col_`it_regre'}") : ${$st_cur_srg_name}
///--- Estadd Controls
foreach st_estd_name in $st_estd_rownames {
scalar bl_estad = el(mt_bl_estd, rownumb(mt_bl_estd, "`st_estd_name'"), `it_regre')
if (bl_estad) {
estadd local `st_estd_name' "Yes"
}
else {
estadd local `st_estd_name' "No"
}
}
///--- Track Regression Store
global $st_cur_sm_stor "${${st_cur_sm_stor}} m`it_reg_ctr'"
}
di "${${st_cur_sm_stor}}"
}
di "$smd_panel_a_m"
di "$smd_panel_b_m"
di "$smd_panel_c_m"
/////////////////////////////////////////////////
///--- D1. Labeling
/////////////////////////////////////////////////
///--- Title overall
// Table caption, spanning inner title (row1 of the header), and TeX label.
global slb_title "Outcome: Blood Pressure"
global slb_title_inner "\textbf{Categories}: Discrete Categories and BP"
global slb_label_tex "tab:scminter"
///--- Several RHS Continuous Variables
global slb_panel_a "Panel A: Continuous Right Hand Side Variables"
///--- Continuous Variables + Several Discrete Variables
global slb_panel_b "Panel B: Two Discrete Right Hand Side Variables"
global slb_panel_b_ga "Age Groups (Compare to 30-45)"
* Sub-heading for the sex_when dummies. Base of ib1.sex_when is category 1;
* the coef labels below show 3.sex_when as "female before" (a displayed row),
* so the omitted base is Male/Before -- fix: "Female Before" -> "Male Before".
global slb_panel_b_gb "Gender/Time Groups (Compare to Male Before)"
///--- Continuous Variables + Several Discrete Variables Interacted with More Discrete Variables
global slb_panel_c "Panel C: Two Discrete Interacted Variables"
// Panel C sub-headings: sa = male-interaction group, sb = female-interaction
// group; *_ga / *_gb are the sub-sub-headings within each group.
global slb_panel_c_sa "Male Dummy Interactions:"
global slb_panel_c_sb "Female Dummy Interactions:"
global slb_panel_c_sa_ga "Time Groups (Compare to Before)"
global slb_panel_c_sa_gb "Age Groups (Compare to 30-45)"
global slb_panel_c_sb_ga "Time Groups (Compare to Before)"
global slb_panel_c_sb_gb "Age Groups (Compare to 30-45)"
///--- Notes
// Banner above the Yes/No sample-inclusion rows at the table bottom.
global slb_bottom "Controls for each panel:"
* Footnote shown under every table: star legend plus methods note.
* (typo fix: "spearate" -> "separate")
global slb_note "${slb_starLvl}. Robust standard errors. Each column is a separate regression."
///--- Show which coefficients to keep
#delimit;
global svr_coef_keep_panel_a "
agegrp sex
";
global svr_coef_keep_panel_b "
2.agegrp 3.agegrp
2.sex_when 3.sex_when 4.sex_when
";
global svr_coef_keep_panel_c "
sex
2.sex_when
2.sex_agegrp 3.sex_agegrp
4.sex_when
5.sex_agegrp 6.sex_agegrp
";
#delimit cr
// The keep()/order() lists above decide which coefficient rows each panel shows.
///--- Labeling for Coefficients to Show
// Shared LaTeX spacing prefixes used when composing row labels and headings.
global slb_title_spc "\vspace*{-5mm}\hspace*{-8mm}"
global slb_dis_tlt_spc "\vspace*{-5mm}\hspace*{-8mm}"
global slb_dis_ele_spc "\vspace*{0mm}\hspace*{5mm}"
global slb_1st_ele_spc "\vspace*{0mm}\hspace*{5mm}"
global slb_fot_lst_spc "\vspace*{0mm}\hspace*{2mm}"
// svr_starts_var_panel_*: first coefficient of each (sub)group; refcat() later
// hangs the panel / group heading rows in front of that coefficient.
#delimit;
global svr_starts_var_panel_a "agegrp";
global slb_coef_label_panel_a "
agegrp "${slb_1st_ele_spc}age group"
sex "${slb_1st_ele_spc}sex variable"
";
#delimit cr
#delimit;
global svr_starts_var_panel_b "2.agegrp";
global svr_starts_var_panel_b_ga "2.agegrp";
global svr_starts_var_panel_b_gb "2.sex_when";
global slb_coef_label_panel_b "
2.agegrp "${slb_dis_ele_spc} x (46-59 yrs)"
3.agegrp "${slb_dis_ele_spc} x (>60 years)"
2.sex_when "${slb_dis_ele_spc} x male after"
3.sex_when "${slb_dis_ele_spc} x female before"
4.sex_when "${slb_dis_ele_spc} x female after"
";
#delimit cr
// Panel C anchors and row labels; refcat() hangs the panel / sub-group
// headings off the svr_starts_* coefficients named here.
// (fix: 4.sex_when is the female-after category -- this script's own panel B
// labels it "x female after" -- it was mislabeled "x male x after" here.)
// NOTE(review): "male dummy" for sex -- bplong codes sex 0=Male/1=Female (the
// sibling script does `clonevar female = sex`), so confirm this label.
#delimit;
global svr_starts_var_panel_c "sex";
global svr_starts_var_panel_c_sa "2.sex_when";
global svr_starts_var_panel_c_sa_ga "2.sex_when";
global svr_starts_var_panel_c_sa_gb "2.sex_agegrp";
global svr_starts_var_panel_c_sb "4.sex_when";
global svr_starts_var_panel_c_sb_ga "4.sex_when";
global svr_starts_var_panel_c_sb_gb "5.sex_agegrp";
global slb_coef_label_panel_c "
sex "${slb_1st_ele_spc}male dummy"
2.sex_when "${slb_dis_ele_spc} x male x after"
2.sex_agegrp "${slb_dis_ele_spc} x male x (46-59 yrs)"
3.sex_agegrp "${slb_dis_ele_spc} x male x (>60 years)"
4.sex_when "${slb_dis_ele_spc} x female x after"
5.sex_agegrp "${slb_dis_ele_spc} x female x (46-59 yrs)"
6.sex_agegrp "${slb_dis_ele_spc} x female x (>60 years)"
";
#delimit cr
/////////////////////////////////////////////////
///--- D2. Regression Display Controls
/////////////////////////////////////////////////
// esttab option bundles: *_tex variants feed the LaTeX fragments, *_txt
// variants feed screen/HTML/RTF output.
global slb_reg_stats "N ${st_estd_rownames}"
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
global slb_starComm "nostar"
global slb_sd_tex `"se(fmt(a2) par("\vspace*{-2mm}{\footnotesize (" ") }"))"'
global slb_cells_tex `"cells(b(star fmt(a2)) $slb_sd_tex)"'
global slb_esttab_opt_tex "${slb_cells_tex} booktabs label collabels(none) nomtitles nonumbers star(${slb_starLvl})"
global slb_sd_txt `"se(fmt(a2) par("(" ")"))"'
global slb_cells_txt `"cells(b(star fmt(a2)) $slb_sd_txt)"'
global slb_esttab_opt_txt "${slb_cells_txt} stats(${slb_reg_stats}) collabels(none) mtitle nonumbers varwidth(30) modelwidth(15) star(${slb_starLvl}) addnotes(${slb_note})"
// Per-panel esttab fragments: panel title plus keep/order/label of coefficients.
#delimit ;
global slb_panel_a_main "
title("${slb_panel_a}")
keep(${svr_coef_keep_panel_a}) order(${svr_coef_keep_panel_a})
coeflabels($slb_coef_label_panel_a)
";
global slb_panel_b_main "
title("${slb_panel_b}")
keep(${svr_coef_keep_panel_b}) order(${svr_coef_keep_panel_b})
coeflabels($slb_coef_label_panel_b)
";
global slb_panel_c_main "
title("${slb_panel_c}")
keep(${svr_coef_keep_panel_c}) order(${svr_coef_keep_panel_c})
coeflabels($slb_coef_label_panel_c)
";
#delimit cr
/////////////////////////////////////////////////
///--- E. Regression Shows
/////////////////////////////////////////////////
// Echo all three panels to the log/screen before any files are written.
esttab ${smd_panel_a_m}, ${slb_panel_a_main} ${slb_esttab_opt_txt}
esttab ${smd_panel_b_m}, ${slb_panel_b_main} ${slb_esttab_opt_txt}
esttab ${smd_panel_c_m}, ${slb_panel_c_main} ${slb_esttab_opt_txt}
/////////////////////////////////////////////////
///--- F1. Define Latex Column Groups and Column Sub-Groups
/////////////////////////////////////////////////
///--- Column Groups
// colSeq: 1-based LaTeX column where each column-group starts, plus the closing
// bound (8); consumed by the G2 header-building loop below.
global it_max_col = 8
global it_min_col = 2
global it_col_cnt = 6
global colSeq "2 4 6 8"
// global st_cmidrule "\cmidrule(lr){2-3}\cmidrule(lr){4-5}\cmidrule(lr){6-7}"
global st_cmidrule "\cmidrule(lr){2-7}"
///--- Group 1, columns 1 and 2
// Column-group (labG#) and per-column (labC#) header labels.
// (typo fix: "Teachng" -> "Teaching")
global labG1 "All Age 5 to 12"
global labC1 "{\small All Villages}"
global labC2 "{\small No Teaching Points}"
///--- Group 2, columns 3 and 4
global labG2 "Girls Age 5 to 12"
global labC3 "{\small All Villages}"
global labC4 "{\small No Teaching Points}"
///--- Group 3, columns 5 and 6
global labG3 "Boys Age 5 to 12"
global labC5 "{\small All Villages}"
global labC6 "{\small No Teaching Points}"
///--- Column Widths
// Widths in cm: each coefficient column, and the left row-label column.
global perCoefColWid = 1.85
global labColWid = 5
///--- Column Fractional Adjustment, 1 = 100%
global tableAdjustBoxWidth = 1.0
/////////////////////////////////////////////////
///--- F2. Tabling Calculations
/////////////////////////////////////////////////
///--- Width Calculation
// Derived totals reused by the \multicolumn{...}{L{...cm}} heading rows.
global totCoefColWid = ${perCoefColWid}*${it_col_cnt}
global totColCnt = ${it_col_cnt} + 1
global totColWid = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidFootnote = ${labColWid} + ${totCoefColWid} + ${perCoefColWid} + ${perCoefColWid}/2
global totColWidLegend = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidLegendthin = ${totCoefColWid} + ${perCoefColWid}
di "it_col_cnt:$it_col_cnt"
// NOTE(review): the same debug line is repeated five times; presumably the other
// width globals (totColWid, totColWidFootnote, ...) were meant to be echoed.
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
// Build "& & & & & &": one "&" per coefficient column (used as an empty row).
global ampersand ""
foreach curLoop of numlist 1(1)$it_col_cnt {
global ampersand "$ampersand &"
}
di "ampersand:$ampersand"
// tabular column spec: label column m{...} plus one centered m{...} per column.
global alignCenter "m{${labColWid}cm}"
local eB1 ">{\centering\arraybackslash}m{${perCoefColWid}cm}"
foreach curLoop of numlist 1(1)$it_col_cnt {
global alignCenter "$alignCenter `eB1'"
}
di "alignCenter:$alignCenter"
/////////////////////////////////////////////////
///--- G1a. Tex Sectioning panel A
/////////////////////////////////////////////////
// refcat() inserts a full-width bold panel heading row in front of panel A's
// first coefficient (svr_starts_var_panel_a).
#delimit ;
global slb_titling_panel_a "
${svr_starts_var_panel_a} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_a}}} \\"
";
global slb_refcat_panel_a `"refcat(${slb_titling_panel_a}, nolabel)"';
#delimit cr
/////////////////////////////////////////////////
///--- G1b. Tex Sectioning panel B
/////////////////////////////////////////////////
// refcat() can hang only one heading string per coefficient: when panel B's
// first coefficient is also subgroup (ga)'s first, merge both headings into it.
if ("${svr_starts_var_panel_b}" == "${svr_starts_var_panel_b_ga}") {
#delimit ;
global svr_starts_pb_andga "
${svr_starts_var_panel_b}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_b}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_ga}}} \\"
";
#delimit cr
}
else {
#delimit ;
global svr_starts_pb_andga "
${svr_starts_var_panel_b}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_b}}} \\"
${svr_starts_var_panel_b_ga}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_ga}}} \\"
";
#delimit cr
}
#delimit ;
global slb_titling_panel_b "
${svr_starts_pb_andga}
${svr_starts_var_panel_b_gb}
"\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_b_gb}}} \\"
";
global slb_refcat_panel_b `"refcat(${slb_titling_panel_b}, nolabel)"';
#delimit cr
/////////////////////////////////////////////////
///--- G1c. Tex Sectioning panel C
/////////////////////////////////////////////////
// Three cases depending on whether panel C's heading, sub-heading (sa), and
// sub-sub-heading (sa_ga) anchor on the same first coefficient: refcat() can
// hang only one heading string per coefficient, so coinciding anchors merge.
if (("${svr_starts_var_panel_c}" == "${svr_starts_var_panel_c_sa}") & ("${svr_starts_var_panel_c_sa}" == "${svr_starts_var_panel_c_sa_ga}") ) {
///--- if main = sub headings = subsub heading
#delimit ;
global slb_titling_panel_c "
${svr_starts_var_panel_c} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_c}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sa}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_ga}}} \\"
${svr_starts_var_panel_c_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_gb}}} \\"
${svr_starts_var_panel_c_sb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sb}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_ga}}} \\"
${svr_starts_var_panel_c_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_gb}}} \\"
";
global slb_refcat_panel_c `"refcat(${slb_titling_panel_c}, nolabel)"';
#delimit cr
}
else if ("${svr_starts_var_panel_c_sa}" == "${svr_starts_var_panel_c_sa_ga}") {
///--- if main, sub headings differ, but subsub = sub heading
#delimit ;
global slb_titling_panel_c "
${svr_starts_var_panel_c} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_c}}} \\"
${svr_starts_var_panel_c_sa} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sa}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_ga}}} \\"
${svr_starts_var_panel_c_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_gb}}} \\"
${svr_starts_var_panel_c_sb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sb}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_ga}}} \\"
${svr_starts_var_panel_c_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_gb}}} \\"
";
global slb_refcat_panel_c `"refcat(${slb_titling_panel_c}, nolabel)"';
#delimit cr
}
else {
///--- if main, sub, subsub heading vars differ
#delimit ;
global slb_titling_panel_c "
${svr_starts_var_panel_c} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_c}}} \\"
${svr_starts_var_panel_c_sa} "${st_cmidrule}\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sa}}}} \\"
${svr_starts_var_panel_c_sa_ga} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_ga}}} \\"
${svr_starts_var_panel_c_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sa_gb}}} \\"
${svr_starts_var_panel_c_sb} "${st_cmidrule}\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_c_sb}}}} \\"
${svr_starts_var_panel_c_sb_ga} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_ga}}} \\"
${svr_starts_var_panel_c_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_c_sb_gb}}} \\"
";
global slb_refcat_panel_c `"refcat(${slb_titling_panel_c}, nolabel)"';
#delimit cr
}
/////////////////////////////////////////////////
///--- G1d. Bottom
/////////////////////////////////////////////////
// stats() block for the table bottom: N plus one Yes/No row per estadd
// indicator; a bold "Controls..." banner is spliced into the first label.
#delimit ;
global slb_titling_bottom `"
stats(N $st_estd_rownames,
labels(Observations
"\midrule \multicolumn{${totColCnt}}{L{${totColWid}cm}}{${slb_title_spc}\textbf{\textit{\normalsize ${slb_bottom}}}} \\ $ampersand \\ ${slb_fot_lst_spc}${slb_estd_1}"
"${slb_fot_lst_spc}${slb_estd_2}"
"${slb_fot_lst_spc}${slb_estd_3}"
"${slb_fot_lst_spc}${slb_estd_4}"))"';
#delimit cr
/////////////////////////////////////////////////
///--- G2. Tex Headline
/////////////////////////////////////////////////
///--- C.3.A. Initialize
// row1: spanning title row; row2: column-group labels; row3: per-column labels;
// *MidLine: \cmidrule separators drawn under row1 / row2.
global row1 "&"
global row1MidLine ""
global row2 ""
global row2MidLine ""
global row3 ""
///--- B. Row 2 and row 2 midline
* global colSeq "2 3 6"
global cmidrule ""
global colCtr = -1
// Walk group boundaries in colSeq; each gap [lastCol, curCol) becomes one group
// label cell (labG#) plus a \cmidrule under it. The di lines are debug echoes.
foreach curCol of numlist $colSeq {
global colCtr = $colCtr + 1
global curCol1Min = `curCol' - 1
if ($colCtr == 0 ) {
global minCoefCol = "`curCol'"
}
if ($colCtr != 0 ) {
global gapCnt = (`curCol' - `lastCol')
global gapWidth = (`curCol' - `lastCol')*$perCoefColWid
di "curCol1Min:$curCol1Min, lastCol:`lastCol'"
di "$gapCnt"
di "\multicolumn{$gapCnt}{C{${gapWidth}cm}}{\small no Control}"
di "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global curRow2MidLine "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global row2MidLine "$row2MidLine $curRow2MidLine"
global curRow2 "\multicolumn{$gapCnt}{L{${gapWidth}cm}}{\small ${labG${colCtr}}}"
global row2 "$row2 & $curRow2"
}
local lastCol = `curCol'
}
///--- C. Row 3
* Initial & for label column
// One centered cell per coefficient column: labC# when set, else "(#)".
foreach curLoop of numlist 1(1)$it_col_cnt {
global curText "${labC`curLoop'}"
global textUse "(`curLoop')"
if ("$curText" != "") {
global textUse "$curText"
}
global curRow3 "\multicolumn{1}{C{${perCoefColWid}cm}}{$textUse}"
global row3 "$row3 & $curRow3"
}
///--- D. Row 1 and midline:
global row1 "${row1} \multicolumn{${it_col_cnt}}{L{${totCoefColWid}cm}}{${slb_title_inner}}"
global row1MidLine "\cmidrule(l{5pt}r{5pt}){${minCoefCol}-${curCol1Min}}"
///--- C.3.E Print lines
di "$row1 \\"
di "$row1MidLine "
di "$row2 \\"
di "$row2MidLine"
di "$row3 \\"
///--- C.4 Together
#delimit ;
///--- 1. Section
* local section "
* \section{`fileTitle'}\vspace*{-6mm}
* ";
///--- 2. Align and Column Define
local centering "$alignCenter";
global headline "
$row1 \\
$row1MidLine
$row2 \\
$row2MidLine
$row3 \\
";
#delimit cr
/////////////////////////////////////////////////
///--- G4. Head
/////////////////////////////////////////////////
// Assemble the esttab prehead()/postfoot() wrappers: table environment,
// caption, adjustbox scaler, the G2 header rows, and the footnote row.
// NOTE(review): startTable interpolates the *local* `centering' built in G2, so
// G4 must run in the same execution as G2 or the column spec comes out empty.
#delimit ;
global adjustBoxStart "\begin{adjustbox}{max width=${tableAdjustBoxWidth}\textwidth}";
global adjustBoxEnd "\end{adjustbox}";
global notewrap "
\addlinespace[-0.5em]
\multicolumn{${totColCnt}}{L{${totColWidFootnote}cm}}{\footnotesize\justify${slb_note}}\\
";
global startTable "\begin{table}[htbp]
\centering
\caption{${slb_title}\label{${slb_label_tex}}}${adjustBoxStart}\begin{tabular}{`centering'}
\toprule
";
global headlineAll "prehead(${startTable}${headline})";
global headlineAllNoHead "prehead(${startTable})";
global postAll "postfoot(\bottomrule ${notewrap} \end{tabular}${adjustBoxEnd}\end{table})";
#delimit cr
/////////////////////////////////////////////////
///--- H1. Output Results to HTML
/////////////////////////////////////////////////
esttab ${smd_panel_a_m} using "${st_out_html}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
esttab ${smd_panel_b_m} using "${st_out_html}", ${slb_panel_b_main} ${slb_esttab_opt_txt} append
esttab ${smd_panel_c_m} using "${st_out_html}", ${slb_panel_c_main} ${slb_esttab_opt_txt} append
/////////////////////////////////////////////////
///--- H2. Output Results to RTF
/////////////////////////////////////////////////
esttab ${smd_panel_a_m} using "${st_out_rtf}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
esttab ${smd_panel_b_m} using "${st_out_rtf}", ${slb_panel_b_main} ${slb_esttab_opt_txt} append
esttab ${smd_panel_c_m} using "${st_out_rtf}", ${slb_panel_c_main} ${slb_esttab_opt_txt} append
/////////////////////////////////////////////////
///--- H3. Output Results to Tex
/////////////////////////////////////////////////
// fragment mode: panel A opens the shared tabular (prehead, empty postfoot),
// panel B appends into the middle, panel C appends and closes the table.
esttab $smd_panel_a_m using "${st_out_tex}", ///
${slb_panel_a_main} ///
${slb_refcat_panel_a} ///
${slb_esttab_opt_tex} ///
fragment $headlineAll postfoot("") replace
esttab $smd_panel_b_m using "${st_out_tex}", ///
${slb_panel_b_main} ///
${slb_refcat_panel_b} ///
${slb_esttab_opt_tex} ///
fragment prehead("") postfoot("") append
esttab $smd_panel_c_m using "${st_out_tex}", ///
${slb_panel_c_main} ///
${slb_refcat_panel_c} ///
${slb_esttab_opt_tex} ///
${slb_titling_bottom} ///
fragment prehead("") $postAll append
/////////////////////////////////////////////////
///--- I. Out Logs
/////////////////////////////////////////////////
///--- End Log and to HTML
log close
///--- to PDF
// Translate the smcl log to PDF on a custom-size page, then delete the smcl;
// capture noisily keeps the script going if the translator is unavailable.
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 11.69
translator set Results2pdf pageheight 16.53
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${st_log_file}.pdf", replace translator(Results2pdf)
}
capture noisily {
erase "${st_log_file}.smcl"
}
|
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
Three discrete variables, Interacted with each other. Suppose there are 3 categories for each, then there are 27 interactions.
*/
///--- File Names
// NOTE(review): backslash paths under "~" assume Windows; forward slashes would
// be portable across the platforms Stata supports.
global st_file_root "~\Stata4Econ\table\multipanel\tab_6col_dis3inter\"
global st_log_file "${st_file_root}gen_reg"
global st_out_html "${st_file_root}tab_6col_dis3inter.html"
global st_out_rtf "${st_file_root}tab_6col_dis3inter.rtf"
global st_out_tex "${st_file_root}tab_6col_dis3inter_texbody.tex"
///--- Start log
capture log close
log using "${st_log_file}" , replace
log on
set trace off
set tracedepth 1
/////////////////////////////////////////////////
///--- Load Data
/////////////////////////////////////////////////
set more off
sysuse bplong, clear
clonevar female = sex
tab female
tab agegrp
tab when
tab female when
tab female agegrp
egen female_when = group(female when), label
egen female_agegrp = group(female agegrp), label
egen when_agegrp = group(when agegrp), label
egen female_when_agegrp = group(female when agegrp), label
/////////////////////////////////////////////////
///--- A1. Define Regression Variables
/////////////////////////////////////////////////
* shared regression outcome lhs variable
global svr_outcome "bp"
* for each panel, rhs variables differ
global svr_rhs_panel_a "ib0.female io(1).when_agegrp#ib0.female"
* for each column, conditioning differs
global it_reg_n = 6
global sif_col_1 "bp <= 185"
global sif_col_2 "bp <= 180"
global sif_col_3 "bp <= 175"
global sif_col_4 "bp <= 170"
global sif_col_5 "bp <= 165"
global sif_col_6 "bp <= 160"
* esttad strings for conditioning what were included
scalar it_esttad_n = 4
matrix mt_bl_estd = J(it_esttad_n, $it_reg_n, 0)
matrix rownames mt_bl_estd = bpge185 bpge180 bpge170 bpge160
matrix colnames mt_bl_estd = reg1 reg2 reg3 reg4 reg5 reg6
matrix mt_bl_estd[1, 1] = (1\1\1\1)
matrix mt_bl_estd[1, 2] = (0\1\1\1)
matrix mt_bl_estd[1, 3] = (0\0\1\1)
matrix mt_bl_estd[1, 4] = (0\0\1\1)
matrix mt_bl_estd[1, 5] = (0\0\0\1)
matrix mt_bl_estd[1, 6] = (0\0\0\1)
global st_estd_rownames : rownames mt_bl_estd
global slb_estd_1 "blood pressure >= 185"
global slb_estd_2 "blood pressure >= 180"
global slb_estd_3 "blood pressure >= 170"
global slb_estd_4 "blood pressure >= 160"
/////////////////////////////////////////////////
///--- A2. Define Regression Technical Strings
/////////////////////////////////////////////////
///--- Technical Controls
* Estimation command and option string shared by every column.
global stc_regc "regress"
global stc_opts ", vce(robust)"
/////////////////////////////////////////////////
///--- B1. Define Regressions Panel A
/////////////////////////////////////////////////
/*
di "$srg_panel_a_col_1"
di "$srg_panel_a_col_2"
di "$srg_panel_a_col_6"
*/
* Build one full regression command string per column, stored in globals
* srg_panel_a_col_1 ... srg_panel_a_col_$it_reg_n.
foreach it_regre of numlist 1(1)$it_reg_n {
#delimit;
global srg_panel_a_col_`it_regre' "
$stc_regc $svr_outcome $svr_rhs_panel_a if ${sif_col_`it_regre'} $stc_opts
";
#delimit cr
di "${srg_panel_a_col_`it_regre'}"
}
/////////////////////////////////////////////////
///--- C. Run Regressions
/////////////////////////////////////////////////
eststo clear
local it_reg_ctr = 0
local st_panel "panel_a"
* st_cur_sm_stor holds the NAME of the global that accumulates stored models.
global st_cur_sm_stor "smd_`st_panel'_m"
global ${st_cur_sm_stor} ""
foreach it_regre of numlist 1(1)$it_reg_n {
local it_reg_ctr = `it_reg_ctr' + 1
global st_cur_srg_name "srg_`st_panel'_col_`it_regre'"
di "st_panel:`st_panel', it_reg_ctr:`it_reg_ctr', st_cur_srg_name:${st_cur_srg_name}"
///--- Regression
* ${$st_cur_srg_name}: double expansion -- run the command string built above.
eststo m`it_reg_ctr', title("${sif_col_`it_regre'}") : ${$st_cur_srg_name}
///--- Estadd Controls
* Attach Yes/No rows (read from mt_bl_estd) for the table footer.
foreach st_estd_name in $st_estd_rownames {
scalar bl_estad = el(mt_bl_estd, rownumb(mt_bl_estd, "`st_estd_name'"), `it_regre')
if (bl_estad) {
estadd local `st_estd_name' "Yes"
}
else {
estadd local `st_estd_name' "No"
}
}
///--- Track Regression Store
global $st_cur_sm_stor "${${st_cur_sm_stor}} m`it_reg_ctr'"
}
di "${${st_cur_sm_stor}}"
di "$smd_panel_a_m"
/////////////////////////////////////////////////
///--- D1. Labeling
/////////////////////////////////////////////////
///--- Title overall
global slb_title "Outcome: Blood Pressure"
global slb_title_inner "\textbf{Categories}: Discrete Categories and BP"
global slb_label_tex "tab:scminter"
///--- Several RHS Continuous Variables
* Panel A headings: main title, Female/Male subsections, age-group subsubsections.
global slb_panel_a "Compare to Base Line Group: (30-45) x Before x Male"
global slb_panel_a_sa "\textbf{Female} Specific Interaction Effects"
global slb_panel_a_sa_ga "Interact with Age Group \textbf{30 to 45}:"
global slb_panel_a_sa_gb "Interact with Age Group \textbf{46 to 59}:"
global slb_panel_a_sa_gc "Interact with Age Group \textbf{60+}:"
global slb_panel_a_sb "\textbf{Male} Specific Interaction Effects"
global slb_panel_a_sb_ga "${slb_panel_a_sa_ga}"
global slb_panel_a_sb_gb "${slb_panel_a_sa_gb}"
global slb_panel_a_sb_gc "${slb_panel_a_sa_gc}"
///--- Notes
global slb_bottom "Controls for each panel:"
* Define the star legend BEFORE slb_note: globals expand at definition time,
* so ${slb_starLvl} below would otherwise expand to empty here (it was
* previously first defined only later, in section D2, leaving the legend out
* of the printed note). Section D2 redefines it with the identical value.
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
* Fixed typo in the displayed note: "spearate" -> "separate".
global slb_note "${slb_starLvl}. Robust standard errors. Each column is a separate regression."
///--- Show which coefficients to keep
* NOTE(review): when_agegrp = group(when agegrp), so levels 1-3 belong to one
* "when" period and 4-6 to the other; yet the coeflabels below alternate
* after/before across adjacent levels and label the "#0.female" (male-side)
* terms as "x female". The label-to-level mapping looks off -- verify against
* the actual factor levels before relying on the printed labels.
#delimit;
global svr_coef_keep_panel_a "
1.female
2.when_agegrp#0.female
3.when_agegrp#0.female
4.when_agegrp#0.female
5.when_agegrp#0.female
6.when_agegrp#0.female
2.when_agegrp#1.female
3.when_agegrp#1.female
4.when_agegrp#1.female
5.when_agegrp#1.female
6.when_agegrp#1.female
";
#delimit cr
///--- Labeling for Coefficients to Show
* Latex spacing shims used inside multicolumn headings and coefficient labels.
global slb_title_spc "\vspace*{-5mm}\hspace*{-8mm}"
global slb_dis_tlt_spc "\vspace*{-5mm}\hspace*{-8mm}"
global slb_dis_ele_spc "\vspace*{0mm}\hspace*{5mm}"
global slb_1st_ele_spc "\vspace*{0mm}\hspace*{5mm}"
global slb_fot_lst_spc "\vspace*{0mm}\hspace*{2mm}"
* svr_starts_var_*: the coefficient at which each (sub)section heading is
* injected via esttab's refcat() option (see section G1c).
#delimit;
global svr_starts_var_panel_a "1.female";
global svr_starts_var_panel_a_sa "2.when_agegrp#0.female";
global svr_starts_var_panel_a_sa_ga "2.when_agegrp#0.female";
global svr_starts_var_panel_a_sa_gb "3.when_agegrp#0.female";
global svr_starts_var_panel_a_sa_gc "5.when_agegrp#0.female";
global svr_starts_var_panel_a_sb "2.when_agegrp#1.female";
global svr_starts_var_panel_a_sb_ga "2.when_agegrp#1.female";
global svr_starts_var_panel_a_sb_gb "3.when_agegrp#1.female";
global svr_starts_var_panel_a_sb_gc "5.when_agegrp#1.female";
global slb_coef_label_panel_a "
1.female "${slb_dis_ele_spc} female intercept"
2.when_agegrp#0.female "${slb_dis_ele_spc} x female x after"
3.when_agegrp#0.female "${slb_dis_ele_spc} x female x before"
4.when_agegrp#0.female "${slb_dis_ele_spc} x female x after"
5.when_agegrp#0.female "${slb_dis_ele_spc} x female x before"
6.when_agegrp#0.female "${slb_dis_ele_spc} x female x after"
2.when_agegrp#1.female "${slb_dis_ele_spc} x female x after"
3.when_agegrp#1.female "${slb_dis_ele_spc} x female x before"
4.when_agegrp#1.female "${slb_dis_ele_spc} x female x after"
5.when_agegrp#1.female "${slb_dis_ele_spc} x female x before"
6.when_agegrp#1.female "${slb_dis_ele_spc} x female x after"
";
#delimit cr
/////////////////////////////////////////////////
///--- D2. Regression Display Controls
/////////////////////////////////////////////////
* Footer stats: N plus the Yes/No control rows added via estadd in section C.
global slb_reg_stats "N ${st_estd_rownames}"
global slb_starLvl "* 0.10 ** 0.05 *** 0.01"
global slb_starComm "nostar"
* Cell layout: point estimate with stars, s.e. in parentheses underneath.
* Tex and txt variants differ only in the latex wrapping of the parentheses.
global slb_sd_tex `"se(fmt(a2) par("\vspace*{-2mm}{\footnotesize (" ") }"))"'
global slb_cells_tex `"cells(b(star fmt(a2)) $slb_sd_tex)"'
global slb_esttab_opt_tex "${slb_cells_tex} booktabs label collabels(none) nomtitles nonumbers star(${slb_starLvl})"
global slb_sd_txt `"se(fmt(a2) par("(" ")"))"'
global slb_cells_txt `"cells(b(star fmt(a2)) $slb_sd_txt)"'
global slb_esttab_opt_txt "${slb_cells_txt} stats(${slb_reg_stats}) collabels(none) mtitle nonumbers varwidth(30) modelwidth(15) star(${slb_starLvl}) addnotes(${slb_note})"
#delimit ;
global slb_panel_a_main "
title("${slb_panel_a}")
keep(${svr_coef_keep_panel_a}) order(${svr_coef_keep_panel_a})
coeflabels(${slb_coef_label_panel_a})
";
#delimit cr
/////////////////////////////////////////////////
///--- E. Regression Shows
/////////////////////////////////////////////////
* Echo the option strings and render the text version of the table to the log.
di `"${slb_panel_a_main}"'
di `"${slb_esttab_opt_txt}"'
esttab ${smd_panel_a_m}, ${slb_panel_a_main} ${slb_esttab_opt_txt}
/////////////////////////////////////////////////
///--- F1. Define Latex Column Groups and Column Sub-Groups
/////////////////////////////////////////////////
///--- Column Groups
* Column-group boundaries for the latex header rows built in section G2.
global it_max_col = 8
global it_min_col = 2
global it_col_cnt = 6
global colSeq "2 4 6 8"
// global st_cmidrule "\cmidrule(lr){2-3}\cmidrule(lr){4-5}\cmidrule(lr){6-7}"
global st_cmidrule "\cmidrule(lr){2-7}"
///--- Group 1, columns 1 and 2
* Fixed typo in three displayed column labels: "Teachng" -> "Teaching".
global labG1 "All Age 5 to 12"
global labC1 "{\small All Villages}"
global labC2 "{\small No Teaching Points}"
///--- Group 2, columns 3 and 4
global labG2 "Girls Age 5 to 12"
global labC3 "{\small All Villages}"
global labC4 "{\small No Teaching Points}"
///--- Group 3, columns 5 and 6
global labG3 "Boys Age 5 to 12"
global labC5 "{\small All Villages}"
global labC6 "{\small No Teaching Points}"
///--- Column Widths
* Widths in cm: per-coefficient column and the left label column.
global perCoefColWid = 1.85
global labColWid = 5
///--- Column Fractional Adjustment, 1 = 100%
global tableAdjustBoxWidth = 1.0
/////////////////////////////////////////////////
///--- F2. Tabling Calculations
/////////////////////////////////////////////////
///--- Width Calculation
* Derived widths (cm) for multicolumn spans: body, footnote, legend variants.
global totCoefColWid = ${perCoefColWid}*${it_col_cnt}
global totColCnt = ${it_col_cnt} + 1
global totColWid = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidFootnote = ${labColWid} + ${totCoefColWid} + ${perCoefColWid} + ${perCoefColWid}/2
global totColWidLegend = ${labColWid} + ${totCoefColWid} + ${perCoefColWid}
global totColWidLegendthin = ${totCoefColWid} + ${perCoefColWid}
di "it_col_cnt:$it_col_cnt"
* NOTE(review): the same macro is echoed five times below; the repeats were
* presumably meant to print totColWid / totColWidFootnote / etc. instead.
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
di "totCoefColWid:$totCoefColWid"
* One "&" per coefficient column, used for blank spacer rows in the footer.
global ampersand ""
foreach curLoop of numlist 1(1)$it_col_cnt {
global ampersand "$ampersand &"
}
di "ampersand:$ampersand"
* Tabular column spec: label column plus centered fixed-width data columns.
global alignCenter "m{${labColWid}cm}"
local eB1 ">{\centering\arraybackslash}m{${perCoefColWid}cm}"
foreach curLoop of numlist 1(1)$it_col_cnt {
global alignCenter "$alignCenter `eB1'"
}
di "alignCenter:$alignCenter"
/////////////////////////////////////////////////
///--- G1c. Tex Sectioning panel A
/////////////////////////////////////////////////
* Build the refcat() heading entries for panel A. Which branch runs depends on
* whether the main / sub / subsub headings start at the SAME coefficient:
* refcat() allows only one entry per coefficient, so coinciding start
* variables must have their heading lines merged into a single entry.
if (("${svr_starts_var_panel_a}" == "${svr_starts_var_panel_a_sa}") & ("${svr_starts_var_panel_a_sa}" == "${svr_starts_var_panel_a_sa_ga}") ) {
///--- if main = sub headings = subsub heading
#delimit ;
global slb_titling_panel_a "
${svr_starts_var_panel_a} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_a}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_a_sa}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_ga}}} \\"
${svr_starts_var_panel_a_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_gb}}} \\"
${svr_starts_var_panel_a_sa_gc} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_gc}}} \\"
${svr_starts_var_panel_a_sb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_a_sb}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_ga}}} \\"
${svr_starts_var_panel_a_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_gb}}} \\"
${svr_starts_var_panel_a_sb_gc} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_gc}}} \\"
";
global slb_refcat_panel_a `"refcat(${slb_titling_panel_a}, nolabel)"';
#delimit cr
}
else if ("${svr_starts_var_panel_a_sa}" == "${svr_starts_var_panel_a_sa_ga}") {
///--- if main, sub headings differ, but subsub = sub heading
#delimit ;
global slb_titling_panel_a "
${svr_starts_var_panel_a} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_a}}} \\"
${svr_starts_var_panel_a_sa} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_a_sa}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_ga}}} \\"
${svr_starts_var_panel_a_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_gb}}} \\"
${svr_starts_var_panel_a_sa_gc} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_gc}}} \\"
${svr_starts_var_panel_a_sb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_a_sb}}}} \\
\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_ga}}} \\"
${svr_starts_var_panel_a_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_gb}}} \\"
${svr_starts_var_panel_a_sb_gc} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_gc}}} \\"
";
global slb_refcat_panel_a `"refcat(${slb_titling_panel_a}, nolabel)"';
#delimit cr
}
else {
///--- if main, sub, subsub heading vars differ
#delimit ;
global slb_titling_panel_a "
${svr_starts_var_panel_a} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_panel_a}}} \\"
${svr_starts_var_panel_a_sa} "${st_cmidrule}\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_a_sa}}}} \\"
${svr_starts_var_panel_a_sa_ga} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_ga}}} \\"
${svr_starts_var_panel_a_sa_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_gb}}} \\"
${svr_starts_var_panel_a_sa_gc} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sa_gc}}} \\"
${svr_starts_var_panel_a_sb} "${st_cmidrule}\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textbf{\textit{${slb_panel_a_sb}}}} \\"
${svr_starts_var_panel_a_sb_ga} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_ga}}} \\"
${svr_starts_var_panel_a_sb_gb} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_gb}}} \\"
${svr_starts_var_panel_a_sb_gc} "\multicolumn{$totColCnt}{L{${totColWidLegend}cm}}{${slb_dis_tlt_spc}\textit{${slb_panel_a_sb_gc}}} \\"
";
global slb_refcat_panel_a `"refcat(${slb_titling_panel_a}, nolabel)"';
#delimit cr
}
/////////////////////////////////////////////////
///--- G1d. Bottom
/////////////////////////////////////////////////
* Footer block: N row plus relabeled Yes/No rows; the first label opens the
* "Controls" sub-block with a midrule and a blank spacer row ($ampersand).
#delimit ;
global slb_titling_bottom `"
stats(N $st_estd_rownames,
labels(Observations
"\midrule \multicolumn{${totColCnt}}{L{${totColWid}cm}}{${slb_title_spc}\textbf{\textit{\normalsize ${slb_bottom}}}} \\ $ampersand \\ ${slb_fot_lst_spc}${slb_estd_1}"
"${slb_fot_lst_spc}${slb_estd_2}"
"${slb_fot_lst_spc}${slb_estd_3}"
"${slb_fot_lst_spc}${slb_estd_4}"))"';
#delimit cr
/////////////////////////////////////////////////
///--- G2. Tex Headline
/////////////////////////////////////////////////
///--- C.3.A. Initialize
* row1: overall inner title; row2: column-group titles; row3: per-column labels.
global row1 "&"
global row1MidLine ""
global row2 ""
global row2MidLine ""
global row3 ""
///--- B. Row 2 and row 2 midline
* global colSeq "2 3 6"
* Walk the group boundaries in colSeq; each gap [lastCol, curCol) becomes one
* multicolumn group title plus a cmidrule underneath it.
global cmidrule ""
global colCtr = -1
foreach curCol of numlist $colSeq {
global colCtr = $colCtr + 1
global curCol1Min = `curCol' - 1
if ($colCtr == 0 ) {
global minCoefCol = "`curCol'"
}
if ($colCtr != 0 ) {
global gapCnt = (`curCol' - `lastCol')
global gapWidth = (`curCol' - `lastCol')*$perCoefColWid
di "curCol1Min:$curCol1Min, lastCol:`lastCol'"
di "$gapCnt"
di "\multicolumn{$gapCnt}{C{${gapWidth}cm}}{\small no Control}"
di "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global curRow2MidLine "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global row2MidLine "$row2MidLine $curRow2MidLine"
global curRow2 "\multicolumn{$gapCnt}{L{${gapWidth}cm}}{\small ${labG${colCtr}}}"
global row2 "$row2 & $curRow2"
}
local lastCol = `curCol'
}
///--- C. Row 3
* Initial & for label column
* Per-column header: labC<i> when defined, otherwise the fallback "(i)".
foreach curLoop of numlist 1(1)$it_col_cnt {
global curText "${labC`curLoop'}"
global textUse "(`curLoop')"
if ("$curText" != "") {
global textUse "$curText"
}
global curRow3 "\multicolumn{1}{C{${perCoefColWid}cm}}{$textUse}"
global row3 "$row3 & $curRow3"
}
///--- D. Row 1 and midline:
global row1 "${row1} \multicolumn{${it_col_cnt}}{L{${totCoefColWid}cm}}{${slb_title_inner}}"
global row1MidLine "\cmidrule(l{5pt}r{5pt}){${minCoefCol}-${curCol1Min}}"
///--- C.3.E Print lines
di "$row1 \\"
di "$row1MidLine "
di "$row2 \\"
di "$row2MidLine"
di "$row3 \\"
///--- C.4 Together
#delimit ;
///--- 1. Section
* local section "
* \section{`fileTitle'}\vspace*{-6mm}
* ";
///--- 2. Align and Column Define
local centering "$alignCenter";
global headline "
$row1 \\
$row1MidLine
$row2 \\
$row2MidLine
$row3 \\
";
#delimit cr
/////////////////////////////////////////////////
///--- G4. Head
/////////////////////////////////////////////////
* Assemble esttab prehead/postfoot wrappers: table + adjustbox + tabular open,
* and the closing bottomrule + wrapped footnote + environment closes.
#delimit ;
global adjustBoxStart "\begin{adjustbox}{max width=${tableAdjustBoxWidth}\textwidth}";
global adjustBoxEnd "\end{adjustbox}";
global notewrap "
\addlinespace[-0.5em]
\multicolumn{${totColCnt}}{L{${totColWidFootnote}cm}}{\footnotesize\justify${slb_note}}\\
";
global startTable "\begin{table}[htbp]
\centering
\caption{${slb_title}\label{${slb_label_tex}}}${adjustBoxStart}\begin{tabular}{`centering'}
\toprule
";
global headlineAll "prehead(${startTable}${headline})";
global headlineAllNoHead "prehead(${startTable})";
global postAll "postfoot(\bottomrule ${notewrap} \end{tabular}${adjustBoxEnd}\end{table})";
#delimit cr
/////////////////////////////////////////////////
///--- H1. Output Results to HTML
/////////////////////////////////////////////////
esttab ${smd_panel_a_m} using "${st_out_html}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
/////////////////////////////////////////////////
///--- H2. Output Results to RTF
/////////////////////////////////////////////////
esttab ${smd_panel_a_m} using "${st_out_rtf}", ${slb_panel_a_main} ${slb_esttab_opt_txt} replace
/////////////////////////////////////////////////
///--- H3. Output Results to Tex
/////////////////////////////////////////////////
* Single-panel tex table: opens with $headlineAll and closes with $postAll.
esttab $smd_panel_a_m using "${st_out_tex}", ///
${slb_panel_a_main} ///
${slb_refcat_panel_a} ///
${slb_esttab_opt_tex} ///
${slb_titling_bottom} ///
fragment $headlineAll $postAll replace
/////////////////////////////////////////////////
///--- I. Out Logs
/////////////////////////////////////////////////
///--- End Log and to HTML
log close
///--- to PDF
* Configure the Results2pdf translator (custom page, tight margins) and export
* the Results window to PDF; capture keeps failures non-fatal.
capture noisily {
translator set Results2pdf logo off
translator set Results2pdf fontsize 10
translator set Results2pdf pagesize custom
translator set Results2pdf pagewidth 11.69
translator set Results2pdf pageheight 16.53
translator set Results2pdf lmargin 0.2
translator set Results2pdf rmargin 0.2
translator set Results2pdf tmargin 0.2
translator set Results2pdf bmargin 0.2
translate @Results "${st_log_file}.pdf", replace translator(Results2pdf)
}
* Remove the intermediate smcl log (best effort).
capture noisily {
erase "${st_log_file}.smcl"
}
|
* Fresh session state: clear screen, data, and ALL macros (globals included).
cls
clear
macro drop _all
/*
Back to Fan's Stata4Econ or other repositories:
- http://fanwangecon.github.io
- http://fanwangecon.github.io/Stata4Econ
- http://fanwangecon.github.io/R4Econ
- http://fanwangecon.github.io/M4Econ
- http://fanwangecon.github.io/CodeDynaAsset/
- http://fanwangecon.github.io/Math4Econ/
- http://fanwangecon.github.io/Stat4Econ/
- http://fanwangecon.github.io/Tex4Econ
Regression with continuous variable and discrete variables, discrete variables could interact with each other, and interact with continuous variable
*/
///--- File Names
global st_file_root "~\Stata4Econ\table\tabsumm\tab_mcol_npanel\"
global st_log_file "${st_file_root}gen_reg"
global st_out_html "${st_file_root}tab_mcol_npanel.html"
global st_out_rtf "${st_file_root}tab_mcol_npanel.rtf"
global st_out_tex "${st_file_root}tab_mcol_npanel_texbody.tex"
///--- Start log
capture log close
log using "${st_log_file}" , replace
log on
set trace off
set tracedepth 1
/////////////////////////////////////////////////
///--- Load Data
/////////////////////////////////////////////////
set more off
sysuse bplong, clear
tab sex
tab agegrp
tab when
tab sex when
tab sex agegrp
* Interaction category variables: one group id per cell of each cross.
egen sex_when = group(sex when), label
egen sex_agegrp = group(sex agegrp), label
egen when_agegrp = group(when agegrp), label
* Empty out one cell on purpose so the N=0 placeholder logic below is exercised.
drop if agegrp == 2 & sex_when == 3
* Seeded random columns used as extra variables to summarize.
set seed 123
gen rand1 = floor(runiform()*2)
gen rand2 = floor(runiform()*20)
gen rand3 = floor(runiform()*3000)
/////////////////////////////////////////////////
///--- A1. Define Regression Variables
/////////////////////////////////////////////////
* shared variables to summarize over
global svr_summ "bp patient rand1 rand2"
* for each column, conditioning differs
* Grid: it_colcate_n columns (sex x when cells) by it_rowcate_n rows (age groups).
global it_colcate_n = 4
global it_rowcate_n = 3
global sif_colcate_1 "sex_when == 1"
global sif_colcate_2 "sex_when == 2"
global sif_colcate_3 "sex_when == 3"
global sif_colcate_4 "sex_when == 4"
global sif_rowcate_1 "agegrp == 1"
global sif_rowcate_2 "agegrp == 2"
global sif_rowcate_3 "agegrp == 3"
/////////////////////////////////////////////////
///--- A2. Titling
/////////////////////////////////////////////////
global slb_title "Cross Tabulate Age, Gender and Time Statistics"
global slb_title_inner "Tabulate Stats: \textbf{Mean} (\textit{S.D.})"
global slb_label_tex "tab:sctabsumm"
/////////////////////////////////////////////////
///--- A3. Row Labeling
/////////////////////////////////////////////////
///--- Row Tab Names
* One panel per age-group row of the summary-statistics grid.
global slb_rowcate_1 "Group 1: Age 30 to 45"
global slb_rowcate_2 "Group 2: Age 46 to 59"
global slb_rowcate_3 "Group 3: Age >60"
///--- Var Subgroup Subtitling
global slb_subvargrp_1 "Summ Group One (cts)"
global slb_subvargrp_2 "Summ Group Two (discrete)"
///--- Labeling for each variable
* Variable labels double as table row labels; hspace indents them under headings.
global slb_var_spc "\hspace*{3mm}"
label variable bp "${slb_var_spc}Blood pressure"
label variable patient "${slb_var_spc}Patient ID"
label variable rand1 "${slb_var_spc}Random \textit{Male} or \textit{Female}"
label variable rand2 "${slb_var_spc}Random Three Cates \textbf{after}"
label variable rand3 "${slb_var_spc}Random Thousands"
///--- Labeling Head Tag
* svr_first: the variable at which each panel's refcat heading is injected.
global svr_first "bp"
global svr_first_subvargrp_1 "bp"
global svr_first_subvargrp_2 "rand1"
/////////////////////////////////////////////////
///--- A4. Column Labeling
/////////////////////////////////////////////////
///--- Column Groups
global colSeq "2 4 6"
global st_cmidrule "\cmidrule(lr){2-3}\cmidrule(lr){4-5}"
///--- Group 1, columns 1 and 2
global labG1 "Male"
global labC1 "{\small Before}"
global labC2 "{\small After}"
///--- Group 2, columns 3 and 4
global labG2 "Female"
global labC3 "{\small Before}"
global labC4 "{\small After}"
///--- Column Widths
* Widths in cm: per-stat column, label column, extra slack for the footnote.
global perCoefColWid = 1.75
global labColWid = 7
global footExtraWidth = 1.1
global slb_title_spc "\vspace*{-3mm}"
global slb_foot_spc "\vspace*{-3mm}"
///--- Column Fractional Adjustment, 1 = 100%
global tableAdjustBoxWidth = 1.0
/////////////////////////////////////////////////
///--- A5. Additional Statistics
/////////////////////////////////////////////////
///--- Notes
global slb_bottom "Controls for each panel:"
global slb_note "Summary statistics cross tabulate for various variables. Table shows mean and standard deviation for each group in parenthesis."
/////////////////////////////////////////////////
///--- A6. Define Summarizing Technical Strings
/////////////////////////////////////////////////
///--- Technical Controls
* tabstat computes a battery of stats; the table prints stc_stats_main with
* stc_stats_paren shown in parentheses.
global stc_regc "estpost tabstat"
global stc_opts ", statistics(mean sd p10 p50 p90) c(s)"
global stc_stats_main "mean"
global stc_stats_paren "sd"
/////////////////////////////////////////////////
///--- B1. Define Stats Summary for Each Tabulate Category
/////////////////////////////////////////////////
/*
di "$srg_cate_row1_col1"
di "$srg_cate_row2_col2"
di "$srg_cate_row1_col2"
*/
* One tabstat command string per (row category, column category) grid cell.
foreach it_rowcate of numlist 1(1)$it_rowcate_n {
foreach it_colcate of numlist 1(1)$it_colcate_n {
#delimit;
global srg_cate_row`it_rowcate'_col`it_colcate' "
$stc_regc $svr_summ if ${sif_colcate_`it_colcate'} & ${sif_rowcate_`it_rowcate'}
";
#delimit cr
di "${srg_cate_row`it_rowcate'_col`it_colcate'}"
}
}
/////////////////////////////////////////////////
///--- C. Run Regressions
/////////////////////////////////////////////////
eststo clear
local it_tabcell_ctr = 0
foreach it_rowcate of numlist 1(1)$it_rowcate_n {
* smd_<row>_m accumulates the stored model names for one table row (panel).
global st_cur_sm_store "smd_`it_rowcate'_m"
global ${st_cur_sm_store} ""
foreach it_colcate of numlist 1(1)$it_colcate_n {
local it_tabcell_ctr = `it_tabcell_ctr' + 1
global st_cur_srg_name "srg_cate_row`it_rowcate'_col`it_colcate'"
di "it_rowcate:`it_rowcate', it_tabcell_ctr:`it_tabcell_ctr', st_cur_srg_name:${st_cur_srg_name}"
///--- Summ Stats
count if ${sif_colcate_`it_colcate'} & ${sif_rowcate_`it_rowcate'}
global curcount = r(N)
* NOTE(review): ">1" also sends N==1 cells to the placeholder branch (which
* then reports N = 0) -- confirm whether singleton cells were intended here.
if ($curcount>1) {
eststo m`it_tabcell_ctr', title("${sif_colcate_`it_colcate'}") : ${$st_cur_srg_name} ${stc_opts}
}
else {
///--- This means this tabulated subgroup has N = 0
* Generate a fake observation to create a new estimated model
* Then replace the observation N by setting it to 0, otherwise N = 1
capture drop aaa
gen aaa = 0 if _n == 1
eststo m`it_tabcell_ctr', title("${sif_colcate_`it_colcate'}") : estpost tabstat aaa , statistics(n) c(s)
estadd scalar N = 0, replace
}
///--- Track Regression Store
global $st_cur_sm_store "${${st_cur_sm_store}} m`it_tabcell_ctr'"
}
di "${${st_cur_sm_store}}"
}
di "$smd_1_m"
di "$smd_2_m"
di "$smd_3_m"
/////////////////////////////////////////////////
///--- D2. Regression Display Controls
/////////////////////////////////////////////////
global slb_reg_stats "N"
global sd `""'
global keepcellstats "cells(mean(fmt(a2)) $sd) wide"
* Wide layout: mean with s.d. beside it; tex/txt differ only in parentheses
* wrapping. NOTE(review): ${slb_starLvl} is never defined in this script
* (macro drop _all above), so star(...) expands with an empty argument.
global slb_sd_tex `"${stc_stats_paren}(fmt(a2) par("\vspace*{-2mm}{\footnotesize (" ") }"))"'
global slb_cells_tex `"cells(${stc_stats_main}(fmt(a2)) $slb_sd_tex) wide"'
global slb_esttab_opt_tex "${slb_cells_tex} booktabs label collabels(none) nomtitles nonumbers star(${slb_starLvl})"
global slb_sd_txt `"${stc_stats_paren}(fmt(a2) par("(" ")"))"'
global slb_cells_txt `"cells(${stc_stats_main}(fmt(a2)) $slb_sd_txt) wide"'
global slb_esttab_opt_txt "${slb_cells_txt} stats(${slb_reg_stats}) collabels(none) mtitle nonumbers varwidth(30) modelwidth(15) star(${slb_starLvl}) addnotes(${slb_note})"
/////////////////////////////////////////////////
///--- E. Summ Stats Shows
/////////////////////////////////////////////////
* Render each row-group's text table to the log for inspection.
foreach it_rowcate of numlist 1(1)$it_rowcate_n {
esttab ${smd_`it_rowcate'_m}, title("${slb_rowcate_`it_rowcate'}") ${slb_esttab_opt_txt}
}
/////////////////////////////////////////////////
///--- F2. Tabling Calculations
/////////////////////////////////////////////////
///--- Width Calculation
* Derived widths (cm) for multicolumn spans: body, footnote, legend variants.
global totCoefColWid = ${perCoefColWid}*${it_colcate_n}
global totColCnt = ${it_colcate_n} + 1
global totColWid = ${labColWid} + ${totCoefColWid}
global totColWidFootnote = ${labColWid} + ${totCoefColWid} + ${footExtraWidth}
global totColWidLegend = ${labColWid} + ${totCoefColWid}
global totColWidLegendthin = ${totCoefColWid}
di "it_colcate_n:$it_colcate_n"
di "totCoefColWid:$totCoefColWid"
* One "&" per data column, used for blank spacer rows in the footer.
global ampersand ""
foreach curLoop of numlist 1(1)$it_colcate_n {
global ampersand "$ampersand &"
}
di "ampersand:$ampersand"
* Tabular column spec: label column plus centered fixed-width data columns.
global alignCenter "m{${labColWid}cm}"
local eB1 ">{\centering\arraybackslash}m{${perCoefColWid}cm}"
foreach curLoop of numlist 1(1)$it_colcate_n {
global alignCenter "$alignCenter `eB1'"
}
di "alignCenter:$alignCenter"
/////////////////////////////////////////////////
///--- G1a. Tex Sectioning each panel
/////////////////////////////////////////////////
* Per-row-group refcat() heading, injected at the first summarized variable.
foreach it_rowcate of numlist 1(1)$it_rowcate_n {
#delimit ;
global slb_titling_panel_`it_rowcate' "
${svr_first} "\multicolumn{$totColCnt}{p{${totColWidLegend}cm}}{${slb_title_spc}\textbf{${slb_rowcate_`it_rowcate'}}} \\"
";
global slb_refcat_panel_`it_rowcate' `"refcat(${slb_titling_panel_`it_rowcate'}, nolabel)"';
#delimit cr
}
/////////////////////////////////////////////////
///--- G1d. Bottom
/////////////////////////////////////////////////
* Footer: the N row's label carries the midrule and the "Controls" title.
#delimit ;
global slb_titling_bottom `"
stats(N,
labels(Observations
"\midrule \multicolumn{${totColCnt}}{L{${totColWid}cm}}{${slb_title_spc}\textbf{\textit{\normalsize ${slb_bottom}}}}"))"';
#delimit cr
/////////////////////////////////////////////////
///--- G2. Tex Headline
/////////////////////////////////////////////////
///--- C.3.A. Initialize
* row1: overall inner title; row2: column-group titles; row3: per-column labels.
global row1 "&"
global row1MidLine ""
global row2 ""
global row2MidLine ""
global row3 ""
///--- B. Row 2 and row 2 midline
* global colSeq "2 3 6"
* Walk the group boundaries in colSeq; each gap [lastCol, curCol) becomes one
* multicolumn group title plus a cmidrule underneath it.
global cmidrule ""
global colCtr = -1
foreach curCol of numlist $colSeq {
global colCtr = $colCtr + 1
global curCol1Min = `curCol' - 1
if ($colCtr == 0 ) {
global minCoefCol = "`curCol'"
}
if ($colCtr != 0 ) {
global gapCnt = (`curCol' - `lastCol')
global gapWidth = (`curCol' - `lastCol')*$perCoefColWid
di "curCol1Min:$curCol1Min, lastCol:`lastCol'"
di "$gapCnt"
di "\multicolumn{$gapCnt}{C{${gapWidth}cm}}{\small no Control}"
di "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global curRow2MidLine "\cmidrule(l{5pt}r{5pt}){`lastCol'-$curCol1Min}"
global row2MidLine "$row2MidLine $curRow2MidLine"
global curRow2 "\multicolumn{$gapCnt}{C{${gapWidth}cm}}{\small ${labG${colCtr}}}"
global row2 "$row2 & $curRow2"
}
local lastCol = `curCol'
}
///--- C. Row 3
* Initial & for label column
* Per-column header: labC<i> when defined, otherwise the fallback "(i)".
foreach curLoop of numlist 1(1)$it_colcate_n {
global curText "${labC`curLoop'}"
global textUse "(`curLoop')"
if ("$curText" != "") {
global textUse "$curText"
}
global curRow3 "\multicolumn{1}{C{${perCoefColWid}cm}}{$textUse}"
global row3 "$row3 & $curRow3"
}
///--- D. Row 1 and midline:
global row1 "${row1} \multicolumn{${it_colcate_n}}{p{${totCoefColWid}cm}}{${slb_title_inner}}"
global row1MidLine "\cmidrule(l{5pt}r{5pt}){${minCoefCol}-${curCol1Min}}"
///--- C.3.E Print lines
di "$row1 \\"
di "$row1MidLine "
di "$row2 \\"
di "$row2MidLine"
di "$row3 \\"
///--- C.4 Together
#delimit ;
///--- 1. Section
* local section "
* \section{`fileTitle'}\vspace*{-6mm}
* ";
///--- 2. Align and Column Define
local centering "$alignCenter";
global headline "
$row1 \\
$row1MidLine
$row2 \\
$row2MidLine
$row3 \\
";
#delimit cr
/////////////////////////////////////////////////
///--- G4. Head
/////////////////////////////////////////////////
* Assemble esttab prehead/postfoot wrappers: table + adjustbox + tabular open,
* and the closing bottomrule + wrapped footnote + environment closes.
#delimit ;
global adjustBoxStart "\begin{adjustbox}{max width=${tableAdjustBoxWidth}\textwidth}";
global adjustBoxEnd "\end{adjustbox}";
global notewrap "
\multicolumn{${totColCnt}}{p{${totColWidFootnote}cm}}{${slb_foot_spc} \footnotesize\justify ${slb_note}}\\
";
global startTable "\begin{table}[htbp]
\centering
\caption{${slb_title}\label{${slb_label_tex}}}${adjustBoxStart}\begin{tabular}{`centering'}
\toprule
";
global headlineAll "prehead(${startTable}${headline})";
global headlineAllNoHead "prehead(${startTable})";
global postAll "postfoot(\bottomrule ${notewrap} \end{tabular}${adjustBoxEnd}\end{table})";
#delimit cr
/////////////////////////////////////////////////
///--- H1. Output Results to HTML
/////////////////////////////////////////////////
* These two calls write row-group 1. BUGFIX: the original used
* ${slb_rowcate_`it_rowcate'} here, but a foreach loop-local is dropped when
* its loop ends, so `it_rowcate' expanded to empty and the titles were blank.
esttab ${smd_1_m} using "${st_out_html}", title("${slb_rowcate_1}") ${slb_esttab_opt_txt} replace
esttab ${smd_1_m} using "${st_out_rtf}", title("${slb_rowcate_1}") ${slb_esttab_opt_txt} replace
foreach it_rowcate of numlist 2(1)$it_rowcate_n {
esttab ${smd_`it_rowcate'_m} using "${st_out_html}", title("${slb_rowcate_`it_rowcate'}") ${slb_esttab_opt_txt} append
esttab ${smd_`it_rowcate'_m} using "${st_out_rtf}", title("${slb_rowcate_`it_rowcate'}") ${slb_esttab_opt_txt} append
}
/////////////////////////////////////////////////
///--- H2. Output Results to Tex
/////////////////////////////////////////////////
* First panel opens the tex table ($headlineAll), middle panels splice in with
* empty prehead/postfoot, and the last panel closes it with $postAll.
esttab ${smd_1_m} using "${st_out_tex}", ///
title("${slb_rowcate_1}") ///
${slb_refcat_panel_1} ///
${slb_esttab_opt_tex} ///
fragment $headlineAll postfoot("") replace
global it_rowcate_n_mins_1 = $it_rowcate_n - 1
foreach it_rowcate of numlist 2(1)$it_rowcate_n_mins_1 {
esttab ${smd_`it_rowcate'_m} using "${st_out_tex}", ///
title("${slb_rowcate_`it_rowcate'}") ///
${slb_refcat_panel_`it_rowcate'} ///
${slb_esttab_opt_tex} ///
fragment prehead("") postfoot("") append
}
esttab ${smd_${it_rowcate_n}_m} using "${st_out_tex}", ///
title("${slb_rowcate_${it_rowcate_n}}") ///
${slb_refcat_panel_${it_rowcate_n}} ///
${slb_esttab_opt_tex} ///
${slb_titling_bottom} ///
fragment prehead("") $postAll append
/////////////////////////////////////////////////
///--- I. Out Logs
/////////////////////////////////////////////////
///--- End Log and to HTML
log close
///--- to PDF: custom page (11.69 x 16.53 in) with 0.2 in margins all round
capture noisily {
	translator set Results2pdf logo off
	translator set Results2pdf fontsize 10
	translator set Results2pdf pagesize custom
	translator set Results2pdf pagewidth 11.69
	translator set Results2pdf pageheight 16.53
	foreach side in lmargin rmargin tmargin bmargin {
		translator set Results2pdf `side' 0.2
	}
	translate @Results "${st_log_file}.pdf", replace translator(Results2pdf)
}
///--- discard the smcl log once translated
capture noisily {
	erase "${st_log_file}.smcl"
}
|
/*
statapush: Stata module for sending push notifications
Authors: William L. Schpero and Vikram Jambulapati
Contact: william.schpero@yale.edu
Date: 060516
Version: 3.0
*/
capture program drop statapush
program define statapush
	version 12.1
	syntax [using/], Message(string) [Token(string) Userid(string)] [Attach(string)] [Provider(string)]
	* Push a notification (optionally after running a do-file) through
	* Pushover, Pushbullet or IFTTT. When no token is supplied, the saved
	* defaults written by -statapushpref- are loaded instead.
	if "`token'" == "" {
		_statapushprefgrab
		local token "`r(token)'"
		local userid "`r(userid)'"
		local provider "`r(provider)'"
	}
	* Resolve the provider (default: Pushover) to its worker command
	local prov = lower("`provider'")
	if "`prov'" == "" | "`prov'" == "pushover" {
		local pushcmd "_statapush"
	}
	else if "`prov'" == "pushbullet" {
		local pushcmd "_statapushbullet"
	}
	else if "`prov'" == "ifttt" {
		local pushcmd "_statapushifttt"
	}
	else {
		display as error "Invalid provider: `provider'. Need to use 'pushover', 'pushbullet', or 'ifttt'."
		exit 198
	}
	* Attachments are a Pushbullet-only feature; repackage as an option
	if "`attach'" != "" {
		if "`prov'" == "pushbullet" {
			local attach a("`attach'")
		}
		else {
			display as error "Only 'pushbullet' supports 'attach'."
			exit 198
		}
	}
	* With "using", run the do-file first and push success or failure;
	* otherwise push the message straight away
	if "`using'" == "" {
		`pushcmd', t(`token') u(`userid') m(`message') `attach'
	}
	else {
		capture noisily do "`using'"
		if _rc {
			`pushcmd', t(`token') u(`userid') m("There's an error in `using'.")
		}
		else {
			`pushcmd', t(`token') u(`userid') m(`message') `attach'
		}
	}
end
* Pushover command
capture program drop _statapush
program define _statapush
	version 12.1
	syntax, Token(string) Userid(string) Message(string)
	* POST the message to the Pushover REST endpoint via curl
	local api "https://api.pushover.net/1/messages.json"
	quietly !curl -s -F "token=`token'" -F "user=`userid'" -F "title=statapush" -F "message=`message'" `api'
	display as text "Notification pushed at `c(current_time)' via Pushover"
end
* IFTTT command
capture program drop _statapushifttt
program define _statapushifttt
	version 12.1
	syntax, Token(string) Message(string) [Userid(string)]
	* Fire the "StataPush" Maker-channel event; userid is accepted but
	* unused so all providers share one option set
	local endpoint "https://maker.ifttt.com/trigger/StataPush/with/key/`token'"
	quietly !curl -X POST -H "Content-Type: application/json" -d "{\"value1\": \"StataPush\", \"value2\": \"`message'\"}" `endpoint'
	display as text "Notification pushed at `c(current_time)' via IFTTT"
end
* Pushbullet command
capture program drop _statapushbullet
program define _statapushbullet
version 12.1
* Push a note (or a file, via attach) through the Pushbullet v2 API.
* Userid is accepted for interface symmetry with the other providers but
* is not used here: Pushbullet authenticates with the access token alone.
syntax, Token(string) Message(string) [Userid(string)] [Attach(string)]
if "`attach'" == "" {
* plain note push
quietly !curl -u "`token'": -X POST https://api.pushbullet.com/v2/pushes --header "Content-Type: application/json" --data-binary "{\"type\": \"note\", \"title\": \"StataPush\", \"body\": \"`message'\"}"
}
else {
* upload the file first; _uploadpushbullet returns the URLs in r()
quietly capture _uploadpushbullet, t("`token'") a("`attach'")
if _rc == 601 {
* 601 = file not found: degrade gracefully to a plain note
display as error "File not found: `attach'. Will attempt to notify without attachment."
quietly !curl -u "`token'": -X POST https://api.pushbullet.com/v2/pushes --header "Content-Type: application/json" --data-binary "{\"type\": \"note\", \"title\": \"StataPush\", \"body\": \"`message'\"}"
}
else {
* push of type "file" referencing the uploaded attachment
local file_url "`r(file_url)'"
local upload_url "`r(upload_url)'"
local file_type "`r(file_type)'"
quietly !curl --header "Access-Token: `token'" --header "Content-Type: application/json" --data-binary "{\"type\": \"file\", \"title\": \"StataPush\", \"body\": \"`message'\", \"file_name\": \"`attach'\", \"file_type\": \"`file_type'\", \"file_url\": \"`file_url'\"}" --request POST https://api.pushbullet.com/v2/pushes
}
}
display as text "Notification pushed at `c(current_time)' via Pushbullet"
end
* Upload a file with Pushbullet
capture program drop _uploadpushbullet
program define _uploadpushbullet, rclass
version 12.1
* Request an upload slot from Pushbullet, upload `attach', and return
* r(file_url), r(upload_url) and r(file_type) for the subsequent push.
* Exits 601 if the file does not exist.
syntax, Token(string) Attach(string)
* Confirm file
quietly capture confirm file "`attach'"
if _rc != 0 {
display as error "File not found: `attach'."
exit 601
}
* Get file extension: repeatedly tokenize on "." so the last token left
* in `extension' is the text after the final dot
local next "`attach'"
gettoken extension next: next, parse(".")
while (`"`next'"' != "") {
gettoken extension next: next, parse(".")
}
* Set file type (MIME type guessed from the extension; anything
* unrecognised is sent as a generic binary stream)
if inlist("`extension'", "png", "wmf", "jpeg", "jpg") {
local file_type "image/`extension'"
}
else if inlist("`extension'", "eps", "ps") {
local file_type "application/postscript"
}
else if inlist("`extension'", "pdf") {
local file_type "application/pdf"
}
else if inlist("`extension'", "log", "txt") {
local file_type "text/plain"
}
else {
local file_type "application/octet-stream"
}
* Request URL from Pushbullet and save response to temporary file
tempfile responsetxt
quietly !curl --header "Access-Token: `token'" --header "Content-Type: application/json" --data-binary "{\"file_name\": \"`attach'\", \"file_type\": \"`file_type'\"}" --request POST https://api.pushbullet.com/v2/upload-request >> "`responsetxt'"
* Read JSON response and extract URL info
* (assumes the whole JSON reply is on one line -- TODO confirm; the
* substr offsets strip the literal `file_url":"' prefix (11 chars) and
* the trailing `",' / `"}' from each regex match)
file open jsonfile using `responsetxt', read text
file read jsonfile line
quietly display regexm(`"`macval(line)'"', "file_url.+,")
local file_url = substr(regexs(0), 12, length(regexs(0)) - 13)
quietly display regexm(`"`macval(line)'"', "upload_url.+}")
local upload_url = substr(regexs(0), 14, length(regexs(0)) - 15)
file close jsonfile
* Upload file
quietly !curl -i -X POST `upload_url' -F "file=@`attach'"
return local file_url "`file_url'"
return local upload_url "`upload_url'"
return local file_type "`file_type'"
end
* Pull StataPush preferences
capture program drop _statapushprefgrab
program define _statapushprefgrab, rclass
* Read saved defaults out of statapushconfig.ado (written by
* -statapushpref-). -include- executes the config file in this program's
* scope, defining the locals default_provider, <provider>_token and
* <provider>_userid; the last definitions in the file win.
quietly findfile statapushconfig.ado
quietly include "`r(fn)'"
return local provider "`default_provider'"
* nested expansion: e.g. `pushover_token' when default_provider is pushover
return local token "``default_provider'_token'"
return local userid "``default_provider'_userid'"
end
|
/*
statapushpref: Stata module to save preferences for statapush
Authors: William L. Schpero and Vikram Jambulapati
Contact: william.schpero@yale.edu
Date: 022016
Version: 1.0
*/
* Save default provider/token/userid for -statapush- by appending local
* definitions to statapushconfig.ado (read back by _statapushprefgrab).
capture program drop statapushpref
program define statapushpref
	version 12.1
	syntax, Token(string) Userid(string) Provider(string)
	local provider = lower("`provider'")
	quietly findfile statapushconfig.ado
	local statapushconfig "`r(fn)'"
	* Capture the file operations so the confirmation message reflects
	* what actually happened (previously _rc was tested without any
	* preceding -capture-, i.e. against a stale return code).
	capture {
		quietly file open statapushpref_ado using "`statapushconfig'", write append
		quietly file write statapushpref_ado "local default_provider `provider'" _n
		quietly file write statapushpref_ado "local `provider'_token `token'" _n
		quietly file write statapushpref_ado "local `provider'_userid `userid'" _n
		quietly file close statapushpref_ado
	}
	local rc = _rc
	if `rc' == 0 {
		display as result "Your preferences have been saved in statapushconfig.ado."
	}
	else {
		* release the handle before reporting failure
		capture file close statapushpref_ado
		display as error "Could not save preferences in statapushconfig.ado."
		exit `rc'
	}
end
|
* Write a minimal Stan model to disk line by line from Stata.
* The #delimit ; block lets the quoted model lines span multiple
* physical lines inside a single -foreach- statement.
tempname fout
file open `fout' using "mystanmodel.stan", write replace
#delimit ;
foreach line in
"data {"
" int N;"
" real x[N];"
"}"
"parameters{"
" real mu;"
"}"
{;
#delimit cr
file write `fout' "`line'" _n
}
file close `fout'
//stan ...
|
capture program drop inline
program define inline
	* Extract a Stan model embedded in a do-file comment block (a line
	* "/*" followed by a line beginning "data") and write it to
	* modelfile. If thisfile is not given, locate the running do-file's
	* temporary copy in c(tmpdir) -- same logic as in -stan- itself.
	syntax , MODELfile(string) [THISfile(string)]
	// what if modelfile already exists? should there be a replace suboption?
	tempname fin
	// list the temp dir into a tempfile (the old hard-coded "tdir-ls"
	// littered the working directory, and with >> it accumulated stale
	// entries across runs)
	tempfile tdirls
	local tdir=c(tmpdir)
	// fetch temp do-file copy if no thisfile has been named
	if "`thisfile'"=="" {
		tempname lsin
		if "$S_OS"=="windows" {
			shell dir `tdir' -b -o:-D >> `tdirls'
		}
		else {
			shell ls `tdir' -t >> `tdirls'
		}
		capture file close `lsin'
		file open `lsin' using `tdirls', read text
		// Stata temp do-files are prefixed STD on Windows, SD elsewhere
		// (the old code compared against "SD" on every OS)
		if "$S_OS"=="windows" {
			local tempprefix "STD"
		}
		else {
			local tempprefix "SD"
		}
		// scan entries until one matches the temp do-file prefix; the
		// first entry is read into thisname too, so it is no longer
		// skipped as it was before
		file read `lsin' thisname
		while substr("`thisname'",1,length("`tempprefix'"))!="`tempprefix'" {
			if r(eof)==1 {
				dis as error "Could not locate a do-file in the Stata temporary folder."
				dis as error "Try giving the path and file name with the 'thisfile' option"
				capture file close `lsin'
				error 1
			}
			file read `lsin' thisname
		}
		capture file close `lsin'
		if "$S_OS"=="windows" {
			local thisfile "`tdir'\`thisname'"
		}
		else {
			local thisfile "`tdir'/`thisname'"
		}
	}
	capture file close `fin'
	file open `fin' using "`thisfile'" , read text
	// look for the "/*" line immediately followed by a "data..." line
	file read `fin' line
	tokenize `"`line'"'
	local line1=`"`1'"'
	file read `fin' line
	tokenize `"`line'"'
	while (("`line1'"!="/*" | substr(`"`1'"',1,4)!="data") & !r(eof)) {
		local line1="`1'"
		file read `fin' line
		tokenize `"`line'"'
	}
	if r(eof) {
		// fail loudly: the old code printed a message and exited with
		// code 0, letting callers continue with a missing model file
		dis as error "Model command not found"
		capture file close `fin'
		error 1
	}
	// copy everything up to (not including) the closing "*/"
	tempname fout
	capture file close `fout'
	file open `fout' using "`modelfile'" , write replace
	file write `fout' "`line'" _n
	file read `fin' line
	while ("`line'"!="*/") {
		file write `fout' "`line'" _n
		file read `fin' line
	}
	file close `fin'
	file close `fout'
end
|
capture program drop rdump
program define rdump
version 11.0
* Dump Stata matrices and global macros to a text file in R/S ("R dump")
* format, readable by R and by CmdStan's data file= argument.
*   rfile()    output file name (default R_in.R)
*   matrices() space-separated matrix names to write, or "all"
*   globals()  space-separated global macro names to write, or "all"
*   replace    overwrite rfile if it already exists
* Missing values are written as NA.
syntax [, Rfile(string) Matrices(string) Globals(string) REPlace]
// set default file name
if "`rfile'"=="" {
local rfile="R_in.R"
}
// check whether rfile exists already
capture confirm file "`rfile'"
if !_rc & "`replace'"=="" {
display as error "`rfile' already exists; use the replace option if you want to overwrite it"
error 602
}
else if !_rc & "`replace'"!="" {
erase "`rfile'"
}
// open rfile through the tempname handle; the old code declared the
// tempname but then opened the literal handle "dataf", which could
// clash with any other program using that handle name
tempname dataf
quietly file open `dataf' using "`rfile'", write text replace
// write matrices
if "`matrices'"!="" {
if "`matrices'"=="all" {
local matrices: all matrices
}
foreach mat in `matrices' {
capture confirm matrix `mat'
// -stan- will quietly ignore names of matrices that don't exist
if !_rc {
local mrow=rowsof(`mat')
local mcol=colsof(`mat')
if `mrow'==1 { // row matrix: write as vector
if `mcol'==1 { // special case of 1x1 matrix: write as scalar
local mval=`mat'[1,1]
if `mval'==. {
local mval = "NA"
}
file write `dataf' "`mat' <- `mval'" _n
}
else {
file write `dataf' "`mat' <- c("
local mcolminusone=`mcol'-1
forvalues i=1/`mcolminusone' {
local mval=`mat'[1,`i']
if `mval'==. {
local mval = "NA"
}
file write `dataf' "`mval',"
}
local mval=`mat'[1,`mcol']
if `mval'==. {
local mval = "NA"
}
file write `dataf' "`mval')" _n
}
}
else if `mcol'==1 & `mrow'>1 { // column matrix: write as vector
file write `dataf' "`mat' <- c("
local mrowminusone=`mrow'-1
forvalues i=1/`mrowminusone' {
local mval=`mat'[`i',1]
if `mval'==. {
local mval = "NA"
}
file write `dataf' "`mval',"
}
local mval=`mat'[`mrow',1]
if `mval'==. {
local mval = "NA"
}
file write `dataf' "`mval')" _n
}
else { // otherwise, write as matrix
// values are emitted column by column, with .Dim=(rows,cols)
file write `dataf' "`mat' <- structure(c("
local mrowminusone=`mrow'-1
local mcolminusone=`mcol'-1
forvalues j=1/`mcolminusone' {
forvalues i=1/`mrow' {
local mval=`mat'[`i',`j']
if `mval'==. {
local mval = "NA"
}
file write `dataf' "`mval',"
}
}
forvalues i=1/`mrowminusone' { // write final column
local mval=`mat'[`i',`mcol']
if `mval'==. {
local mval = "NA"
}
file write `dataf' "`mval',"
}
// write final cell
local mval=`mat'[`mrow',`mcol']
if `mval'==. {
local mval = "NA"
}
file write `dataf' "`mval'), .Dim=c(`mrow',`mcol'))" _n
}
}
}
}
// write globals
if "`globals'"!="" {
if "`globals'"=="all" {
local globals: all globals
}
foreach g in `globals' {
// -stan- will quietly ignore non-numeric & non-existent globals
capture confirm number ${`g'}
if !_rc {
if ${`g'}==. {
// write missing as NA without overwriting the user's global
// (the old code set the global itself to the string "NA")
file write `dataf' "`g' <- NA" _n
}
else {
file write `dataf' "`g' <- ${`g'}" _n
}
}
}
}
file close `dataf'
end
|
// Bernoulli example:
// replace with your cmdstan folder's path
global cmdstandir "/root/cmdstan/cmdstan-2.6.2"
// here we present four different ways of combining the Stan model with your do-file
//##############################################
// Version 1: write a separate model file
/* For this example, we can just copy the Bernoulli.stan file
from the examples folder, but you would typically write your
.stan file in a text editor and save it */
if lower("$S_OS")=="windows" {
!copy examples\bernoulli\bernoulli.stan bernoulli.stan
}
else {
!cp ./examples/bernoulli/bernoulli.stan bernoulli.stan
}
// make your data: 10 Bernoulli draws, 2 successes
clear
set obs 10
gen y=0
replace y=1 in 2
replace y=1 in 10
count
global N=r(N)
// call Stan, providing the modelfile option
stan y, modelfile("bernoulli.stan") cmd("$cmdstandir") globals("N")
//###########################################
/* Version 2: specify the model inline, the John Thompson way (in a comment block),
naming THIS do-file in the thisfile option */
// make your data (same toy dataset as Version 1)
clear
set obs 10
gen y=0
replace y=1 in 2
replace y=1 in 10
count
global N=r(N)
// here's the model:
/*
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
for (n in 1:N)
y[n] ~ bernoulli(theta);
}
*/
// call Stan with the inline and thisfile options.
// modelfile now tells it where to save your model
stan y, inline thisfile("/root/git/statastan/stan-example.do") ///
modelfile("inline-bernoulli.stan") ///
cmd("$cmdstandir") globals("N") load mode
//###############################################################
/* Version 3: use the comment block, but don't provide thisfile - Stata
will go looking for it in c(tmpdir), which saves you typing in the
do-file name and path, but might not work sometimes */
// make your data (same toy dataset again)
clear
set obs 10
gen y=0
replace y=1 in 2
replace y=1 in 10
count
global N=r(N)
// here's the model:
/*
data {
int<lower=0> N;
int<lower=0,upper=1> y[N];
}
parameters {
real<lower=0,upper=1> theta;
}
model {
theta ~ beta(1,1);
for (n in 1:N)
y[n] ~ bernoulli(theta);
}
*/
stan y, inline modelfile("inline-bernoulli.stan") ///
cmd("$cmdstandir") globals("N") load mode
//###############################################################
/* Version 4: specify the model inline, the Charles Opondo way, so
it is written to the text file of your choosing but everything is
controlled from the do-file */
// make the data
clear
set obs 10
gen y=0
replace y=1 in 2
replace y=1 in 10
count
global N=r(N)
// write the model from Stata into a plain text file
// (#delimit ; lets the quoted model lines span one -foreach- call)
tempname writemodel
file open `writemodel' using "mystanmodel.stan", write replace
#delimit ;
foreach line in
"data { "
" int<lower=0> N; "
" int<lower=0,upper=1> y[N];"
"} "
"parameters {"
" real<lower=0,upper=1> theta;"
"} "
"model {"
" theta ~ beta(1,1);"
" for (n in 1:N) "
" y[n] ~ bernoulli(theta);"
"}"
{;
#delimit cr
file write `writemodel' "`line'" _n
}
file close `writemodel'
// call Stan
stan y, modelfile("mystanmodel.stan") cmd("$cmdstandir") globals("N") load mode
/* there are two reasons why we prefer the Opondo method:
1. We can't rule out other processes making SD* or STD* files in the
tmpdir while Stata is running (or indeed other parts of your do-file(s)
2. Naming the do-file inside itself is a particularly perverse form of
hard-coding. It is likely to cause you trouble later.
Nevertheless, we admire Prof Thompson's achievements interfacing with BUGS, and
do not intend to detract from any of that useful body of work.
Remember, if your Stan model somehow required quotes, then you would
have to make sure you use Stata compound quotes
(see http://www.stata.com/meeting/5uk/program/quotes1.html)
Also, note that Stigler's law of eponymy is hard at work here. */
|
capture program drop stan
program define stan
version 11.0
syntax varlist [if] [in] [, DATAfile(string) MODELfile(string) ///
INLINE THISFILE(string) RERUN ///
INITsfile(string) LOAD DIAGnose OUTPUTfile(string) MODESFILE(string) ///
CHAINFile(string) WINLOGfile(string) SEED(integer -1) CHAINS(integer 1) ///
WARMUP(integer -1) ITER(integer -1) THIN(integer -1) CMDstandir(string) ///
MODE SKipmissing MATrices(string) GLobals(string) KEEPFiles ///
STEPSIZE(real 1) STEPSIZEJITTER(real 0) NOPywarn]
/* options:
datafile: name to write data into in R/S format (in working dir)
modelfile: name of Stan model (that you have already saved)
must end in .stan
(following John Thompson's lead, if modelfile=="", then look for
a comment block in your do-file that begins with a line:
"data {" and this will be written out as the model
inline: read in the model from a comment block in this do-file
thisfile: optional, to use with inline; gives the path and name of the
current active do-file, used to locate the model inline. If
thisfile is omitted, Stata will look at the most recent SD*
file in c(tmpdir)
rerun: if specified, uses the existing executable file with the same name as
modelfile (in Windows, it will have .exe extension). This should exist in the
cmdstandir (see below). Be aware it will be copied into the working directory,
overwriting any existing file of that name.
initsfile: name of initial values file in R/S that you have already saved
load: read iterations into Stata
diagnose: run gradient diagnostics
outputfile: name of file to contain Stan output
modesfile: CSV file to contain posterior modes and BFGS log-probs
Careful not to mix this up with modelfile!
chainfile: name of CSV file to contain chain (trimmed version of output.csv)
to support parallel processing, numbers will be added to this (but not yet)
winlogfile: in Windows, where to store stdout & stderr before displaying on the screen
seed: RNG seed
chains: number of chains
warmup: number of warmup (burn-in) steps
iter: number of samples to retain after warmup
thin: keep every nth sample, for autocorrelation
cmdstandir: CmdStan path (not including /bin)
mode: run Stan's optimize funtion to get posterior mode
skipmissing: omit missing values variablewise to Stan (caution required!!!)
matrices: list of matrices to write, or 'all'
globals: list of global macro names to write, or 'all'
keepfiles: if stated, all files generated are kept in the working directory; if not,
all are deleted except the modelfile, C++ code, the executable, the modesfile and
the chainfile.
stepsize: HMC stepsize, gets passed to CmdStan
stepsize_jitter: HMC stepsize jitter, gets passed to CmdStan
nopywarn: do not show the warning for Stata version 16 and up, advising use of Python
Notes:
non-existent globals and matrices, and non-numeric globals, get quietly ignored
missing values are removed casewise by default
users need to take care not to leave output file names as defaults if they
have anything called output.csv or modes.csv etc. - these will be overwritten!
*/
local statacurrentversion=c(stata_version)
if `statacurrentversion'>15.9 & "`nopywarn'"=="" {
display as text "Note! Since Stata version 16.0, Stan can be accessed via Python integration. Stan developers recommend this for speed and stability. StataStan (the -stan- command) is not maintained for Stata 16.0 and later versions."
}
local statastanversion="1.2.4"
local wdir="`c(pwd)'"
local cdir="`cmdstandir'"
// get CmdStan version
tempname cmdstanversioncheck // note this is tempname not tempfile
if lower("$S_OS")=="windows" {
shell "`cdir'\bin\stanc" --version >> "`cmdstanversioncheck'"
}
else {
shell "`cdir'/bin/stanc" --version >> "`cmdstanversioncheck'"
}
file open cv using "`cmdstanversioncheck'", read
file read cv cvline
local cmdstanversion=substr("`cvline'",15,.)
dis as result "StataStan version: `statastanversion'"
dis as result "CmdStan version: `cmdstanversion'"
file close cv
if lower("$S_OS")=="windows" {
qui shell del "`cmdstanversioncheck'"
}
else {
qui shell rm "`cmdstanversioncheck'"
}
// defaults
if "`datafile'"=="" {
local datafile="statastan_data.R"
}
if "`modelfile'"=="" {
local modelfile="statastan_model.stan"
}
/* we assume the modelfile ends ".stan" (CmdStan requires this) because we
will chop the last 5 chars off to make the execfile name */
if "`initsfile'"=="" {
local initlocation=1
}
else {
if lower("$S_OS")=="windows" {
local initlocation="`wdir'\\`initsfile'"
}
else {
local initlocation="`wdir'/`initsfile'"
}
}
// this holds the entered name but .csv will be appended later
if "`outputfile'"=="" {
local outputfile="output"
}
if "`modesfile'"=="" {
local modesfile="modes.csv"
}
if "`chainfile'"=="" {
local chainfile="statastan_chains.csv"
}
if "`chainfile'"=="`outputfile'" | "`chainfile'"=="`outputfile'.csv" {
print as error "chainfile and outputfile cannot have the same name"
error 1
}
if "`winlogfile'"=="" {
local winlogfile="winlog.txt" // this only gets used in Windows
}
local lenmod=length("`modelfile'")-5
local execfile=substr("`modelfile'",1,`lenmod')
local deleteme="`execfile'"
if lower("$S_OS")=="windows" {
local execfile="`deleteme'"+".exe"
}
local cppfile="`deleteme'"+".hpp"
// strings to insert into shell command
if `seed'==(-1) {
local seedcom=""
}
else {
local seedcom="random seed=`seed'"
}
if `chains'<1 {
dis as error "You must specify 1 or more chains"
error 1
}
if `warmup'==(-1) {
local warmcom=""
}
else {
local warmcom="num_warmup=`warmup'"
}
if `iter'==(-1) {
local itercom=""
}
else {
local itercom="num_samples=`iter'"
}
if `thin'==(-1) {
local thincom=""
}
else {
local thincom="thin=`thin'"
}
local stepcom="stepsize=`stepsize'"
local stepjcom="stepsize_jitter=`stepsizejitter'"
// check for existing files
tempfile outputcheck
if lower("$S_OS")=="windows" {
shell if exist "`cdir'\\`outputfile'*.csv" (echo yes) else (echo no) >> "`outputcheck'"
}
else {
shell test -e "`cdir'/`outputfile'*.csv" && echo "yes" || echo "no" >> "`outputcheck'"
}
file open oc using "`outputcheck'", read
file read oc ocline
if "`ocline'"=="yes" {
dis as error "There are already one or more files in `cdir' called `outputfile'*.csv"
dis as error "These may be overwritten by StataStan or incorrectly included in posterior summaries."
dis as error "Please rename or move them to avoid data loss or errors."
file close oc
error 1
}
file close oc
if lower("$S_OS")=="windows" {
shell if exist "`wdir'\\`outputfile'*.csv" (echo yes) else (echo no) >> "`outputcheck'"
}
else {
shell test -e "`wdir'/`outputfile'*.csv" && echo "yes" || echo "no" >> "`outputcheck'"
}
file open oc using "`outputcheck'", read
file read oc ocline
if "`ocline'"=="yes" {
dis as error "There are already one or more files in `wdir' called `outputfile'*.csv"
dis as error "These may be overwritten by StataStan or incorrectly included in posterior summaries."
dis as error "Please rename or move them to avoid data loss or errors."
error 1
}
file close oc
preserve
if "`if'"!="" | "`in'"!="" {
keep `if' `in'
}
// drop missing data casewise
if "`skipmissing'"!="skipmissing" {
foreach v of local varlist {
qui count if `v'!=.
local nthisvar=r(N)
qui drop if `v'==. & `nthisvar'>1
}
}
// the capture block ensures the file handles are closed at the end, no matter what
capture noisily {
// inline (John Thompson's approach) model written to .stan file
if "`inline'"!="" {
tempname fin
tempfile tdirls
local tdir=c(tmpdir)
// fetch temp do-file copy if no thisfile has been named
if "`thisfile'"=="" {
tempname lsin
if lower("$S_OS")=="windows" {
shell dir `tdir' -b -o:-D >> `tdirls'
}
else {
shell ls `tdir' -t >> `tdirls'
}
tempname lsin
capture file close `lsin'
file open `lsin' using `tdirls', read text
// assumes there's nothing else on the 1st line
file read `lsin' thisfile // is this OK? it will overwrite the thisfile local
if lower("$S_OS")=="windows" {
local tempprefix="STD"
}
else {
local tempprefix="SD"
}
while substr("`thisname'",1,2)!="`tempprefix'" {
file read `lsin' thisname
if lower("$S_OS")=="windows" {
local thisfile "`tdir'\`thisname'"
}
else {
local thisfile "`tdir'/`thisname'"
}
if r(eof)==1 {
dis as error "Could not locate a do-file in the Stata temporary folder."
dis as error "Try giving the path and file name with the 'thisfile' option"
capture file close `lsin'
error 1
}
}
capture file close `lsin'
}
tempname fin
capture file close `fin'
file open `fin' using "`thisfile'" , read text
file read `fin' line
tokenize `"`line'"'
local line1=`"`1'"'
file read `fin' line
tokenize `"`line'"'
while (("`line1'"!="/*" | substr(`"`1'"',1,4)!="data") & !r(eof)) {
local line1="`1'"
file read `fin' line
tokenize `"`line'"'
}
if r(eof) {
dis as error "Model command not found"
capture file close `fin'
error 1
}
tempname fout
capture file close `fout'
file open `fout' using "`modelfile'" , write replace
file write `fout' "`line'" _n
file read `fin' line
while ("`line'"!="*/") {
file write `fout' "`line'" _n
file read `fin' line
}
file close `fin'
file close `fout'
}
// write data file in R/S format
// first, write out the data in Stata's memory
// this can only cope with scalars (n=1) and vectors; matrices & globals are named in the option
file open dataf using `datafile', write text replace
foreach v of local varlist {
confirm numeric variable `v'
local linenum=1
qui count if `v'!=.
local nthisvar=r(N)
if `nthisvar'>1 {
file write dataf "`v' <- c("
if "`skipmissing'"=="skipmissing" {
local nlines=0
local i=1
local linedata=`v'[`i']
while `nlines'<`nthisvar' {
if `linedata'!=. & `nlines'<(`nthisvar'-1) {
file write dataf "`linedata', "
local ++i
local ++nlines
local linedata=`v'[`i']
}
else if `linedata'!=. & `nlines'==(`nthisvar'-1) {
file write dataf "`linedata')" _n
local ++nlines
}
else {
local ++i
local linedata=`v'[`i']
}
}
}
else {
forvalues i=1/`nthisvar' {
local linedata=`v'[`i']
if `i'<`nthisvar' {
file write dataf "`linedata', "
}
else {
file write dataf "`linedata')" _n
}
}
}
}
else if `nthisvar'==1 {
local linedata=`v'[1]
file write dataf "`v' <- `linedata'" _n
}
}
// write matrices
if "`matrices'"!="" {
if "`matrices'"=="all" {
local matrices: all matrices
}
foreach mat in `matrices' {
capture confirm matrix `mat'
// -stan- will quietly ignore names of matrices that don't exist
if !_rc {
local mrow=rowsof(`mat')
local mcol=colsof(`mat')
if `mrow'==1 { // row matrix: write as vector
if `mcol'==1 { // special case of 1x1 matrix: write as scalar
local mval=`mat'[1,1]
file write dataf "`mat' <- `mval'" _n
}
else {
file write dataf "`mat' <- c("
local mcolminusone=`mcol'-1
forvalues i=1/`mcolminusone' {
local mval=`mat'[1,`i']
file write dataf "`mval',"
}
local mval=`mat'[1,`mcol']
file write dataf "`mval')" _n
}
}
else if `mcol'==1 & `mrow'>1 { // column matrix: write as vector
file write dataf "`mat' <- c("
local mrowminusone=`mrow'-1
forvalues i=1/`mrowminusone' {
local mval=`mat'[`i',1]
file write dataf "`mval',"
}
local mval=`mat'[`mrow',1]
file write dataf "`mval')" _n
}
else { // otherwise, write as matrix
file write dataf "`mat' <- structure(c("
local mrowminusone=`mrow'-1
local mcolminusone=`mcol'-1
forvalues j=1/`mcolminusone' {
forvalues i=1/`mrow' {
local mval=`mat'[`i',`j']
file write dataf "`mval',"
}
}
forvalues i=1/`mrowminusone' { // write final column
local mval=`mat'[`i',`mcol']
file write dataf "`mval',"
}
// write final cell
local mval=`mat'[`mrow',`mcol']
file write dataf "`mval'), .Dim=c(`mrow',`mcol'))" _n
}
}
}
}
// write globals
if "`globals'"!="" {
if "`globals'"=="all" {
local globals: all globals
}
foreach g in `globals' {
// -stan- will quietly ignore non-numeric & non-existent globals
capture confirm number ${`g'}
if !_rc {
file write dataf "`g' <- ${`g'}" _n
}
}
}
}
file close dataf
restore
/*#############################################################
######################## Windows code #########################
#############################################################*/
if lower("$S_OS")=="windows" {
// unless re-running an existing compiled executable, move model to cmdstandir
if "`rerun'"!="rerun" {
// check if modelfile already exists in cdir
capture confirm file "`cdir'\\`modelfile'"
if !_rc {
// check they are different before copying and compiling
tempfile working
shell fc /lb2 "`wdir'\\`modelfile'" "`cdir'\\`modelfile'" > "`working'"
// if different shell copy "`wdir'\\`modelfile'" "`cdir'\\`modelfile'"
}
else {
windowsmonitor, command(copy "`wdir'\\`modelfile'" "`cdir'\\`modelfile'") ///
winlogfile(`winlogfile') waitsecs(30)
}
}
else {
windowsmonitor, command(copy "`wdir'\\`execfile'" "`cdir'\\`execfile'") ///
winlogfile(`winlogfile') waitsecs(30)
}
! copy "`cdir'\`winlogfile'" "`wdir'\winlog1"
qui cd "`cdir'"
if "`rerun'"=="" {
dis as result "###############################"
dis as result "### Output from compiling ###"
dis as result "###############################"
windowsmonitor, command(make "`execfile'") winlogfile(`winlogfile') waitsecs(30)
}
! copy `cdir'\`winlogfile' `wdir'
! copy "`cdir'\`cppfile'" "`wdir'\`cppfile'"
! copy "`cdir'\`execfile'" "`wdir'\`execfile'"
dis as result "##############################"
dis as result "### Output from sampling ###"
dis as result "##############################"
if `chains'==1 {
windowsmonitor, command(`cdir'\\`execfile' method=sample `warmcom' `itercom' `thincom' algorithm=hmc `stepcom' `stepjcom' `seedcom' output file="`wdir'\\`outputfile'.csv" data file="`wdir'\\`datafile'") ///
winlogfile(`winlogfile') waitsecs(30)
}
else {
windowsmonitor, command(for /l %%x in (1,1,`chains') do start /b /w `cdir'\\`execfile' id=%%x method=sample `warmcom' `itercom' `thincom' algorithm=hmc `stepcom' `stepjcom' `seedcom' output file="`wdir'\\`outputfile'%%x.csv" data file="`wdir'\\`datafile'") ///
winlogfile(`winlogfile') waitsecs(30)
}
! copy "`cdir'\`winlogfile'" "`wdir'\winlog3"
! copy "`cdir'\`outputfile'*.csv" "`wdir'\`outputfile'*.csv"
windowsmonitor, command(bin\stansummary.exe "`wdir'\\`outputfile'*.csv") winlogfile(`winlogfile') waitsecs(30)
// reduce csv file
if `chains'==1 {
file open ofile using "`wdir'\\`outputfile'.csv", read
file open rfile using "`wdir'\\`chainfile'", write text replace
capture noisily {
file read ofile oline
while r(eof)==0 {
if length("`oline'")!=0 {
local firstchar=substr("`oline'",1,1)
if "`firstchar'"!="#" {
file write rfile "`oline'" _n
}
}
file read ofile oline
}
}
file close ofile
file close rfile
}
else {
local headerline=1 // flags up when writing the variable names in the header
file open ofile using "`wdir'\\`outputfile'1.csv", read
file open rfile using "`wdir'\\`chainfile'", write text replace
capture noisily {
file read ofile oline
while r(eof)==0 {
if length("`oline'")!=0 {
local firstchar=substr("`oline'",1,1)
if "`firstchar'"!="#" {
if `headerline'==1 {
file write rfile "`oline',chain" _n
local headerline=0
}
else {
file write rfile "`oline',1" _n
}
}
}
file read ofile oline
}
}
file close ofile
forvalues i=2/`chains' {
file open ofile using "`wdir'\\`outputfile'`i'.csv", read
capture noisily {
file read ofile oline
while r(eof)==0 {
if length("`oline'")!=0 {
local firstchar=substr("`oline'",1,1)
// skip comments and (because these are chains 2-n)
// the variable names (which always start with lp__)
if "`firstchar'"!="#" & "`firstchar'"!="l" {
file write rfile "`oline',`i'" _n
}
}
file read ofile oline
}
}
file close ofile
}
file close rfile
}
if "`mode'"=="mode" {
dis as result "#############################################"
dis as result "### Output from optimizing to find mode ###"
dis as result "#############################################"
windowsmonitor, command(`cdir'\\`execfile' optimize data file="`wdir'\\`datafile'" output file="`wdir'\\`outputfile'.csv") ///
winlogfile(`winlogfile') waitsecs(30)
// extract mode and lp__ from output.csv
file open ofile using "`wdir'\\`outputfile'.csv", read
file open mfile using "`wdir'\\`modesfile'", write text replace
capture noisily {
file read ofile oline
while r(eof)==0 {
if length("`oline'")!=0 {
local firstchar=substr("`oline'",1,1)
if "`firstchar'"!="#" {
file write mfile "`oline'" _n
}
}
file read ofile oline
}
}
file close ofile
file close mfile
preserve
insheet using "`wdir'\\`modesfile'", comma names clear
local lp=lp__[1]
dis as result "Log-probability at maximum: `lp'"
drop lp__
xpose, clear varname
qui count
local npars=r(N)
forvalues i=1/`npars' {
local parname=_varname[`i']
label define parlab `i' "`parname'", add
}
encode _varname, gen(Parameter) label(parlab)
gen str14 Posterior="Mode"
tabdisp Parameter Posterior, cell(v1) cellwidth(9) left
restore
}
if "`diagnose'"=="diagnose" {
dis as result "#################################"
dis as result "### Output from diagnostics ###"
dis as result "#################################"
windowsmonitor, command(`cdir'\\`execfile' diagnose data file="`wdir'\\`datafile'") ///
winlogfile("`wdir'\\`winlogfile'") waitsecs(30)
}
// tidy up files
qui shell del "`winlogfile'"
qui shell del "wmbatch.bat"
qui shell del "`modelfile'"
qui shell copy "`cppfile'" "`wdir'\\`cppfile'"
qui shell copy "`execfile'" "`wdir'\\`execfile'"
if "`keepfiles'"=="" {
qui shell del "`wdir'\\`winlogfile'"
qui shell del "`wdir'\\wmbatch.bat"
qui shell del "`wdir'\\`outputfile'*.csv"
}
qui shell del "`cdir'\\`cppfile'"
qui shell del "`cdir'\\`execfile'"
qui cd "`wdir'"
}
/*#######################################################
#################### Linux / Mac code ###################
#######################################################*/
else {
// unless re-running an existing compiled executable, move model to cmdstandir
if "`rerun'"!="rerun" {
// check if modelfile already exists in cdir
capture confirm file "`cdir'/`modelfile'"
if !_rc {
// check they are different before copying and compiling
tempfile working
shell diff -b "`wdir'/`modelfile'" "`cdir'/`modelfile'" > "`working'"
tempname wrk
file open `wrk' using "`working'", read text
file read `wrk' line
if "`line'" !="" {
shell cp "`wdir'/`modelfile'" "`cdir'/`modelfile'"
}
}
else {
shell cp "`wdir'/`modelfile'" "`cdir'/`modelfile'"
}
shell cp "`wdir'/`modelfile'" "`cdir'/`modelfile'"
}
else {
shell cp "`wdir'/`execfile'" "`cdir'/`execfile'"
}
qui cd "`cdir'"
if "`rerun'"=="" {
dis as result "###############################"
dis as result "### Output from compiling ###"
dis as result "###############################"
shell make "`execfile'"
// leave modelfile in cdir so make can check need to re-compile
// qui shell rm "`cdir'/`modelfile'"
}
dis as result "##############################"
dis as result "### Output from sampling ###"
dis as result "##############################"
if `chains'==1 {
shell ./`execfile' method=sample `warmcom' `itercom' `thincom' algorithm=hmc `stepcom' `stepjcom' `seedcom' output file="`wdir'/`outputfile'.csv" data file="`wdir'/`datafile'"
}
else {
shell for i in {1..`chains'}; do ./`execfile' id=\$i method=sample `warmcom' `itercom' `thincom' algorithm=hmc `stepcom' `stepjcom' `seedcom' output file="`wdir'/`outputfile'\$i.csv" data file="`wdir'/`datafile'" & done
}
shell bin/stansummary `wdir'/`outputfile'*.csv
// reduce csv file
if `chains'==1 {
file open ofile using "`wdir'/`outputfile'.csv", read
file open rfile using "`wdir'/`chainfile'", write text replace
capture noisily {
file read ofile oline
while r(eof)==0 {
if length("`oline'")!=0 {
local firstchar=substr("`oline'",1,1)
if "`firstchar'"!="#" {
file write rfile "`oline'" _n
}
}
file read ofile oline
}
}
file close ofile
file close rfile
}
else {
local headerline=1 // flags up when writing the variable names in the header
file open ofile using "`wdir'/`outputfile'1.csv", read
file open rfile using "`wdir'/`chainfile'", write text replace
capture noisily {
file read ofile oline
while r(eof)==0 {
if length("`oline'")!=0 {
local firstchar=substr("`oline'",1,1)
if "`firstchar'"!="#" {
if `headerline'==1 {
file write rfile "`oline',chain" _n
local headerline=0
}
else {
file write rfile "`oline',1" _n
}
}
}
file read ofile oline
}
}
file close ofile
forvalues i=2/`chains' {
file open ofile using "`wdir'/`outputfile'`i'.csv", read
capture noisily {
file read ofile oline
while r(eof)==0 {
if length("`oline'")!=0 {
local firstchar=substr("`oline'",1,1)
// skip comments and (because these are chains 2-n)
// the variable names (which always start with lp__)
if "`firstchar'"!="#" & "`firstchar'"!="l" {
file write rfile "`oline',`i'" _n
}
}
file read ofile oline
}
}
file close ofile
}
file close rfile
}
if "`mode'"=="mode" {
dis as result "#############################################"
dis as result "### Output from optimizing to find mode ###"
dis as result "#############################################"
shell "`cdir'/`execfile'" optimize data file="`wdir'/`datafile'" output file="`wdir'/`outputfile'.csv"
// extract mode and lp__ from output.csv
file open ofile using "`wdir'/`outputfile'.csv", read
file open mfile using "`wdir'/`modesfile'", write text replace
capture noisily {
file read ofile oline
while r(eof)==0 {
if length("`oline'")!=0 {
local firstchar=substr("`oline'",1,1)
if "`firstchar'"!="#" {
file write mfile "`oline'" _n
}
}
file read ofile oline
}
}
file close ofile
file close mfile
preserve
insheet using "`wdir'/`modesfile'", comma names clear
local lp=lp__[1]
dis as result "Log-probability at maximum: `lp'"
drop lp__
xpose, clear varname
qui count
local npars=r(N)
forvalues i=1/`npars' {
local parname=_varname[`i']
label define parlab `i' "`parname'", add
}
encode _varname, gen(Parameter) label(parlab)
gen str14 Posterior="Mode"
tabdisp Parameter Posterior, cell(v1) cellwidth(9) left
restore
}
if "`diagnose'"=="diagnose" {
dis as result "#################################"
dis as result "### Output from diagnostics ###"
dis as result "#################################"
shell "`cdir'/`execfile'" diagnose data file="`wdir'/`datafile'"
}
// tidy up files
qui shell rm "`winlogfile'"
qui shell rm "wmbatch.bat"
qui shell rm "`modelfile'"
qui shell cp "`cppfile'" "`wdir'/`cppfile'"
qui shell cp "`execfile'" "`wdir'/`execfile'"
if "`keepfiles'"=="" {
qui shell rm "`wdir'/`outputfile'.csv"
}
qui shell rm "`cdir'/`cppfile'"
qui shell rm "`cdir'/`execfile'"
qui cd "`wdir'"
}
if "`load'"=="load" {
dis as result "############################################"
dis as result "### Now loading Stan output into Stata ###"
dis as result "############################################"
// read in output and tabulate
insheet using "`chainfile'", comma names clear
qui ds
local allvars=r(varlist)
gettoken v1 vn: allvars, parse(" ")
while "`v1'"!="energy__" {
gettoken v1 vn: vn, parse(" ")
}
tabstat `vn', stat(n mean sd semean min p1 p5 p25 p50 p75 p95 p99)
foreach v of local vn {
qui centile `v', c(2.5 97.5)
local cent025_`v'=r(c_1)
local cent975_`v'=r(c_2)
dis as result "95% CI for `v': `cent025_`v'' to `cent975_`v''"
}
}
end
|
* windowsmonitor: run a shell command on Windows via a generated batch file,
* polling the redirected output file and echoing newly-appeared lines to the
* Results window until the sentinel line "Finished!" is seen.
* Exists because winexec gives no live output, so we poll the log ourselves.
capture program drop windowsmonitor
program define windowsmonitor
version 11.0
syntax ,COMMAND(string asis) [ WINLOGfile(string asis) waitsecs(integer 10) ]
// stop if operating system is not Windows
if lower("$S_OS")!="windows" {
dis as error "windowsmonitor can only run under a Windows operating system"
error 601
}
// default winlogfile
if ("`winlogfile'"=="") {
tempfile winlogfile
}
else {
// delete any existing winlogfile
! del "`winlogfile'"
}
// construct batch file: the user's command, then an "echo Finished!" sentinel
// that tells the reader loop below when to stop
tempfile wmbatch
capture file close sb
capture noisily { // to ensure files are closed
// file open sb using "`wmbatch'", write text replace
file open sb using "wmbatch.bat", write text replace
file write sb `"`macval(command)'"' _n
file write sb "echo Finished!" _n
}
capture file close sb
// issue command, piping output (stdout and stderr) to winlogfile
//winexec "`wmbatch'" > "`winlogfile'"
winexec "wmbatch.bat" > "`winlogfile'" 2>&1
// wait up to waitsecs seconds for winlogfile to appear
local loopcount=0
capture confirm file "`winlogfile'"
while _rc & (`loopcount'<`waitsecs') {
sleep 1000
capture confirm file "`winlogfile'"
local ++loopcount
}
if _rc {
dis as error "No output detected from Windows after `waitsecs' seconds"
error 601
}
// start reading from winlogfile
capture file close sout
capture noisily { // to ensure files are closed
file open sout using "`winlogfile'", read text
local linecount=0
// NOTE(review): if the batch job dies without printing "Finished!" this
// loop never terminates -- the user must press Break
while(`"`macval(lastline)'"'!="Finished!") {
sleep 2000
// re-scan the whole file from the top each pass and
// display everything after the linecount-th line
file seek sout 0
file read sout line
local newlinecount=1
if `newlinecount'>`linecount' {
dis as result `"`macval(line)'"'
}
while r(eof)==0 {
file read sout line
if r(eof)==0 {
local ++newlinecount
if `newlinecount'>`linecount' {
dis as result `"`macval(line)'"'
}
local lastline=`"`macval(line)'"'
}
}
local linecount=`newlinecount'
}
}
capture file close sout
end
|
*! stan_schools: write and fit the BUGS "Schools" hierarchical linear model with StataStan.
* The first variable in varlist is the outcome; the rest are fixed-effect
* covariates. rslopes() adds random slopes (a random intercept is always
* included). A Stan model file is generated, then -stan- is called to fit it.
* NOTE(review): gammaprior() and sigmaprior() are accepted but not currently
* used anywhere when writing the model file.
version 14.0
capture program drop stan_schools
program define stan_schools
syntax varlist [if] [in] [, MODELfile(string asis) ///
Clusterid(varname) ///
RSlopes(varlist) ///
GLOBALS(string asis) ///
BETAPriors(string asis) ///
HETvar(varname) ///
THETAPrior(string asis) ///
PHIPrior(string asis) ///
SDEPrior(string asis) ///
GAMMAPrior(string asis) ///
SIGMAPrior(string asis) ///
STANOpts(string asis)]
tokenize `varlist'
// get depvar
local depvar="`1'"
macro shift
// parse varlist, count indep vars
local nbetas=1
while "``nbetas''"!="" {
local x_`nbetas'="``nbetas''" // we'll use these names in the stan model
local ++nbetas // count how many coefficients we will need
}
local --nbetas // reject the last one
// nalphas counts the random coefficients (intercept + any random slopes).
// BUGFIX: default to 1 (random intercept only) so the macro is defined even
// when rslopes() is omitted; previously it was left empty, which made
// expressions such as "if `nalphas'>1" and "cov_matrix[`nalphas']" invalid.
local nalphas=1
if "`rslopes'"!="" {
// parse xj, count random slopes
tokenize `rslopes'
while "``nalphas''"!="" {
local z_`nalphas'="``nalphas''" // we'll use these names in the stan model
local ++nalphas // count how many coefficients we will need (+ intercept)
}
// we don't reduce nalphas because of the random intercept
}
dis as result "nalphas: `nalphas'"
dis as result "nbetas: `nbetas'"
// defaults
foreach pp in betapriors thetaprior phiprior sdeprior {
if "``pp''"=="" {
local `pp'="normal(0,100)"
}
}
if "`modelfile'"=="" {
local modelfile="stan_schools_model.stan"
}
// count the clusters
qui tab `clusterid'
global M=r(r)
// count the observations
qui count
global N=r(N)
// open modelfile
tempname mf
file open `mf' using `modelfile', write text replace
file write `mf' "# A Bayesian hierarchical linear regression model" _n
file write `mf' "# following the BUGS 'Schools' example," _n
file write `mf' "# written using stan_schools (see https://github.com/stan-dev/statastan)" _n _n
file write `mf' "data {" _n
file write `mf' _tab "int<lower=0> N;" _n
file write `mf' _tab "int<lower=0> M;" _n
file write `mf' _tab "int `clusterid'[N];" _n
forvalues i=1/`nbetas' {
file write `mf' _tab "real `x_`i''[N];" _n
}
if `nalphas'>1 {
local slopes=`nalphas'-1 // random intercept has no data
forvalues i=1/`slopes' {
file write `mf' _tab "real `z_`i''[N];" _n
}
}
file write `mf' _tab "real `depvar'[N];" _n
file write `mf' _tab "cov_matrix[`nalphas'] R;" _n
file write `mf' "}" _n
file write `mf' "transformed data {" _n
file write `mf' _tab "vector[`nalphas'] gamma_mu;" _n
file write `mf' _tab "cov_matrix[`nalphas'] gamma_Sigma;" _n
file write `mf' _tab "cov_matrix[`nalphas'] invR;" _n
file write `mf' _tab "invR <- inverse(R);" _n
file write `mf' _tab "for (i in 1:`nalphas') gamma_mu[i] <- 0;" _n
file write `mf' _tab "for (i in 1:`nalphas') for (j in 1:`nalphas') gamma_Sigma[i, j] <- 0;" _n
file write `mf' _tab "for (i in 1:`nalphas') gamma_Sigma[i, i] <- 100;" _n
file write `mf' "}" _n
// BUGFIX: "_n" was missing here, so the first parameter declaration used to
// be written onto the same line as "parameters {" in the generated model
file write `mf' "parameters {" _n
file write `mf' _tab "real beta[`nbetas'];" _n
file write `mf' _tab "vector[`nalphas'] alpha[M];" _n
file write `mf' _tab "vector[`nalphas'] gamma;" _n
file write `mf' _tab "cov_matrix[`nalphas'] Sigma;" _n
// if hetvar, include formula for log-variance
if "`hetvar'"!="" {
file write `mf' _tab "real theta;" _n
file write `mf' _tab "real phi;" _n
}
// if not, SD of error
else {
file write `mf' _tab "real<lower=0> sde;" _n
}
file write `mf' "}" _n
file write `mf' "model {" _n
file write `mf' _tab "real Ymu[N];" _n
file write `mf' _tab "for(p in 1:N) {" _n
// write the linear predictor
file write `mf' _tab _tab "Ymu[p] <- alpha[`clusterid'[p], 1]" _n
if `nalphas'>1 {
forvalues i=1/`slopes' {
local iplus=`i'+1
file write `mf' _tab(3) " + alpha[`clusterid'[p],`iplus']*`z_`i''[p]" _n
}
}
forvalues i=1/`nbetas' {
file write `mf' _tab(3) " + beta[`i']*`x_`i''[p]" _n
}
file write `mf' _tab(3)";" _n
file write `mf' _tab "}" _n _n
// heteroskedastic or not?
if "`hetvar'"!="" {
file write `mf' _tab "`depvar' ~ normal(Ymu, exp(-.5*(theta + phi*`hetvar'))); " _n _n
}
else {
file write `mf' _tab "`depvar' ~ normal(Ymu, sde); " _n _n
}
file write `mf' _tab "# Priors for fixed effects:" _n
file write `mf' _tab "beta ~ `betapriors';" _n
if "`hetvar'"!="" {
file write `mf' _tab "theta ~ `thetaprior';" _n
file write `mf' _tab "phi ~ `phiprior';" _n _n
}
else {
file write `mf' _tab "sde ~ `sdeprior';" _n _n
}
// at present, you can't change the alpha priors
file write `mf' _tab "# Priors for random coefficients:" _n
file write `mf' _tab "for (m in 1:M) alpha[m] ~ multi_normal(gamma, Sigma);" _n
file write `mf' _tab "# Hyper-priors:" _n
file write `mf' _tab "gamma ~ multi_normal(gamma_mu, gamma_Sigma);" _n
file write `mf' _tab "Sigma ~ inv_wishart(`nalphas', invR);" _n
file write `mf' "}" _n
file close `mf'
// stan with stanopts
stan `varlist' `rslopes' `clusterid', globals("`globals'") ///
modelfile("`modelfile'") `stanopts'
end
/* To do:
allow a sequence of different betapriors
allow i. notation
option to feed data into previously compiled model
initial values
nori: no random intercept
Notes:
I maintain the same Greek letters as in BUGS, despite them being unusual in places
We assume hetvar is in varlist
You have to specify modelfile in modelfile() OR in stanopts()
*/
|
// Example driver: fit the BUGS "Schools" model with stan_schools.
// NOTE(review): hard-coded personal paths below -- adjust before running.
cd "C:\Users\RLGrant\Dropbox\Software\StataStan\stan_examples"
use "schools.dta", clear
// number of clusters (schools), passed to Stan via global M
qui tab school
global M=r(r)
// number of observations, passed to Stan via global N
qui count
global N=r(N)
stan_schools Y VR1 VR2 Gender LRT, rslopes(denom2 denom3 sgender) ///
globals("N M") clusterid(school) ///
stanopts(cmd("C:/Users/RLGrant/cmdstan-2.6.2"))
|
// Reproduction script for a classification misbehaviour in Stata-SVM.
// uncomment if Stata-SVM is not installed to the system
adopath ++ "../../src"
use classification_bug
// training which does the wrong thing
svm category q*, prob
// tuning parameters pulled out of a hat from https://github.com/scikit-learn/scikit-learn/issues/4800
// which causes
//svm category q*, prob c(10) gamma(0.01)
// this is equally successful
//svm category q*, prob c(100)
predict P1 // plain class predictions
predict P2, prob // predictions derived via predicted probabilities
// compare the true class distribution against both prediction methods
tab category
tab P1
tab P2
|
* experimenting with building subcommands
* I want the equivalent of python's "def wrap(a, *rest): if a == 1: one(*rest)", where the arguments are forwarded to the chosen subcommand
* Unfortunately, the Stata quoting rules are more arcane than even bash's
* references: modest amounts of googling, Nick Cox, and cluster.ado and duplicates.ado in the base library (neither of which does exactly this)
* sub: dispatcher demo -- peels the first token (the subcommand name) off `0'
* and forwards the remainder to program sub_<subcmd>.
program sub
di "1: 0=`0'"
* split off the subcommand; the rest of the command line stays in `0'
gettoken subcmd 0 : 0
di "2a: subcmd=`subcmd'"
* wtf: this version works
di "2b: 0=`0'"
* while this version, which is even more quoted, tries to evaluate the contents of the macro (and so dies with e.g. "using not found" because it thinks "using" is a Stata variable)
*di "2b: 0="`0'""
di "3: doing svm_`subcmd' `0'"
sub_`subcmd' "`0'" /*<-- this doesn't do what you think: it passes a *single* argument to the subcommand*/
di "4"
end
* sub_x: demo subcommand; parses option o() from its argument line
program sub_x
di "I am sub_x and 0=`0'"
syntax , [o(int 2)]
di "o = `o'"
end
* sub_y: demo subcommand that just echoes its argument line
program sub_y
di "I am sub_y and 0=`0'"
end
|
* exercise the dispatcher: each call routes through -sub- to sub_x / sub_y
sub x , o(7)
sub x using haphazard lolcats, in the everworld(8) of time(-1)
sub y , eighty
|
// usage: stata do stata_wrap.do $SCRIPT $LOG $RC
//
// Wraps $SCRIPT so that its output goes to $LOG and error code to $RC,
// and it ensures Stata comes down at the end even if there was an error.
// - it's an imperfect wrapper: $LOG ends up with header and footer cruft from 'log'
set linesize 119
* make all platforms consistent in their batch output;
* NOTE: this is autoincluded to all tests via makefile magic
* tip buried in http://web.stanford.edu/group/ssds/cgi-bin/drupal/files/Guides/Stata_Unix__2011.pdf
* ALSO these comments are *after* the set, because it affects how stata prints out comments.
local TRACE : env TRACE /* you can't use env by itself, for some reason */
// BEWARE: stata resets the trace setting when a do file quits,
// so when this wrapper is run via "do" the TRACE setting must be re-applied here every time
if("`TRACE'"!="") {
set trace on
set more off
}
args script log rc_log
//di as err "script = `script', log = `log', rc = `rc_log'"
log using "`log'", text replace
capture noisily do "`script'"
log close
// _rc still holds the return code of the capture'd "do" above
// (commands that are not capture'd, such as "log close", do not overwrite it)
//di as err "rc = `=_rc'"
tempname fd
file open `fd' using "`rc_log'", write text replace
file write `fd' "`=_rc'" _n
file close `fd'
// force-quit Stata
exit, clear STATA
|
/* export_svmlight: export the Stata dataset to a .svmlight format file. See _svmlight.c */
* NOTE(review): presumably the first variable in varlist becomes the svmlight
* outcome (y) column -- confirm against _svmlight.c.
program _svmlight, plugin /*load the C extension if not already loaded*/
program define export_svmlight
version 13
syntax varlist(numeric) [if] [in] using/
quietly {
capture plugin call _svmlight `varlist' `if' `in', "export" "`using'"
}
* Propagate failures; previously the -capture- above swallowed any export
* error silently, so a failed write looked like a success to the caller.
if(_rc != 0) {
di as error "export_svmlight: writing `using' failed"
exit _rc
}
end
|
/* import_svmlight: import an .svmlight format file, replacing the current Stata dataset. See _svmlight.c */
* The variables created will be 'y' and 'x%d' for %d=[1 through max(feature_id)].
* Feature IDs are always positive integers, in svmlight format, according to its source code.
* The plugin's "pre" pass leaves scalars _svm_load_N (observations) and
* _svm_load_M (features), which size the dataset before the real import pass.
* TODO: rename to svm_use and figure out how to support the dual 'svm use filename' and 'svm use varlist using filename' that the built-in use does
* it will be possible, just maybe ugly
program _svmlight, plugin /*load the C extension if not already loaded*/
program define import_svmlight
version 13
syntax using/, [clip]
quietly {
* Do the pre-loading, to count how much space we need
plugin call _svmlight, "import" "pre" "`using'"
* HACK: Stata's various versions all have a hard upper limit on the number of variables; for example StataIC has 2048 (2^11) and StataMP has 2^15
* ADDITIONALLY, Stata has an off-by-one bug: the max you can actually pass to a C plugin is one less [citation needed]
* We simply clamp the number of variables to get around this, leaving room for 1 for the Y variable and 1 to avoid the off-by-one bug
* This needs to be handled better. Perhaps we should let the user give varlist (but if they don't give it, default to all in the file??)
if(`=_svm_load_M+1' > `c(max_k_theory)'-1-1) {
di as error "Warning: your version of Stata will not allow `=_svm_load_M+1' variables nor be able to use the C plugin with that many."
if("`clip'"!="") {
di as error "Clamping to `=c(max_k_theory)-1-1'."
scalar _svm_load_M = `=c(max_k_theory)-1-1-1' /*remember: the extra -1 is to account for the Y column, and the extra extra -1 is the leave room for a prediction column*/
}
else {
exit 1
}
}
* handle error cases; I do this explicitly so the user gets a readable message
if(`=_svm_load_M'<1) {
* because Stata programming is all with macros, if this is a bad variable it doesn't cause a sensible crash,
* instead of causes either "invalid syntax" or some sort of mysterious "invalid operation" error
* (in particular "newlist x1-x0" is invalid)
* checking this doesn't cover all the ways M can be bad (e.g. it could be a string)
di as error "Need at least one feature to load"
exit 1
}
if(`=_svm_load_N'<1) {
* this one guards against an empty (or unreadable) file
di as error "Need at least one observation to load"
exit 1
}
* make a new, empty, dataset of exactly the size we need
clear
* Make variables y x1 x2 x3 ... x`=_svm_load_M'
generate double y = .
* this weird newlist syntax is the official suggestion for making a set of new variables in "help foreach"
foreach j of newlist x1-x`=_svm_load_M' {
* make a new variable named "xj" where j is an integer
* specify "double" because libsvm uses doubles and the C interface uses doubles, yet the default is floats
generate double `j' = .
}
* Make observations 1 .. `=_svm_load_N'
* Stata will fill in the missing value for each at this point
set obs `=_svm_load_N'
* Delete the "local variables"
* Do this here in case the next step crashes
* I am programming in BASIC.
scalar drop _svm_load_N _svm_load_M
* Do the actual loading
* "*" means "all variables". We need to pass this in because in addition to C plugins only being able to read and write to variables that already exist,
* they can only read and write to variables specified in varlist
* (mata does not have this sort of restriction.)
capture plugin call _svmlight *, "import" "`using'"
}
* Propagate failures from the import pass; previously the -capture- above
* swallowed any error silently, leaving a dataset full of missing values.
if(_rc != 0) {
di as error "import_svmlight: loading `using' failed"
exit _rc
}
end
* load the given svmlight-format file into memory
* the outcome variable (the first one on each line) is loaded in y, the rest are loaded into x<label>, where <label> is the label listed in the file before each value
* note! this *will* clear your current dataset
* NB: it is not clear to me if it is easier or hard to do this in pure-Stata than to try to jerry-rig C into the mix (the main trouble with C is that extensions cannot create new variables, and we need to create new variables as we discover them)
* it is *definitely* *SLOWER* to do this in pure Stata. svm-train loads the same test dataset in a fraction of a second where this takes 90s (on an SSD and i7).
program define svm_load_purestata
* this makes macro `using' contain a filename
syntax using/
* .svmlight is meant to be a sparse format where variables go missing all the time
* so we do the possibly-quadratic-runtime thing: add one row at a time to the dataset
* using the deep magic of "set obs `=_N+1`; replace var = value in l". I think 'in l' means 'in last' but 'in last' doesn't work.
* tip originally from Nick Cox: http://statalist.1588530.n2.nabble.com/Adding-rows-to-datasheet-td4784525.html
* I suspect this inefficency is intrinsic.
* (libsvm's svm-train.c handles this problem by doing two passes over the data: once to count what it has to load, and twice to actually allocate memory and load it; we should profile for which method is faster in Stata)
tempname fd
file open `fd' using "`using'", read text
* get rid of the old data
clear
* we know svmlight files always have exactly one y vector
generate double y = .
file read `fd' line
while r(eof)==0 {
*display "read `line'" /*DEBUG*/
* grow the dataset by one observation, then fill in its y value
quiet set obs `=_N+1'
gettoken Y T : line
quiet replace y = `Y' in l
*di "T=`T'" /*DEBUG*/
* this does [(name, value = X.split(":")) for X in line.split()]
* and it puts the results into the table.
local j = 1
while("`T'" != "") {
*if(`j' > 10) continue, break /*DEBUG*/
* X holds one "feature:value" pair; the two parse(":") gettokens below
* split it: name <- text before ":", value <- text after the ":" delimiter
gettoken X T : T
gettoken name X : X, parse(":")
gettoken X value : X, parse(":")
*di "@ `=_N' `name' = `value'" /*DEBUG*/
capture quiet generate double x`name' = . /*UNCONDITIONALLY make a new variable; capture makes it a no-op when x`name' already exists*/
capture quiet replace x`name' = `value' in l
if(`=_rc' != 0) continue, break /*something went wrong, probably that we couldn't make a new variable (due to memory or built-in Stata constraints). Just try the next observation*/
local j = `j' + 1
}
*list /*DEBUG: see the state after after new observation*/
file read `fd' line
}
file close `fd'
end
|
/* svmachines: the entry point to the support vector fitting algorithm */
program define svmachines
*! version 1.1.0
version 13
//plugin call does not handle factor variables.
// xi can pre-expand factors into indicator columns and then evaluate some code.
// However xi interacts badly with "plugin call"; just tweaking the code that calls into
// the plugin to read "xi: plugin call _svm, train" fails. xi needs to run pure Stata.
// Further, xi runs its passed code in the global scope and can't access inner routines,
// which means the pure Stata must be in a *separate file* (_svm_train.ado).
// The whole command line (`0') is forwarded untouched to _svm_train.
xi: _svm_train `0'
end
|
/* svmachines_example: download and run the requested sample code from the svmachines package */
/* */
/* To use this with a different package, just replace every "svmachines". */
program define svmachines_example
version 13
// `0' holds the example name (and any further tokens); -example- does the work
example svmachines `0'
end
/* example: runs example in safe isolation, downloading them from your package as needed
*
* Nick Guenther <nguenthe@uwaterloo.ca>, June 2015.
* BSD License.
*
* Your examples must be in .do files named `example'_example.do
* and should be listed in your package's ancillary files (with "f").
*
* For example, if you have invented "triple dog dare"
* regression in a package "tddr", you might make a
* triple_dog_dare_regression_79_example.do.
* In your tddr.pkg file list
* f triple_dog_dare_regression_79_example.do
* which will cause it to be an ancillary file and not get installed with the rest of the package.
* In your .sthlp file, after a manually-made copy of the code, put
* {it:({stata "example tddr triple_dog_dare_regression_79":click to run})}
* (you can use 'example' anywhere you like, of course, but it's most obvious use is
* in glue for helpfiles, which can only run one command at a time).
*
* When the user clicks that link, it will download to their working directory, run
* and then clean up after itself as if it never did, except that the file will be handy
* for the user to inspect and play with.
*
* TODO:
* [ ] consider making the convention `pkg'_`example'_example.do
*/
program define example
version 13
// parse arguments: package name first, then the example name
gettoken pkg 0 : 0
gettoken example 0 : 0
capture findfile `example'_example.do
if(_rc != 0) {
// download ancillaries, which should include the examples
di as txt "Downloading `pkg' ancillary files"
ado_from `pkg'
capture noisily net get `pkg', from(`r(from)')
capture findfile `example'_example.do
if(_rc != 0) {
di as error "Unable to find `example' example."
exit 3
}
}
// save the user's dataset
// if the user actually wants to run the example into their current session they can just "do" it a second time
qui snapshot save // this is faster(?) than preserve, and seems to be just as effective, although it requires manual restoration at the end
local snapshot = `r(snapshot)'
//preserve
qui clear
// run example; nostop lets the demo continue past errors in individual commands
capture noisily do `example'_example.do, nostop
// put the user's data back no matter how the example fared
qui snapshot restore `snapshot'
//restore // this is unneeded, because this runs automatically at scope end
end
/* ado_from: return the URL or path that a package was installed from.
 * This is to glue over that 'net get' doesn't do this already.
 *
 * Returns r(from): the source (the "S" line) recorded for `pkg' in stata.trk.
 * Exits with code 9 if the package is not found there.
 */
program define ado_from, rclass
version 13
// parse arguments
gettoken pkg 0 : 0
local from = ""
local curpkg = ""
tempname fd
// scan stata.trk for the source
// this is not a full stata.trk parser, it only implements what I need
// a typical entry looks like
// ...
// e
// S http://fmwww.bc.edu/repec/bocode/p
// N psidtools.pkg
// ...
// the loop ends when we run off the end of the file or we have found
// the matching package and its source
qui findfile stata.trk
file open `fd' using "`r(fn)'", read text
while(!("`curpkg'"=="`pkg'.pkg" & "`from'"!="")) {
file read `fd' line
if(r(eof) != 0) {
// BUGFIX: close the handle before aborting -- file handles are not
// released automatically at program exit, so this path used to leak it
file close `fd'
di as error "`pkg' not found in stata.trk"
exit 9
}
// extract line type
gettoken T line : line
if("`T'"=="S") {
// source line; record from
gettoken from : line
}
else if("`T'"=="e") {
// end of package; clear state
local from = ""
local curpkg = ""
}
else if("`T'"=="N") {
// package file name
gettoken curpkg : line
}
}
// BUGFIX: release the file handle (it was previously left open on success too)
file close `fd'
// assumption: the stata.trk file should have defined an S line in each pkg block
// if not, something bad happened
assert "`from'"!=""
return clear
return local from = "`from'"
end
|
/* svm_ensurelib: at runtime, make sure libsvm is available and loadable */
/* This would be ensurelib.ado, but for packaging safety, because Stata
has no sensible way of protecting against namespace conflicts, this
gets the same prefix as the rest of the package.
If you want to use ensurelib by itself then delete this header and first function and rename the file,
and rename the plugins loaded below.
*/
program define svm_ensurelib
version 13
// call the real ensurelib, which is defined below (as an inner "helper" function)
// "svm" resolves to svm.dll / libsvm.dylib / libsvm.so depending on the OS
ensurelib svm
end
/* ensurelib: edit the OS shared library path to ensure shared library dependencies will be found when Stata loads plugins.
*
* This allows you to bundle non-plugin DLLs, which you will have to do to create wrapper plugins
* (unless you want to statically link, which is a almost always wrong).
*
* Nick Guenther <nguenthe@uwaterloo.ca>, June 2015.
* BSD License.
*
* Example usage:
* Suppose you have joesstore.plugin which is linked (dynamically) against library joesmeat and veggiegarden.
* For Windows, OS X, and *nix joesmeat should be, respectively, compiled to joesmeat.dll, libjoesmeat.dylib,
* or libjoesmeat.so, and similarly for veggiegarden. It should be distributed to users' adopaths with the
* special *capitalized* .pkg commands
* G WIN joesmeat.dll
* G MACINTEL libjoesmeat.dylib
* G UNIX libjoesmeat.so
* Then, in your code
* ensurelib joesmeat
* ensurelib veggiegarden
* program joesstore, plugin
*
*
* libraryname should be as in your (gcc!) linker commandline: e.g. if you specify "-ljoesmeat" there, specific "joesmeat" here.
* This will search your adopath for the file named
* Windows: libraryname.dll
* OS X: liblibraryname.dylib
* *nix: liblibraryname.so
* and add the specific directory that file is in (e.g. C:\ado\plus\l\) to your shared library path
* Windows: %PATH%
* OS X: $DYLD_LIBRARY_PATH
* *nix: $LIBRARY_PATH
* But if it does not find the library in your adopath, it will let the system use its usual library directories.
*
* Roughly, it is as if we have done:
 * export LD_LIBRARY_PATH=$ADOPATH:$LD_LIBRARY_PATH
* but in a cross-platform way which also handles Stata's tricky alphabetical installation chunks ([M-5] adosubdir()).
*
* Since Stata usually includes "." in the adopath, you can use this during development as well:
* just keep the DLLs you plan to bundle in your working directory.
*
* We follow close to [MinGW's naming rules](http://www.mingw.org/wiki/specify_the_libraries_for_the_linker_to_use),
* except that since we're only loading shared (not static) libraries, on Windows there is only one option just like the rest.
 * In particular from MinGW's rules, **if your library on Windows uses the aberrant lib<name>.dll naming** you must either:
 * - special-case your loading on Windows to call "ensurelib lib<name>",
 * - change the naming scheme of the .dll to conform to Windows standard: <name>.dll.
* This problem generally only comes up with libraries that have been ported carelessly from *nix.
*
* Works on Windows, OS X, and Linux (which are the only platforms Stata supports)
*
* Dependencies:
* _setenv.plugin
* _getenv.plugin (Stata provides the "environment" macro function for read-only env access,
* but it doesn't seem to be live: it just caches the env at boot, not expecting it to be edited)
*
* TODO:
* [ ] Pull this into a separate .pkg
* Stata has, essentially, a global install namespace and no dependency tracking.
* So what happens if two packages bundle this? Does the second overwrite the first?
* Get denied? Mysteriously break the first one? And what happens if one package uninstalls?
* [ ] Is this worth factoring further? maybe "prependpath" could come out?
*/
// Load the helper plugins ensurelib depends on: environment get/set and a
// dlopen() probe. Grouped under one capture so that any missing plugin
// produces the single friendly diagnostic below instead of a raw load error.
capture noisily {
program _svm_getenv, plugin
program _svm_setenv, plugin
program _svm_dlopenable, plugin
}
if(_rc != 0) {
di as error "ensurelib's prerequisites are missing. If you are running this from the source repo you need to 'make'."
exit _rc
}
/* ensurelib: public wrapper around _ensurelib.
   Copes with libraries whose Windows builds keep the unix-ish "lib<name>.dll"
   file name (common in carelessly ported libraries). The strategy is
   deliberately simple: on Windows, probe "lib<name>" first, and if that
   resolves and loads we stop there, no further questions asked. In every
   other case -- non-Windows, or the Windows probe failed -- we defer to
   _ensurelib with the plain name, which applies the platform's usual
   dlopen() naming rules (lib<name>.so, lib<name>.dylib, <name>.dll). */
program define ensurelib
version 13
gettoken lib 0 : 0
syntax /* no further arguments are accepted */
if ("`c(os)'" == "Windows") {
capture _ensurelib "lib`lib'"
if (_rc == 0) exit
}
_ensurelib `lib'
end
* _ensurelib: worker for ensurelib -- locate `libname' on the adopath and, if
* found, prepend its directory to the OS shared-library search path, then
* verify the library is actually dlopen()-able. See the long header above.
program define _ensurelib
version 13
gettoken libname 0 : 0
if("`libname'"=="") {
di as error "ensurelib: argument required"
exit 1
}
syntax , []/* disallow everything else */
/* platform-settings */
// libvar == platform specific environment variable that can be edited (there may be more than one option)
// sep == platform specific path separator
// dl{prefix,ext} == what to wrap the libname in to generate the library filename
if("`c(os)'"=="Windows") {
local libvar = "PATH"
local sep = ";"
local dlprefix = ""
local dlext = "dll"
}
else if("`c(os)'"=="MacOSX") {
local libvar = "DYLD_LIBRARY_PATH" /* or is this DYLD_FALLBACK_LIBRARY_PATH ?? */
local sep = ":"
local dlprefix = "lib"
local dlext = "dylib"
}
else if("`c(os)'"=="Unix") { //i.e. Linux, and on Linux really only like Fedora and Ubuntu; Stata doesn't test builds for others.
local libvar = "LD_LIBRARY_PATH"
local sep = ":"
local dlprefix = "lib"
local dlext = "so"
}
else {
di as error "ensurelib: Unsupported OS `c(os)'"
exit 1
}
/* wrap the library name into a file name */
local lib = "`dlprefix'`libname'.`dlext'"
/* If the lib is in the adopath, prepend its path to the system library path */
capture quietly findfile "`lib'"
if(_rc==0) {
/* the path to the library on the adopath */
local adolib = "`r(fn)'"
/* extract the directory from the file path */
mata pathsplit("`adolib'",adopath="",lib="") //_Stata_ doesn't have pathname manipulation, but _mata_ does. the ="" are to declare variables (variables need to be declared before use, even if they are just for output)
mata st_local("adopath",adopath) // getting values out of mata to Stata is inconsistent: numerics in r() go through st_numscalar(), strings have to go through st_global(), however non-r() scalars have to go through st_strscalar
mata st_global("lib",lib)
/* prepend the discovered library path (adopath) to the system library path (libvar) */
// get the current value of libvar into libpath
// (the plugin leaves the value in local macro `_getenv' -- see _getenv.plugin)
plugin call _svm_getenv, "`libvar'"
local libpath = "`_getenv'"
// skip prepending if adopath is already there in `libvar', to prevent explosion
// BUGFIX: use strpos, not ustrpos -- ustrpos was only added in Stata 14 and
// this program declares version 13; a byte-wise search suffices for a
// simple containment test on a path string
local k = strpos("`libpath'", "`adopath'")
if(`k' == 0) {
// prepend
plugin call _svm_setenv, "`libvar'" "`adopath'`sep'`libpath'"
}
}
/* Check that the library is now loadable */
/* by checking here, we prevent Stata's "unable to load [...].plugin" with an error which points out the actual problem. */
capture plugin call _svm_dlopenable, "`lib'"
if(_rc!=0) {
di as error "ensurelib: unable to load `libname'. You must install dynamic link library `libname' to use this program."
exit _rc
}
end
|
/* svm_predict: after fitting an SVM model with svm, construct predicted classes/values (depending on the type of the active SVM) */
/* load the C extension; _ensurelib must succeed first so the OS linker can resolve libsvm */
svm_ensurelib // check for libsvm
program _svmachines, plugin // load the wrapper for libsvm
/* svm_predict newvar [if] [in] [, probability scores verbose]
 *
 * Fill `newvar' with predictions from the active svm fit (read from e()).
 * probability: additionally create one Pr() column per class (classification models only).
 * scores: additionally create decision-value ("score") column(s).
 * The two options are mutually exclusive.
 */
program define svm_predict, eclass
version 13
syntax newvarname [if] [in], [PROBability] [scores] [Verbose]
local target = "`varlist'"
local _in = "`in'" //these need to be stashed because the hack below will smash them
local _if = "`if'"
if("`probability'"!="" & "`scores'"!="") {
di as err "Error: probability and scores are mutually exclusive options."
exit 2
}
// C plugins can only speak to variables mentioned in the varlist they are called with
// that is, if we are going to predict on some vectors, we need to know what X variables we're
// predicting on in their entirety before we call down to C--and they should match what
// I haven't discovered how regress and friends manage to know which variables to predict on
// the only place I see them record what they did is in e(cmdline)
// but that has cruft in it
// the easiest way I can think to extract the predictor list is to *reparse* the command line
// TODO: consider if it's saner to simply pre-store e(indepvars) or e(predictors) or something
local 0 = "`e(cmdline)'"
gettoken cmd 0 : 0 /*remove the command which was artificially tacked on by svm_train*/
syntax varlist [if] [in], * //* puts the remainder in `options' and allows this code to be isolated from svm_train (it's not like we actually could tweak anything, since the svm_model is stored on the plugin's C heap)
if("`e(svm_type)'"!="ONE_CLASS") {
gettoken y varlist : varlist // pop the first variable
assert "`y'" == "`e(depvar)'" // and check consistency with the svm_train
// make the target column
// it is safe to assume that `target' is a valid variable name: "syntax" above enforces that
// and it should be safe to assume the same about `e(depvar)': unless the user is messing with us (in which case, more power to them), it should have been created by svm_train and validated at that point
quietly clone `target' `e(depvar)' if 0 //'if 0' leaves the values as missing, which is important: we don't want a bug in the plugin to translate to source values sitting in the variable (and thus inflating the observed prediction rate)
local L : variable label `target'
if("`L'"!="") {
label variable `target' "Predicted `L'"
}
}
else {
//ONE_CLASS: unsupervised, so there is no depvar to clone; an indicator column suffices
quietly gen int `target' = .
label variable `target' "Within support"
}
if("`probability'"!="") {
// allocate space (we use new variables) to put probability estimates for each class for each prediction
// ensure model is a classification
// this duplicates code over in svm_train, but I think this is safest:
// svm_import allows you to pull in svm_models created by other libsvm
// interfaces, and they mostly don't have this protection.
if("`e(svm_type)'" != "SVC" & "`e(svm_type)'" != "NU_SVC") {
// in svm-predict.c, the equivalent section is:
/*
 * if (predict_probability && (svm_type==SVC || svm_type==NU_SVC))
 *     predict_label = svm_predict_probability(model,x,prob_estimates);
 * else
 *     predict_label = svm_predict(model,x);
 */
// it is cleaner to error out, rather than silently change the parameters, which is what the command line tools do
di as error "Error: trained model is a `e(svm_type)'. You can only use the probability option with classification models (SVC, NU_SVC)."
exit 2
}
// save the top level description to splay across the stemmed variables
local D : variable label `target'
// Collect (and create) the probability columns
// TODO: get it to generate the columns in the "levelsof" order, but actually use them in the libsvm order
//  -> right now it is in the libsvm order, which is fine. the results are correct. they're just not as convenient.
// BEWARE: the order of iteration here is critical:
//  it MUST match the order in svm_model->labels or results will silently be permuted
//  the only way to achieve this is to record the order in svm_model->labels and loop over that explicitly, which is what e(levels) is for
assert "`e(levels)'" != ""
foreach l in `e(levels)' {
// l is the "label" for each class, but it's just an integer (whatever was in the original data table)
// We try to label each column by the appropriate string label, for readability,
// but if it doesn't exist we fall back on the integer label.
//
// The command to do this is poorly documented. What this line does is
//   look up the value label for value `l'
//   *or* give back `l' unchanged if `target' has no labels
// which is precisely what we want it to do here.
local L : label (`e(depvar)') `l'
// compute the full variable name for level `l'
local stemmed = "`target'_`L'"
local stemmed = strtoname("`stemmed'") //sanitize the new name; this summarily avoids problems like one of your classes being "1.5"
// finally, allocate it
// unlike `target' which clones its source, we use doubles
// because these are meant to hold probabilities
// TODO: what happens if there's a name collision partially through this loop?
//       what I want to happen is for any name collision or other bug to abort (i.e. rollback) the entire operation
//       This can be achieved with "snapshot": snapshot; capture {}; if(fail) { rollback to snapshot }"
quietly generate double `stemmed' = .
label variable `stemmed' "Pr(`D'==`L')"
// attach the newcomers to the varlist so the plugin is allowed to edit them
local varlist = "`varlist' `stemmed'"
}
}
else if("`scores'"!="") { // else-if because these options are mutually exclusive (which is enforced above)
// Allocate space for the decision values
// This is more complicated because we need to go down a lower triangle of a matrix -- so, a length-changing nested loop.
// we have to use word("`e(levels)'", i) to extract the ith level
// which means we have an extra layer of indirection to deal with, so there's x_i the index into e(labels), x the integer label, and X the string (or possibly integer) label
// we need to split the cases of classification and non-classification models
// reason i: non-classification models have model->label == NULL which means e(levels) is missing which breaks this code
// reason ii: non-classification models only have one decision value, so the sensible label is just "`target'_score"
if("`e(svm_type)'" == "ONE_CLASS" | "`e(svm_type)'" == "SVR" | "`e(svm_type)'" == "NU_SVR") {
// generate the name of the new column.
// it is, unfortunately, somewhat terse, in hopes of keeping within 32 characters
local stemmed = "`target'_score"
local stemmed = strtoname("`stemmed'") //make it Stata-safe
// allocate the decision value column
quietly generate double `stemmed' = .
label variable `stemmed' "`target' svm score"
// attach the newcomers to the varlist so the plugin is allowed to edit them
local varlist = "`varlist' `stemmed'"
}
else if("`e(svm_type)'" == "SVC" | "`e(svm_type)'" == "NU_SVC") {
local no_levels = `e(N_class)'
forvalues l_i = 1/`no_levels' {
//di "l_i = `l_i'"
local l = word("`e(levels)'", `l_i')
local L : label (`e(depvar)') `l'
forvalues r_i = `=`l_i'+1'/`no_levels' {
//di "r_i = `r_i'"
local r = word("`e(levels)'", `r_i') // map the index into the labels
local R : label (`e(depvar)') `r'
//di "generating svm score column (`l_i',`r_i') <=> (`l',`r') <=> (`L',`R')"
// generate the name of the new column.
// it is, unfortunately, somewhat terse, in hopes of keeping within 32 characters
local stemmed = "`target'_`L'_`R'"
local stemmed = strtoname("`stemmed'") //make it Stata-safe
// allocate the decision value column
quietly generate double `stemmed' = .
label variable `stemmed' "`target' svm score `L' vs `R'"
// attach the newcomers to the varlist so the plugin is allowed to edit them
local varlist = "`varlist' `stemmed'"
}
}
}
else {
di as error "Unrecognized svm_type `e(svm_type)'; unable to define svm score columns."
exit 2
}
}
// call down into C
// we indicate "probability" mode by passing a non-empty list of levels
// this list implicitly *removes* from the set range of variables to predict from: the trailing variables are instead write locations
// (this feels like programming a hardware driver)
// Subtlety: we don't quote levels, on the assumption that it is always a list of integers;
// that way, the levels are pre-tokenized and the count easily available as argc
plugin call _svmachines `target' `varlist' `_if' `_in', `verbose' predict `probability' `scores'
if("`e(svm_type)'"=="ONE_CLASS") {
// libsvm gives {1,-1} for its one-class predictions;
// normalize these to {1,0}
qui replace `target' = 0 if `target' == -1
}
end
/* clone.ado: generate a perfect copy of a variable: type, labels, etc.
syntax:
clone newvar oldvar [if] [in]
You can use 'if' and 'in' to control what values; values that don't match will be set to missing.
If you want to clone a variable's metadata but not values use the idiom ". clone new old if 0".
NB: The reason the syntax is not "clone newvar = oldvar", even though that would fit the pattern
set by generate and egen, is that syntax's =/exp option insists on parsing numeric expressions,
so string variables wouldn't be cloneable.
*/
/* clone: create an exact duplicate of a variable under a new name --
 * same storage type, same variable label, same value-label attachment.
 * Observations excluded by the optional [if]/[in] are left missing.
 * Usage: clone newvar oldvar [if] [in]
 */
program define clone
    version 13
    // a single parse pass pulls out the two names and any sample restriction
    syntax namelist [if] [in]
    // stash the restriction clauses; later parsing commands would clobber them
    local ifcond = "`if'"
    local inrange = "`in'"
    gettoken target source : namelist
    // the destination must not exist yet; the origin must already exist
    confirm new variable `target'
    confirm variable `source'
    // capture the attributes we are about to replicate
    local vtype : type `source'              // storage type
    local vdesc : variable label `source'    // human-readable description
    local vlblname : value label `source'    // name of the attached value-label map, if any
    // Stata keeps a dictionary of named integer->string maps; several
    // variables may share a single map (e.g. a common "boolean" map).
    // materialize the copy, forcing the same storage type
    generate `vtype' `target' = `source' `ifcond' `inrange'
    // carry over the optional attributes
    // (storage type was fixed at generate time and cannot be reassigned
    //  without another 'generate' doing a whole new allocation)
    if ("`vdesc'" != "") {
        label variable `target' "`vdesc'"
    }
    if ("`vlblname'" != "") {
        label values `target' "`vlblname'"
    }
end
|
/* model2stata: a subroutine to convert the global struct svm_model that lives in the DLL to a mixture of e() entries, variables, and matrices.
*
* Besides being usefully modular, this *must* be its own subroutine because it needs to be marked eclass.
* This is because, due to limitations in the Stata C API, there has to be an awkward dance to get the information out:
* _svmachines.plugin writes to the (global!) scalar dictionary and then this subroutine code copies those entries to e().
*
* as with svm_load, the extension function is called multiple times with sub-sub-commands, because it doesn't have permission to perform all the operations needed
* if passed, SV specifies a column to create and then record svm_model->sv_indecies into
*/
/* load the C extension; _ensurelib must succeed first so the OS linker can resolve libsvm */
svm_ensurelib // check for libsvm
program _svmachines, plugin // load the wrapper for libsvm
/* _svm_model2stata [if] [in] [, sv(newvar) verbose]
 *
 * Copy the fitted svm_model (held on the plugin's C heap) into Stata:
 * scalars and macros go to e(), matrices (sv_coef, rho) are built here and
 * then moved to e(), and -- if sv() is given -- an indicator variable is
 * created marking which observations are support vectors.
 */
program define _svm_model2stata, eclass
version 13
syntax [if] [in], [SV(string)] [Verbose]
* as with loading, this has to call in and out of the plugin because chicken/egg:
*  the plugin doesn't have permission to allocate Stata memory (in this case matrices),
*  but we don't know how much to allocate before interrogating the svm_model
* Phase 1
* the total number of observations
* this gets set by _svmachines.c::train(); it doesn't exist for a model loaded via import().
* nevertheless it is in this file instead of svm_train.ado, because it is most similar here
* but we cap { } around it so the other case is tolerable
capture {
ereturn scalar N = _model2stata_N
scalar drop _model2stata_N
}
/*an undefined macro will inconsistently cause an eval error because `have_rho'==1 will eval to ==1 will eval to "unknown variable"*/
/*so just define them ahead of time to be safe*/
local have_sv_indices = 0
local have_sv_coef = 0
local have_rho = 0
local labels = ""
plugin call _svmachines `if' `in', `verbose' "_model2stata" 1
* the total number of (detected?) classes
ereturn scalar N_class = _model2stata_nr_class
scalar drop _model2stata_nr_class
* the number of support vectors
ereturn scalar N_SV = _model2stata_l
scalar drop _model2stata_l
* Phase 2
* Allocate Stata matrices and copy the libsvm matrices and vectors
if(`have_sv_coef'==1 & `e(N_class)'>1 & `e(N_SV)'>0) {
capture noisily {
matrix sv_coef = J(e(N_class)-1,e(N_SV),.)
// there doesn't seem to be an easy way to generate a list of strings with a prefix in Stata
// so: the inefficient way
local cols = ""
forval j = 1/`e(N_SV)' {
local cols = "`cols' SV`j'"
}
matrix colnames sv_coef = `cols'
// TODO: rows
// there is one row per class *less one*. the rows probably represent decision boundaries, then. I'm not sure what this should be labelled.
// matrix rownames sv_coef = class1..class`e(N_SV)'
}
}
if(`have_rho'==1 & `e(N_class)'>0) {
capture noisily matrix rho = J(e(N_class),e(N_class),.)
}
* TODO: also label the rows according to model->label (libsvm's "labels" are just more integers, but it helps to be consistent anyway);
* I can easily extract ->label with the same code, but attaching it to the rownames of the other is tricky
capture noisily {
plugin call _svmachines `if' `in', `verbose' "_model2stata" 2
// Label the resulting matrices and vectors with the 'labels' array, if we have it
if("`labels'"!="") {
ereturn local levels = strtrim("`labels'")
capture matrix rownames rho = `labels'
capture matrix colnames rho = `labels'
}
}
* Phase 3
* Export the SVs
if("`sv'"!="") {
if(`have_sv_indices'==0) {
di as err "Warning: SV statuses missing. Perhaps your underlying version of libsvm is too old to support sv()."
}
else {
capture noisily {
// The internal libsvm format is a list of indices
// we want indicators, which are convenient for Stata
// so we *start* with all 0s (rather than missings) and overwrite with 1s as we discover SVs
quietly generate `sv' `if' `in' = 0
plugin call _svmachines `sv' `if' `in', `verbose' "_model2stata" 3
}
}
}
* Phase 4
* Export the rest of the values to e()
* We *cannot* export matrices to e() from the C interface, hence we have to do this very explicit thing
* NOTE: 'ereturn matrix' erases the old name (unless you specify ,copy), which is why we don't have to explicitly drop things
*   'ereturn scalar' doesn't do this, because Stata loves being consistent. Just go read the docs for 'syntax' and see how easy it is.
* All of these are silenced because various things might kill any of them, and we want failures to be independent of each other
quietly capture ereturn matrix sv_coef = sv_coef
quietly capture ereturn matrix rho = rho
end
|
/* _svm_train: this is the meat of the Stata interface to the fitting algorithm.
This is called by svm_train; though Stata programs can call subprograms defined in the same file as them,
similar to Matlab, this has to be a separate file as the special command 'xi' used there apparently cannot
*/
/* load the C extension; _ensurelib must succeed first so the OS linker can resolve libsvm */
svm_ensurelib // check for libsvm
program _svmachines, plugin // load the wrapper for libsvm
/* _svm_train varlist [if] [in] [, options]
 *
 * Parse the fitting options, call the libsvm "train" subcommand in the C
 * plugin, then populate e() (including e(cmdline), which svm_predict later
 * reparses to recover the predictor list). Invoked via "xi:" from svmachines.
 */
program define _svm_train, eclass
version 13
/* argument parsing */
// these defaults were taken from svm-train.c
// (except that we have shrinking off by default)
#delimit ;
syntax varlist (numeric)
[if] [in]
[,
// strings cannot have default values
// ints and reals *must*
// (and yes the only other data types known to syntax are int and real, despite the stata datatypes being str, int, byte, float, double, ...)
//
// also be careful of the mixed-case shenanigans
Type(string)
Kernel(string)
Gamma(real 0) COEF0(real 0) DEGree(int 3)
C(real 1) EPSilon(real 0.1) NU(real 0.5)
// weights() --> char* weight_label[], double weight[nr_weight] // how should this work?
// apparently syntax has a special 'weights' argument which is maybe meant just for this purpose
// but how to pass it on?
TOLerance(real 0.001)
SHRINKing PROBability
CACHE_size(int 100)
// if specified, a column to generate to mark which rows were detected as SVs
SV(string)
// turn on internal libsvm printing
Verbose
//set the C random seed
seed(int 1)
];
#delimit cr
// stash because we run syntax again below, which will smash these
local cmd = "`0'"
local _varlist = "`varlist'"
local _if = "`if'"
local _in = "`in'"
// make the string variables case insensitive (by forcing them to CAPS and letting the .c deal with them that way)
local type = upper("`type'")
local kernel = upper("`kernel'")
// translate the boolean flags into integers
// the protocol here is silly, because syntax special-cases "no" prefixes:
// *if* the user gives the no form of the option, a macro is defined with "noprobability" in lower case in it
// in all *other* cases, the macro is undefined (so if you eval it you get "")
// conversely, with regular option flags, if the user gives it you get a macro with "shrinking" in it, and otherwise the macro is undefined
if("`shrinking'"=="shrinking") {
local shrinking = 1
}
else {
local shrinking = 0
}
if("`probability'"=="probability") {
local probability = 1
}
else {
local probability = 0
}
/* fill in default values (only for the string vars, because syntax doesn't support defaults for them) */
if("`type'"=="") {
local type = "SVC"
}
if("`kernel'"=="") {
local kernel = "RBF"
}
/* preprocessing */
if("`type'" == "ONE_CLASS") {
// handle the special-case that one-class is unsupervised and so takes no
// libsvm still reads a Y vector though; it just, apparently, ignores it
// rather than tweaking numbers to be off-by-one, the easiest is to silently
// duplicate the pointer to one of the variables.
gettoken Y : _varlist
local _varlist = "`Y' `_varlist'"
}
else {
gettoken depvar indepvars : _varlist
}
/* sanity checks */
if("`type'" == "SVC" | "`type'" == "NU_SVC") {
// "ensure" type is categorical
local T : type `depvar'
/*
if("`T'"=="float" | "`T'"=="double") {
di as error "Warning: `depvar' is a `T', which is usually used for continuous variables."
di as error "         SV classification will cast real numbers to integers before fitting." //<-- this is done by libsvm with no control from us
di as error
di as error "         If your outcome is actually categorical, consider storing it so:"
di as error "         . tempvar B"
di as error "         . generate byte \`B' = `depvar'" //CAREFUL: B is meant to be quoted and depvar is meant to be unquoted.
di as error "         . drop `depvar'"
di as error "         . rename \`B' `depvar'"
di as error "         (If your category coding uses floating point levels you must choose a different coding)"
di as error
di as error "         Alternately, consider SV regression: type(SVR) or type(NU_SVR)."
di as error
}
*/
}
if(`probability'==1) {
// ensure model is a classification
if("`type'" != "SVC" & "`type'" != "NU_SVC") {
// the command line tools *allow* this combination, but at prediction time silently change the parameters
// "Errors should never pass silently. Unless explicitly silenced." -- Tim Peters, The Zen of Python
di as error "Error: requested model is a `type'. You can only use the probability option with classification models (SVC, NU_SVC)."
exit 2
}
}
if("`sv'"!="") {
// fail-fast on name errors in sv()
local 0 = "`sv'"
syntax newvarname
}
/* call down into C */
/* CAREFUL: epsilon() => svm_param->p and tol() => svm_param->epsilon */
#delimit ;
plugin call _svmachines `_varlist' `_if' `_in',
    `verbose' // notice: this is *not* in quotes, which means that if it's not there it's not there at all
    "train"
    "`type'" "`kernel'"
    "`gamma'" "`coef0'" "`degree'"
    "`c'" "`epsilon'" "`nu'"
    "`tolerance'"
    "`shrinking'" "`probability'"
    "`cache_size'" "`seed'"
    ;
#delimit cr
// *reparse* the command line in order to fix varlist at its current value.
// If "varlist" includes tokens that get expanded to multiple variables
// then when svm_predict reparses it again, it will get a different set.
local 0 = "`cmd'"
syntax varlist [if] [in], [*]
local cmd = "`varlist' `if' `in', `options'"
/* fixup the e() dictionary */
ereturn clear
// set standard Stata estimation (e()) properties
ereturn local cmd = "svmachines"
ereturn local cmdline = "`e(cmd)' `cmd'"
ereturn local predict = "svm_predict" //this is a function pointer, or as close as Stata has to that: causes "predict" to run "svm_predict"
ereturn local estat = "svm_estat" //ditto. NOT IMPLEMENTED
ereturn local title = "Support Vector Machine"
ereturn local model = "svmachines"
ereturn local svm_type = "`type'"
ereturn local svm_kernel = "`kernel'"
ereturn local depvar = "`depvar'" //NB: if depvar is "", namely if we're in ONE_CLASS, then Stata effectively ignores this line (which we want).
//ereturn local indepvars = "`indepvars'" //XXX Instead svm_predict reparses cmdline. This needs vetting.
// append the svm_model structure to e()
_svm_model2stata `_if' `_in', sv(`sv') `verbose'
end
|
// Example: binary classification on the auto dataset --
// shuffle, train/test split, fit with svmachines, predict, and measure error.
// Setup
sysuse auto
// Machine learning methods like SVM are very easy to overfit.
// To compensate, it is important to split data into training and testing sets, fit on
// the former and measure performance on the latter, so that performance measurements
// are not artificially inflated by data they've already seen.
// But after splitting the proportion of classes can become unbalanced.
// The reliable way to handle this is a stratified split, a split that
// fixes the proportions of each class in each partition of each class.
// The quick and dirty way is a shuffle:
set seed 9876
gen u = uniform()
sort u
// before the actual train/test split:
local split = floor(_N/2)
local train = "1/`=`split'-1'"
local test = "`split'/`=_N'"
// Fit the classification model on the training set, with 'verbose' enabled.
// Training cannot handle missing data; here we elide it, but usually you should impute.
svmachines foreign price-gear_ratio if !missing(rep78) in `train', v
// Predict on the test set.
// Unlike training, predict can handle missing data: it simply predicts missing.
predict P in `test'
// Compute error rate: the percentage of mispredictions is the mean of err.
gen err = foreign != P in `test'
sum err in `test'
|
// Example: multiclass classification with probability estimates
// (predict, prob creates one Pr() column per class).
// Setup
use attitude_indicators
// Shuffle
set seed 12998
gen u = uniform()
sort u
// Train/test split
local split = floor(_N*3/4)
local train = "1/`=`split'-1'"
local test = "`split'/`=_N'"
// Model
svmachines attitude q* in `train', kernel(poly) gamma(0.5) coef0(7) prob
predict P in `test', prob
// the value in column P matches the column P_<attitude> with the highest probability
list attitude P* in `test'
// Compute error rate.
gen err = attitude != P in `test'
sum err in `test'
// Beware:
// predict, prob is a *different algorithm* than predict, and can disagree about predictions.
// This disagreement will become absurd if combined with poor tuning.
predict P2 in `test'
gen agree = P == P2 in `test'
sum agree in `test'
|
// Example: classification with hand-picked tuning parameters,
// plus a quick overfitting diagnostic via the support-vector count.
// Setup
use attitude_indicators
// Shuffle
set seed 4532
gen u = uniform()
sort u
// Train/test split
local split = floor(_N*3/4)
local train = "1/`=`split'-1'"
local test = "`split'/`=_N'"
// In general, you need to do grid-search to find good tuning parameters.
// These values of kernel, gamma, and coef0 just happened to be good enough.
svmachines attitude q* in `train', kernel(poly) gamma(0.5) coef0(7)
predict P in `test'
// Compute error rate.
gen err = attitude != P in `test'
sum err in `test'
// An overly high percentage of SVs means overfitting
di "Percentage that are support vectors: `=round(100*e(N_SV)/e(N),.3)'"
|
// Example: support vector regression (type(svr)) with factor expansion
// and sv() to record which observations became support vectors.
// Setup
webuse highschool
// Shuffle
set seed 793742
gen u = uniform()
sort u
// Train/test split
local split = floor(_N/2)
local train = "1/`=`split'-1'"
local test = "`split'/`=_N'"
// Regression is invoked with type(svr) or type(nu_svr).
// Notice that you can expand factors (categorical predictors) into sets of
// indicator (boolean/dummy) columns with standard i. syntax, and you can
// record which observations were chosen as support vectors with sv().
svmachines weight height i.race i.sex in `train', type(svr) sv(Is_SV)
// Examine which observations were SVs. Ideally, a small number of SVs are enough.
tab Is_SV in `train'
predict P in `test'
// Compute residuals.
gen res = (weight - P) in `test'
sum res
|
/* export_svmlight: export the Stata dataset to a .svmlight format file. See _svmlight.c */
program _svmlight, plugin /*load the C extension if not already loaded*/
/* export_svmlight varlist [if] [in] using filename
 * Write the selected (numeric) variables to `using' via the plugin's
 * "export" subcommand. Fails loudly if the plugin reports an error.
 */
program define export_svmlight
    version 13
    syntax varlist(numeric) [if] [in] using/
    // quietly: suppress the plugin's chatter; capture: intercept failure
    // so we can surface a clear message instead of dying mid-write
    quietly capture plugin call _svmlight `varlist' `if' `in', "export" "`using'"
    if (_rc != 0) {
        // Previously the error code was swallowed ("capture" with no check),
        // so a failed export left a missing/truncated file with no diagnostic.
        di as error "export_svmlight: failed to write `using'"
        exit _rc
    }
end
|
/* import_svmlight: import an .svmlight format file, replacing the current Stata dataset. See _svmlight.c */
* The variables created will be 'y' and 'x%d' for %d=[1 through max(feature_id)].
* Feature IDs are always positive integers, in svmlight format, according to its source code.
* TODO: rename to svm_use and figure out how to support the dual 'svm use filename' and 'svm use varlist using filename' that the built-in use does
* it will be possible, just maybe ugly
program _svmlight, plugin /*load the C extension if not already loaded*/
/* import_svmlight using filename [, clip]
 * Replace the dataset in memory with the contents of an svmlight file:
 * variable y plus x1..xM. Two plugin passes: "pre" counts rows/features,
 * then "import" fills the pre-allocated variables.
 * clip: clamp the feature count to Stata's variable limit instead of erroring.
 */
program define import_svmlight
version 13
syntax using/, [clip]
quietly {
* Do the pre-loading, to count how much space we need
plugin call _svmlight, "import" "pre" "`using'"
* HACK: Stata's various versions all have a hard upper limit on the number of variables; for example StataIC has 2048 (2^11) and StataMP has 2^15
* ADDITIONALLY, Stata has an off-by-one bug: the max you can actually pass to a C plugin is one less [citation needed]
* We simply clamp the number of variables to get around this, leaving room for 1 for the Y variable and 1 to avoid the off-by-one bug
* This needs to be handled better. Perhaps we should let the user give varlist (but if they don't give it, default to all in the file??)
if(`=_svm_load_M+1' > `c(max_k_theory)'-1-1) {
di as error "Warning: your version of Stata will not allow `=_svm_load_M+1' variables nor be able to use the C plugin with that many."
if("`clip'"!="") {
di as error "Clamping to `=c(max_k_theory)-1-1'."
scalar _svm_load_M = `=c(max_k_theory)-1-1-1' /*remember: the extra -1 is to account for the Y column, and the extra extra -1 is the leave room for a prediction column*/
}
else {
exit 1
}
}
* handle error cases explicitly, so failures produce a sensible message
if(`=_svm_load_M'<1) {
* because Stata programming is all with macros, if this is a bad variable it doesn't cause a sensible crash,
* instead it causes either "invalid syntax" or some sort of mysterious "invalid operation" error
* (in particular "newlist x1-x0" is invalid)
* checking this doesn't cover all the ways M can be bad (e.g. it could be a string)
di as error "Need at least one feature to load"
exit 1
}
if(`=_svm_load_N'<1) {
* same guard for the observation count
di as error "Need at least one observation to load"
exit 1
}
* make a new, empty, dataset of exactly the size we need
clear
* Make variables y x1 x2 x3 ... x`=_svm_load_M'
generate double y = .
* this weird newlist syntax is the official suggestion for making a set of new variables in "help foreach"
foreach j of newlist x1-x`=_svm_load_M' {
* make a new variable named "xj" where j is an integer
* specify "double" because libsvm uses doubles and the C interface uses doubles, yet the default is floats
generate double `j' = .
}
* Make observations 1 .. `=_svm_load_N'
* Stata will fill in the missing value for each at this point
set obs `=_svm_load_N'
* Delete the "local variables"
* Do this here in case the next step crashes
* I am programming in BASIC.
scalar drop _svm_load_N _svm_load_M
* Do the actual loading
* "*" means "all variables". We need to pass this in because in addition to C plugins only being able to read and write to variables that already exist,
* they can only read and write to variables specified in varlist
* (mata does not have this sort of restriction.)
capture plugin call _svmlight *, "import" "`using'"
}
end
* load the given svmlight-format file into memory
* the outcome variable (the first one on each line) is loaded in y, the rest are loaded into x<label>, where <label> is the label listed in the file before each value
* note! this *will* clear your current dataset
* NB: it is not clear to me if it is easier or harder to do this in pure-Stata than to try to jerry-rig C into the mix (the main trouble with C is that extensions cannot create new variables, and we need to create new variables as we discover them)
* it is *definitely* *SLOWER* to do this in pure Stata. svm-train loads the same test dataset in a fraction of a second where this takes 90s (on an SSD and i7).
/* svm_load_purestata using filename
 * Pure-Stata fallback loader for svmlight files: builds y and x<label>
 * variables one observation at a time. Much slower than the C loader;
 * kept for reference/debugging.
 */
program define svm_load_purestata
* this makes macro `using' contain a filename
syntax using/
* .svmlight is meant to be a sparse format where variables go missing all the time
* so we do the possibly-quadratic-runtime thing: add one row at a time to the dataset
* using the deep magic of "set obs `=_N+1`; replace var = value in l". I think 'in l' means 'in last' but 'in last' doesn't work.
* tip originally from Nick Cox: http://statalist.1588530.n2.nabble.com/Adding-rows-to-datasheet-td4784525.html
* I suspect this inefficiency is intrinsic.
* (libsvm's svm-train.c handles this problem by doing two passes over the data: once to count what it has to load, and twice to actually allocate memory and load it; we should profile for which method is faster in Stata)
tempname fd
file open `fd' using "`using'", read text
* get rid of the old data
clear
* we know svmlight files always have exactly one y vector
generate double y = .
file read `fd' line
while r(eof)==0 {
*display "read `line'" /*DEBUG*/
quiet set obs `=_N+1'
gettoken Y T : line
quiet replace y = `Y' in l
*di "T=`T'" /*DEBUG*/
* this does [(name, value = X.split(":")) for X in line.split()]
* and it puts the results into the table.
local j = 1
while("`T'" != "") {
*if(`j' > 10) continue, break /*DEBUG*/
gettoken X T : T
gettoken name X : X, parse(":")
gettoken X value : X, parse(":")
*di "@ `=_N' `name' = `value'" /*DEBUG*/
capture quiet generate double x`name' = . /*UNCONDITIONALLY make a new variable*/
capture quiet replace x`name' = `value' in l
if(`=_rc' != 0) continue, break /*something went wrong, probably that we couldn't make a new variable (due to memory or built-in Stata constraints). Just try the next observation*/
local j = `j' + 1
}
*list /*DEBUG: see the state after after new observation*/
file read `fd' line
}
file close `fd'
end
|
/* svmachines: the entry point to the support vector fitting algorithm */
/* svmachines: user-facing entry point; expands factor variables with xi
 * and delegates the real work to _svm_train (defined in _svm_train.ado). */
program define svmachines
*! version 1.1.0
version 13
//plugin call does not handle factor variables.
// xi can pre-expand factors into indicator columns and then evaluate some code.
// However xi interacts badly with "plugin call"; just tweaking the code that calls into
// the plugin to read "xi: plugin call _svm, train" fails. xi needs to run pure Stata.
// Further, xi runs its passed code in the global scope and can't access inner routines,
// which means the pure Stata must be in a *separate file* (_svm_train.ado).
xi: _svm_train `0'
end
|
/* svmachines_example: download and run the requested sample code from the svmachines package */
/* */
/* To use this with a different package, just replace every "svmachines". */
program define svmachines_example
	version 13
	// Delegate to the generic example runner, naming this package so it can
	// locate (and if needed download) the `name'_example.do ancillary file.
	example svmachines `0'
end
/* example: runs example in safe isolation, downloading them from your package as needed
*
* Nick Guenther <nguenthe@uwaterloo.ca>, June 2015.
* BSD License.
*
* Your examples must be in .do files named `example'_example.do
* and should be listed in your package's ancillary files (with "f").
*
* For example, if you have invented "triple dog dare"
* regression in a package "tddr", you might make a
* triple_dog_dare_regression_79_example.do.
* In your tddr.pkg file list
* f triple_dog_dare_regression_79_example.do
* which will cause it to be an ancillary file and not get installed with the rest of the package.
* In your .sthlp file, after a manually-made copy of the code, put
* {it:({stata "example tddr triple_dog_dare_regression_79":click to run})}
* (you can use 'example' anywhere you like, of course, but it's most obvious use is
* in glue for helpfiles, which can only run one command at a time).
*
* When the user clicks that link, it will download to their working directory, run
* and then clean up after itself as if it never did, except that the file will be handy
* for the user to inspect and play with.
*
* TODO:
* [ ] consider making the convention `pkg'_`example'_example.do
*/
program define example
	version 13
	// example <pkg> <name>
	// Runs <name>_example.do in isolation (the user's dataset is snapshotted
	// and restored), downloading the package's ancillary files if needed.
	// parse arguments
	gettoken pkg 0 : 0
	gettoken example 0 : 0
	capture findfile `example'_example.do
	if(_rc != 0) {
		// download ancillaries, which should include the examples
		di as txt "Downloading `pkg' ancillary files"
		ado_from `pkg'
		capture noisily net get `pkg', from(`r(from)')
		capture findfile `example'_example.do
		if(_rc != 0) {
			di as error "Unable to find `example' example."
			exit 3
		}
	}
	// save the user's dataset
	// if the user actually wants to run the example into their current session they can just "do" it a second time
	qui snapshot save // this is faster(?) than preserve, and seems to be just as effective, although it requires manual restoration at the end
	local snapshot = `r(snapshot)'
	//preserve
	qui clear
	// run example; capture (with noisily, so the user still sees all output)
	// so that a failing example cannot skip the snapshot restoration below
	capture noisily do `example'_example.do, nostop
	local rc = _rc // stash: snapshot restore would clobber _rc
	qui snapshot restore `snapshot'
	//restore // this is unneeded, because this runs automatically at scope end
	// now that the user's data is back, surface any error the example raised
	// (previously this was silently discarded)
	if(`rc' != 0) {
		exit `rc'
	}
end
/* ado_from: return the URL or path that a package was installed from.
* This is to glue over that 'net get' doesn't do this already.
*
*/
program define ado_from, rclass
	version 13
	// Return (in r(from)) the URL or path that package `pkg' was installed from,
	// by scanning the stata.trk installation log.
	// parse arguments
	gettoken pkg 0 : 0
	local from = ""
	local curpkg = ""
	tempname fd
	// scan stata.trk for the source
	// this is not a full stata.trk parser, it only implements what I need
	// a typical entry looks like
	// ...
	// e
	// S http://fmwww.bc.edu/repec/bocode/p
	// N psidtools.pkg
	// ...
	// the loop ends when we run off the end of the file or we have found
	// the matching package and its source
	qui findfile stata.trk
	file open `fd' using "`r(fn)'", read text
	while(!("`curpkg'"=="`pkg'.pkg" & "`from'"!="")) {
		file read `fd' line
		if(r(eof) != 0) {
			file close `fd' // close before bailing: tempname handles are not auto-closed
			di as error "`pkg' not found in stata.trk"
			exit 9
		}
		// extract line type
		gettoken T line : line
		if("`T'"=="S") {
			// source line; record from
			gettoken from : line
		}
		else if("`T'"=="e") {
			// end of package; clear state
			local from = ""
			local curpkg = ""
		}
		else if("`T'"=="N") {
			// package file name
			gettoken curpkg : line
		}
	}
	// done scanning: release the file handle (previously leaked)
	file close `fd'
	// assumption: the stata.trk file should have defined an S line in each pkg block
	// if not, something bad happened
	assert "`from'"!=""
	return clear
	return local from = "`from'"
end
|
/* svm_ensurelib: at runtime, make sure libsvm is available and loadable */
/* This would be ensurelib.ado, but for packaging safety, because Stata
has no sensible way of protecting against namespace conflicts, this
gets the same prefix as the rest of the package.
If you want to use ensurelib by itself then delete this header and first function and rename the file,
and rename the plugins loaded below.
*/
program define svm_ensurelib
	version 13
	// Package-prefixed shim: ensure the libsvm shared library is loadable.
	// call the real ensurelib, which is defined below (as an inner "helper" function)
	ensurelib svm
end
/* ensurelib: edit the OS shared library path to ensure shared library dependencies will be found when Stata loads plugins.
*
* This allows you to bundle non-plugin DLLs, which you will have to do to create wrapper plugins
* (unless you want to statically link, which is a almost always wrong).
*
* Nick Guenther <nguenthe@uwaterloo.ca>, June 2015.
* BSD License.
*
* Example usage:
* Suppose you have joesstore.plugin which is linked (dynamically) against library joesmeat and veggiegarden.
* For Windows, OS X, and *nix joesmeat should be, respectively, compiled to joesmeat.dll, libjoesmeat.dylib,
* or libjoesmeat.so, and similarly for veggiegarden. It should be distributed to users' adopaths with the
* special *capitalized* .pkg commands
* G WIN joesmeat.dll
* G MACINTEL libjoesmeat.dylib
* G UNIX libjoesmeat.so
* Then, in your code
* ensurelib joesmeat
* ensurelib veggiegarden
* program joesstore, plugin
*
*
* libraryname should be as in your (gcc!) linker commandline: e.g. if you specify "-ljoesmeat" there, specific "joesmeat" here.
* This will search your adopath for the file named
* Windows: libraryname.dll
* OS X: liblibraryname.dylib
* *nix: liblibraryname.so
* and add the specific directory that file is in (e.g. C:\ado\plus\l\) to your shared library path
* Windows: %PATH%
* OS X: $DYLD_LIBRARY_PATH
* *nix: $LIBRARY_PATH
* But if it does not find the library in your adopath, it will let the system use its usual library directories.
*
* Roughly, it is as if we have done:
 * export LD_LIBRARY_PATH=$ADOPATH:$LD_LIBRARY_PATH
* but in a cross-platform way which also handles Stata's tricky alphabetical installation chunks ([M-5] adosubdir()).
*
* Since Stata usually includes "." in the adopath, you can use this during development as well:
* just keep the DLLs you plan to bundle in your working directory.
*
* We follow close to [MinGW's naming rules](http://www.mingw.org/wiki/specify_the_libraries_for_the_linker_to_use),
* except that since we're only loading shared (not static) libraries, on Windows there is only one option just like the rest.
 * In particular from MinGW's rules, **if your library on Windows uses the aberrant lib<name>.dll naming** you must either:
* - special-case your loading on Windows to call "ensurelib lib<name>"**,
* - change the naming scheme of the .dll to conform to Windows standard: <name>.dll.
* This problem generally only comes up with libraries that have been ported carelessly from *nix.
*
* Works on Windows, OS X, and Linux (which are the only platforms Stata supports)
*
* Dependencies:
* _setenv.plugin
* _getenv.plugin (Stata provides the "environment" macro function for read-only env access,
* but it doesn't seem to be live: it just caches the env at boot, not expecting it to be edited)
*
* TODO:
* [ ] Pull this into a separate .pkg
* Stata has, essentially, a global install namespace and no dependency tracking.
* So what happens if two packages bundle this? Does the second overwrite the first?
* Get denied? Mysteriously break the first one? And what happens if one package uninstalls?
* [ ] Is this worth factoring further? maybe "prependpath" could come out?
*/
* Load the helper plugins ensurelib depends on (env get/set, dlopen probe);
* abort with a clear message if they were not built/installed.
capture noisily {
	program _svm_getenv, plugin
	program _svm_setenv, plugin
	program _svm_dlopenable, plugin
}
if(_rc != 0) {
	di as error "ensurelib's prerequisites are missing. If you are running this from the source repo you need to 'make'."
	exit _rc
}
program define ensurelib
	version 13
	gettoken lib 0 : 0
	syntax /* no further arguments allowed */
	/* Windows ports of unix libraries sometimes keep the aberrant
	   "lib<name>.dll" naming instead of the conventional "<name>.dll".
	   Probe for the lib-prefixed form first on Windows and, if it loads,
	   accept it with no further questions. In every other case (including
	   the Windows fallback) defer to _ensurelib's standard platform naming,
	   which resolves lib<name>.so / lib<name>.dylib / <name>.dll as
	   appropriate via dlopen(). */
	if("`c(os)'"=="Windows") {
		capture _ensurelib "lib`lib'"
		if(_rc==0) {
			exit // the lib-prefixed DLL was found and is loadable
		}
	}
	_ensurelib `lib'
end
program define _ensurelib
	version 13
	// Ensure shared library `libname' is loadable: if a copy sits on the
	// adopath, prepend its directory to the OS library search path, then
	// verify loadability with a dlopen() probe.
	gettoken libname 0 : 0
	if("`libname'"=="") {
		di as error "ensurelib: argument required"
		exit 1
	}
	syntax , []/* disallow everything else */
	/* platform-settings */
	// libvar == platform specific environment variable that can be edited (there may be more than one option)
	// sep == platform specific path separator
	// dl{prefix,ext} == what to wrap the libname in to generate the library filename
	if("`c(os)'"=="Windows") {
		local libvar = "PATH"
		local sep = ";"
		local dlprefix = ""
		local dlext = "dll"
	}
	else if("`c(os)'"=="MacOSX") {
		local libvar = "DYLD_LIBRARY_PATH" /* or is this DYLD_FALLBACK_LIBRARY_PATH ?? */
		local sep = ":"
		local dlprefix = "lib"
		local dlext = "dylib"
	}
	else if("`c(os)'"=="Unix") { //i.e. Linux, and on Linux really only like Fedora and Ubuntu; Stata doesn't test builds for others.
		local libvar = "LD_LIBRARY_PATH"
		local sep = ":"
		local dlprefix = "lib"
		local dlext = "so"
	}
	else {
		di as error "ensurelib: Unsupported OS `c(os)'"
		exit 1
	}
	/* wrap the library name into a file name */
	local lib = "`dlprefix'`libname'.`dlext'"
	/* If the lib is in the adopath, prepend its path to the system library path */
	capture quietly findfile "`lib'"
	if(_rc==0) {
		/* the path to the library on the adopath */
		local adolib = "`r(fn)'"
		/* extract the directory from the file path */
		mata pathsplit("`adolib'",adopath="",lib="") //_Stata_ doesn't have pathname manipulation, but _mata_ does. the ="" are to declare variables (variables need to be declared before use, even if they are just for output)
		mata st_local("adopath",adopath) // getting values out of mata to Stata is inconsistent: numerics in r() go through st_numscalar(), strings have to go through st_global(), however non-r() scalars have to go through st_strscalar
		mata st_global("lib",lib)
		/* prepend the discovered library path (adopath) to the system library path (libvar) */
		// get the current value of libvar into libpath
		plugin call _svm_getenv, "`libvar'"
		local libpath = "`_getenv'"
		// skip prepending if adopath is already there in `libvar', to prevent explosion
		// NB: strpos(), not ustrpos(): the Unicode string functions only exist in
		//     Stata 14+, and this program declares "version 13" so it must run on 13.
		local k = strpos("`libpath'", "`adopath'")
		if(`k' == 0) {
			// prepend
			plugin call _svm_setenv, "`libvar'" "`adopath'`sep'`libpath'"
		}
	}
	/* Check that the library is now loadable */
	/* by checking here, we prevent Stata's "unable to load [...].plugin" with an error which points out the actual problem. */
	capture plugin call _svm_dlopenable, "`lib'"
	if(_rc!=0) {
		di as error "ensurelib: unable to load `libname'. You must install dynamic link library `libname' to use this program."
		exit _rc
	}
end
|
/* svm_predict: after fitting an SVM model with svm, construct predicted classes/values (depending on the type of the active SVM) */
/* load the C extension at ado-load time, before svm_predict can be called */
svm_ensurelib // check for libsvm
program _svmachines, plugin // load the wrapper for libsvm
program define svm_predict, eclass
	version 13
	// Predict with the most recently fit svmachines model.
	// newvarname receives the predicted class/value; options:
	//   probability — also generate per-class Pr() columns (classification only)
	//   scores      — also generate decision-value columns
	//   verbose     — enable libsvm's internal logging
	syntax newvarname [if] [in], [PROBability] [scores] [Verbose]
	local target = "`varlist'"
	local _in = "`in'" //these need to be stashed because the hack below will smash them
	local _if = "`if'"
	if("`probability'"!="" & "`scores'"!="") {
		di as err "Error: probability and scores are mutually exclusive options."
		exit 2
	}
	// C plugins can only speak to variables mentioned in the varlist they are called with
	// that is, if we are going predict on some vectors, we need to know what X variables we're
	// predicting on in their entirety before we call down to C--and they should match what
	// I haven't discovered how regress and friends manage to know which variables to predict on
	// the only place I see them record what they did is in e(cmdline)
	// but that has cruft in it
	// the easiest way I can think to extract the predictor list is to *reparse* the command line
	// TODO: consider if it's saner to simply pre-store e(indepvars) or e(predictors) or something
	local 0 = "`e(cmdline)'"
	gettoken cmd 0 : 0 /*remove the command which was artificially tacked on by svm_train*/
	syntax varlist [if] [in], * //* puts the remainder in `options' and allows this code to be isolated from svm_train (it's not like we actually could tweak anything, since the svm_model is stored on the plugin's C heap)
	if("`e(svm_type)'"!="ONE_CLASS") {
		gettoken y varlist : varlist // pop the first variable
		assert "`y'" == "`e(depvar)'" // and check consistency with the svm_train
		// make the target column
		// it is safe to assume that `target' is a valid variable name: "syntax" above enforces that
		// and it should be safe to assume the same about `e(depvar)': unless the user is messing with us (in which case, more power to them), it should have been created by svm_train and validated at that point
		quietly clone `target' `e(depvar)' if 0 //'if 0' leaves the values as missing, which is important: we don't want a bug in the plugin to translate to source values sitting in the variable (and thus inflating the observed prediction rate)
		local L : variable label `target'
		if("`L'"!="") {
			label variable `target' "Predicted `L'"
		}
	}
	else {
		//ONE_CLASS: unsupervised, so there is no depvar to clone from
		quietly gen int `target' = .
		label variable `target' "Within support"
	}
	if("`probability'"!="") {
		// allocate space (we use new variables) to put probability estimates for each class for each prediction
		// ensure model is a classification
		// this duplicates code over in svm_train, but I think this is safest:
		// svm_import allows you to pull in svm_models created by other libsvm
		// interfaces, and they mostly don't have this protection.
		if("`e(svm_type)'" != "SVC" & "`e(svm_type)'" != "NU_SVC") {
			// in svm-predict.c, the equivalent section is:
			/*
			 * if (predict_probability && (svm_type==SVC || svm_type==NU_SVC))
			 * 	predict_label = svm_predict_probability(model,x,prob_estimates);
			 * else
			 * 	predict_label = svm_predict(model,x);
			 */
			// it is cleaner to error out, rather than silently change the parameters, which is what the command line tools do
			di as error "Error: trained model is a `e(svm_type)'. You can only use the probability option with classification models (SVC, NU_SVC)."
			exit 2
		}
		// save the top level description to splay across the stemmed variables
		local D : variable label `target'
		// Collect (and create) the probability columns
		// TODO: get it to generate the columns in the "levelsof" order, but actually use them in the libsvm order
		//  -> right now it is in the libsvm order, which is fine. the results are correct. they're just not as convenient.
		// BEWARE: the order of iteration here is critical:
		//  it MUST match the order in svm_model->labels or results will silently be permuted
		//  the only way to achieve this is to record the order in svm_model->labels and loop over that explicitly, which is what e(levels) is for
		assert "`e(levels)'" != ""
		foreach l in `e(levels)' {
			// l is the "label" for each class, but it's just an integer (whatever was in the original data table)
			// We try to label each column by the appropriate string label, for readability,
			// but if it doesn't exist we fall back on the integer label.
			//
			// The command to do this is poorly documented. What this line does is
			// look up the value label for value `l'
			// *or* give back `l' unchanged if `target' has no labels
			// which is precisely what we want it to do here.
			local L : label (`e(depvar)') `l'
			// compute the full variable name for level `l'
			local stemmed = "`target'_`L'"
			local stemmed = strtoname("`stemmed'") //sanitize the new name; this summarily avoids problems like one of your classes being "1.5"
			// finally, allocate it
			// unlike `target' which clones its source, we use doubles
			// because these are meant to hold probabilities
			// TODO: what happens if there's a name collision partially through this loop?
			//   what I want to happen is for any name collision or other bug to abort (i.e. rollback) the entire operation
			//   This can be achieved with "snapshot": snapshot; capture {}; if(fail) { rollback to snapshot }"
			quietly generate double `stemmed' = .
			label variable `stemmed' "Pr(`D'==`L')"
			// attach the newcomers to the varlist so the plugin is allowed to edit them
			local varlist = "`varlist' `stemmed'"
		}
	}
	else if("`scores'"!="") { // else-if because these options are mutually exclusive (which is enforced above)
		// Allocate space for the decision values
		// This is more complicated because we need to go down a lower triangle of a matrix -- so, a length-changing nested loop.
		// we have to use word("`e(levels)'", i) to extract the ith level
		// which means we have an extra layer of indirection to deal with, so there's x_i the index into e(labels), x the integer label, and X the string (or possibly integer) label
		// we need to split the cases of classification and non-classification models
		// reason i: non-classification models have model->label == NULL which means e(levels) is missing which breaks this code
		// reason ii: non-classification models only have one decision value, so the sensible label is just "`target'_score"
		if("`e(svm_type)'" == "ONE_CLASS" | "`e(svm_type)'" == "SVR" | "`e(svm_type)'" == "NU_SVR") {
			// generate the name of the new column.
			// it is, unfortunately, somewhat terse, in hopes of keeping within 32 characters
			local stemmed = "`target'_score"
			local stemmed = strtoname("`stemmed'") //make it Stata-safe
			// allocate the decision value column
			quietly generate double `stemmed' = .
			label variable `stemmed' "`target' svm score"
			// attach the newcomers to the varlist so the plugin is allowed to edit them
			local varlist = "`varlist' `stemmed'"
		}
		else if("`e(svm_type)'" == "SVC" | "`e(svm_type)'" == "NU_SVC") {
			// one score column per unordered pair of classes (lower triangle)
			local no_levels = `e(N_class)'
			forvalues l_i = 1/`no_levels' {
				//di "l_i = `l_i'"
				local l = word("`e(levels)'", `l_i')
				local L : label (`e(depvar)') `l'
				forvalues r_i = `=`l_i'+1'/`no_levels' {
					//di "r_i = `r_i'"
					local r = word("`e(levels)'", `r_i') // map the index into the labels
					local R : label (`e(depvar)') `r'
					//di "generating svm score column (`l_i',`r_i') <=> (`l',`r') <=> (`L',`R')"
					// generate the name of the new column.
					// it is, unfortunately, somewhat terse, in hopes of keeping within 32 characters
					local stemmed = "`target'_`L'_`R'"
					local stemmed = strtoname("`stemmed'") //make it Stata-safe
					// allocate the decision value column
					quietly generate double `stemmed' = .
					label variable `stemmed' "`target' svm score `L' vs `R'"
					// attach the newcomers to the varlist so the plugin is allowed to edit them
					local varlist = "`varlist' `stemmed'"
				}
			}
		}
		else {
			di as error "Unrecognized svm_type `e(svm_type)'; unable to define svm score columns."
			exit 2
		}
	}
	// call down into C
	// we indicate "probability" mode by passing a non-empty list of levels
	// this list implicitly *removes* from the set range of variables to predict from: the trailing variables are instead write locations
	// (this feels like programming a hardware driver)
	// Subtlety: we don't quote levels, on the assumption that it is always a list of integers;
	// that way, the levels are pre-tokenized and the count easily available as argc
	plugin call _svmachines `target' `varlist' `_if' `_in', `verbose' predict `probability' `scores'
	if("`e(svm_type)'"=="ONE_CLASS") {
		// libsvm gives {1,-1} for its one-class predictions;
		// normalize these to {1,0}
		qui replace `target' = 0 if `target' == -1
	}
end
/* clone.ado: generate a perfect copy of a variable: type, labels, etc.
syntax:
clone newvar oldvar [if] [in]
You can use 'if' and 'in' to control what values; values that don't match will be set to missing.
If you want to clone a variable's metadata but not values use the idiom ". clone new old if 0".
NB: The reason the syntax is not "clone newvar = oldvar", even though that would fit the pattern
set by generate and egen, is that syntax's =/exp option insists on parsing numeric expressions,
so string variables wouldn't be cloneable.
*/
program define clone
	version 13
	// Make an exact copy of a variable: same storage type, variable label,
	// and value-label attachment. Values outside `if'/`in' become missing.
	// first pass: pull the newvar/oldvar pair and any qualifiers apart
	syntax namelist [if] [in]
	// stash the qualifiers; subsequent parsing commands would clobber them
	local _if = "`if'"
	local _in = "`in'"
	gettoken target source : namelist
	// enforce types: target must be new, source must exist
	confirm new variable `target'
	confirm variable `source'
	// capture the source's metadata before generating anything
	local vtype : type `source'           // storage type (byte/int/.../str#)
	local vdesc : variable label `source' // human-readable description
	local vlbl : value label `source'     // name of the attached label map, if any
	// Stata maintains a dictionary of dictionaries, each of which
	// maps integers to strings. Multiple variables can share a dictionary,
	// though it is rare except for e.g. "boolean"
	// create the copy with the same storage type and (qualified) values
	generate `vtype' `target' = `source' `_if' `_in'
	// carry over the optional metadata; storage type was already fixed at
	// generate-time and cannot be reassigned without a fresh allocation
	if("`vdesc'"!="") {
		label variable `target' "`vdesc'"
	}
	if("`vlbl'"!="") {
		label value `target' "`vlbl'"
	}
end
|
/* model2stata: a subroutine to convert the global struct svm_model that lives in the DLL to a mixture of e() entries, variables, and matrices.
*
* Besides being usefully modular, this *must* be its own subroutine because it needs to be marked eclass.
* This is because, due to limitations in the Stata C API, there has to be an awkward dance to get the information out:
* _svmachines.plugin writes to the (global!) scalar dictionary and then this subroutine code copies those entries to e().
*
* as with svm_load, the extension function is called multiple times with sub-sub-commands, because it doesn't have permission to perform all the operations needed
* if passed, SV specifies a column to create and then record svm_model->sv_indecies into
*/
/* load the C extension at ado-load time, before _svm_model2stata can be called */
svm_ensurelib // check for libsvm
program _svmachines, plugin // load the wrapper for libsvm
program define _svm_model2stata, eclass
	version 13
	// Copy the global C-side svm_model into e(): scalars (N, N_class, N_SV),
	// matrices (sv_coef, rho), the class-label list e(levels), and — if sv()
	// names a new variable — a 0/1 indicator of which rows are support vectors.
	syntax [if] [in], [SV(string)] [Verbose]
	* as with loading, this has to call in and out of the plugin because chicken/egg:
	* the plugin doesn't have permission to allocate Stata memory (in this case matrices),
	* but we don't know how much to allocate before interrogating the svm_model
	* Phase 1
	* the total number of observations
	* this gets set by _svmachines.c::train(); it doesn't exist for a model loaded via import().
	* nevertheless it is in this file instead of svm_train.ado, because it is most similar here
	* but we cap { } around it so the other case is tolerable
	capture {
		ereturn scalar N = _model2stata_N
		scalar drop _model2stata_N
	}
	/*an undefined macro will inconsistently cause an eval error because `have_rho'==1 will eval to ==1 will eval to "unknown variable"*/
	/*so just define them ahead of time to be safe*/
	local have_sv_indices = 0
	local have_sv_coef = 0
	local have_rho = 0
	local labels = ""
	plugin call _svmachines `if' `in', `verbose' "_model2stata" 1
	* the total number of (detected?) classes
	ereturn scalar N_class = _model2stata_nr_class
	scalar drop _model2stata_nr_class
	* the number of support vectors
	ereturn scalar N_SV = _model2stata_l
	scalar drop _model2stata_l
	* Phase 2
	* Allocate Stata matrices and copy the libsvm matrices and vectors
	if(`have_sv_coef'==1 & `e(N_class)'>1 & `e(N_SV)'>0) {
		capture noisily {
			matrix sv_coef = J(e(N_class)-1,e(N_SV),.)
			// there doesn't seem to be an easy way to generate a list of strings with a prefix in Stata
			// so: the inefficient way
			local cols = ""
			forval j = 1/`e(N_SV)' {
				local cols = "`cols' SV`j'"
			}
			matrix colnames sv_coef = `cols'
			// TODO: rows
			// there is one row per class *less one*. the rows probably represent decision boundaries, then. I'm not sure what this should be labelled.
			// matrix rownames sv_coef = class1..class`e(N_SV)'
		}
	}
	if(`have_rho'==1 & `e(N_class)'>0) {
		capture noisily matrix rho = J(e(N_class),e(N_class),.)
	}
	* TODO: also label the rows according to model->label (libsvm's "labels" are just more integers, but it helps to be consistent anyway);
	* I can easily extract ->label with the same code, but attaching it to the rownames of the other is tricky
	capture noisily {
		plugin call _svmachines `if' `in', `verbose' "_model2stata" 2
		// Label the resulting matrices and vectors with the 'labels' array, if we have it
		if("`labels'"!="") {
			ereturn local levels = strtrim("`labels'")
			capture matrix rownames rho = `labels'
			capture matrix colnames rho = `labels'
		}
	}
	* Phase 3
	* Export the SVs
	if("`sv'"!="") {
		if(`have_sv_indices'==0) {
			di as err "Warning: SV statuses missing. Perhaps your underlying version of libsvm is too old to support sv()."
		}
		else {
			capture noisily {
				// the internal libsvm format is a list of indices
				// we want indicators, which are convenient for Stata
				// so we *start* with all 0s (rather than missings) and overwrite with 1s as we discover SVs
				// NB: in generate the =exp must precede the [if] [in] qualifiers;
				//     the previous order ("generate `sv' `if' `in' = 0") was invalid syntax
				quietly generate `sv' = 0 `if' `in'
				plugin call _svmachines `sv' `if' `in', `verbose' "_model2stata" 3
			}
		}
	}
	* Phase 4
	* Export the rest of the values to e()
	* We *cannot* export matrices to e() from the C interface, hence we have to do this very explicit thing
	* NOTE: 'ereturn matrix' erases the old name (unless you specify ,copy), which is why we don't have to explicitly drop things
	*       'ereturn scalar' doesn't do this, because Stata loves being consistent. Just go read the docs for 'syntax' and see how easy it is.
	* All of these are silenced because various things might kill any of them, and we want failures to be independent of each other
	quietly capture ereturn matrix sv_coef = sv_coef
	quietly capture ereturn matrix rho = rho
end
|
/* _svm_train: this is the meat of the Stata interface to the fitting algorithm.
This is called by svm_train; though Stata programs can call subprograms defined in the same file as them,
similar to Matlab, this has to be a separate file as the special command 'xi' used there apparently cannot
*/
/* load the C extension at ado-load time, before _svm_train can be called */
svm_ensurelib // check for libsvm
program _svmachines, plugin // load the wrapper for libsvm
program define _svm_train, eclass
	version 13
	// Parse the user's command line, call the libsvm plugin to fit the model,
	// then post results to e() (via _svm_model2stata at the end).
	/* argument parsing */
	// these defaults were taken from svm-train.c
	// (except that we have shrinking off by default)
	#delimit ;
	syntax varlist (numeric)
		[if] [in]
		[,
			// strings cannot have default values
			// ints and reals *must*
			// (and yes the only other data types known to syntax are int and real, despite the stata datatypes being str, int, byte, float, double, ...)
			//
			// also be careful of the mixed-case shenanigans
			Type(string)
			Kernel(string)
			Gamma(real 0) COEF0(real 0) DEGree(int 3)
			C(real 1) EPSilon(real 0.1) NU(real 0.5)
			// weights() --> char* weight_label[], double weight[nr_weight] // how should this work?
			// apparently syntax has a special 'weights' argument which is maybe meant just for this purpose
			// but how to pass it on?
			TOLerance(real 0.001)
			SHRINKing PROBability
			CACHE_size(int 100)
			// if specified, a column to generate to mark which rows were detected as SVs
			SV(string)
			// turn on internal libsvm printing
			Verbose
			//set the C random seed
			seed(int 1)
		];
	#delimit cr
	// stash because we run syntax again below, which will smash these
	local cmd = "`0'"
	local _varlist = "`varlist'"
	local _if = "`if'"
	local _in = "`in'"
	// make the string variables case insensitive (by forcing them to CAPS and letting the .c deal with them that way)
	local type = upper("`type'")
	local kernel = upper("`kernel'")
	// translate the boolean flags into integers
	// the protocol here is silly, because syntax special-cases "no" prefixes:
	// *if* the user gives the no form of the option, a macro is defined with "noprobability" in lower case in it
	// in all *other* cases, the macro is undefined (so if you eval it you get "")
	// conversely, with regular option flags, if the user gives it you get a macro with "shrinking" in it, and otherwise the macro is undefined
	if("`shrinking'"=="shrinking") {
		local shrinking = 1
	}
	else {
		local shrinking = 0
	}
	if("`probability'"=="probability") {
		local probability = 1
	}
	else {
		local probability = 0
	}
	/* fill in default values (only for the string vars, because syntax doesn't support defaults for them) */
	if("`type'"=="") {
		local type = "SVC"
	}
	if("`kernel'"=="") {
		local kernel = "RBF"
	}
	/* preprocessing */
	if("`type'" == "ONE_CLASS") {
		// handle the special-case that one-class is unsupervised and so takes no
		// libsvm still reads a Y vector though; it just, apparently, ignores it
		// rather than tweaking numbers to be off-by-one, the easiest is to silently
		// duplicate the pointer to one of the variables.
		gettoken Y : _varlist
		local _varlist = "`Y' `_varlist'"
	}
	else {
		gettoken depvar indepvars : _varlist
	}
	/* sanity checks */
	if("`type'" == "SVC" | "`type'" == "NU_SVC") {
		// "ensure" type is categorical
		local T : type `depvar'
		/*
		if("`T'"=="float" | "`T'"=="double") {
			di as error "Warning: `depvar' is a `T', which is usually used for continuous variables."
			di as error "         SV classification will cast real numbers to integers before fitting." //<-- this is done by libsvm with no control from us
			di as error
			di as error "         If your outcome is actually categorical, consider storing it so:"
			di as error "         . tempvar B"
			di as error "         . generate byte \`B' = `depvar'" //CAREFUL: B is meant to be quoted and depvar is meant to be unquoted.
			di as error "         . drop `depvar'"
			di as error "         . rename \`B' `depvar'"
			di as error "         (If your category coding uses floating point levels you must choose a different coding)"
			di as error
			di as error "         Alternately, consider SV regression: type(SVR) or type(NU_SVR)."
			di as error
		}
		*/
	}
	if(`probability'==1) {
		// ensure model is a classification
		if("`type'" != "SVC" & "`type'" != "NU_SVC") {
			// the command line tools *allow* this combination, but at prediction time silently change the parameters
			// "Errors should never pass silently. Unless explicitly silenced." -- Tim Peters, The Zen of Python
			di as error "Error: requested model is a `type'. You can only use the probability option with classification models (SVC, NU_SVC)."
			exit 2
		}
	}
	if("`sv'"!="") {
		// fail-fast on name errors in sv()
		local 0 = "`sv'"
		syntax newvarname
	}
	/* call down into C */
	/* CAREFUL: epsilon() => svm_param->p and tol() => svm_param->epsilon */
	#delimit ;
	plugin call _svmachines `_varlist' `_if' `_in',
		`verbose' // notice: this is *not* in quotes, which means that if it's not there it's not there at all
		"train"
		"`type'" "`kernel'"
		"`gamma'" "`coef0'" "`degree'"
		"`c'" "`epsilon'" "`nu'"
		"`tolerance'"
		"`shrinking'" "`probability'"
		"`cache_size'" "`seed'"
		;
	#delimit cr
	// *reparse* the command line in order to fix varlist at its current value.
	// If "varlist" includes tokens that get expanded to multiple variables
	// then when svm_predict reparses it again, it will get a different set.
	local 0 = "`cmd'"
	syntax varlist [if] [in], [*]
	local cmd = "`varlist' `if' `in', `options'"
	/* fixup the e() dictionary */
	ereturn clear
	// set standard Stata estimation (e()) properties
	ereturn local cmd = "svmachines"
	ereturn local cmdline = "`e(cmd)' `cmd'"
	ereturn local predict = "svm_predict" //this is a function pointer, or as close as Stata has to that: causes "predict" to run "svm_predict"
	ereturn local estat = "svm_estat" //ditto. NOT IMPLEMENTED
	ereturn local title = "Support Vector Machine"
	ereturn local model = "svmachines"
	ereturn local svm_type = "`type'"
	ereturn local svm_kernel = "`kernel'"
	ereturn local depvar = "`depvar'" //NB: if depvar is "", namely if we're in ONE_CLASS, then Stata effectively ignores this line (which we want).
	//ereturn local indepvars = "`indepvars'" //XXX Instead svm_predict reparses cmdline. This needs vetting.
	// append the svm_model structure to e()
	_svm_model2stata `_if' `_in', sv(`sv') `verbose'
end
|
// Setup
sysuse auto
// Machine learning methods like SVM are very easy to overfit.
// To compensate, it is important to split data into training and testing sets, fit on
// the former and measure performance on the latter, so that performance measurements
// are not artificially inflated by data they've already seen.
// But after splitting the proportion of classes can become unbalanced.
// The reliable way to handle this is a stratified split, a split that
// fixes the proportions of each class in each partition of each class.
// The quick and dirty way is a shuffle:
set seed 9876
gen u = uniform()
sort u
// before the actual train/test split:
// (first half of the shuffled rows train, second half test)
local split = floor(_N/2)
local train = "1/`=`split'-1'"
local test = "`split'/`=_N'"
// Fit the classification model on the training set, with 'verbose' enabled.
// Training cannot handle missing data; here we elide it, but usually you should impute.
svmachines foreign price-gear_ratio if !missing(rep78) in `train', v
// Predict on the test set.
// Unlike training, predict can handle missing data: it simply predicts missing.
predict P in `test'
// Compute error rate: the percentage of mispredictions is the mean of err.
gen err = foreign != P in `test'
sum err in `test'
|
* Example: multiclass classification with a polynomial kernel and
* class-probability estimates (predict, prob).
// Setup
use attitude_indicators
// Shuffle
set seed 12998
gen u = uniform()
sort u
// Train/test split
local split = floor(_N*3/4)
local train = "1/`=`split'-1'"
local test = "`split'/`=_N'"
// Model
svmachines attitude q* in `train', kernel(poly) gamma(0.5) coef0(7) prob
predict P in `test', prob
// the value in column P matches the column P_<attitude> with the highest probability
list attitude P* in `test'
// Compute error rate.
gen err = attitude != P in `test'
sum err in `test'
// Beware:
// predict, prob is a *different algorithm* than predict, and can disagree about predictions.
// This disagreement will become absurd if combined with poor tuning.
predict P2 in `test'
gen agree = P == P2 in `test'
sum agree in `test'
|
* Example: multiclass classification with hand-tuned kernel parameters,
* plus a quick overfitting diagnostic based on the support-vector count.
// Setup
use attitude_indicators
// Shuffle
set seed 4532
gen u = uniform()
sort u
// Train/test split
local split = floor(_N*3/4)
local train = "1/`=`split'-1'"
local test = "`split'/`=_N'"
// In general, you need to do grid-search to find good tuning parameters.
// These values of kernel, gamma, and coef0 just happened to be good enough.
svmachines attitude q* in `train', kernel(poly) gamma(0.5) coef0(7)
predict P in `test'
// Compute error rate.
gen err = attitude != P in `test'
sum err in `test'
// An overly high percentage of SVs means overfitting
// (e(N_SV) and e(N) are posted by svmachines)
di "Percentage that are support vectors: `=round(100*e(N_SV)/e(N),.3)'"
|
* Example: support vector regression (type(svr)) with factor-variable
* expansion and support-vector flagging via sv().
// Setup
webuse highschool
// Shuffle
set seed 793742
gen u = uniform()
sort u
// Train/test split
local split = floor(_N/2)
local train = "1/`=`split'-1'"
local test = "`split'/`=_N'"
// Regression is invoked with type(svr) or type(nu_svr).
// Notice that you can expand factors (categorical predictors) into sets of
// indicator (boolean/dummy) columns with standard i. syntax, and you can
// record which observations were chosen as support vectors with sv().
svmachines weight height i.race i.sex in `train', type(svr) sv(Is_SV)
// Examine which observations were SVs. Ideally, a small number of SVs are enough.
tab Is_SV in `train'
predict P in `test'
// Compute residuals.
gen res = (weight - P) in `test'
sum res
|
* clockseed: re-seed Stata's RNG from the wall clock ($S_DATE and $S_TIME).
* clock() parses the current date+time to milliseconds since 01jan1960;
* the mod 2^31 keeps the value inside the range -set seed- accepts.
program define clockseed
set seed `= mod(clock("$S_DATE $S_TIME","D M 20Y hms"),2^31)'
end
|
/* clone.ado: generate a perfect copy of a variable: type, labels, etc.
syntax:
clone newvar oldvar [if] [in]
You can use 'if' and 'in' to control what values; values that don't match will be set to missing.
If you want to clone a variable's metadata but not values use the idiom ". clone new old if 0".
NB: The reason the syntax is not "clone newvar = oldvar", even though that would fit the pattern
set by generate and egen, is that syntax's =/exp option insists on parsing numeric expressions,
so string variables wouldn't be cloneable.
*/
program define clone
	// parse once to extract the basic pieces of syntax
	syntax namelist [if] [in]
	local _if = "`if'"	// save these for later; other parsing commands would smash them
	local _in = "`in'"
	// split namelist into exactly two names: the new variable and the source.
	// (gettoken only peels off ONE token, so the remainder must be re-split and
	// checked; otherwise "clone new old1 old2" would slip through and die with a
	// confusing error inside -generate- instead of a clear message here.)
	gettoken target namelist : namelist
	gettoken source extra : namelist
	if("`source'"=="") {
		di as error "clone: too few variables specified; syntax is: clone newvar oldvar [if] [in]"
		exit 102
	}
	if("`extra'"!="") {
		di as error "clone: too many variables specified; syntax is: clone newvar oldvar [if] [in]"
		exit 103
	}
	// enforce types
	confirm new variable `target'
	confirm variable `source'
	// save attributes
	local T : type `source'           //the data type
	local N : variable label `source' //the human readable description
	local V : value label `source'    // the name of the label map in use, if there is one
	// Stata maintains a dictionary of dictionaries, each of which
	// maps integers to strings. Multiple variables can share a dictionary,
	// though it is rare except for e.g. "boolean"
	// make new variable; observations outside if/in are left missing
	generate `T' `target' = `source' `_if' `_in'
	// clone attributes if they exist
	// (except for type, which always exists and cannot be reassigned without
	// another 'generate' doing a whole new malloc())
	if("`N'"!="") {
		label variable `target' "`N'" //Yes, the setters and getters are...
	}
	if("`V'"!="") {
		label value `target' "`V'" //...in fact reverses of each other
	}
end
|
/* cv: cross-validated predictions from any Stata estimation command.
*
* Breaks a dataset into a number of subsets ("folds"), and for each
* runs an estimator on everything but that subset, and predicts results.
* In this way, the fit is unbiased by the estimation process, because
* each prediction is made from data which were never involved in fitting.
* Sometimes this is described as a way to control overfitting.
*
*
* In general there are two ways to measure the performance of an estimator:
* looking at its residuals -- in machine learning jargon, the training error
* looking at its cross-validated residuals -- the cross-validation error
* The cv error is an estimate of the generalization error --- the performance
* of your model on data it has never seen before --- which is usually what you care about in applications.
* The ratio of the two tells you if you are over- or under- fitting:
* - if training error is higher, you are underfitting---you do not have enough degrees of freedom in your model---because the training is not doing as well on the full dataset as it does on slices (this is rare)
* - if training error is lower, you are overfitting---you have too many degrees of freedom, and you're fitting noise instead of signal---, because the training error is underestimating the generalization error.
*
* (There is also a third measurement:
 * looking at its test-set residuals -- the testing error
* Cross-validation is usually used in conjunction with grid-search to tune tuning-parameters.
* This in itself can be thought of as a type of estimation, and hence keeping a spare set of data
* purely for testing immunizes your performance from overfitting in the grid-search,
* even though each individual section of grid-search should be immune from overfitting.)
*
*
 * syntax:
 * cv target estimator y x1 [x2 x3 ...] [if] [in], [folds(#) shuffle est(options to estimator) pred(options to predict)]
*
* estimator should be a standard Stata estimation command which can be followed by a call to "predict target if"; only one-variable
* prediction is supported, so mlogit's extended form and other multilabel learning techniques cannot be used with cv at this time.
* folds is the number of folds to make. More is more accurate but slower.
* As a special case, pass folds(1) to simply train and predict at once.
* To do leave-one-out cross-validation (LOOCV), pass the number of observations (e.g. folds(`=_N'), though if you use if/in you'll need to modify that).
*
* to further reduce bias, consider passing shuffle, but make sure you {help set seed:seed} the RNG well
* (e.g. see {help clockseed} or {help truernd}).
*
* Example:
*
* . sysuse auto
* . svm foreign headroom gear_ratio weight, type(svc) gamma(0.4) c(51)
* . predict P
* . gen err = foreign != P
* . qui sum err
* . di "Training error rate: `r(mean)'"
* . drop P err
* .
 * . cv P svm foreign headroom gear_ratio weight, folds(`=floor(_N/3)') est(type(svc) gamma(0.4) c(51))
* . gen err = foreign != P
* . qui sum err
* . di "Cross-validated error rate: `r(mean)'"
*
* Example of if/in:
* . cv P svm gear_ratio foreign headroom weight if gear_ratio > 3 in 22/63, folds(4) shuffle type(svr) eps(0.5)
*
* You can use this with "unsupervised" estimators---ones which take no {help depvar} (y)---too.
 * cv passes whatever options you give it directly to the estimator; all it handles is the folding.
*
*
* See also:
* {help gridsearch}
*
*
* TODO:
* [x] ability to pass arguments to predict
* [ ] mlogit_p takes multiple *variables* to predict into; is there any reasonable way to handle this?
* [x] 'if' and 'in'
* [ ] stratified folding
* [ ] is 'shuffle' a huge slowdown??
* [ ] maybe this should be called "cross_predict" because we don't actually do the validation...
*/
program define cv, eclass
	/* parse arguments */
	// syntax: cv target estimator varlist [if] [in], [folds(#) shuffle est(...) pred(...)]
	gettoken target 0 : 0
	gettoken estimator 0 : 0
	syntax varlist (fv ts) [if] [in], [folds(string) shuffle ESTimate_options(str) PREDict_options(str)]
	confirm name `estimator'
	confirm new variable `target'
	// folds() is parsed by hand: with a declared "folds(int 5)", a non-integer
	// argument (e.g. folds(18.5)) would silently fall back to the default and
	// push the bad token into `options' instead of raising the usual
	// "option folds() incorrectly specified" error.
	if("`folds'"=="") {
		local folds = 5
	}
	confirm integer number `folds'
	qui count `if' `in'
	// `folds'==`r(N)' is allowed: that is leave-one-out cross-validation (LOOCV),
	// as documented above. (The previous check used >= and wrongly rejected it.)
	if(`folds'<1 | `folds'>`r(N)') {
		di as error "Invalid number of folds: `folds'. Must be between 1 and the number of active observations `r(N)'."
		exit 1
	}
	if(`folds'==1) {
		// special case: 1-fold is the same as just training once and predicting
		`estimator' `varlist' `if' `in', `estimate_options'
		predict `target', `predict_options'
		exit
	}
	// TODO: stratified folding (equal class proportions per fold) is not implemented.
	/* generate folds */
	// the easiest way to do this in Stata is simply to mark a new column
	// and stamp out id numbers into it; the tricky part is dealing with if/in
	tempvar fold
	// compute the size of each group *as a float*
	// derivation:
	//   there are r(N) items in total; the last one should map to group `folds'.
	//   dividing each pseudo-_n by `folds' would make the largest id r(N)/`folds',
	//   so instead divide by r(N)/`folds', making the largest id exactly `folds'.
	qui count `if' `in'
	local g = `r(N)'/`folds'
	// generate a pseudo-_n which is the observation *within the if/in subset*
	// (if you do not give if/in this is equal to _n)
	qui gen int `fold' = 1 `if' `in'
	/* shuffling */
	// this is tricky: shuffling has to happen *after* partially generating fold IDs,
	// because the shuffle invalidates the `in', but it must happen *before* the IDs
	// are actually assigned because otherwise there's no point
	if("`shuffle'"!="") {
		tempvar original_order
		tempvar random_order
		qui gen `original_order' = _n
		qui gen `random_order' = uniform()
		sort `random_order'
	}
	// running-sum trick turns the column of 1s into 1,2,3,... within the subset
	qui replace `fold' = sum(`fold') if !missing(`fold')
	// map the pseudo-_n into a fold id number.
	// nopromote causes integer instead of floating point division, which is needed
	// for id numbers; the -1/+1 adjust for Stata counting from 1.
	qui replace `fold' = (`fold'-1)/`g'+1 if !missing(`fold'), nopromote
	// because shuffling can only affect which folds data end up in,
	// immediately after generating fold labels we can put the data back as they were.
	// (this can't use snapshot or preserve because restoring those would erase `fold')
	if("`shuffle'"!="") {
		sort `original_order'
	}
	// make sure the trickery above worked, more or less
	qui sum `fold'
	assert `r(min)'==1
	assert `r(max)'==`folds'
	qui levelsof `fold'
	assert `: word count `r(levels)''==`folds'
	/* cross-predict */
	// We don't actually predict into target directly, because most estimation commands
	// get annoyed at you trying to overwrite an old variable (even if an unused region).
	// Instead we repeatedly predict into B, copy the fold into target, and destroy B.
	//
	// We don't actually create `target' until we have done one fold, at which point we
	// *clone* it, because we do not know what types/labels the predictor wants to
	// attach to its predictions.
	tempvar B
	forvalues f = 1/`folds' {
		// train on everything that isn't the fold
		// (!missing(`fold') is needed because <value> != . counts as true;
		//  notice this condition is duplicated below)
		qui count if `fold' != `f' & !missing(`fold')
		di "[fold `f'/`folds': training on `r(N)' observations]"
		capture `estimator' `varlist' if `fold' != `f' & !missing(`fold'), `estimate_options'
		if(_rc!=0) {
			di as error "`estimator' failed"
			exit _rc
		}
		// predict on the fold
		qui count if `fold' == `f'
		di "[fold `f'/`folds': predicting on `r(N)' observations]"
		predict `B' if `fold' == `f', `predict_options'
		// on the first fold, *clone* B to our real output
		if(`f' == 1) {
			qui clone `target' `B' if 0
		}
		// save the predictions from the current fold
		qui replace `target' = `B' if `fold' == `f'
		drop `B'
	}
	/* clean up */
	// drop e(), because its contents at this point are only valid for the last fold
	// and that's just confusing
	ereturn clear
end
|
/* ensurepkg: automagically install needed packages.
*
 * syntax:
 * ensurepkg cmd, [pkg(package_name) from(url)]
*
* cmd: to detect if the package is installed, look for either of "`cmd" and "`cmd'.ado" in the adopath.
* pkg: specify pkg explicitly if the command and package do not have the same name.
 * from: if needed, install pkg from this URL; if not given, attempts first StataCorp and then the ssc.
* if the package exists, *this is ignored*.
*
* This is a rolling-release system: `pkg' will always be updated to the most recent version if it can.
* If the package does not declare a Distribution-Date it will never update.
* If the network is down, the old version will be used silently.
* These may or may not cause confusion and bugs for your users.
*
*
* ensurepkg is meant to glue over the lack of dependency tracking in Stata's .pkg format.
* At the top of every piece of code which has dependencies, declare like them this:
*
* // example.ado
* ensurepkg norm // Ansari & Mussida normalization subroutine
* ensurepkg psid, pkg(psidtools) // Kohler's panel income data API
* ensurepkg boost, from("http://schonlau.net/stata") // Schonlau's machine learning boosting library
* ensurepkg _getenv.plugin, pkg(env) // Guenther's environment accessors
* program define example {
* ...
* norm x
* boost x y z
* ...
* }
*
*
* Nick Guenther <nguenthe@uwaterloo.ca> 2015
* BSD License
*/
* ensurepkg: ensure a command is available, installing its package if not,
* and otherwise trying to keep the package up to date.
program define ensurepkg
// parse arguments
// (`namelist' is the single name captured by -syntax name-)
syntax name, [pkg(string) from(string) noupdate]
local ado = "`namelist'"
// default: assume the package is named after its command
if("`pkg'"=="") {
local pkg = "`ado'"
}
// test if `ado' is installed
// it would be nice if we could use instead 'ado dir'
// but 'ado dir' doesn't offer a programmatic interface.
// Maybe there's something in Mata...
capture which `ado'
if(_rc!=0) {
// it's not, so install it
if("`from'"!="") {
// explicit source URL given: use it directly
net install `pkg', from(`from')
}
else {
// no source given: try the default net location first, fall back to SSC
capture noisily net install `pkg'
if(_rc!=0) {
ssc install `pkg'
}
}
// recurse, to double-check the installation worked
// DoS WARNING: this will cause an infinite loop
// if the remote package exists but
// does not include the named command.
// (but Stata has bigger security problems in its package system than a DoS)
quietly ensurepkg `ado'
}
else {
// if already installed
// (noupdate suppresses the rolling-release update check)
if("`update'"=="noupdate") {
exit
}
// make sure package is at the latest version.
capture adoupdate `pkg', update
if(_rc==631 | _rc==677) {
// special case: if the network is down, *succeed*
// 631 - DNS failed
// 677 - TCP failed
exit
}
else {
// propagate any other adoupdate failure (exit 0 when it succeeded)
exit _rc
}
}
end
|
// grid search for good SVM parameters
// by Matthias Schonlau
// enhanced to use cv.ado by Nick Guenther
//
// this is not a function because
// - it is technically difficult:
//   - looping over ranges of arbitrary parameters is muddy (the sklearn GridSearch object has to take a dictionary of parameter->[set of values to try], which just looks bad, but is probably as good as you're going to get
//   - doing that in *Stata* is extra muddy
//   -
// - you always need to inspect the results before going on because there may be multiple equally good regions to search down into
set more off
clockseed
use tests/joe_dutch_merged
///////////////////////////////////////////////////////////
// randomize order
gen u=uniform()
sort u
///////////////////////////////////////////////////////////
// scaling autonormalize
// standardize the token count to mean 0, sd 1
sum _textnum_tokens
gen ntoken_stan= (_textnum_tokens -r(mean)) / r(sd)
///////////////////////////////////////////////////////////
// on top of the train/test splits within cv, hold out some data which never get trained on ever
gen train = _n<=(_N*.70)
local train = "train==1"
local test = "train==0"
// nevermind!
replace train = 1
// these columns are not a part of the dataset
// however Stata only lets us have one dataset
// so we just use this convention: never look in these columns when you mean to look in the others
// entries in them are filled in one
// (row i of this side-table holds the result of grid point i)
gen accuracy=.
gen C=.
gen gamma=.
// XXX there's a memory leak in _svm.plugin which means that running 5+-fold CV on this dataset crashes before it can finish
local folds = 2
local i = 0
// sweep the (C, gamma) grid on log scales
foreach C of numlist 0.01 1 100 10000 100000 {
foreach G of numlist .0001 .001 .01 .1 1 10 {
local i = "`++i'"
di as txt "svm category {indepvars} if `train' , c(`C') gamma(`G') cache(1024)"
// generate accuracy measurements using 5-fold cross-validation
cv pred svm category ntoken_stan q* if `train' , folds(`folds') shuffle est(c(`C') gamma(`G') cache(1024))
gen acc = pred == category
qui sum acc
local accuracy = `r(mean)'
// save results to our side-table (which is joined to the main table, but don't tell anyone that)
qui replace C = `C' in `i'
qui replace gamma = `G' in `i'
qui replace accuracy = `accuracy' in `i'
drop pred acc
list C gamma accuracy in `i'
di ""
}
}
// display and plot the full grid of results
list C gamma accuracy in 1/`i'
twoway contour accuracy gamma C, yscale(log) xscale(log) ///
	ylabel(.0001 .001 .01 .1 1 10) xlabel(0.01 1 100 10000 100000 ) ///
	ccuts(0(0.1)1) zlabel(#10, format(%9.2f))
graph export svm_contour_cv_`folds'.pdf , replace
// XXX temporary
exit, clear
|
/* mlogit_predict: extract classifications from an mlogit model
* mlogit only gives probabilities. actually translating this to classes is hard.
*
*/
program define mlogit_predict
	// only valid as an mlogit postestimation command
	if("`e(cmd)'"!="mlogit") {
		di as error "mlogit_predict must only be run after mlogit"
		exit 1
	}
	gettoken target 0 : 0
	confirm new var `target'
	// clone the depvar's type/labels (but no values) into the output column
	qui clone `target' `e(depvar)' if 0
	local L : variable label `target'
	if("`L'"!="") {
		label variable `target' "Predicted `L'"
	}
	// this is the standard max-finding algorithm
	// written sideways so that it can use Stata's vectorizations
	// try not to get lost
	tempvar max_p
	tempvar cur_p
	qui gen `max_p' = 0
	levelsof `e(depvar)', local(levels)
	foreach c of local levels {
		qui predict `cur_p', outcome(`c')
		// BUGFIX: in Stata, missing sorts above every number, so a bare
		// `cur_p' > `max_p' is TRUE for out-of-sample observations (where
		// predict returns missing) and would mislabel them; guard with !missing().
		qui replace `target' = `c' if `cur_p' > `max_p' & !missing(`cur_p')
		qui replace `max_p' = `cur_p' if `cur_p' > `max_p' & !missing(`cur_p')
		qui drop `cur_p'
	}
end
|
* Example: one-class SVM (distribution/novelty detection) on labour data.
// Setup
pause on
sysuse nlsw88, clear
// This dataset has labour data: employment conditions crossed with demographic information.
// (for clarity, we cut the small amount of respondents which answered "other"
// and the few rows with missing data that svmachines cannot tolerate)
// (in a real analysis you should handle your missing data more thoughtfully)
drop if race == 3
drop if missing(wage)
drop if missing(hours)
// If we separate by race, we can see that the support of the bivariate (wage, hours worked) differs.
// A first guess: the shape is the same for white and black respondents, but white respondents have a wider range.
twoway (scatter wage hours), by(race)
pause "Type q to continue."
// We will now ask one-class SVM to detect the shape of that less varied region,
// to give us a sense of the black labour market in 1988.
svmachines wage hours if race == 2, type(one_class) sv(SV_wage_hours)
// There is a well balanced mix of support to non-support vectors. This is a good sign.
tab SV_wage_hours
// Now, plot whether each point "empirically" is in the distribution or not
// to demonstrate the detected distribution
// (you could also construct an evenly spaced grid of test points to get better resolution)
predict S
twoway (scatter wage hours if !S) ///
	(scatter wage hours if S), ///
	title("SVM Estimated Labour Distribution") ///
	legend(label(1 "Outliers") label(2 "Within Support"))
pause "Type q to continue."
// The result looks degenerate: the entire predicted distribution is along the line hours=40.
// By jittering, we can see why this happened: in the black respondents,
// the bulk have a strict 40 hours work week and low pay.
// one_class detects and reflects the huge weight at the center,
// culling the spread as irrelevant.
twoway (scatter wage hours if !S, jitter(5)) ///
	(scatter wage hours if S, jitter(5)), ///
	title("SVM Estimated Labour Distribution, fuzzed") ///
	legend(label(1 "Outliers") label(2 "Within Support"))
pause "Type q to continue."
// We can summarize how one_class handled both sets test and training sets
tab S race, col
// Notice that the percentage of matches in the training set is higher than in the test set,
// because the training extracted the distribution of the test set. Seeing this difference
// supports our intution that the distribution for white respondents differs from black.
|
* Example: compare the (optimistic) training error against the
* cross-validated error of the same SVM fit, using cv.ado.
clockseed
sysuse auto
svm foreign headroom gear_ratio weight, type(svc) gamma(0.4) c(51)
predict P
gen err = foreign != P
qui sum err
di "Training error rate: `r(mean)'"
drop P err
// same model, but predictions come from held-out folds (cv target estimator ...)
cv P svm foreign headroom gear_ratio weight, folds(`=floor(_N/5)') shuffle est(type(svc) gamma(0.4) c(51))
gen err = foreign != P
qui sum err
di "Cross-validated error rate: `r(mean)'"
|
// ethyl acrylate data , Bates and Watts, A1.10
// Fits linear regression (and, in the currently-dead tail, boost and SVR)
// to the viscosity data and compares training/test MSE.
clear
import excel using "tests/ethyl.xlsx", firstrow
gen viscosity = exp(ln_viscosity)
set seed 100
gen u=uniform()
sort u
/////////////////////////////////////////////////////////////////////
// first `trainn' (shuffled) rows form the training set
local trainn=30
gen train= 0
replace train=1 if _n<=`trainn'
graph matrix viscosity pressure temp
twoway contour viscosity pressure temp
regress viscosity pressure temp if train
predict res,res
predict pred
predict rstan, rstan
bysort train: egen mse_reg=total((viscosity-pred)^2/`trainn')
// linear contour plot
twoway contour pred pressure temp
qnorm res
scatter rstan pred
// NOTE: this exit makes everything below dead code; remove it to also run
// the boost/SVM comparison.
exit
////////////////////////////////////////////////////////////////////
// standardization does not affect the influences or predictions
/*
qui sum temp
gen temp_sta=(temp-r(mean)) / r(sd)
qui sum pressure
gen pressure_sta=(pressure-r(mean)) / r(sd)
*/
cap drop predb
boost viscosity temp pressure if train, dist(normal) pred(predb) influence shrink(0.1) inter(3)
bysort train: egen mse_boost=total((viscosity-predb)^2/`trainn')
qui svmachines viscosity temp pressure if train, eps(1) c(1) gamma(.1) type(SVR)
predict preds
bysort train: egen mse_svm=total((viscosity-preds)^2/`trainn')
bysort train: sum mse_svm mse_reg mse_boost
|
* export_svmlight.do
* Test: export the auto dataset in svmlight format and eyeball the output.
sysuse auto
capture rm "tests/auto.svmlight"
// notice: auto contains a string variable and its class variable is last
// we explicitly rearrange them during the export stating the order of variables (Stata handles the indirection, hiding it from the plugin)
export_svmlight foreign price-gear_ratio using "tests/auto.svmlight"
type "tests/auto.svmlight", lines(10)
|
* Test the _svm_getenv C plugin: query the PATH environment variable.
program _svm_getenv, plugin
plugin call _svm_getenv, PATH
// presumably the plugin stores its result in the local `_getenv' -- confirm against the C source
di "_getenv=`_getenv'"
|
* loadplugin.do
* Test: (re)load the _svmachines C plugin.
* unconditionally unload the plugin
* this is only relevant if this do file is reused in the same session
capture program drop _svmachines
* load it!
program _svmachines, plugin
|
* export.do
* Test: train on a subrange, predict on an overlapping range and on the full data.
sysuse auto
svmachines foreign price-gear_ratio if !missing(rep78) in 41/60 /*train on some of the data; this range is chosen to cover 10 each of each kind of car*/
predict P in 50/70 /* test, on both part of the training and part of the testing data */
* fill in the rest and observe the error rate
* Stata convention (which predict enforces) is that we have to predict into a *new* variable
* (if we want to reuse the old one we have to drop it first; if we want to merge the results we need to make a second variable and then use ..other commands.. to do the merge)
predict P2
list foreign P P2
generate error = abs(foreign != P2)
summarize error
|
* train.do
* Test: train with a float depvar that takes few distinct values.
sysuse auto
drop make
order gear_ratio // gear_ratio is a floating point value, but it only lives between 2 and 4 in this dataset, so libsvm casts it to classes 2 and 3
svmachines * if !missing(rep78)
do tests/helpers/inspect_model.do
predict Q
list `e(depvar)' Q
|
* train.do
* Test: train with a leveled float depvar and record support vectors with sv().
sysuse auto
drop make
order headroom // headroom is a floating point variable but comes in .5-increment levels
svmachines * if !missing(rep78), sv(SV)
list SV
do tests/helpers/inspect_model.do
predict Q
list `e(depvar)' Q
|
* predict_oneclass.do
* Test: one_class training (no depvar) and prediction.
sysuse auto
// fabricate extra levels in foreign; one_class ignores any depvar anyway
replace foreign = 3 if _n < 10
replace foreign = 4 if _n > 10 & _n < 20
svmachines price-gear_ratio if !missing(rep78), sv(SV) type(one_class)
predict P
list P*
|
* predict_oneclass_scores.do
* Test: one_class prediction with raw decision-function scores.
sysuse auto
// fabricate extra levels in foreign; one_class ignores any depvar anyway
replace foreign = 3 if _n < 10
replace foreign = 4 if _n > 10 & _n < 20
svmachines price-gear_ratio if !missing(rep78), sv(SV) type(one_class)
predict P, scores
list P*
|
* export.do
* Test: probability-mode training, then both probability and plain prediction.
* (capture noisily: show but survive any expected failure)
sysuse auto
svmachines foreign price-gear_ratio if !missing(rep78), prob
capture noisily predict P, prob
capture noisily predict P2
list foreign P*
|
* predict_probability_strangelabels
* Ensure that the complicated labelling code buried in predict, prob behaves itself even when labels are shifted
* The final list should show that the probability columns are labelled with strings which mostly match what the actual values were.
sysuse auto
replace foreign = foreign+7
label define space_station 7 "DS9" 8 "Ferengi" // foreign was 0/1, now it's 7/8
label values foreign space_station
svmachines foreign price-gear_ratio if !missing(rep78), prob
capture noisily predict P, prob
capture noisily predict P2
list foreign P*
|
* predict_scores.do
* Test: decision-function scores from a plain (non-probability) classifier.
sysuse auto
svmachines foreign price-gear_ratio if !missing(rep78)
capture noisily predict P, scores
list foreign P*
|
* predict_scores_and_probability.do
* Test: prob and scores are mutually exclusive predict options;
* requesting both must fail (i.e. _rc != 0 after the capture).
sysuse auto
svmachines foreign price-gear_ratio if !missing(rep78), prob
predict P1, prob
predict P2, scores
capture noisily predict P3, prob scores
if(_rc == 0) {
di as err "prob and scores should be mutually exclusive options"
exit 1
}
list foreign P*
|
* predict_scores_multiclass
* like predict_scores, but this exercises the complicated triangular label matrix code by making sure there's more than one pair
sysuse auto
// add two extra classes so there are 4 levels -> 6 class pairs
label define origin 2 "Dodge", add
label define origin 3 "Buick", add
replace foreign = 2 in 20/22
replace foreign = 3 in 4/10
svmachines foreign price-gear_ratio if !missing(rep78)
capture noisily predict P, scores
list foreign P*
desc P*
ereturn list
|
* predict_svr.do
* Test: prediction after SVR training (model comes from tests/train_svr.do).
quietly do tests/train_svr.do
predict P in 50/70
predict P2
generate error = abs(price - P2) //notice: subtraction, not comparison, because we're regressing, not classifying
list price P P2 error
summarize error
|
* predict_svr_scores.do
* Test: raw scores from an SVR model (model comes from tests/train_svr.do).
quietly do tests/train_svr.do
predict P in 50/70 /* test, on both part of the training and part of the testing data */
predict P2, scores
list price P*
|
* preimport_svmlight.do
* Test: the "pre" pass of svmlight import, which should only post size scalars.
program _svmlight, plugin
plugin call _svmlight, "import" "pre" "tests/duke.svmlight"
scalar list
|
* Test the _svm_setenv C plugin: set env var QQ and read it back
* both from Stata (: env) and from a subshell.
local mQQ : env QQ
di "First: mQQ=`mQQ'"
// the \$ quotes the $, because otherwise Stata, apparently, tries to interpret it
!echo And the shell says: \$QQ
program _svm_setenv, plugin
plugin call _svm_setenv, QQ "hello me hearties"
local mQQ : env QQ
di "After: mQQ=`mQQ'"
!echo And the shell says: \$QQ
|
* train.do
* Test: basic default training, then inspect the stored model.
sysuse auto
svmachines foreign price-gear_ratio if !missing(rep78)
do tests/helpers/inspect_model.do
|
* train.do
* Test: training with sv() to flag which observations became support vectors.
sysuse auto
svmachines foreign price-gear_ratio if !missing(rep78), sv(SV)
list SV
do tests/helpers/inspect_model.do
|