licenses | version | tree_hash | path | type | size | text | package_name | repo |
---|---|---|---|---|---|---|---|---|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4829 | import Base: isnan
import DataFrames: DataFrame, ncol, convert
export GLRM
# TODO: identify categoricals automatically from PooledDataArray columns
default_real_loss = HuberLoss
default_bool_loss = LogisticLoss
default_ord_loss = MultinomialOrdinalLoss
function GLRM(df::DataFrame, k::Int;
losses = Loss[], rx = QuadReg(.01), ry = QuadReg(.01),
offset = true, scale = false,
prob_scale = true, NaNs_to_Missing = true)
if NaNs_to_Missing
df = copy(df)
NaNs_to_Missing!(df)
end
if losses == Loss[] # if losses not specified, identify ordinal, boolean and real columns
# this changes the way get_reals, etc. work:
#reals, real_losses = get_reals(df)
#bools, bool_losses = get_bools(df)
#ordinals, ordinal_losses = get_ordinals(df)
# it is easier to use just one function for this use case:
reals, real_losses, bools, bool_losses, ordinals, ordinal_losses = get_loss_types(df)
A = [df[:,reals] df[:,bools] df[:,ordinals]]
labels = [names(df)[reals]; names(df)[bools]; names(df)[ordinals]]
losses = [real_losses; bool_losses; ordinal_losses]
else # otherwise require one loss function per column
A = df
ncol(df)==length(losses) || error("please input one loss per column of the dataframe")
labels = names(df)
end
# identify which entries in data frame have been observed (ie are not N/A)
obs = observations(A)
# initialize X and Y
X = randn(k,size(A,1))
Y = randn(k,embedding_dim(losses))
# form model
rys = Array{Regularizer}(undef, length(losses))
for i=1:length(losses)
if isa(losses[i].domain, OrdinalDomain) && embedding_dim(losses[i])>1 #losses[i], MultinomialOrdinalLoss) || isa(losses[i], OrdisticLoss)
rys[i] = OrdinalReg(copy(ry))
else
rys[i] = copy(ry)
end
end
glrm = GLRM(A, losses, rx, rys, k, obs=obs, X=X, Y=Y, offset=offset, scale=scale)
# scale model so it really computes the MAP estimator of the parameters
if prob_scale
prob_scale!(glrm)
end
return glrm, labels
end
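# Hedged usage sketch (illustrative, not part of the package source): given a
# DataFrame with real, boolean, and integer columns, a rank-2 model could be
# formed with losses inferred per column:
#   df = DataFrame(x = randn(100), b = rand(Bool, 100), o = rand(1:5, 100))
#   glrm, labels = GLRM(df, 2)
# Here `labels` records the column names in the order they were packed into
# glrm.A (reals first, then booleans, then ordinals).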
function get_loss_types(df::DataFrame)
m,n = size(df)
reals = fill(false,n)
bools = fill(false,n)
ordinals = fill(false,n)
for j in 1:n
# assume no column has *all* values missing (such a column would carry no information)
t = typeof(first(skipmissing(df[:,j])))
if(t == Float64)
reals[j] = true
elseif (t == Bool)
bools[j] = true
elseif (t == Int) || (t == Int32) || (t == Int64)
ordinals[j] = true
end
end
n1 = sum(reals)
real_losses = Array{Loss}(undef, n1)
for i=1:n1
real_losses[i] = default_real_loss()
end
n2 = sum(bools)
bool_losses = Array{Loss}(undef, n2)
for i in 1:n2
bool_losses[i] = default_bool_loss()
end
n3 = sum(ordinals)
ord_idx = (1:size(df,2))[ordinals]
maxs = zeros(n3,1)
mins = zeros(n3,1)
for j in 1:n3
col = df[:,ord_idx[j]]
try
maxs[j] = maximum(skipmissing(col))
mins[j] = minimum(skipmissing(col))
catch
nothing
end
end
# set losses and regularizers
ord_losses = Array{Loss}(undef, n3)
for i=1:n3
ord_losses[i] = default_ord_loss(Int(maxs[i]))
end
return reals,real_losses,bools,bool_losses,ordinals,ord_losses
end
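# Illustrative sketch of the return shape (values assumed, not from the source):
#   reals, rls, bools, bls, ords, ols =
#       get_loss_types(DataFrame(x = [1.0, 2.0], b = [true, false], o = [1, 3]))
#   # reals == [true, false, false], bools == [false, true, false], ords == [false, false, true]
#   # ols[1] is a MultinomialOrdinalLoss with max level 3 (the column maximum)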
function get_reals(df::DataFrame)
m,n = size(df)
reals = [typeof(df[:,i])<:AbstractArray{Float64,1} for i in 1:n]
n1 = sum(reals)
losses = Array{Loss}(undef, n1)
for i=1:n1
losses[i] = default_real_loss()
end
return reals, losses
end
function get_bools(df::DataFrame)
m,n = size(df)
bools = [isa(df[:,i], AbstractArray{Bool,1}) for i in 1:n]
n1 = sum(bools)
losses = Array{Loss}(undef, n1)
for i=1:n1
losses[i] = default_bool_loss()
end
return bools, losses
end
function get_ordinals(df::DataFrame)
m,n = size(df)
# there must be a better way to check types...
ordinals = [(isa(df[:,i], AbstractArray{Int,1}) ||
isa(df[:,i], AbstractArray{Int32,1}) ||
isa(df[:,i], AbstractArray{Int64,1})) for i in 1:n]
nord = sum(ordinals)
ord_idx = (1:size(df,2))[ordinals]
maxs = zeros(nord,1)
mins = zeros(nord,1)
for i in 1:nord
col = df[:,ord_idx[i]]
try
maxs[i] = maximum(skipmissing(col)) # skipmissing, not dropmissing: col is a vector, not a DataFrame
mins[i] = minimum(skipmissing(col))
catch
nothing
end
end
# set losses and regularizers
losses = Array{Loss}(undef, nord)
for i=1:nord
losses[i] = default_ord_loss(Int(maxs[i]))
end
return ordinals, losses
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4509 | import LinearAlgebra: size, axpy!
import LinearAlgebra.BLAS: gemm!
abstract type AbstractGLRM end
export AbstractGLRM, GLRM, getindex, size, scale_regularizer!
const ObsArray = Union{Array{Array{Int,1},1}, Array{UnitRange{Int},1}}
### GLRM TYPE
mutable struct GLRM<:AbstractGLRM
A # The data table
losses::Array{Loss,1} # array of loss functions
rx::Array{Regularizer,1} # Array of regularizers to be applied to each column of X
ry::Array{Regularizer,1} # Array of regularizers to be applied to each column of Y
k::Int # Desired rank
observed_features::ObsArray # for each example, an array telling which features were observed
observed_examples::ObsArray # for each feature, an array telling in which examples the feature was observed
X::AbstractArray{Float64,2} # Representation of data in low-rank space. A ≈ X'Y
Y::AbstractArray{Float64,2} # Representation of features in low-rank space. A ≈ X'Y
end
# usage notes:
# * providing argument `obs` overwrites arguments `observed_features` and `observed_examples`
# * offset and scale are *false* by default to avoid unexpected behavior
# * convenience methods for calling are defined in utilities/conveniencemethods.jl
function GLRM(A, losses::Array, rx::Array, ry::Array, k::Int;
# the following tighter definition fails when you form an array of a tighter subtype than the abstract type, eg Array{QuadLoss,1}
# function GLRM(A::AbstractArray, losses::Array{Loss,1}, rx::Array{Regularizer,1}, ry::Array{Regularizer,1}, k::Int;
X = randn(k,size(A,1)), Y = randn(k,embedding_dim(losses)),
obs = nothing, # [(i₁,j₁), (i₂,j₂), ... (iₒ,jₒ)]
observed_features = fill(1:size(A,2), size(A,1)), # [1:n, 1:n, ... 1:n] m times
observed_examples = fill(1:size(A,1), size(A,2)), # [1:m, 1:m, ... 1:m] n times
offset = false, scale = false,
checknan = true, sparse_na = true)
# Check dimensions of the arguments
m,n = size(A)
if length(losses)!=n error("There must be as many losses as there are columns in the data matrix") end
if length(rx)!=m error("There must be either one X regularizer or as many X regularizers as there are rows in the data matrix") end
if length(ry)!=n error("There must be either one Y regularizer or as many Y regularizers as there are columns in the data matrix") end
if size(X)!=(k,m) error("X must be of size (k,m) where m is the number of rows in the data matrix. This is the transpose of the standard notation used in the paper, but it makes for better memory management. \nsize(X) = $(size(X)), size(A) = $(size(A)), k = $k") end
if size(Y)!=(k,embedding_dim(losses)) error("Y must be of size (k,d) where d is the sum of the embedding dimensions of all the losses. \n(1 for real-valued losses, and the number of categories for categorical losses).") end
# Determine observed entries of data
if obs === nothing && sparse_na && isa(A,SparseMatrixCSC)
obs = findall(!iszero, A) # observed indices (list of CartesianIndices)
end
if obs === nothing # if no array of tuples was specified, use what was explicitly passed in or the defaults (all)
# println("no obs given, using observed_features and observed_examples")
glrm = GLRM(A,losses,rx,ry,k, observed_features, observed_examples, X,Y)
else # otherwise unpack the tuple list into arrays
# println("unpacking obs into array")
glrm = GLRM(A,losses,rx,ry,k, sort_observations(obs,size(A)...)..., X,Y)
end
# check to make sure X is properly oriented
if size(glrm.X) != (k, size(A,1))
# println("transposing X")
glrm.X = glrm.X'
end
# check none of the observations are NaN
if checknan
for i=1:size(A,1)
for j=glrm.observed_features[i]
if isnan(A[i,j])
error("Observed value in entry ($i, $j) is NaN.")
end
end
end
end
if scale # scale losses (and regularizers) so they all have equal variance
equilibrate_variance!(glrm)
end
if offset # don't penalize the offset of the columns
add_offset!(glrm)
end
return glrm
end
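# Hedged usage sketch (a minimal quadratic model; values are illustrative):
#   m, n, k = 100, 50, 5
#   A = randn(m, n)
#   losses = Loss[QuadLoss() for _ in 1:n]
#   rx = Regularizer[QuadReg(.1) for _ in 1:m]
#   ry = Regularizer[QuadReg(.1) for _ in 1:n]
#   glrm = GLRM(A, losses, rx, ry, k)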
parameter_estimate(glrm::GLRM) = (glrm.X, glrm.Y)
function scale_regularizer!(glrm::GLRM, newscale::Number)
mul!(glrm.rx, newscale)
mul!(glrm.ry, newscale)
return glrm
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 7964 | # Supported domains: Real, Boolean, Ordinal, Periodic, Count
# The purpose of domains is to be able to impute over different possible values of `a` regardless of
# the loss that was used in the GLRM. The reason for doing this is to evaluate the performance of GLRMS.
# For instance, let's say we use PCA (QuadLoss losses) to model a binary data frame (not the best idea).
# In order to override the standard imputation with `impute(QuadLoss(), u)`, which assumes imputation over the reals,
# we can use `impute(BoolDomain(), QuadLoss(), u)` and see which of {-1,1} is best. The reason we want to be able to
# do this is to compare a baseline model (e.g. PCA) with a more logical model using heterogenous losses,
# yet still give each model the same amount of information regarding how imputation should be done.
# The domains themselves are defined in domains.jl
# In order to accomplish this we define a series of domains that describe how imputation should be performed over
# them. Each combination of domain and loss must have the following:
# Methods:
# `impute(D::my_Domain, l::my_loss_type, u::Float64) ::Float64`
# Imputes aᵤ = argmin l(u,a) over the range of possible values of a. The range of
# possible values of a should be implicitly or explicitly provided by `D`.
# There should be an impute method for every combination of datatype and loss.
# `error_metric(D::my_Domain, l::my_loss_type, u::Float64, a::Number) ::Float64`
# First calls aᵤ = impute(l,u), then uses the type of `my_D` to pick a
# good measure of error: either 0-1 misclassification or squared difference.
# DataTypes are assigned to each column of the data and are not part of the low-rank model itself, they just serve
# as a way to evaluate the performance of the low-rank model.
export impute, error_metric, errors
# function for general use
roundcutoff(x,a::T,b::T) where T<:Number = T(min(max(round(x),a),b))
# Error metrics for general use
squared_error(a_imputed::Number, a::Number) = (a_imputed-a)^2
misclassification(a_imputed::T, a::T) where T = float(!(a_imputed==a)) # return 0.0 if equal, 1.0 else
# use the default loss domain imputation if no domain provided
impute(l::Loss, u::Float64) = impute(l.domain, l, u)
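# Hedged example of the override mechanism described above (values illustrative):
#   impute(QuadLoss(), 0.8)                 # imputes over ℜ: returns 0.8
#   impute(BoolDomain(), QuadLoss(), 0.8)   # restricted to booleans: returns true,
#                                           # since (0.8-1)^2 < (0.8-0)^2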
########################################## REALS ##########################################
# Real data can take values from ℜ
impute(D::RealDomain, l::DiffLoss, u::Float64) = u # by the properties of any DiffLoss
impute(D::RealDomain, l::PoissonLoss, u::Float64) = exp(u)
impute(D::RealDomain, l::OrdinalHingeLoss, u::Float64) = roundcutoff(u, l.min, l.max)
impute(D::RealDomain, l::LogisticLoss, u::Float64) = error("Logistic loss always imputes either +∞ or -∞ given a∈ℜ")
function impute(D::RealDomain, l::WeightedHingeLoss, u::Float64)
@warn("It doesn't make sense to use HingeLoss to impute data that can take values in ℜ")
1/u
end
function error_metric(D::RealDomain, l::Loss, u::Float64, a::Number)
a_imputed = impute(D, l, u)
squared_error(a_imputed, a)
end
########################################## BOOLS ##########################################
# Boolean data should take values from {-1,1}
# sign of u
impute(D::BoolDomain, l::ClassificationLoss, u::Float64) = u>=0
# Evaluate w/ a=-1 and a=1 and see which is better according to that loss.
# This is fast and works for any loss.
impute(D::BoolDomain, l::Loss, u::Float64) = evaluate(l,u,false)<evaluate(l,u,true) ? false : true
function error_metric(D::BoolDomain, l::Loss, u::Float64, a::Number)
a_imputed = impute(D, l, u)
misclassification(a_imputed, a)
end
########################################## ORDINALS ##########################################
# Ordinal data should take integer values ranging from `min` to `max`
impute(D::OrdinalDomain, l::DiffLoss, u::Float64) = roundcutoff(u, D.min, D.max)
impute(D::OrdinalDomain, l::PoissonLoss, u::Float64) = roundcutoff(exp(u), D.min , D.max)
impute(D::OrdinalDomain, l::OrdinalHingeLoss, u::Float64) = roundcutoff(u, D.min, D.max)
impute(D::OrdinalDomain, l::LogisticLoss, u::Float64) = u>0 ? D.max : D.min
function impute(D::OrdinalDomain, l::WeightedHingeLoss, u::Float64)
@warn("It doesn't make sense to use HingeLoss to impute ordinals")
a_imputed = (u>0 ? ceil(1/u) : floor(1/u))
roundcutoff(a_imputed, D.min, D.max)
end
impute(D::OrdinalDomain, l::OrdisticLoss, u::AbstractArray) = argmin(u.^2)
# MultinomialOrdinalLoss
# l(u, a) = -log(p(u, a))
# = u[1] + ... + u[a-1] - u[a] - ... - u[end] +
# log(sum_{a'}(exp(u[1] + ... + u[a'-1] - u[a'] - ... - u[end])))
#
# so given u,
# the most probable value a is the index of the first
# positive entry of u
function impute(D::OrdinalDomain, l::MultinomialOrdinalLoss, u::AbstractArray)
enforce_MNLOrdRules!(u)
eu = exp.(u)
p = [1-eu[1], -diff(eu)..., eu[end]]
return argmax(p)
end
# generic method
function impute(D::OrdinalDomain, l::Loss, u::AbstractArray)
(D.min:D.max)[argmin([evaluate(l, u, i) for i in D.min:D.max])]
end
function error_metric(D::OrdinalDomain, l::Loss, u::Float64, a::Number)
a_imputed = impute(D, l, u)
squared_error(a_imputed, a)
end
########################################## CATEGORICALS ##########################################
# Categorical data should take integer values ranging from 1 to `max`
impute(D::CategoricalDomain, l::MultinomialLoss, u::Array{Float64}) = argmax(u)
impute(D::CategoricalDomain, l::OvALoss, u::Array{Float64}) = argmax(u)
function error_metric(D::CategoricalDomain, l::Loss, u::Array{Float64}, a::Number)
a_imputed = impute(D, l, u)
misclassification(a_imputed, a)
end
########################################## PERIODIC ##########################################
# Periodic data can take values from ℜ, but given a period T, we should have error_metric(a,a+T) = 0
# Since periodic data can take any real value, we can use the real-valued imputation methods
impute(D::PeriodicDomain, l::Loss, u::Float64) = impute(RealDomain(), l, u)
# When imputing a periodic variable, we restrict ourselves to the domain [0,T]
pos_mod(T::Float64, x::Float64) = x>0 ? x%T : (x%T)+T # takes a value and finds its equivalent positive modulus
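# e.g. with period T = 24.0: pos_mod(24.0, 27.5) == 3.5 and pos_mod(24.0, -3.0) == 21.0,
# so a value and its translate by a full period incur zero error below.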
function error_metric(D::PeriodicDomain, l::Loss, u::Float64, a::Number)
a_imputed = impute(D, l, u)
# remap both a and a_imputed to [0,T] to check for a ≡ a_imputed
squared_error(pos_mod(D.T,a_imputed), pos_mod(D.T,a))
end
########################################## COUNTS ##########################################
# Count data can take values over ℕ, which we approximate as {0, 1, 2 ... `max_count`}
# Our approximation of ℕ is really an ordinal
impute(D::CountDomain, l::Loss, u::Float64) = impute(OrdinalDomain(0,D.max_count), l, u)
function error_metric(D::CountDomain, l::Loss, u::Float64, a::Number)
a_imputed = impute(D, l, u)
squared_error(a_imputed, a)
end
####################################################################################
# Use impute and error_metric over arrays
function impute(domains::Array{DomainSubtype,1},
losses::Array{LossSubtype,1},
U::Array{Float64,2}) where {DomainSubtype<:Domain, LossSubtype<:Loss}
m, d = size(U)
n = length(losses)
yidxs = get_yidxs(losses)
A_imputed = Array{Number}(undef, (m, n));
for f in 1:n
for i in 1:m
if length(yidxs[f]) > 1
A_imputed[i,f] = impute(domains[f], losses[f], vec(U[i,yidxs[f]]))
else
A_imputed[i,f] = impute(domains[f], losses[f], U[i,yidxs[f]])
end
end
end
return A_imputed
end
function impute(losses::Array{LossSubtype,1}, U::Array{Float64,2}) where LossSubtype<:Loss
domains = Domain[l.domain for l in losses]
impute(domains, losses, U)
end
function errors(domains::Array{Domain,1}, losses::Array{Loss,1},
U::Array{Float64,2}, A::AbstractArray )
err = zeros(size(A))
m,n = size(A)
for j in 1:n
for i in 1:m
err[i,j] = error_metric(domains[j], losses[j], U[i,j], A[i,j])
end
end
return err
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 5129 | import StatsBase: sample, wsample
export init_kmeanspp!, init_svd!, init_nndsvd!
import Arpack: svds
# kmeans++ initialization, but with missing data
# we make sure never to look at "unobserved" entries in A
# so that models can be honestly cross validated, for example
function init_kmeanspp!(glrm::GLRM)
m,n = size(glrm.A)
k = glrm.k
possible_centers = Set(1:m)
glrm.Y = randn(k,n)
# assign first center randomly
i = sample(1:m)
setdiff!(possible_centers, i)
glrm.Y[1,glrm.observed_features[i]] = glrm.A[i,glrm.observed_features[i]]
# assign next centers one by one
for l=1:k-1
min_dists_per_obs = zeros(m)
for i in possible_centers
d = zeros(l)
for j in glrm.observed_features[i]
for ll=1:l
d[ll] += evaluate(glrm.losses[j], glrm.Y[ll,j], glrm.A[i,j])
end
end
min_dists_per_obs[i] = minimum(d)/length(glrm.observed_features[i])
end
furthest_index = wsample(1:m,min_dists_per_obs)
glrm.Y[l+1,glrm.observed_features[furthest_index]] = glrm.A[furthest_index,glrm.observed_features[furthest_index]]
end
return glrm
end
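# Hedged usage sketch: the init_* routines mutate the model in place, so they
# are called on a constructed GLRM before fitting, e.g.
#   glrm = GLRM(A, losses, rx, ry, k)
#   init_kmeanspp!(glrm)   # seeds rows of Y with observed rows of A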
function init_svd!(glrm::GLRM; offset=true, scale=true, TOL = 1e-10)
# only offset if the glrm model is offset (i.e. the X regularizers are lastentry1)
offset = offset && all(r -> isa(r, lastentry1), glrm.rx)
# only scale if we also offset
scale = scale && offset
m,n = size(glrm.A)
k = glrm.k
# find spans of loss functions (for multidimensional losses)
yidxs = get_yidxs(glrm.losses)
d = maximum(yidxs[end])
# create a matrix representation of A with the same dimensions as X*Y
# by expanding out all data types with embedding dimension greater than 1
if all(map(length, yidxs) .== 1)
Areal = glrm.A # save time, but in this case we'll still have a DataFrame
else
Areal = zeros(m, d)
for f=1:n
if length(yidxs[f]) == 1
Areal[glrm.observed_examples[f], yidxs[f]] =
glrm.A[glrm.observed_examples[f], f]
else
if isa(glrm.losses[f].domain, CategoricalDomain)
levels = datalevels(glrm.losses[f])
for e in glrm.observed_examples[f]
for ilevel in 1:length(levels)
Areal[e, yidxs[f][ilevel]] =
(glrm.A[e, f] == levels[ilevel] ? 1 : -1)
end
end
elseif isa(glrm.losses[f].domain, OrdinalDomain)
embed_dim = embedding_dim(glrm.losses[f])
mymean = mean(glrm.A[glrm.observed_examples[f], f])
levels = datalevels(glrm.losses[f])
for e in glrm.observed_examples[f]
for ilevel in 1:(length(levels)-1)
Areal[e, yidxs[f][ilevel]] =
(glrm.A[e, f] > levels[ilevel] ? 1 : -1)
end
end
else
error("No default mapping to real valued matrix for domains of type $typeof(glrm.losses[f].domain)")
end
end
end
end
# standardize A, respecting missing values
means = zeros(d)
stds = zeros(d)
Astd = zeros(m, d)
for f in 1:n
for j in yidxs[f]
nomissing = Areal[glrm.observed_examples[f],j]
means[j] = mean(nomissing)
if isnan(means[j])
means[j] = 1
end
stds[j] = std(nomissing)
if stds[j] < TOL || isnan(stds[j])
stds[j] = 1
end
Astd[glrm.observed_examples[f],j] = Areal[glrm.observed_examples[f],j] .- means[j]
end
end
if offset
k -= 1
glrm.X[end,:] .= 1
glrm.Y[end,:] = means
if scale
Astd = Astd ./ stds
end
if k <= 0
@warn("Using an offset on a rank 1 model fits *only* the offset. To fit an offset + 1 low rank component, use k=2.")
return glrm
end
end
# options for rescaling:
# 1) scale Astd so its mean is the same as the mean of the observations
Astd *= m*n/sum(map(length, glrm.observed_features))
# 2) scale columns inversely proportional to number of entries in them & so that column mean is same as mean of observations in it
# intuition: noise in a dense column is low rank, so downweight dense columns
# Astd *= diagm(m./map(length, glrm.observed_examples))
# 3) scale columns proportional to scale of regularizer & so that column mean is same as mean of observations in it
# Astd *= diagm(m./map(scale, glrm.ry))
# ASVD = rsvd(Astd, k) - slower than built-in svds, and fails for sparse matrices
ASVD = svds(Astd, nsv = k)[1]
# initialize with the top k components of the SVD,
# rescaling by the variances
@assert(size(glrm.X, 1) >= k)
@assert(size(glrm.X, 2) >= m)
@assert(size(glrm.Y, 1) >= k)
@assert(size(glrm.Y, 2) >= d)
glrm.X[1:k,1:m] = Diagonal(sqrt.(ASVD.S))*ASVD.U' # recall X is transposed as per column major order.
glrm.Y[1:k,1:d] = Diagonal(sqrt.(ASVD.S))*ASVD.Vt*Diagonal(stds)
return glrm
end
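# Hedged usage sketch (assumes the model was built with offset=true, so that
# add_offset! wrapped the X regularizers in lastentry1):
#   glrm = GLRM(A, losses, rx, ry, k; offset=true)
#   init_svd!(glrm)   # seeds X, Y from a truncated SVD of the standardized data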
include("initialize_nmf.jl")
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1395 | import NMF.nndsvd
function init_nndsvd!(glrm::GLRM; scale::Bool=true, zeroh::Bool=false,
variant::Symbol=:std, max_iters::Int=0)
# NNDSVD initialization:
# Boutsidis C, Gallopoulos E (2007). SVD based initialization: A head
# start for nonnegative matrix factorization. Pattern Recognition
m,n = size(glrm.A)
# only initialize based on observed entries
A_init = zeros(m,n)
for i = 1:n
A_init[glrm.observed_examples[i],i] = glrm.A[glrm.observed_examples[i],i]
end
# scale all columns by the Loss.scale parameter
if scale
for i = 1:n
A_init[:,i] .*= glrm.losses[i].scale
end
end
# run the first nndsvd initialization
W,H = nndsvd(A_init, glrm.k, zeroh=zeroh, variant=variant)
glrm.X = W'
glrm.Y = H
# If max_iters>0 do a soft impute for the missing entries of A.
# Iterate: Estimate missing entries of A with W*H
# Update (W,H) nndsvd estimate based on new A
for iter = 1:max_iters
# Update missing entries of A_init
for j = 1:n
for i = setdiff(1:m,glrm.observed_examples[j])
A_init[i,j] = dot(glrm.X[:,i],glrm.Y[:,j])
end
end
# Re-estimate W and H
W,H = nndsvd(A_init, glrm.k, zeroh=zeroh, variant=variant)
glrm.X = W'
glrm.Y = H
end
return glrm
end
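# Hedged usage sketch (assumes nonnegative data and nonnegativity-enforcing
# regularizers such as NonNegConstraint from regularizers.jl):
#   glrm = GLRM(abs.(A), losses, rx_nn, ry_nn, k)
#   init_nndsvd!(glrm; max_iters=5)  # 5 rounds of soft-imputing missing entries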
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 25835 | # Predefined loss functions
# You may also implement your own loss by subtyping the abstract type Loss.
#
# Losses must have the following:
# Fields:
# `scale::Float64`
# This field represents a scalar weight assigned to the loss function: w*l(u,a)
# `domain::natural_Domain`
# The "natural" domain that the loss function was meant to handle. E.g. BoolDomain for LogisticLoss,
# RealDomain for QuadLoss, etc.
# Other fields may be also be included to encode parameters of the loss function, encode the range or
# set of possible values of the data, etc.
#
# Methods:
# `my_loss_type(args..., scale=1.0::Float64;
# domain=natural_Domain(args[range]...), kwargs...) ::my_loss_type`
# Constructor for the loss type. The first few arguments are parameters for
# which there isn't a rational default (a loss may not need any of these).
# The last positional argument should be the scale, which should default to 1.
# There must be a default domain which is a Domain, which may take arguments from
# the list of positional arguments. Parameters besides the scale for which there are
# reasonable defaults should be included as keyword arguments (there may be none).
# `evaluate(l::my_loss_type, u::Float64, a::Number) ::Float64`
# Evaluates the function l(u,a) where u is the approximation of a
# `grad(l::my_loss_type, u::Float64, a::Number) ::Float64`
# Evaluates the gradient of the loss at the given point (u,a)
# In addition, loss functions should preferably implement methods:
# `M_estimator(l::my_loss_type, a::AbstractArray) ::Float64`
# Finds uₒ = argmin ∑l(u,aᵢ) which is the best single estimate of the array `a`
# If `M_estimator` is not implemented, a live optimization procedure will be used when this function is
# called in order to compute loss function scalings. The live optimization may be slow, so an analytic
# implementation is preferable.
# `impute(d::Domain, l::my_loss_type, u::Array{Float64})` (in impute_and_err.jl)
# Finds a = argmin l(u,a), the most likely value for an observation given a parameter u
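# Illustrative sketch of a custom loss obeying the interface above. The name
# AbsLoss is hypothetical (it essentially mirrors the L1Loss defined below):
#   mutable struct AbsLoss<:Loss
#       scale::Float64
#       domain::Domain
#   end
#   AbsLoss(scale=1.0::Float64; domain=RealDomain()) = AbsLoss(scale, domain)
#   evaluate(l::AbsLoss, u::Float64, a::Number) = l.scale*abs(u-a)
#   grad(l::AbsLoss, u::Float64, a::Number) = l.scale*sign(u-a)
#   M_estimator(l::AbsLoss, a::AbstractArray) = median(a)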
import Base: *, convert
import Optim: optimize, LBFGS
# export Loss,
# DiffLoss, ClassificationLoss, SingleDimLoss, # categories of Losses
# QuadLoss, L1Loss, HuberLoss, QuantileLoss, # losses for predicting reals
# PoissonLoss, # losses for predicting integers
# HingeLoss, WeightedHingeLoss, LogisticLoss, # losses for predicting booleans
# OrdinalHingeLoss, OrdisticLoss, MultinomialOrdinalLoss, BvSLoss, # losses for predicting ordinals
# MultinomialLoss, OvALoss, # losses for predicting nominals (categoricals)
# PeriodicLoss, # losses for predicting periodic variables
# evaluate, grad, M_estimator, # methods on losses
# avgerror, scale, mul!, *,
# embedding_dim, get_yidxs, datalevels, domain
abstract type Loss end
# a DiffLoss is one in which l(u,a) = f(u-a) AND argmin f(x) = 0
# for example, QuadLoss(u,a)=(u-a)² and we can write f(x)=x² and x=u-a
abstract type DiffLoss<:Loss end
# a ClassificationLoss is one in which observed values are true = 1 or false = 0 = -1 AND argmin_a L(u,a) = u>=0 ? true : false
abstract type ClassificationLoss<:Loss end
# Single Dimensional losses are DiffLosses or ClassificationLosses, which allow optimized evaluate and grad functions
const SingleDimLoss = Union{DiffLoss, ClassificationLoss}
mul!(l::Loss, newscale::Number) = (l.scale = newscale; l)
scale(l::Loss) = l.scale
*(newscale::Number, l::Loss) = (newl = copy(l); mul!(newl, newscale))
*(l::Loss, newscale::Number) = (newl = copy(l); mul!(newl, newscale))
domain(l::Loss) = l.domain
### embedding dimensions: mappings from losses/columns of A to columns of Y
# default number of columns
# number of columns is higher for multidimensional losses
embedding_dim(l::Loss) = 1
embedding_dim(l::Array{LossSubtype,1}) where LossSubtype<:Loss = sum(map(embedding_dim, l))
# find spans of loss functions (for multidimensional losses)
function get_yidxs(losses::Array{LossSubtype,1}) where LossSubtype<:Loss
n = length(losses)
ds = map(embedding_dim, losses)
d = sum(ds)
featurestartidxs = cumsum(append!([1], ds))
# find which columns of Y map to which columns of A (for multidimensional losses)
U = Union{UnitRange{Int}, Int}
yidxs = Array{U}(undef, n)
for f = 1:n
if ds[f] == 1
yidxs[f] = featurestartidxs[f]
else
yidxs[f] = featurestartidxs[f]:featurestartidxs[f]+ds[f]-1
end
end
return yidxs
end
### promote integers to floats if given as the argument u
## causes ambiguity warnings
# evaluate(l::Loss, u::Number, a) = evaluate(l,convert(Float64,u),a)
# grad(l::Loss, u::Number, a) = grad(l,convert(Float64,u),a)
# evaluate{T<:Number}(l::Loss, u::Array{T,1}, a) = evaluate(l,convert(Array{Float64,1},u),a)
# grad{T<:Number}(l::Loss, u::Array{T,1}, a) = grad(l,convert(Array{Float64,1},u),a)
### -1,0,1::Int are translated to Booleans if loss is not defined on numbers
# convert(::Type{Bool}, x::Int) = x==1 ? true : (x==-1 || x==0) ? false : throw(InexactError("Bool method successfully overloaded by LowRankModels"))
myBool(x::Int) = x==1 ? true : (x==-1 || x==0) ? false : throw(InexactError(:myBool, Bool, x))
evaluate(l::ClassificationLoss, u::Float64, a::Int) = evaluate(l,u,myBool(a))
grad(l::ClassificationLoss, u::Float64, a::Int) = grad(l,u,myBool(a))
M_estimator(l::ClassificationLoss, a::AbstractArray{Int,1}) = M_estimator(l,myBool.(a))
### M-estimators
# The following is the M-estimator for loss functions that don't have one defined. It's also useful
# for checking that the analytic M_estimators are correct. To make sure this method is called instead
# of the loss-specific method (should only be done to test), simply pass the third paramter `test`.
# e.g. M_estimator(l,a) will call the implementation for l, but M_estimator(l,a,"test") will call the
# general-purpose optimizing M_estimator.
function M_estimator(l::Loss, a::AbstractArray; test="test")
# the function to optimize over
f = (u -> sum(map(ai->evaluate(l,u[1],ai), a))) # u is indexed because `optim` assumes input is a vector
# the gradient of that function
function g!(storage::Vector, u::Vector) # this is the format `optim` expects
storage[1] = sum(map(ai->grad(l,u[1],ai), a))
end
m = optimize(f, g!, [median(a)], LBFGS()).minimizer[1] # the minimizer (argmin), not the attained minimum value
end
# Uses uₒ = argmin ∑l(u,aᵢ) to find (1/n)*∑l(uₒ,aᵢ) which is the
# average error incurred by using the estimate uₒ for every aᵢ
function avgerror(l::Loss, a::AbstractArray)
b = collect(skipmissing(a))
m = M_estimator(l,b)
sum(map(ai->evaluate(l,m,ai),b))/length(b)
end
## Losses:
########################################## QUADRATIC ##########################################
# f: ℜxℜ -> ℜ
mutable struct QuadLoss<:DiffLoss
scale::Float64
domain::Domain
end
QuadLoss(scale=1.0::Float64; domain=RealDomain()) = QuadLoss(scale, domain)
evaluate(l::QuadLoss, u::Float64, a::Number) = l.scale*(u-a)^2
grad(l::QuadLoss, u::Float64, a::Number) = 2*(u-a)*l.scale
M_estimator(l::QuadLoss, a::AbstractArray) = mean(a)
########################################## L1 ##########################################
# f: ℜxℜ -> ℜ
mutable struct L1Loss<:DiffLoss
scale::Float64
domain::Domain
end
L1Loss(scale=1.0::Float64; domain=RealDomain()) = L1Loss(scale, domain)
evaluate(l::L1Loss, u::Float64, a::Number) = l.scale*abs(u-a)
grad(l::L1Loss, u::Float64, a::Number) = sign(u-a)*l.scale
M_estimator(l::L1Loss, a::AbstractArray) = median(a)
########################################## HUBER ##########################################
# f: ℜxℜ -> ℜ
mutable struct HuberLoss<:DiffLoss
scale::Float64
domain::Domain
crossover::Float64 # where QuadLoss loss ends and linear loss begins; =1 for standard HuberLoss
end
HuberLoss(scale=1.0::Float64; domain=RealDomain(), crossover=1.0::Float64) = HuberLoss(scale, domain, crossover)
function evaluate(l::HuberLoss, u::Float64, a::Number)
abs(u-a) > l.crossover ? (abs(u-a) - l.crossover + l.crossover^2)*l.scale : (u-a)^2*l.scale
end
grad(l::HuberLoss,u::Float64,a::Number) = abs(u-a)>l.crossover ? sign(u-a)*l.scale : (u-a)*l.scale
M_estimator(l::HuberLoss, a::AbstractArray) = median(a) # a heuristic, not the true estimator
########################################## QUANTILE ##########################################
# f: ℜxℜ -> ℜ
# define (u)_+ = max(u,0), (u)_- = max(-u,0) so (u)_+ + (u)_- = |u|
# f(u,a) = { quantile (a - u)_+ + (1-quantile) (a - u)_-
# fits the `quantile`th quantile of the distribution
mutable struct QuantileLoss<:DiffLoss
scale::Float64
domain::Domain
quantile::Float64 # fit the alphath quantile
end
QuantileLoss(scale=1.0::Float64; domain=RealDomain(), quantile=.5::Float64) = QuantileLoss(scale, domain, quantile)
function evaluate(l::QuantileLoss, u::Float64, a::Number)
diff = a-u
diff > 0 ? l.scale * l.quantile * diff : - l.scale * (1-l.quantile) * diff
end
function grad(l::QuantileLoss,u::Float64,a::Number)
diff = a-u
diff > 0 ? -l.scale * l.quantile : l.scale * (1-l.quantile)
end
M_estimator(l::QuantileLoss, a::AbstractArray) = quantile(a, l.quantile)
########################################## PERIODIC ##########################################
# f: ℜxℜ -> ℜ
# f(u,a) = w * (1 - cos((a-u)*(2*pi)/T))
# this measures how far away u and a are on a circle of circumference T.
mutable struct PeriodicLoss<:DiffLoss
T::Float64 # the length of the period
scale::Float64
domain::Domain
end
PeriodicLoss(T, scale=1.0::Float64; domain=PeriodicDomain(T)) = PeriodicLoss(T, scale, domain)
evaluate(l::PeriodicLoss, u::Float64, a::Number) = l.scale*(1-cos((a-u)*(2*pi)/l.T))
grad(l::PeriodicLoss, u::Float64, a::Number) = -l.scale*((2*pi)/l.T)*sin((a-u)*(2*pi)/l.T)
function M_estimator(l::PeriodicLoss, a::AbstractArray{Float64})
(l.T/(2*pi))*atan( sum(sin.(2*pi*a/l.T)) / sum(cos.(2*pi*a/l.T)) ) + l.T/2 # not kidding.
# this is the estimator, and there is a form that works with weighted measurements (aka a prior on a)
# see: http://www.tandfonline.com/doi/pdf/10.1080/17442507308833101 eq. 5.2
end
########################################## POISSON ##########################################
# f: ℜxℕ -> ℜ
# BEWARE:
# 1) this is a reparametrized poisson: we parametrize the mean as exp(u) so that u can take any real value and still produce a positive mean
# 2) THIS LOSS MAY CAUSE MODEL INSTABLITY AND DIFFICULTY FITTING.
mutable struct PoissonLoss<:Loss
scale::Float64
domain::Domain
end
PoissonLoss(max_count=2^31::Int; domain=CountDomain(max_count)::Domain) = PoissonLoss(1.0, domain)
function evaluate(l::PoissonLoss, u::Float64, a::Number)
l.scale*(exp(u) - a*u + (a==0 ? 0 : a*(log(a)-1))) # log(a!) ~ a==0 ? 0 : a*(log(a)-1)
end
grad(l::PoissonLoss, u::Float64, a::Number) = l.scale*(exp(u) - a)
M_estimator(l::PoissonLoss, a::AbstractArray) = log(mean(a))
########################################## ORDINAL HINGE ##########################################
# f: ℜx{min, min+1... max-1, max} -> ℜ
mutable struct OrdinalHingeLoss<:Loss
min::Integer
max::Integer
scale::Float64
domain::Domain
end
OrdinalHingeLoss(m1, m2, scale=1.0::Float64; domain=OrdinalDomain(m1,m2)) = OrdinalHingeLoss(m1,m2,scale,domain)
# this method should never be called directly but is needed to support copying
OrdinalHingeLoss() = OrdinalHingeLoss(1, 10, 1.0, OrdinalDomain(1,10))
OrdinalHingeLoss(m2) = OrdinalHingeLoss(1, m2, 1.0, OrdinalDomain(1, m2))
function evaluate(l::OrdinalHingeLoss, u::Float64, a::Number)
#a = round(a)
if u > l.max-1
# number of levels higher than true level
n = min(floor(u), l.max-1) - a
loss = n*(n+1)/2 + (n+1)*(u-l.max+1)
elseif u > a
# number of levels higher than true level
n = min(floor(u), l.max) - a
loss = n*(n+1)/2 + (n+1)*(u-floor(u))
elseif u > l.min+1
# number of levels lower than true level
n = a - max(ceil(u), l.min+1)
loss = n*(n+1)/2 + (n+1)*(ceil(u)-u)
else
# number of levels lower than true level
n = a - max(ceil(u), l.min+1)
loss = n*(n+1)/2 + (n+1)*(l.min+1-u)
end
return l.scale*loss
end
function grad(l::OrdinalHingeLoss, u::Float64, a::Number)
#a = round(a)
if u > a
# number of levels higher than true level
n = min(ceil(u), l.max) - a
g = n
else
# number of levels lower than true level
n = a - max(floor(u), l.min)
g = -n
end
return l.scale*g
end
M_estimator(l::OrdinalHingeLoss, a::AbstractArray) = median(a)
########################################## LOGISTIC ##########################################
# f: ℜx{-1,1}-> ℜ
mutable struct LogisticLoss<:ClassificationLoss
scale::Float64
domain::Domain
end
LogisticLoss(scale=1.0::Float64; domain=BoolDomain()) = LogisticLoss(scale, domain)
evaluate(l::LogisticLoss, u::Float64, a::Bool) = l.scale*log(1+exp(-(2a-1)*u))
grad(l::LogisticLoss, u::Float64, a::Bool) = (aa = 2a-1; -aa*l.scale/(1+exp(aa*u)))
function M_estimator(l::LogisticLoss, a::AbstractArray{Bool,1})
d, N = sum(a), length(a)
log(N + d) - log(N - d) # very satisfying
end
########################################## WEIGHTED HINGE ##########################################
# f: ℜx{-1,1} -> ℜ
# f(u,a) = { w * max(1-a*u, 0) for a = -1
# = { c * w * max(1-a*u, 0) for a = 1
mutable struct WeightedHingeLoss<:ClassificationLoss
scale::Float64
domain::Domain
case_weight_ratio::Float64 # >1 for trues to have more confidence than falses, <1 for opposite
end
WeightedHingeLoss(scale=1.0; domain=BoolDomain(), case_weight_ratio=1.0) =
WeightedHingeLoss(scale, domain, case_weight_ratio)
HingeLoss(scale=1.0::Float64; kwargs...) = WeightedHingeLoss(scale; kwargs...) # the standard HingeLoss is a special case of WeightedHingeLoss
function evaluate(l::WeightedHingeLoss, u::Float64, a::Bool)
loss = l.scale*max(1-(2*a-1)*u, 0)
if l.case_weight_ratio !==1. && a
loss *= l.case_weight_ratio
end
return loss
end
function grad(l::WeightedHingeLoss, u::Float64, a::Bool)
an = (2*a-1) # change to {-1,1}
g = (an*u>=1 ? 0 : -an*l.scale)
if l.case_weight_ratio !==1. && a
g *= l.case_weight_ratio
end
return g
end
function M_estimator(l::WeightedHingeLoss, a::AbstractArray{Bool,1})
r = length(a)/length(filter(x->x>0, a)) - 1
if l.case_weight_ratio > r
m = 1.0
elseif l.case_weight_ratio == r
m = 0.0
else
m = -1.0
end
end
########################################## MULTINOMIAL ##########################################
# f: ℜx{1, 2, ..., max-1, max} -> ℜ
# f computes the (negative log likelihood of the) multinomial logit,
# often known as the softmax function
# f(u, a) = exp(u[a]) / (sum_{a'} exp(u[a']))
# = 1 / (sum_{a'} exp(u[a'] - u[a]))
mutable struct MultinomialLoss<:Loss
max::Integer
scale::Float64
domain::Domain
end
MultinomialLoss(m, scale=1.0::Float64; domain=CategoricalDomain(m)) = MultinomialLoss(m,scale,domain)
embedding_dim(l::MultinomialLoss) = l.max
datalevels(l::MultinomialLoss) = 1:l.max # levels are encoded as the numbers 1:l.max
# in Julia v0.4, argument u is a row vector (row slice of a matrix), which in julia is 2d
# function evaluate(l::MultinomialLoss, u::Array{Float64,2}, a::Int)
# this breaks compatibility with v0.4
function evaluate(l::MultinomialLoss, u::Array{Float64,1}, a::Int)
sumexp = 0 # inverse likelihood of observation
# computing soft max directly is numerically unstable
# instead note logsumexp(a_j) = logsumexp(a_j - M) + M
# and we'll pick a good big (but not too big) M
M = maximum(u) - u[a] # prevents overflow
for j in 1:length(u)
sumexp += exp(u[j] - u[a] - M)
end
loss = log(sumexp) + M
return l.scale*loss
end
# in Julia v0.4, argument u is a row vector (row slice of a matrix), which in julia is 2d
# function grad(l::MultinomialLoss, u::Array{Float64,2}, a::Int)
# this breaks compatibility with v0.4
function grad(l::MultinomialLoss, u::Array{Float64,1}, a::Int)
g = zeros(size(u))
# Using some nice algebra, you can show
g[a] = -1
# and g[b] = -1/sum_{a' \in S} exp(u[b] - u[a'])
# the contribution of one observation to one entry of the gradient
# is always between -1 and 0
for j in 1:length(u)
M = maximum(u) - u[j] # prevents overflow
sumexp = 0
for jp in 1:length(u)
sumexp += exp(u[jp] - u[j] - M)
end
g[j] += exp(-M)/sumexp
end
return l.scale*g
end
## we'll compute it via a stochastic gradient method
## with fixed step size
function M_estimator(l::MultinomialLoss, a::AbstractArray)
u = zeros(l.max) # a vector, so it dispatches to the grad method above
for i = 1:length(a)
ai = a[i]
u -= .1*grad(l, u, ai)
end
return u
end
########################################## One vs All loss ##########################################
# f: ℜx{1, 2, ..., max-1, max} -> ℜ
mutable struct OvALoss<:Loss
max::Integer
bin_loss::Loss
scale::Float64
domain::Domain
end
OvALoss(m::Integer, scale::Float64=1.0; domain=CategoricalDomain(m), bin_loss::Loss=LogisticLoss(scale)) = OvALoss(m,bin_loss,scale,domain)
OvALoss() = OvALoss(1) # for copying correctly
embedding_dim(l::OvALoss) = l.max
datalevels(l::OvALoss) = 1:l.max # levels are encoded as the numbers 1:l.max
# in Julia v0.4, argument u is a row vector (row slice of a matrix), which in julia is 2d
# function evaluate(l::OvALoss, u::Array{Float64,2}, a::Int)
# this breaks compatibility with v0.4
function evaluate(l::OvALoss, u::Array{Float64,1}, a::Int)
loss = 0
for j in 1:length(u)
loss += evaluate(l.bin_loss, u[j], a==j)
end
return l.scale*loss
end
# in Julia v0.4, argument u is a row vector (row slice of a matrix), which in julia is 2d
# function grad(l::OvALoss, u::Array{Float64,2}, a::Int)
# this breaks compatibility with v0.4
function grad(l::OvALoss, u::Array{Float64,1}, a::Int)
g = zeros(length(u))
for j in 1:length(u)
g[j] = grad(l.bin_loss, u[j], a==j)
end
return l.scale*g
end
function M_estimator(l::OvALoss, a::AbstractArray)
u = zeros(l.max)
for j = 1:l.max
u[j] = M_estimator(l.bin_loss, a.==j)
end
return u
end
########################################## Bigger vs Smaller loss ##########################################
# f: ℜx{1, 2, ..., max-1} -> ℜ
mutable struct BvSLoss<:Loss
max::Integer
bin_loss::Loss
scale::Float64
domain::Domain
end
function BvSLoss(m::Integer, scale::Float64=1.0; domain=OrdinalDomain(1,m), bin_loss::Loss=LogisticLoss(scale))
@assert(m >= 2, "Number of levels of ordinal variable must be at least 2; got $m.")
BvSLoss(m,bin_loss,scale,domain)
end
BvSLoss() = BvSLoss(10) # for copying correctly
embedding_dim(l::BvSLoss) = l.max-1
datalevels(l::BvSLoss) = 1:l.max # levels are encoded as the numbers 1:l.max
function evaluate(l::BvSLoss, u::Array{Float64,1}, a::Int)
loss = 0
for j in 1:length(u)
loss += evaluate(l.bin_loss, u[j], a>j)
end
return l.scale*loss
end
function grad(l::BvSLoss, u::Array{Float64,1}, a::Int)
g = zeros(length(u))
for j in 1:length(u)
g[j] = grad(l.bin_loss, u[j], a>j)
end
return l.scale*g
end
function M_estimator(l::BvSLoss, a::AbstractArray)
u = zeros(l.max)
for j = 1:l.max-1
u[j] = M_estimator(l.bin_loss, a.>j)
end
return u
end
########################################## ORDERED LOGISTIC ##########################################
# f: ℜx{1, 2, ..., max-1, max} -> ℜ
# f computes the (negative log likelihood of the) multinomial logit,
# often known as the softmax function
# f(u, a) = exp(u[a]) / (sum_{a'} exp(u[a']))
mutable struct OrdisticLoss<:Loss
max::Integer
scale::Float64
domain::Domain
end
OrdisticLoss(m::Int, scale=1.0::Float64; domain=OrdinalDomain(1,m)) = OrdisticLoss(m,scale,domain)
embedding_dim(l::OrdisticLoss) = l.max
datalevels(l::OrdisticLoss) = 1:l.max # levels are encoded as the numbers 1:l.max
function evaluate(l::OrdisticLoss, u::Array{Float64,1}, a::Int)
diffusquared = u[a]^2 .- u.^2
M = maximum(diffusquared)
invlik = sum(exp, (diffusquared .- M))
loss = M + log(invlik)
return l.scale*loss
end
function grad(l::OrdisticLoss, u::Array{Float64,1}, a::Int)
g = zeros(size(u))
# Using some nice algebra, you can show
g[a] = 2*u[a]
sumexp = sum(map(j->exp(- u[j]^2), 1:length(u)))
for j in 1:length(u)
diffusquared = u[j]^2 .- u.^2
M = maximum(diffusquared)
invlik = sum(exp,(diffusquared .- M))
g[j] -= 2 * u[j] * exp(- M) / invlik
end
return l.scale*g
end
## we'll compute it via a stochastic gradient method
## with fixed step size
function M_estimator(l::OrdisticLoss, a::AbstractArray)
u = zeros(l.max) # a vector, so it dispatches to the grad method above
for i = 1:length(a)
ai = a[i]
u -= .1*grad(l, u, ai)
end
return u
end
#################### Multinomial Ordinal Logit #####################
# l: ℜ^{max-1} x {1, 2, ..., max-1, max} -> ℜ
# l computes the (negative log likelihood of the) multinomial ordinal logit.
#
# the length of the first argument u is one less than
# the number of levels of the second argument a,
# since the entries of u correspond to the division between each level
# and the one above it.
#
# XXX warning XXX
# the documentation in the comment below this point is defunct
#
# To yield a sensible pdf, the entries of u should be increasing
# (b/c they're basically the -log of the cdf at the boundary between each level)
#
# The multinomial ordinal logit corresponds to a likelihood p with
# p(u, a > i) ~ exp(-u[i]), so
# p(u, a) ~ exp(-u[1]) * ... * exp(-u[a-1]) * exp(u[a]) * ... * exp(u[end])
# = exp(- u[1] - ... - u[a-1] + u[a] + ... + u[end])
# and normalizing,
# p(u, a) = p(u, a) / sum_{a'} p(u, a')
#
# So l(u, a) = -log(p(u, a))
# = u[1] + ... + u[a-1] - u[a] - ... - u[end] +
# log(sum_{a'}(exp(u[1] + ... + u[a'-1] - u[a'] - ... - u[end])))
#
# Inspection of this loss function confirms that given u,
# the most probable value a is the index of the first
# positive entry of u
mutable struct MultinomialOrdinalLoss<:Loss
max::Integer
scale::Float64
domain::Domain
end
MultinomialOrdinalLoss(m::Int, scale=1.0::Float64; domain=OrdinalDomain(1,m)) = MultinomialOrdinalLoss(m,scale,domain)
MultinomialOrdinalLoss() = MultinomialOrdinalLoss(10) # for copying
embedding_dim(l::MultinomialOrdinalLoss) = l.max - 1
datalevels(l::MultinomialOrdinalLoss) = 1:l.max # levels are encoded as the numbers 1:l.max
function enforce_MNLOrdRules!(u; TOL=1e-3)
u[1] = min(-TOL, u[1])
for j=2:length(u)
u[j] = min(u[j], u[j-1]-TOL)
end
u
end
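# e.g. enforce_MNLOrdRules!([0.5, 0.2, 0.4]) yields [-0.001, -0.002, -0.003]
# (with the default TOL=1e-3): entries become negative and strictly decreasing,
# so the differences of exp.(u) below form a valid probability vector.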
# argument u is a row slice of a matrix (a 1d vector in julia 1.x)
# todo: increase numerical stability
function evaluate(l::MultinomialOrdinalLoss, u::Array{Float64,1}, a::Int)
enforce_MNLOrdRules!(u)
if a == 1
return -l.scale*log(exp(0) - exp(u[1])) # (log(1 - exp(u[a] - 1)))
elseif a == l.max
return -l.scale*u[a-1]
else
return -l.scale*log(exp(u[a-1]) - exp(u[a])) # (u[a-1] + log(1 - exp(u[a] - u[a-1])))
end
end
# argument u is a row slice of a matrix (a 1d vector in julia 1.x)
function grad(l::MultinomialOrdinalLoss, u::Array{Float64,1}, a::Int)
enforce_MNLOrdRules!(u)
g = zeros(size(u))
if a == 1
g[1] = -exp(u[1])/(exp(0) - exp(u[1]))
# g[1] = 1/(1 - exp(-u[1]))
elseif a == l.max
g[a-1] = 1
else
# d = exp(u[a] - u[a-1])
# g[a] = d/(1-d)
# g[a-1] = - g[a] - 1
g[a] = -exp(u[a])/(exp(u[a-1]) - exp(u[a]))
g[a-1] = exp(u[a-1])/(exp(u[a-1]) - exp(u[a]))
end
return -l.scale*g
end
## we'll compute it via a stochastic gradient method
## with fixed step size
## (we don't need a hyper accurate estimate for this)
function M_estimator(l::MultinomialOrdinalLoss, a::AbstractVector)
u = zeros(l.max-1) # a vector, so it dispatches to the grad method above
for i = 1:length(a)
ai = a[i]
u -= .1*grad(l, u, ai)
end
return u
end
### convenience methods for evaluating and computing gradients on vectorized arguments
function evaluate(l::Loss, u::Array{Float64,1}, a::AbstractVector)
@assert size(u) == size(a)
out = 0
for i=1:length(a)
out += evaluate(l, u[i], a[i])
end
return out
end
#Optimized vector evaluate on single-dimensional losses
function evaluate(l::SingleDimLoss, u::Vector{Float64}, a::AbstractVector)
losseval = (x::Float64, y::Number) -> evaluate(l, x, y)
mapped = fill!(similar(u),0.)
map!(losseval, mapped, u, a)
reduce(+, mapped)
end
# now for multidimensional losses
function evaluate(l::Loss, u::Array{Float64,2}, a::AbstractVector)
# @show size(u,1)
# @show size(a)
@assert size(u,1) == length(a)
out = 0
for i=1:length(a)
out += evaluate(l, u[i,:], a[i])
end
return out
end
function grad(l::Loss, u::Array{Float64,1}, a::AbstractVector)
@assert size(u) == size(a)
mygrad = zeros(size(u))
for i=1:length(a)
mygrad[i] = grad(l, u[i], a[i])
end
return mygrad
end
# Optimized vector grad on single-dimensional losses
function grad(l::SingleDimLoss, u::Vector{Float64}, a::AbstractVector)
lossgrad = (x::Float64,y::Number) -> grad(l, x, y)
mapped = fill!(similar(u),0.)
map!(lossgrad, mapped, u, a)
end
# now for multidimensional losses
function grad(l::Loss, u::Array{Float64,2}, a::AbstractVector)
@assert size(u,1) == length(a)
mygrad = zeros(size(u))
for i=1:length(a)
mygrad[i,:] = grad(l, u[i,:], a[i])
end
return mygrad
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3669 | export sort_observations, add_offset!, fix_latent_features!,
equilibrate_variance!, prob_scale!
### OBSERVATION TUPLES TO ARRAYS
function sort_observations(obs::Union{Array{CartesianIndex{2},1},Array{Tuple{Int,Int},1}}, m::Int, n::Int; check_empty=false)
observed_features = Array{Int,1}[Int[] for i=1:m]
observed_examples = Array{Int,1}[Int[] for j=1:n]
for obsij in obs
i,j = obsij[1], obsij[2]
push!(observed_features[i],j)
push!(observed_examples[j],i)
end
if check_empty && (any(map(x->length(x)==0,observed_examples)) ||
any(map(x->length(x)==0,observed_features)))
error("Every row and column must contain at least one observation")
end
return observed_features, observed_examples
end
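# Minimal sketch: for a 2x2 matrix with only the diagonal observed,
#   sort_observations([(1,1), (2,2)], 2, 2)
# returns ([[1], [2]], [[1], [2]]): row 1 observes feature 1, row 2 observes
# feature 2, and symmetrically for observed_examples.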
### SCALINGS AND OFFSETS ON GLRM
function add_offset!(glrm::AbstractGLRM)
glrm.rx, glrm.ry = map(lastentry1, glrm.rx), map(lastentry_unpenalized, glrm.ry)
return glrm
end
function fix_latent_features!(glrm::AbstractGLRM, n)
glrm.ry = Regularizer[fixed_latent_features(glrm.ry[i], glrm.Y[1:n,i])
for i in 1:length(glrm.ry)]
return glrm
end
## equilibrate variance
# scale all columns inversely proportional to mean value of loss function
# makes sense when all loss functions used are nonnegative
function equilibrate_variance!(glrm::AbstractGLRM, columns_to_scale = 1:size(glrm.A,2))
for i in columns_to_scale
nomissing = glrm.A[glrm.observed_examples[i],i]
if length(nomissing)>0
varlossi = avgerror(glrm.losses[i], nomissing)
varregi = var(nomissing) # TODO make this depend on the kind of regularization; this assumes QuadLoss
else
varlossi = 1
varregi = 1
end
if varlossi > 0
# rescale the losses and regularizers for each column by the inverse of the empirical variance
mul!(glrm.losses[i], scale(glrm.losses[i])/varlossi)
end
if varregi > 0
mul!(glrm.ry[i], scale(glrm.ry[i])/varregi)
end
end
return glrm
end
## probabilistic scaling
# scale loss function to fit -loglik of joint distribution
# makes sense when all functions used are -logliks of sensible distributions
# todo: option to scale to account for nonuniform sampling in rows or columns or both
# skipmissing(Array with missing) gives an iterator.
function prob_scale!(glrm, columns_to_scale = 1:size(glrm.A,2))
for i in columns_to_scale
nomissing = glrm.A[glrm.observed_examples[i],i]
if typeof(glrm.losses[i]) == QuadLoss && length(nomissing) > 0
varlossi = var(skipmissing(glrm.A[:,i])) # estimate the variance
if varlossi > TOL
mul!(glrm.losses[i], 1/(2*varlossi)) # this is the correct -loglik of gaussian with variance fixed at estimate
else
@warn("column $i has a variance of $varlossi; not scaling it to avoid dividing by zero.")
end
elseif typeof(glrm.losses[i]) == HuberLoss && length(nomissing) > 0
varlossi = avgerror(glrm.losses[i], glrm.A[:,i]) # estimate the width of the distribution
if varlossi > TOL
mul!(glrm.losses[i], 1/(2*varlossi)) # this is not the correct -loglik of huber with estimates for variance and mean of poisson, but that's probably ok
else
@warn("column $i has a variance of $varlossi; not scaling it to avoid dividing by zero.")
end
else # none of the other distributions have any free parameters to estimate, so this is the correct -loglik
mul!(glrm.losses[i], 1)
end
end
return glrm
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 615 | #module Plot
import Gadfly
import DataFrames: DataFrame
export plot
function plot(df::DataFrame, xs::Symbol, ys::Array{Symbol, 1}; scale = :linear, filename=nothing, height=3, width=6)
dflong = vcat(map(l->stack(df,l,xs),ys)...)
if scale ==:log
p = Gadfly.plot(dflong,x=xs,y=:value,color=:variable,Gadfly.Scale.y_log10)
else
p = Gadfly.plot(dflong,x=xs,y=:value,color=:variable)
end
if filename !== nothing
println("saving figure in $filename")
Gadfly.draw(Gadfly.PDF(filename, width*Gadfly.inch, height*Gadfly.inch), p)
end
return p
end
#end # module | MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 15806 | # Predefined regularizers
# You may also implement your own regularizer by subtyping
# the abstract type Regularizer.
# Regularizers should implement `evaluate` and `prox`.
import Base: *
export Regularizer, ProductRegularizer, # abstract types
# concrete regularizers
QuadReg, QuadConstraint,
OneReg, ZeroReg, NonNegConstraint, NonNegOneReg, NonNegQuadReg,
OneSparseConstraint, UnitOneSparseConstraint, SimplexConstraint,
KSparseConstraint,
lastentry1, lastentry_unpenalized,
fixed_latent_features, FixedLatentFeaturesConstraint,
fixed_last_latent_features, FixedLastLatentFeaturesConstraint,
OrdinalReg, MNLOrdinalReg,
RemQuadReg,
# methods on regularizers
prox!, prox,
# utilities
scale, mul!, *
# numerical tolerance
TOL = 1e-12
# regularizers
# regularizers r should have the method `prox` defined such that
# prox(r)(u,alpha) = argmin_x( alpha r(x) + 1/2 \|x - u\|_2^2)
abstract type Regularizer end
abstract type MatrixRegularizer <: LowRankModels.Regularizer end
# default inplace prox operator (slower than if inplace prox is implemented)
prox!(r::Regularizer,u::AbstractArray,alpha::Number) = (v = prox(r,u,alpha); @simd for i=1:length(u) @inbounds u[i]=v[i] end; u)
# default scaling
scale(r::Regularizer) = r.scale
mul!(r::Regularizer, newscale::Number) = (r.scale = newscale; r)
mul!(rs::Array{Regularizer}, newscale::Number) = (for r in rs mul!(r, newscale) end; rs)
*(newscale::Number, r::Regularizer) = (newr = typeof(r)(); mul!(newr, scale(r)*newscale); newr)
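# Illustrative sketch of a custom regularizer obeying the prox interface above.
# The name HalfQuadReg is hypothetical (a variant of the QuadReg defined below,
# with r(x) = scale/2 * ||x||^2, so prox(r)(u,alpha) = u/(1+alpha*scale)):
#   mutable struct HalfQuadReg<:Regularizer
#       scale::Float64
#   end
#   HalfQuadReg() = HalfQuadReg(1)
#   evaluate(r::HalfQuadReg, a::AbstractArray) = r.scale/2*sum(abs2, a)
#   prox(r::HalfQuadReg, u::AbstractArray, alpha::Number) = 1/(1+alpha*r.scale)*u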
## utilities
function allnonneg(a::AbstractArray)
for ai in a
ai < 0 && return false
end
return true
end
## Quadratic regularization
mutable struct QuadReg<:Regularizer
scale::Float64
end
QuadReg() = QuadReg(1)
prox(r::QuadReg,u::AbstractArray,alpha::Number) = 1/(1+2*alpha*r.scale)*u
prox!(r::QuadReg,u::Array{Float64},alpha::Number) = rmul!(u, 1/(1+2*alpha*r.scale))
evaluate(r::QuadReg,a::AbstractArray) = r.scale*sum(abs2, a)
## constrained quadratic regularization
## the function r such that
## r(x) = inf if norm(x) > max_2norm
## 0 otherwise
## can be used to implement maxnorm regularization:
## constraining the maxnorm of XY to be <= mu is achieved
## by setting glrm.rx = QuadConstraint(sqrt(mu))
## and the same for every element of glrm.ry
mutable struct QuadConstraint<:Regularizer
max_2norm::Float64
end
QuadConstraint() = QuadConstraint(1)
## prox of an indicator function is projection: rescale only points outside the ball
prox(r::QuadConstraint,u::AbstractArray,alpha::Number) = norm(u) > r.max_2norm ? (r.max_2norm/norm(u))*u : u
prox!(r::QuadConstraint,u::Array{Float64},alpha::Number) = (n = norm(u); n > r.max_2norm && rmul!(u, r.max_2norm/n); u)
evaluate(r::QuadConstraint,u::AbstractArray) = norm(u) > r.max_2norm + TOL ? Inf : 0
scale(r::QuadConstraint) = 1
mul!(r::QuadConstraint, newscale::Number) = 1
## one norm regularization
mutable struct OneReg<:Regularizer
scale::Float64
end
OneReg() = OneReg(1)
function softthreshold(x::Number; alpha=1)
return max(x-alpha,0) + min(x+alpha,0)
end
prox(r::OneReg,u::AbstractArray,alpha::Number) = (st(x) = softthreshold(x; alpha=r.scale*alpha); st.(u))
prox!(r::OneReg,u::AbstractArray,alpha::Number) = (st(x) = softthreshold(x; alpha=r.scale*alpha); map!(st, u, u))
evaluate(r::OneReg,a::AbstractArray) = r.scale*sum(abs,a)
## no regularization
mutable struct ZeroReg<:Regularizer
end
prox(r::ZeroReg,u::AbstractArray,alpha::Number) = u
prox!(r::ZeroReg,u::Array{Float64},alpha::Number) = u
evaluate(r::ZeroReg,a::AbstractArray) = 0
scale(r::ZeroReg) = 0
mul!(r::ZeroReg, newscale::Number) = 0
## indicator of the nonnegative orthant
## (enforces nonnegativity, eg for nonnegative matrix factorization)
mutable struct NonNegConstraint<:Regularizer
end
prox(r::NonNegConstraint,u::AbstractArray,alpha::Number=1) = broadcast(max,u,0)
prox!(r::NonNegConstraint,u::Array{Float64},alpha::Number=1) = (@simd for i=1:length(u) @inbounds u[i] = max(u[i], 0) end; u)
function evaluate(r::NonNegConstraint,a::AbstractArray)
for ai in a
if ai<0
return Inf
end
end
return 0
end
scale(r::NonNegConstraint) = 1
mul!(r::NonNegConstraint, newscale::Number) = 1
## one norm regularization restricted to nonnegative orthant
## (enforces nonnegativity, in addition to one norm regularization)
mutable struct NonNegOneReg<:Regularizer
scale::Float64
end
NonNegOneReg() = NonNegOneReg(1)
prox(r::NonNegOneReg,u::AbstractArray,alpha::Number) = max.(u .- r.scale*alpha, 0)
prox!(r::NonNegOneReg,u::AbstractArray,alpha::Number) = begin
nonnegsoftthreshold = (x::Number -> max(x - r.scale*alpha, 0))
map!(nonnegsoftthreshold, u, u)
end
function evaluate(r::NonNegOneReg,a::AbstractArray)
for ai in a
if ai<0
return Inf
end
end
return r.scale*sum(a)
end
scale(r::NonNegOneReg) = r.scale
mul!(r::NonNegOneReg, newscale::Number) = (r.scale = newscale; r)
## Quadratic regularization restricted to nonnegative domain
## (Enforces nonnegativity alongside quadratic regularization)
mutable struct NonNegQuadReg<:Regularizer
scale::Float64
end
NonNegQuadReg() = NonNegQuadReg(1)
prox(r::NonNegQuadReg,u::AbstractArray,alpha::Number) = max.(1/(1+2*alpha*r.scale)*u, 0)
prox!(r::NonNegQuadReg,u::AbstractArray,alpha::Number) = begin
rmul!(u, 1/(1+2*alpha*r.scale))
maxval = maximum(u)
clamp!(u, 0, maxval)
end
function evaluate(r::NonNegQuadReg,a::AbstractArray)
for ai in a
if ai<0
return Inf
end
end
return r.scale*sum(abs2, a)
end
## indicator of the last entry being equal to 1
## (allows an unpenalized offset term into the glrm when used in conjunction with lastentry_unpenalized)
mutable struct lastentry1<:Regularizer
r::Regularizer
end
lastentry1() = lastentry1(ZeroReg())
prox(r::lastentry1,u::AbstractArray{Float64,1},alpha::Number=1) = [prox(r.r,view(u,1:length(u)-1),alpha); 1]
prox!(r::lastentry1,u::AbstractArray{Float64,1},alpha::Number=1) = (prox!(r.r,view(u,1:length(u)-1),alpha); u[end]=1; u)
prox(r::lastentry1,u::AbstractArray{Float64,2},alpha::Number=1) = [prox(r.r,view(u,1:size(u,1)-1,:),alpha); ones(1, size(u,2))]
prox!(r::lastentry1,u::AbstractArray{Float64,2},alpha::Number=1) = (prox!(r.r,view(u,1:size(u,1)-1,:),alpha); u[end,:].=1; u)
evaluate(r::lastentry1,a::AbstractArray{Float64,1}) = (a[end]==1 ? evaluate(r.r,a[1:end-1]) : Inf)
evaluate(r::lastentry1,a::AbstractArray{Float64,2}) = (all(a[end,:].==1) ? evaluate(r.r,a[1:end-1,:]) : Inf)
scale(r::lastentry1) = scale(r.r)
mul!(r::lastentry1, newscale::Number) = mul!(r.r, newscale)
## makes the last entry unpenalized
## (allows an unpenalized offset term into the glrm when used in conjunction with lastentry1)
mutable struct lastentry_unpenalized<:Regularizer
r::Regularizer
end
lastentry_unpenalized() = lastentry_unpenalized(ZeroReg())
prox(r::lastentry_unpenalized,u::AbstractArray{Float64,1},alpha::Number=1) = [prox(r.r,u[1:end-1],alpha); u[end]]
prox!(r::lastentry_unpenalized,u::AbstractArray{Float64,1},alpha::Number=1) = (prox!(r.r,view(u,1:size(u,1)-1),alpha); u)
evaluate(r::lastentry_unpenalized,a::AbstractArray{Float64,1}) = evaluate(r.r,a[1:end-1])
prox(r::lastentry_unpenalized,u::AbstractArray{Float64,2},alpha::Number=1) = [prox(r.r,u[1:end-1,:],alpha); u[end,:]]
prox!(r::lastentry_unpenalized,u::AbstractArray{Float64,2},alpha::Number=1) = (prox!(r.r,view(u,1:size(u,1)-1,:),alpha); u)
evaluate(r::lastentry_unpenalized,a::AbstractArray{Float64,2}) = evaluate(r.r,a[1:end-1,:])
scale(r::lastentry_unpenalized) = scale(r.r)
mul!(r::lastentry_unpenalized, newscale::Number) = mul!(r.r, newscale)
## fixes the values of the first n elements of the column to be y
## optionally regularizes the last k-n elements with regularizer r
mutable struct fixed_latent_features<:Regularizer
r::Regularizer
y::Array{Float64,1} # the values of the fixed latent features
n::Int # length of y
end
fixed_latent_features(r::Regularizer, y::Array{Float64,1}) = fixed_latent_features(r,y,length(y))
# standalone use without another regularizer
FixedLatentFeaturesConstraint(y::Array{Float64, 1}) = fixed_latent_features(ZeroReg(),y,length(y))
prox(r::fixed_latent_features,u::AbstractArray,alpha::Number) = [r.y; prox(r.r,u[(r.n+1):end],alpha)]
function prox!(r::fixed_latent_features,u::Array{Float64},alpha::Number)
  prox!(r.r, view(u, (r.n+1):length(u)), alpha) # prox the free entries in place
  u[1:r.n] = r.y
  u
end
evaluate(r::fixed_latent_features, a::AbstractArray) = a[1:r.n]==r.y ? evaluate(r.r, a[(r.n+1):end]) : Inf
scale(r::fixed_latent_features) = scale(r.r)
mul!(r::fixed_latent_features, newscale::Number) = mul!(r.r, newscale)
## fixes the values of the last n elements of the column to be y
## optionally regularizes the first k-n elements with regularizer r
mutable struct fixed_last_latent_features<:Regularizer
r::Regularizer
y::Array{Float64,1} # the values of the fixed latent features
n::Int # length of y
end
fixed_last_latent_features(r::Regularizer, y::Array{Float64,1}) = fixed_last_latent_features(r,y,length(y))
# standalone use without another regularizer
FixedLastLatentFeaturesConstraint(y::Array{Float64, 1}) = fixed_last_latent_features(ZeroReg(),y,length(y))
prox(r::fixed_last_latent_features,u::AbstractArray,alpha::Number) = [prox(r.r,u[1:(end-r.n)],alpha); r.y]
function prox!(r::fixed_last_latent_features,u::Array{Float64},alpha::Number)
  u[length(u)-r.n+1:end] = r.y
  prox!(r.r, view(u, 1:length(u)-r.n), alpha) # prox the free entries in place
  u
end
evaluate(r::fixed_last_latent_features, a::AbstractArray) = a[length(a)-r.n+1:end]==r.y ? evaluate(r.r, a[1:length(a)-r.n]) : Inf
scale(r::fixed_last_latent_features) = scale(r.r)
mul!(r::fixed_last_latent_features, newscale::Number) = mul!(r.r, newscale)
## indicator of 1-sparse vectors
## (enforces that exact 1 entry is nonzero, eg for orthogonal NNMF)
mutable struct OneSparseConstraint<:Regularizer
end
prox(r::OneSparseConstraint, u::AbstractArray, alpha::Number=0) = (idx = argmax(u); v=zeros(size(u)); v[idx]=u[idx]; v)
prox!(r::OneSparseConstraint, u::Array, alpha::Number=0) = (idx = argmax(u); ui = u[idx]; rmul!(u,0); u[idx]=ui; u)
function evaluate(r::OneSparseConstraint, a::AbstractArray)
oneflag = false
for ai in a
if oneflag
if ai!=0
return Inf
end
else
if ai!=0
oneflag=true
end
end
end
return 0
end
scale(r::OneSparseConstraint) = 1
mul!(r::OneSparseConstraint, newscale::Number) = 1
## Indicator of k-sparse vectors
mutable struct KSparseConstraint<:Regularizer
k::Int
end
function evaluate(r::KSparseConstraint, a::AbstractArray)
k = r.k
nonzcount = 0
for ai in a
if nonzcount == k
if ai != 0
return Inf
end
else
if ai != 0
nonzcount += 1
end
end
end
return 0
end
function prox(r::KSparseConstraint, u::AbstractArray, alpha::Number)
k = r.k
ids = partialsortperm(u, 1:k, by=abs, rev=true)
uk = zero(u)
uk[ids] = u[ids]
uk
end
function prox!(r::KSparseConstraint, u::Array, alpha::Number)
k = r.k
ids = partialsortperm(u, 1:k, by=abs, rev=true)
vals = u[ids]
  rmul!(u,0)
u[ids] = vals
u
end
## indicator of 1-sparse unit vectors
## (enforces that exact 1 entry is 1 and all others are zero, eg for kmeans)
mutable struct UnitOneSparseConstraint<:Regularizer
end
prox(r::UnitOneSparseConstraint, u::AbstractArray, alpha::Number=0) = (idx = argmax(u); v=zeros(size(u)); v[idx]=1; v)
prox!(r::UnitOneSparseConstraint, u::Array, alpha::Number=0) = (idx = argmax(u); rmul!(u,0); u[idx]=1; u)
function evaluate(r::UnitOneSparseConstraint, a::AbstractArray)
oneflag = false
for ai in a
if ai==0
continue
elseif ai==1
if oneflag
return Inf
else
oneflag=true
end
else
return Inf
end
end
return 0
end
scale(r::UnitOneSparseConstraint) = 1
mul!(r::UnitOneSparseConstraint, newscale::Number) = 1
## indicator of vectors in the simplex: nonnegative vectors with unit l1 norm
## (eg for QuadLoss mixtures, ie soft kmeans)
## prox for the simplex is derived by Chen and Ye in [this paper](http://arxiv.org/pdf/1101.6081v2.pdf)
mutable struct SimplexConstraint<:Regularizer
end
function prox(r::SimplexConstraint, u::AbstractArray, alpha::Number=0)
n = length(u)
y = sort(u, rev=true)
ysum = cumsum(y)
t = (ysum[end]-1)/n
for i=1:(n-1)
if (ysum[i]-1)/i >= y[i+1]
t = (ysum[i]-1)/i
break
end
end
max.(u .- t, 0)
end
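# e.g. prox(SimplexConstraint(), [0.4, 0.8]) == [0.3, 0.7]: the threshold t = 0.1
# is subtracted from each entry, leaving a nonnegative vector that sums to one.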
function evaluate(r::SimplexConstraint,a::AbstractArray)
# check it's a unit vector
abs(sum(a)-1)>TOL && return Inf
# check every entry is nonnegative
for i=1:length(a)
a[i] < 0 && return Inf
end
return 0
end
scale(r::SimplexConstraint) = 1
mul!(r::SimplexConstraint, newscale::Number) = 1
## ordinal regularizer
## a block regularizer which
# 1) forces the first k-1 entries of each column to be the same
# 2) forces the last entry of each column to be increasing
# 3) applies an internal regularizer to the first k-1 entries of each column
## should always be used in conjunction with lastentry1 regularization on x
mutable struct OrdinalReg<:Regularizer
r::Regularizer
end
OrdinalReg() = OrdinalReg(ZeroReg())
prox(r::OrdinalReg,u::AbstractArray,alpha::Number) = (uc = copy(u); prox!(r,uc,alpha))
function prox!(r::OrdinalReg,u::AbstractArray,alpha::Number)
um = mean(u[1:end-1, :], dims=2)
prox!(r.r,um,alpha)
for i=1:size(u,1)-1
for j=1:size(u,2)
u[i,j] = um[i]
end
end
# this enforces rule 2) (increasing last row of u), but isn't exactly the prox function
# for j=2:size(u,2)
# if u[end,j-1] > u[end,j]
# m = (u[end,j-1] + u[end,j])/2
# u[end,j-1:j] = m
# end
# end
u
end
evaluate(r::OrdinalReg,a::AbstractArray) = evaluate(r.r,a[1:end-1,1])
scale(r::OrdinalReg) = scale(r.r)
mul!(r::OrdinalReg, newscale::Number) = mul!(r.r, newscale)
# make sure we don't add two offsets cuz that's weird
lastentry_unpenalized(r::OrdinalReg) = r
mutable struct MNLOrdinalReg<:Regularizer
r::Regularizer
end
MNLOrdinalReg() = MNLOrdinalReg(ZeroReg())
prox(r::MNLOrdinalReg,u::AbstractArray,alpha::Number) = (uc = copy(u); prox!(r,uc,alpha))
function prox!(r::MNLOrdinalReg,u::AbstractArray,alpha::Number; TOL=1e-3)
um = mean(u[1:end-1, :], dims=2)
prox!(r.r,um,alpha)
for i=1:size(u,1)-1
for j=1:size(u,2)
u[i,j] = um[i]
end
end
# this enforces rule 2) (decreasing last row of u, all less than 0), but isn't exactly the prox function
u[end,1] = min(-TOL, u[end,1])
for j=2:size(u,2)
u[end,j] = min(u[end,j], u[end,j-1]-TOL)
end
u
end
evaluate(r::MNLOrdinalReg,a::AbstractArray) = evaluate(r.r,a[1:end-1,1])
scale(r::MNLOrdinalReg) = scale(r.r)
mul!(r::MNLOrdinalReg, newscale::Number) = mul!(r.r, newscale)
# make sure we don't add two offsets cuz that's weird
lastentry_unpenalized(r::MNLOrdinalReg) = r
## Quadratic regularization with non-zero mean
mutable struct RemQuadReg<:Regularizer
scale::Float64
m::Array{Float64, 1}
end
RemQuadReg(m::Array{Float64, 1}) = RemQuadReg(1, m)
prox(r::RemQuadReg, u::AbstractArray, alpha::Number) =
(u + 2 * alpha * r.scale * r.m) / (1 + 2 * alpha * r.scale)
prox!(r::RemQuadReg, u::Array{Float64}, alpha::Number) = begin
	broadcast!(+, u, u, 2 * alpha * r.scale * r.m)
	rmul!(u, 1 / (1 + 2 * alpha * r.scale))
end
evaluate(r::RemQuadReg, a::AbstractArray) = r.scale * sum(abs2, a - r.m)
## simpler method for numbers, not arrays
evaluate(r::Regularizer, u::Number) = evaluate(r, [u])
prox(r::Regularizer, u::Number, alpha::Number) = prox(r, [u], alpha)[1]
# if step size not specified, step size = 1
prox(r::Regularizer, u) = prox(r, u, 1)
#### randomized SVD (from Jiahao Chen, based on http://arxiv.org/pdf/0909.4061.pdf)
import LinearAlgebra: SVD, svd!, qr, qr!
using Arpack: svds # `svds` (used in onepass_svd below) lives in Arpack.jl on Julia >= 0.7
#The simplest possible randomized svd
#Inputs
# A: input matrix
# n: Number of singular value/vector pairs to find
# p: Number of extra vectors to include in computation
function rsvd(A, n, p=0)
Q = rrange(A, n, p=p)
rsvd_direct(A, Q)
end
#Algorithm 4.4: randomized subspace iteration
#A must support size(A), multiply and transpose multiply
#p is the oversampling parameter
#q controls the accuracy of the subspace found; it is the "number of power iterations"
#A good heuristic is that when the original scheme produces a basis whose
#approximation error is within a factor C of the optimum, the power scheme produces
#an approximation error within C^(1/(2q+1)) of the optimum.
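#For example, with the default q=3, an error factor of C is reduced to roughly C^(1/7).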
function rrange(A, l::Integer; p::Integer=5, q::Integer=3)
    p≥0 || error("oversampling parameter p must be nonnegative")
m, n = size(A)
l <= m || error("Cannot find $l linearly independent vectors of $m x $n matrix")
Ω = randn(n, l+p)
Q = q_from_qr(A*Ω)
for t=1:q
Q = q_from_qr(A'*Q)
Q = q_from_qr(A*Q)
end
Q = p==0 ? Q : Q[:,1:l]
end
function q_from_qr(Y, l::Integer=-1)
    Q = Matrix(qr!(Y).Q) # thin Q factor (qrfact!/full are pre-1.0 API)
Q = l<0 ? Q : Q[:,1:l]
end
#Algorithm 5.1: direct SVD
#More accurate
function rsvd_direct(A, Q)
B=Q'A
    S = svd!(B)
    SVD(Q*S.U, S.S, S.Vt)
end
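#Usage sketch (illustrative): F = rsvd(A, 10, 5) returns an SVD object whose
#reconstruction F.U*Diagonal(F.S)*F.Vt approximates the best rank-10 approximation of A.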
function onepass_svd(A::AbstractArray, r::Int)
m, n = size(A)
k = 2r + 1
l = 4r + 3
Omega = randn(n,k)
Psi = randn(m,l)
Y = A*Omega
W = A'*Psi
    Q = Matrix(qr(view(Y,:,1:k)).Q) # thin m x k orthonormal basis for the range of Y[:,1:k]
B = view(W,:,1:l) / (Q'*view(Psi,:,1:l)) # Q's.Psi is k x l, its pinv is l x k, so B is n x k
mysvd,_ = svds(B, nsv=r) # U is n x r
    return SVD(Q*mysvd.V, mysvd.S, Matrix(mysvd.U')) # A ≈ Q*B' = (Q*V)*S*U'
end
# Supported domains: Real, Boolean, Ordinal, Periodic, Count
# The purpose of domains is to be able to sample over different possible values of `a` regardless of
# the loss that was used in the GLRM. The reason for doing this is to evaluate the performance of GLRMS.
# For instance, let's say we use PCA (QuadLoss losses) to model a binary data frame (not the best idea).
# In order to override the standard imputation with `sample(QuadLoss(), u)`, which assumes imputation over the reals,
# we can use `sample(BoolDomain(), QuadLoss(), u)` and see which of {-1,1} is best. The reason we want to be able to
# do this is to compare a baseline model (e.g. PCA) with a more logical model using heterogenous losses,
# yet still give each model the same amount of information regarding how imputation should be done.
# The domains themselves are defined in domains.jl
# In order to accomplish this we define a series of domains that describe how imputation should be performed over
# them. Each combination of domain and loss must have the following:
# Methods:
# `sample(D::my_Domain, l::my_loss_type, u::Float64) ::Float64`
# Samples aᵤ from among the range of possible values of a. The range of
# possible values of a should be implicitly or explicitly provided by `D`.
# There should be an sample method for every combination of datatype and loss.
# DataTypes are assigned to each column of the data and are not part of the low-rank model itself, they just serve
# as a way to evaluate the performance of the low-rank model.
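# For instance (illustrative), if u is the low-rank estimate of a single entry:
#     sample(RealDomain(), QuadLoss(), u)       # real-valued draw centered at u
#     sample(BoolDomain(), LogisticLoss(), u)   # Bernoulli draw from {true, false}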
import StatsBase: sample, Weights
export sample, sample_missing
########################################## REALS ##########################################
# Real data can take values from ℜ
# l.scale should be 1/var
sample(D::RealDomain, l::QuadLoss, u::Float64; noisevar=l.scale) = u + randn()/sqrt(noisevar)
########################################## BOOLS ##########################################
# Boolean data should take values from {true, false}
function sample(D::BoolDomain, l::LogisticLoss, u::Float64)
rand()<=(1/(1+exp(-u))) ? true : false
end
# generic method
# Evaluate w/ a=true and a=false and see which is better according to that loss.
# This is fast and works for any loss.
function sample(D::BoolDomain, l::Loss, u::AbstractArray)
prob = exp.(-[evaluate(l, u, i) for i in (true, false)])
    return sample([true, false], Weights(prob))
end
########################################## ORDINALS ##########################################
# Ordinal data should take integer values ranging from `min` to `max`
# a DiffLoss is one in which l(u,a) = f(u-a) AND argmin f(x) = 0
# for example, QuadLoss(u,a)=(u-a)² and we can write f(x)=x² and x=u-a
function sample(D::OrdinalDomain, l::DiffLoss, u::Float64)
uint = round(Int, u)
uclip = max(D.min, min(D.max, uint))
return uclip
end
# generic method
function sample(D::OrdinalDomain, l::Loss, u::AbstractArray)
prob = exp.(-[evaluate(l, u, i) for i in D.min:D.max])
    return sample(D.min:D.max, Weights(prob))
end
########################################## CATEGORICALS ##########################################
# Categorical data should take integer values ranging from 1 to `max`
function sample(D::CategoricalDomain, l::MultinomialLoss, u::Array{Float64})
return sample(Weights(exp.(u)))
end
# sample(D::CategoricalDomain, l::OvALoss, u::Array{Float64}) = ??
# generic method
function sample(D::CategoricalDomain, l::Loss, u::AbstractArray)
prob = exp.(-[evaluate(l, u, i) for i in D.min:D.max])
    return sample(D.min:D.max, Weights(prob))
end
########################################## PERIODIC ##########################################
# Periodic data can take values from ℜ, but given a period T, we should have error_metric(a,a+T) = 0
# Since periodic data can take any real value, we can use the real-valued imputation methods
# sample(D::PeriodicDomain, l::Loss, u::Float64) = ??
########################################## COUNTS ##########################################
# Count data can take values over ℕ, which we approximate as {0, 1, 2 ... `max_count`}
# Our approximation of ℕ is really an ordinal
sample(D::CountDomain, l::Loss, u::Float64) = sample(OrdinalDomain(0,D.max_count), l, u)
####################################################################################
# Use impute and error_metric over arrays
function sample(
domains::Array{DomainSubtype,1},
losses::Array{LossSubtype,1},
U::Array{Float64,2}) where {DomainSubtype<:Domain,LossSubtype<:Loss}
m, d = size(U)
n = length(losses)
yidxs = get_yidxs(losses)
    A_sampled = Array{Number}(undef, (m, n));
for f in 1:n
for i in 1:m
if length(yidxs[f]) > 1
A_sampled[i,f] = sample(domains[f], losses[f], vec(U[i,yidxs[f]]))
else
A_sampled[i,f] = sample(domains[f], losses[f], U[i,yidxs[f]])
end
end
end
return A_sampled
end
# sample missing entries in A according to the fit model (X,Y)
function sample_missing(glrm::GLRM)
do_sample(e::Int, f::Int) = !(e in glrm.observed_examples[f])
return sample(glrm, do_sample)
end
all_entries(e::Int,f::Int) = true
# sample all entries in A according to the fit model (X,Y)
# do_sample is a function that takes an example-feature pair (e,f)
# and returns true if that entry should be replaced by a sample from the model
# is_dense controls whether the output should be a dense matrix
# it's true by default because we sample all entries by default
function sample(glrm::GLRM, do_sample::Function=all_entries, is_dense::Bool=true)
U = glrm.X'*glrm.Y
m, d = size(U)
n = length(glrm.losses)
yidxs = get_yidxs(glrm.losses)
domains = Domain[domain(l) for l in glrm.losses]
# make sure we don't mutate the type of the array A
# even if all data for some real loss take integer values
for j=1:n
if isa(domains[j], RealDomain) && isa(glrm.A[:,j], Array{Union{Missing, Int},1})
            domains[j] = OrdinalDomain(minimum(skipmissing(glrm.A[:,j])), maximum(skipmissing(glrm.A[:,j])))
end
end
# compute the correct variance for real valued losses
original_scales = [l.scale for l in glrm.losses]
for j=1:n
if isa(domains[j], RealDomain)
println("old scale:", glrm.losses[j].scale)
            glrm.losses[j].scale = 1/mean((U[glrm.observed_examples[j],yidxs[j]] - glrm.A[glrm.observed_examples[j],j]).^2) # scale = 1/σ̂², matching the convention above that l.scale should be 1/var
println("new scale:", glrm.losses[j].scale)
end
end
A_sampled = copy(glrm.A);
if is_dense && isa(A_sampled, SparseMatrixCSC)
A_sampled = Matrix(A_sampled)
end
for f in 1:n
for e in 1:m
if do_sample(e,f)
A_sampled[e,f] = sample(domains[f], glrm.losses[f], U[e,yidxs[f]])
end
end
end
# revert scales to previously defined values
for j=1:n
glrm.losses[j].scale = original_scales[j]
end
return A_sampled
end
function sample(losses::Array{LossSubtype,1}, U::Array{Float64,2}) where LossSubtype<:Loss
domains = Domain[domain(l) for l in losses]
sample(domains, losses, U)
end
### Hack to sample from non-probabilistic losses
sample(D::Domain, l::Loss, u) = impute(D, l, u)
import ScikitLearnBase
using ScikitLearnBase: @declare_hyperparameters
export SkGLRM, PCA, QPCA, NNMF, KMeans, RPCA
################################################################################
# Shared definitions
# Note: there is redundancy in the hyperparameters. This is
# necessary if we want to offer a simple interface in PCA(), and a full
# interface in SkGLRM(). PCA(abs_tol=0.1, max_iter=200) cannot create
# `ProxGradParams(abs_tol, max_iter)` right away, because abs_tol and
# max_iter are hyperparameters and need to be visible/changeable by
# set_params for grid-search.
# There are other ways of setting it up, but this seems like the simplest.
mutable struct SkGLRM <: ScikitLearnBase.BaseEstimator
# Hyperparameters: those will be passed to GLRM, so it doesn't matter if
# they're not typed.
fit_params # if fit_params != nothing, it has priority over abs_tol, etc.
loss
rx
ry
# rx/ry_scale can be nothing, in which case they're ignored. This allows
# ry to be a vector
rx_scale
ry_scale
abs_tol::Float64
rel_tol::Float64
max_iter::Int
inner_iter::Int
k::Int
init::Function # initialization function
verbose::Bool
glrm::GLRM # left undefined by the constructor
end
# This defines `clone`, `get_params` and `set_params!`
@declare_hyperparameters(SkGLRM, [:fit_params, :init, :rx, :ry,
:rx_scale, :ry_scale, :loss,
:abs_tol, :rel_tol, :max_iter, :inner_iter, :k,
:verbose])
function do_fit!(skglrm::SkGLRM, glrm::GLRM)
fit_params = (skglrm.fit_params === nothing ?
ProxGradParams(abs_tol=skglrm.abs_tol,
rel_tol=skglrm.rel_tol,
max_iter=skglrm.max_iter) :
skglrm.fit_params)
fit!(glrm, fit_params; verbose=skglrm.verbose)
end
function ind2sub(a, i)
    # stand-in for the old Base.ind2sub: convert linear index i to a tuple of subscripts
    return Tuple(CartesianIndices(a)[i])
end
function build_glrm(skglrm::SkGLRM, X, missing_values)
k = skglrm.k == -1 ? size(X, 2) : skglrm.k
    # findall on a boolean matrix already returns CartesianIndexes; convert to (i,j) tuples
    obs = [Tuple(ci) for ci in findall(.!missing_values)]
rx, ry = skglrm.rx, skglrm.ry
if skglrm.rx_scale !== nothing
rx = copy(rx)
mul!(rx, skglrm.rx_scale)
end
if skglrm.ry_scale !== nothing
ry = copy(ry)
mul!(ry, skglrm.ry_scale)
end
GLRM(X, skglrm.loss, rx, ry, k; obs=obs)
end
# The input matrix is called X (instead of A) following ScikitLearn's convention
function ScikitLearnBase.fit_transform!(skglrm::SkGLRM, X, y=nothing;
missing_values=isnan.(X))
@assert size(X)==size(missing_values)
# Reuse the standard GLRM constructor and fitting machinery
skglrm.glrm = build_glrm(skglrm, X, missing_values)
skglrm.init(skglrm.glrm)
X, _, _ = do_fit!(skglrm, skglrm.glrm)
return X'
end
function ScikitLearnBase.fit!(skglrm::SkGLRM, X, y=nothing; kwargs...)
ScikitLearnBase.fit_transform!(skglrm, X; kwargs...)
skglrm
end
""" `transform(skglrm::SkGLRM, X)` brings X to low-rank-space """
function ScikitLearnBase.transform(skglrm::SkGLRM, X;
missing_values=isnan.(X))
glrm = skglrm.glrm
ry_fixed = [FixedLatentFeaturesConstraint(glrm.Y[:, i])
for i=1:size(glrm.Y, 2)]
    glrm_fixed = build_glrm(skglrm, X, missing_values)
    glrm_fixed.ry = ry_fixed # hold the fitted Y fixed so only the new X is solved for
X2, _, ch = do_fit!(skglrm, glrm_fixed)
return X2'
end
""" `transform(skglrm::SkGLRM, X)` brings X from low-rank-space back to the
original input-space """
ScikitLearnBase.inverse_transform(skglrm::SkGLRM, X) = X * skglrm.glrm.Y
# Only makes sense for KMeans
function ScikitLearnBase.predict(km::SkGLRM, X)
X2 = ScikitLearnBase.transform(km, X)
# This performs the "argmax" over the columns to get the cluster #
    return mapslices(argmax, X2, dims=2)[:]
end
################################################################################
# Public constructors
"""
SkGLRM(; fit_params=nothing, init=glrm->nothing, k::Int=-1,
loss=QuadLoss(), rx::Regularizer=ZeroReg(), ry=ZeroReg(),
rx_scale=nothing, ry_scale=nothing,
# defaults taken from proxgrad.jl
abs_tol=0.00001, rel_tol=0.0001, max_iter=100, inner_iter=1,
verbose=false)
Generalized low rank model (GLRM). GLRMs model a data array by a low rank
matrix. GLRM makes it easy to mix and match loss functions and regularizers to
construct a model suitable for a particular data set.
Hyperparameters:
- `fit_params`: algorithm to use in fitting the GLRM. Defaults to
`ProxGradParams(abs_tol, rel_tol, skglrm.max_iter)`
- `init`: function to initialize the low-rank matrices, before the main gradient
descent loop.
- `k`: number of components (rank of the latent representation). By default,
use k=nfeatures (full rank)
- `loss`: loss function. Can be either a single `::Loss` object, or a vector
of `nfeature` loss objects, allowing for mixed inputs (eg. binary and
continuous data)
- `rx`: regularization over the hidden coefficient matrix
- `ry`: regularization over the latent features matrix. Can be either a single
regularizer, or a vector of regularizers of length nfeatures, allowing
for mixed inputs
- `rx_scale`, `ry_scale`: strength of the regularization (higher is stronger).
By default, `scale=1`. Cannot be used if `rx/ry` are vectors.
- `abs_tol, rel_tol`: tolerance criteria to stop the gradient descent iteration
- `max_iter, inner_iter`: number of iterations in the gradient descent loops
- `verbose`: print convergence information
All parameters (in particular, `rx/ry_scale`) can be tuned with
`ScikitLearn.GridSearch.GridSearchCV`
For more information on the parameters see [LowRankModels](https://github.com/madeleineudell/LowRankModels.jl)
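
Example (illustrative):

    model = SkGLRM(k=2, loss=QuadLoss(), rx=QuadReg(1.0), ry=QuadReg(1.0))
    X_lowrank = ScikitLearnBase.fit_transform!(model, A)  # A is any numeric data matrix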
"""
function SkGLRM(; fit_params=nothing, init=glrm->nothing, k=-1,
loss=QuadLoss(), rx=ZeroReg(), ry=ZeroReg(),
rx_scale=nothing, ry_scale=nothing,
# defaults taken from proxgrad.jl
abs_tol=0.00001, rel_tol=0.0001, max_iter=100, inner_iter=1,
verbose=false)
dummy = pca(zeros(1,1), 1) # it needs an initial value - will be overwritten
return SkGLRM(fit_params, loss, rx, ry, rx_scale, ry_scale, abs_tol,
rel_tol, max_iter,
inner_iter, k, init, verbose, dummy)
end
""" PCA(; k=-1, ...)
Principal Component Analysis with `k` components (defaults to using
`nfeatures`). Equivalent to
SkGLRM(loss=QuadLoss(), rx=ZeroReg(), ry=ZeroReg(), init=init_svd!)
See ?SkGLRM for more hyperparameters. In particular, increasing `max_iter`
(default 100) may improve convergence. """
function PCA(; kwargs...)
# principal components analysis
# minimize ||A - XY||^2
loss = QuadLoss()
r = ZeroReg()
return SkGLRM(; loss=loss, rx=r, ry=r, init=init_svd!, kwargs...)
end
""" QPCA(k=-1, rx_scale=1, ry_scale=1; ...)
Quadratically Regularized PCA with `k` components
(default: `k = nfeatures`). Equivalent to
SkGLRM(loss=QuadLoss(), rx=QuadReg(1.0), ry=QuadReg(1.0), init=init_svd!)
Regularization strength is set by `rx_scale` and `ry_scale`. See ?SkGLRM for
more hyperparameters.
"""
function QPCA(; kwargs...)
# quadratically regularized principal components analysis
# minimize ||A - XY||^2 + rx_scale*||X||^2 + ry_scale*||Y||^2
loss = QuadLoss()
r = QuadReg(1.0) # scale is set in build_glrm
return SkGLRM(; loss=loss, rx=r, ry=r, init=init_svd!, kwargs...)
end
""" NNMF(; k=-1, ...)
Non-negative matrix factorization with `k` components (default:
`k=nfeatures`). Equivalent to
SkGLRM(loss=QuadLoss(), rx=NonNegConstraint(), ry=NonNegConstraint(), init=init_svd!)
See ?SkGLRM for more hyperparameters
"""
function NNMF(; kwargs...)
# nonnegative matrix factorization
# minimize_{X>=0, Y>=0} ||A - XY||^2
loss = QuadLoss()
r = NonNegConstraint()
return SkGLRM(; loss=loss,rx=r,ry=r, init=init_svd!, kwargs...)
end
""" KMeans(; k=2, inner_iter=10, max_iter=100, ...)
K-Means algorithm. Separates the data into `k` clusters. See ?SkGLRM for more
hyperparameters. In particular, increasing `inner_iter` and `max_iter` may
improve convergence.
**IMPORTANT**: This is not the most efficient way of performing K-Means, and
the iteration may not reach convergence.
"""
function KMeans(; k=2, inner_iter=10, kwargs...)
# minimize_{columns of X are unit vectors} ||A - XY||^2
loss = QuadLoss()
rx = UnitOneSparseConstraint()
ry = ZeroReg()
return SkGLRM(k=k, loss=loss,rx=rx,ry=ry, inner_iter=inner_iter,
init=init_kmeanspp!; kwargs...)
end
""" RPCA(; k=-1, ...)
Robust PCA with `k` components (default: `k = nfeatures`). Equivalent to
SkGLRM(loss=HuberLoss(), rx=QuadReg(1.0), ry=QuadReg(1.0), init=init_svd!)
Regularization strength is set by `rx_scale` and `ry_scale`. See ?SkGLRM for
more hyperparameters. In particular, increasing `max_iter` (default 100) may
improve convergence. """
function RPCA(; kwargs...)
# robust PCA
# minimize HuberLoss(A - XY) + scale*||X||^2 + scale*||Y||^2
loss = HuberLoss()
r = QuadReg(1.0)
return SkGLRM(; loss=loss,rx=r,ry=r, init=init_svd!, kwargs...)
end
import LinearAlgebra: size, axpy!
import LinearAlgebra.BLAS: gemm!
#import Base: shmem_rand, shmem_randn
export ShareGLRM, share
### GLRM TYPE
mutable struct ShareGLRM{L<:Loss, R<:Regularizer}<:AbstractGLRM
A::SharedArray # The data table transformed into a coded array
losses::Array{L,1} # array of loss functions
rx::Regularizer # The regularization to be applied to each row of Xᵀ (column of X)
ry::Array{R,1} # Array of regularizers to be applied to each column of Y
k::Int # Desired rank
observed_features::ObsArray # for each example, an array telling which features were observed
observed_examples::ObsArray # for each feature, an array telling in which examples the feature was observed
X::SharedArray{Float64,2} # Representation of data in low-rank space. A ≈ X'Y
Y::SharedArray{Float64,2} # Representation of features in low-rank space. A ≈ X'Y
end
function share(glrm::GLRM)
isa(glrm.A, SharedArray) ? A = glrm.A : A = convert(SharedArray,glrm.A)
isa(glrm.X, SharedArray) ? X = glrm.X : X = convert(SharedArray, glrm.X)
isa(glrm.Y, SharedArray) ? Y = glrm.Y : Y = convert(SharedArray, glrm.Y)
return ShareGLRM(A, glrm.losses, glrm.rx, glrm.ry, glrm.k,
glrm.observed_features, glrm.observed_examples,
X, Y)
end
### todo: define objective for shared arrays so it's evaluated (safely) in parallel
export pca, qpca, nnmf, rpca, kmeans
# principal components analysis
# minimize ||A - XY||^2
function pca(A::AbstractArray, k::Int; kwargs...)
loss = QuadLoss()
r = ZeroReg()
return GLRM(A,loss,r,r,k; kwargs...)
end
# quadratically regularized principal components analysis
# minimize ||A - XY||^2 + scale*||X||^2 + scale*||Y||^2
function qpca(A::AbstractArray, k::Int; scale=1.0::Float64, kwargs...)
loss = QuadLoss()
r = QuadReg(scale)
return GLRM(A,loss,r,r,k; kwargs...)
end
# nonnegative matrix factorization
# minimize_{X>=0, Y>=0} ||A - XY||^2
function nnmf(A::AbstractArray, k::Int; kwargs...)
loss = QuadLoss()
r = NonNegConstraint()
GLRM(A,loss,r,r,k; kwargs...)
end
# k-means
# minimize_{columns of X are unit vectors} ||A - XY||^2
function kmeans(A::AbstractArray, k::Int; kwargs...)
loss = QuadLoss()
ry = ZeroReg()
rx = UnitOneSparseConstraint()
return GLRM(A,loss,rx,ry,k; kwargs...)
end
# robust PCA
# minimize HuberLoss(A - XY) + scale*||X||^2 + scale*||Y||^2
function rpca(A::AbstractArray, k::Int; scale=1.0::Float64, kwargs...)
loss = HuberLoss()
r = QuadReg(scale)
return GLRM(A,loss,r,r,k; kwargs...)
end
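# e.g. (usage sketch): a rank-5 quadratically regularized PCA of a random 100×20 matrix
#     glrm = qpca(randn(100, 20), 5, scale = 0.1)
#     X, Y, ch = fit!(glrm, ProxGradParams())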
### Proximal gradient method
export ProxGradParams, fit!
mutable struct ProxGradParams<:AbstractParams
stepsize::Float64 # initial stepsize
max_iter::Int # maximum number of outer iterations
inner_iter_X::Int # how many prox grad steps to take on X before moving on to Y (and vice versa)
inner_iter_Y::Int # how many prox grad steps to take on Y before moving on to X (and vice versa)
abs_tol::Float64 # stop if objective decrease upon one outer iteration is less than this * number of observations
rel_tol::Float64 # stop if objective decrease upon one outer iteration is less than this * objective value
min_stepsize::Float64 # use a decreasing stepsize, stop when reaches min_stepsize
end
function ProxGradParams(stepsize::Number=1.0; # initial stepsize
max_iter::Int=100, # maximum number of outer iterations
inner_iter_X::Int=1, # how many prox grad steps to take on X before moving on to Y (and vice versa)
inner_iter_Y::Int=1, # how many prox grad steps to take on Y before moving on to X (and vice versa)
inner_iter::Int=1,
abs_tol::Number=0.00001, # stop if objective decrease upon one outer iteration is less than this * number of observations
rel_tol::Number=0.0001, # stop if objective decrease upon one outer iteration is less than this * objective value
min_stepsize::Number=0.01*stepsize) # stop if stepsize gets this small
stepsize = convert(Float64, stepsize)
inner_iter_X = max(inner_iter_X, inner_iter)
inner_iter_Y = max(inner_iter_Y, inner_iter)
return ProxGradParams(convert(Float64, stepsize),
max_iter,
inner_iter_X,
inner_iter_Y,
convert(Float64, abs_tol),
convert(Float64, rel_tol),
convert(Float64, min_stepsize))
end
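# e.g. (usage sketch): run up to 200 outer iterations with a looser relative tolerance
#     params = ProxGradParams(1.0, max_iter = 200, rel_tol = 1e-5)
#     X, Y, ch = fit!(glrm, params)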
### FITTING
function fit!(glrm::GLRM, params::ProxGradParams;
ch::ConvergenceHistory=ConvergenceHistory("ProxGradGLRM"),
verbose=true,
kwargs...)
### initialization
A = glrm.A # rename these for easier local access
losses = glrm.losses
rx = glrm.rx
ry = glrm.ry
X = glrm.X; Y = glrm.Y
    k = glrm.k
    m,n = size(A)
    # find spans of loss functions (for multidimensional losses)
    yidxs = get_yidxs(losses)
    d = maximum(yidxs[end])
    # check that we didn't initialize to zero (otherwise we will never move)
    # (k and d are needed here to size the reinitialized Y)
    if norm(Y) == 0
        Y = .1*randn(k,d)
    end
# check Y is the right size
if d != size(Y,2)
@warn("The width of Y should match the embedding dimension of the losses.
Instead, embedding_dim(glrm.losses) = $(embedding_dim(glrm.losses))
and size(glrm.Y, 2) = $(size(glrm.Y, 2)).
        Reinitializing Y as randn(glrm.k, embedding_dim(glrm.losses)).")
# Please modify Y or the embedding dimension of the losses to match,
# eg, by setting `glrm.Y = randn(glrm.k, embedding_dim(glrm.losses))`")
glrm.Y = randn(glrm.k, d)
end
XY = Array{Float64}(undef, (m, d))
gemm!('T','N',1.0,X,Y,0.0,XY) # XY = X' * Y initial calculation
# step size (will be scaled below to ensure it never exceeds 1/\|g\|_2 or so for any subproblem)
alpharow = params.stepsize*ones(m)
alphacol = params.stepsize*ones(n)
# stopping criterion: stop when decrease in objective < tol, scaled by the number of observations
scaled_abs_tol = params.abs_tol * mapreduce(length,+,glrm.observed_features)
# alternating updates of X and Y
if verbose println("Fitting GLRM") end
update_ch!(ch, 0, objective(glrm, X, Y, XY, yidxs=yidxs))
t = time()
steps_in_a_row = 0
# gradient wrt columns of X
g = zeros(k)
# gradient wrt column-chunks of Y
G = zeros(k, d)
# rowwise objective value
obj_by_row = zeros(m)
# columnwise objective value
obj_by_col = zeros(n)
# cache views for better memory management
# make sure we don't try to access memory not allocated to us
@assert(size(Y) == (k,d))
@assert(size(X) == (k,m))
# views of the columns of X corresponding to each example
ve = [view(X,:,e) for e=1:m]
# views of the column-chunks of Y corresponding to each feature y_j
# vf[f] == Y[:,f]
vf = [view(Y,:,yidxs[f]) for f=1:n]
# views of the column-chunks of G corresponding to the gradient wrt each feature y_j
# these have the same shape as y_j
gf = [view(G,:,yidxs[f]) for f=1:n]
# working variables
newX = copy(X)
newY = copy(Y)
newve = [view(newX,:,e) for e=1:m]
newvf = [view(newY,:,yidxs[f]) for f=1:n]
for i=1:params.max_iter
# STEP 1: X update
# XY = X' * Y was computed above
# reset step size if we're doing something more like alternating minimization
if params.inner_iter_X > 1 || params.inner_iter_Y > 1
for ii=1:m alpharow[ii] = params.stepsize end
for jj=1:n alphacol[jj] = params.stepsize end
end
for inneri=1:params.inner_iter_X
for e=1:m # for every example x_e == ve[e]
fill!(g, 0.) # reset gradient to 0
# compute gradient of L with respect to Xᵢ as follows:
# ∇{Xᵢ}L = Σⱼ dLⱼ(XᵢYⱼ)/dXᵢ
for f in glrm.observed_features[e]
# but we have no function dLⱼ/dXᵢ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du
# by chain rule, the result is: Σⱼ (dLⱼ(XᵢYⱼ)/du * Yⱼ), where dLⱼ/du is our grad() function
curgrad = grad(losses[f],XY[e,yidxs[f]],A[e,f])
if isa(curgrad, Number)
axpy!(curgrad, vf[f], g)
else
# on v0.4: gemm!('N', 'T', 1.0, vf[f], curgrad, 1.0, g)
gemm!('N', 'N', 1.0, vf[f], curgrad, 1.0, g)
end
end
# take a proximal gradient step to update ve[e]
l = length(glrm.observed_features[e]) + 1 # if each loss function has lipshitz constant 1 this bounds the lipshitz constant of this example's objective
obj_by_row[e] = row_objective(glrm, e, ve[e]) # previous row objective value
while alpharow[e] > params.min_stepsize
stepsize = alpharow[e]/l
# newx = prox(rx[e], ve[e] - stepsize*g, stepsize) # this will use much more memory than the inplace version with linesearch below
## gradient step: Xᵢ += -(α/l) * ∇{Xᵢ}L
axpy!(-stepsize,g,newve[e])
## prox step: Xᵢ = prox_rx(Xᵢ, α/l)
prox!(rx[e],newve[e],stepsize)
if row_objective(glrm, e, newve[e]) < obj_by_row[e]
copyto!(ve[e], newve[e])
alpharow[e] *= 1.05
break
else # the stepsize was too big; undo and try again only smaller
copyto!(newve[e], ve[e])
alpharow[e] *= .7
if alpharow[e] < params.min_stepsize
alpharow[e] = params.min_stepsize * 1.1
break
end
end
end
end # for e=1:m
gemm!('T','N',1.0,X,Y,0.0,XY) # Recalculate XY using the new X
end # inner iteration
# STEP 2: Y update
for inneri=1:params.inner_iter_Y
fill!(G, 0.)
for f=1:n
# compute gradient of L with respect to Yⱼ as follows:
# ∇{Yⱼ}L = Σⱼ dLⱼ(XᵢYⱼ)/dYⱼ
for e in glrm.observed_examples[f]
# but we have no function dLⱼ/dYⱼ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du
# by chain rule, the result is: Σⱼ dLⱼ(XᵢYⱼ)/du * Xᵢ, where dLⱼ/du is our grad() function
curgrad = grad(losses[f],XY[e,yidxs[f]],A[e,f])
if isa(curgrad, Number)
axpy!(curgrad, ve[e], gf[f])
else
# on v0.4: gemm!('N', 'T', 1.0, ve[e], curgrad, 1.0, gf[f])
gemm!('N', 'T', 1.0, ve[e], curgrad, 1.0, gf[f])
end
end
# take a proximal gradient step
l = length(glrm.observed_examples[f]) + 1
obj_by_col[f] = col_objective(glrm, f, vf[f])
while alphacol[f] > params.min_stepsize
stepsize = alphacol[f]/l
# newy = prox(ry[f], vf[f] - stepsize*gf[f], stepsize)
## gradient step: Yⱼ += -(α/l) * ∇{Yⱼ}L
axpy!(-stepsize,gf[f],newvf[f])
## prox step: Yⱼ = prox_ryⱼ(Yⱼ, α/l)
prox!(ry[f],newvf[f],stepsize)
new_obj_by_col = col_objective(glrm, f, newvf[f])
if new_obj_by_col < obj_by_col[f]
copyto!(vf[f], newvf[f])
alphacol[f] *= 1.05
obj_by_col[f] = new_obj_by_col
break
else
copyto!(newvf[f], vf[f])
alphacol[f] *= .7
if alphacol[f] < params.min_stepsize
alphacol[f] = params.min_stepsize * 1.1
break
end
end
end
end # for f=1:n
gemm!('T','N',1.0,X,Y,0.0,XY) # Recalculate XY using the new Y
end # inner iteration
# STEP 3: Record objective
obj = sum(obj_by_col)
t = time() - t
update_ch!(ch, t, obj)
t = time()
# STEP 4: Check stopping criterion
obj_decrease = ch.objective[end-1] - obj
if i>10 && (obj_decrease < scaled_abs_tol || obj_decrease/obj < params.rel_tol)
break
end
if verbose && i%10==0
println("Iteration $i: objective value = $(ch.objective[end])")
end
end
return glrm.X, glrm.Y, ch
end
### Proximal gradient method (multithreaded variant)
export ProxGradParams, fit!
mutable struct ProxGradParams<:AbstractParams
stepsize::Float64 # initial stepsize
max_iter::Int # maximum number of outer iterations
inner_iter_X::Int # how many prox grad steps to take on X before moving on to Y (and vice versa)
inner_iter_Y::Int # how many prox grad steps to take on Y before moving on to X (and vice versa)
abs_tol::Float64 # stop if objective decrease upon one outer iteration is less than this * number of observations
rel_tol::Float64 # stop if objective decrease upon one outer iteration is less than this * objective value
min_stepsize::Float64 # use a decreasing stepsize, stop when reaches min_stepsize
end
function ProxGradParams(stepsize::Number=1.0; # initial stepsize
max_iter::Int=100, # maximum number of outer iterations
inner_iter_X::Int=1, # how many prox grad steps to take on X before moving on to Y (and vice versa)
inner_iter_Y::Int=1, # how many prox grad steps to take on Y before moving on to X (and vice versa)
inner_iter::Int=1,
abs_tol::Number=0.00001, # stop if objective decrease upon one outer iteration is less than this * number of observations
rel_tol::Number=0.0001, # stop if objective decrease upon one outer iteration is less than this * objective value
min_stepsize::Number=0.01*stepsize) # stop if stepsize gets this small
stepsize = convert(Float64, stepsize)
inner_iter_X = max(inner_iter_X, inner_iter)
inner_iter_Y = max(inner_iter_Y, inner_iter)
return ProxGradParams(convert(Float64, stepsize),
max_iter,
inner_iter_X,
inner_iter_Y,
convert(Float64, abs_tol),
convert(Float64, rel_tol),
convert(Float64, min_stepsize))
end
### FITTING
function fit!(glrm::GLRM, params::ProxGradParams;
ch::ConvergenceHistory=ConvergenceHistory("ProxGradGLRM"),
verbose=true,
kwargs...)
### initialization
A = glrm.A # rename these for easier local access
losses = glrm.losses
rx = glrm.rx
ry = glrm.ry
X = glrm.X; Y = glrm.Y
    k = glrm.k
    m,n = size(A)
    # find spans of loss functions (for multidimensional losses)
    yidxs = get_yidxs(losses)
    d = maximum(yidxs[end])
    # check that we didn't initialize to zero (otherwise we will never move)
    # (k and d are needed here to size the reinitialized Y)
    if norm(Y) == 0
        Y = .1*randn(k,d)
    end
# check Y is the right size
if d != size(Y,2)
@warn("The width of Y should match the embedding dimension of the losses.
Instead, embedding_dim(glrm.losses) = $(embedding_dim(glrm.losses))
and size(glrm.Y, 2) = $(size(glrm.Y, 2)).
        Reinitializing Y as randn(glrm.k, embedding_dim(glrm.losses)).")
# Please modify Y or the embedding dimension of the losses to match,
# eg, by setting `glrm.Y = randn(glrm.k, embedding_dim(glrm.losses))`")
glrm.Y = randn(glrm.k, d)
end
XY = Array{Float64}(undef, (m, d))
gemm!('T','N',1.0,X,Y,0.0,XY) # XY = X' * Y initial calculation
# step size (will be scaled below to ensure it never exceeds 1/\|g\|_2 or so for any subproblem)
alpharow = params.stepsize*ones(m)
alphacol = params.stepsize*ones(n)
# stopping criterion: stop when decrease in objective < tol, scaled by the number of observations
scaled_abs_tol = params.abs_tol * mapreduce(length,+,glrm.observed_features)
# alternating updates of X and Y
if verbose println("Fitting GLRM") end
update_ch!(ch, 0, objective(glrm, X, Y, XY, yidxs=yidxs))
t = time()
steps_in_a_row = 0
# gradient wrt columns of X
g = [zeros(k) for t in 1:Threads.nthreads()]
# gradient wrt column-chunks of Y
G = zeros(k, d)
# rowwise objective value
obj_by_row = zeros(m)
# columnwise objective value
obj_by_col = zeros(n)
# cache views for better memory management
# make sure we don't try to access memory not allocated to us
@assert(size(Y) == (k,d))
@assert(size(X) == (k,m))
# views of the columns of X corresponding to each example
ve = [view(X,:,e) for e=1:m]
# views of the column-chunks of Y corresponding to each feature y_j
# vf[f] == Y[:,f]
vf = [view(Y,:,yidxs[f]) for f=1:n]
# views of the column-chunks of G corresponding to the gradient wrt each feature y_j
# these have the same shape as y_j
gf = [view(G,:,yidxs[f]) for f=1:n]
# working variables
newX = copy(X)
newY = copy(Y)
newve = [view(newX,:,e) for e=1:m]
newvf = [view(newY,:,yidxs[f]) for f=1:n]
for i=1:params.max_iter
# STEP 1: X update
# XY = X' * Y was computed above
# reset step size if we're doing something more like alternating minimization
if params.inner_iter_X > 1 || params.inner_iter_Y > 1
for ii=1:m alpharow[ii] = params.stepsize end
for jj=1:n alphacol[jj] = params.stepsize end
end
for inneri=1:params.inner_iter_X
Threads.@threads for e=1:m # for every example x_e == ve[e]
# for e=1:m # for every example x_e == ve[e]
g[Threads.threadid()] .= 0 # reset gradient to 0
# compute gradient of L with respect to Xᵢ as follows:
# ∇{Xᵢ}L = Σⱼ dLⱼ(XᵢYⱼ)/dXᵢ
for f in glrm.observed_features[e]
# but we have no function dLⱼ/dXᵢ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du
# by chain rule, the result is: Σⱼ (dLⱼ(XᵢYⱼ)/du * Yⱼ), where dLⱼ/du is our grad() function
curgrad = grad(losses[f],XY[e,yidxs[f]],A[e,f])
if isa(curgrad, Number)
axpy!(curgrad, vf[f], g[Threads.threadid()])
else
# on v0.4: gemm!('N', 'T', 1.0, vf[f], curgrad, 1.0, g)
gemm!('N', 'N', 1.0, vf[f], curgrad, 1.0, g[Threads.threadid()])
end
end
# take a proximal gradient step to update ve[e]
l = length(glrm.observed_features[e]) + 1 # if each loss function has lipshitz constant 1 this bounds the lipshitz constant of this example's objective
obj_by_row[e] = row_objective(glrm, e, ve[e]) # previous row objective value
while alpharow[e] > params.min_stepsize
stepsize = alpharow[e]/l
# newx = prox(rx[e], ve[e] - stepsize*g, stepsize) # this will use much more memory than the inplace version with linesearch below
## gradient step: Xᵢ += -(α/l) * ∇{Xᵢ}L
axpy!(-stepsize,g[Threads.threadid()],newve[e])
## prox step: Xᵢ = prox_rx(Xᵢ, α/l)
prox!(rx[e],newve[e],stepsize)
if row_objective(glrm, e, newve[e]) < obj_by_row[e]
copyto!(ve[e], newve[e])
alpharow[e] *= 1.05 # choose a more aggressive stepsize
break
else # the stepsize was too big; undo and try again only smaller
copyto!(newve[e], ve[e])
alpharow[e] *= .7 # choose a less aggressive stepsize
if alpharow[e] < params.min_stepsize
alpharow[e] = params.min_stepsize * 1.1
break
end
end
end
end # for e=1:m
gemm!('T','N',1.0,X,Y,0.0,XY) # Recalculate XY using the new X
end # inner iteration
# STEP 2: Y update
for inneri=1:params.inner_iter_Y
G .= 0
Threads.@threads for f=1:n
# for f=1:n
# compute gradient of L with respect to Yⱼ as follows:
# ∇{Yⱼ}L = Σⱼ dLⱼ(XᵢYⱼ)/dYⱼ
for e in glrm.observed_examples[f]
# but we have no function dLⱼ/dYⱼ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du
# by chain rule, the result is: Σⱼ dLⱼ(XᵢYⱼ)/du * Xᵢ, where dLⱼ/du is our grad() function
curgrad = grad(losses[f],XY[e,yidxs[f]],A[e,f])
if isa(curgrad, Number)
axpy!(curgrad, ve[e], gf[f])
else
# on v0.4: gemm!('N', 'T', 1.0, ve[e], curgrad, 1.0, gf[f])
gemm!('N', 'T', 1.0, ve[e], curgrad, 1.0, gf[f])
end
end
# take a proximal gradient step
l = length(glrm.observed_examples[f]) + 1
obj_by_col[f] = col_objective(glrm, f, vf[f])
while alphacol[f] > params.min_stepsize
stepsize = alphacol[f]/l
# newy = prox(ry[f], vf[f] - stepsize*gf[f], stepsize)
## gradient step: Yⱼ += -(α/l) * ∇{Yⱼ}L
axpy!(-stepsize,gf[f],newvf[f])
## prox step: Yⱼ = prox_ryⱼ(Yⱼ, α/l)
prox!(ry[f],newvf[f],stepsize)
new_obj_by_col = col_objective(glrm, f, newvf[f])
if new_obj_by_col < obj_by_col[f]
copyto!(vf[f], newvf[f])
alphacol[f] *= 1.05
obj_by_col[f] = new_obj_by_col
break
else
copyto!(newvf[f], vf[f])
alphacol[f] *= .7
if alphacol[f] < params.min_stepsize
alphacol[f] = params.min_stepsize * 1.1
break
end
end
end
end # for f=1:n
gemm!('T','N',1.0,X,Y,0.0,XY) # Recalculate XY using the new Y
end # inner iteration
# STEP 3: Record objective
obj = sum(obj_by_col)
t = time() - t
update_ch!(ch, t, obj)
t = time()
# STEP 4: Check stopping criterion
obj_decrease = ch.objective[end-1] - obj
if i>10 && (obj_decrease < scaled_abs_tol || obj_decrease/obj < params.rel_tol)
break
end
if verbose && i%10==0
println("Iteration $i: objective value = $(ch.objective[end])")
end
end
return glrm.X, glrm.Y, ch
end
### Streaming method
# only implemented for quadratic objectives
# TODO: add quadratic regularization
export StreamingParams, streaming_fit!, streaming_impute!
mutable struct StreamingParams<:AbstractParams
T0::Int # number of rows to use to initialize Y before streaming begins
stepsize::Float64 # stepsize (inverse of memory)
Y_update_interval::Int # how often to prox Y
end
function StreamingParams(
T0::Int=1000; # number of rows to use to initialize Y before streaming begins
stepsize::Number=1/T0, # (inverse of memory)
Y_update_interval::Int=10 # how often to prox Y
)
return StreamingParams(T0, convert(Float64, stepsize), Y_update_interval)
end
### FITTING
function streaming_fit!(glrm::GLRM, params::StreamingParams=StreamingParams();
ch::ConvergenceHistory=ConvergenceHistory("StreamingGLRM"),
verbose=true)
# make sure everything is quadratic
@assert all(map(l->isa(l, QuadLoss), glrm.losses))
@assert all(map(l->isa(l, QuadReg), glrm.rx))
@assert all(map(l->isa(l, QuadReg), glrm.ry))
# initialize Y and first T0 rows of X
init_glrm = keep_rows(glrm, params.T0)
init_svd!(init_glrm)
copy!(glrm.Y, init_glrm.Y)
copy!(view(glrm.X, :, 1:params.T0), init_glrm.X)
### initialization
A = glrm.A # rename these for easier local access
rx = glrm.rx
ry = glrm.ry
X = glrm.X; Y = glrm.Y
k = glrm.k
m,n = size(A)
# yscales = map(r->r.scale, ry)
for i=params.T0+1:m
# update x_i
obs = glrm.observed_features[i]
Yobs = Y[:, obs]
Aobs = A[i, obs]
xi = view(X, :, i)
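        # closed-form ridge update: xᵢ minimizes ‖Yobsᵀx - Aobs‖² plus the quadratic
        # regularization on x, so no iterative solve is needed for the X step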
copy!(xi, (Yobs * Yobs' + 2 * rx[i].scale * I) \ (Yobs * Aobs))
# update objective
r = Yobs'*xi - Aobs
push!(ch.objective, norm(r) ^ 2)
# # update Y
# TODO verify this is stochastic proximal gradient (with constant stepsize) for the problem
# TODO don't prox Y at every iteration
# TODO don't assume scales on all the rys are equal
# gY[:, jj] = xi * r' == r[jj] * xi # gradient of ith row objective wrt Y
for jj in 1:length(obs)
Y[:,obs[jj]] -= params.stepsize * r[jj] * xi
end
if i%params.Y_update_interval == 0
# prox!(ry, Y, params.stepsize * params.Y_update_interval)
Y ./= (1 + 2 * params.stepsize * params.Y_update_interval * ry[1].scale)
end
end
return X, Y, ch
end
### FITTING
function streaming_impute!(glrm::GLRM, params::StreamingParams=StreamingParams();
ch::ConvergenceHistory=ConvergenceHistory("StreamingGLRM"),
verbose=true)
# make sure everything is quadratic
@assert all(map(l->isa(l, QuadLoss), glrm.losses))
@assert all(map(l->isa(l, QuadReg), glrm.rx))
@assert all(map(l->isa(l, QuadReg), glrm.ry))
# initialize Y and first T0 rows of X
init_glrm = keep_rows(glrm, params.T0)
init_svd!(init_glrm)
copy!(glrm.Y, init_glrm.Y)
copy!(view(glrm.X, :, 1:params.T0), init_glrm.X)
### initialization
A = glrm.A # rename these for easier local access
Ahat = copy(glrm.A)
rx = glrm.rx
ry = glrm.ry
X = glrm.X; Y = glrm.Y
k = glrm.k
m,n = size(A)
# yscales = map(r->r.scale, ry)
for i=params.T0+1:m
# update x_i
obs = glrm.observed_features[i]
Yobs = Y[:, obs]
Aobs = A[i, obs]
xi = view(X, :, i)
copy!(xi, (Yobs * Yobs' + 2 * rx[i].scale * I) \ (Yobs * Aobs))
# impute
        not_obs = setdiff(1:n, obs) # returns a Vector, which (unlike a Set) can index Ahat below
if length(not_obs)>0
ahat = xi'*Y
Ahat[i, not_obs] = ahat[not_obs]
end
# update objective
r = Yobs'*xi - Aobs
push!(ch.objective, norm(r) ^ 2)
# # update Y
# TODO verify this is stochastic proximal gradient (with constant stepsize) for the problem
# TODO don't prox Y at every iteration
# TODO don't assume scales on all the rys are equal
# gY[:, jj] = xi * r' == r[jj] * xi # gradient of ith row objective wrt Y
for jj in 1:length(obs)
Y[:,obs[jj]] -= params.stepsize * r[jj] * xi
end
if i%params.Y_update_interval == 0
# prox!(ry, Y, params.stepsize * params.Y_update_interval)
Y ./= (1 + 2 * params.stepsize * params.Y_update_interval * ry[1].scale)
end
end
return Ahat
end
""" Constructs new GLRM on subset of rows of the data from input glrm """
function keep_rows(glrm, r::UnitRange{Int})
@assert maximum(r) <= size(glrm.A, 1)
obs = flatten_observations(glrm.observed_features)
first_row = minimum(r)
if first_row > 1
new_obs = map( t -> (t[1]-first_row+1, t[2]), filter( t -> (t[1] in r), obs))
else
new_obs = filter( t -> (t[1] in r), obs)
end
of, oe = sort_observations(new_obs, length(r), size(glrm.A, 2))
new_glrm = GLRM(glrm.A[r,:], glrm.losses, glrm.rx[r], glrm.ry, glrm.k,
observed_features = of, observed_examples = oe)
return new_glrm
end
keep_rows(glrm, T::Int) = keep_rows(glrm, 1:T)
### Proximal gradient method (sparse variant)
export SparseProxGradParams, fit!
mutable struct SparseProxGradParams<:AbstractParams
stepsize::Float64 # initial stepsize
max_iter::Int # maximum number of outer iterations
inner_iter::Int # how many prox grad steps to take on X before moving on to Y (and vice versa)
abs_tol::Float64 # stop if objective decrease upon one outer iteration is less than this
min_stepsize::Float64 # use a decreasing stepsize, stop when reaches min_stepsize
end
function SparseProxGradParams(stepsize::Number=1.0; # initial stepsize
max_iter::Int=100, # maximum number of outer iterations
inner_iter::Int=1, # how many prox grad steps to take on X before moving on to Y (and vice versa)
abs_tol::Float64=0.00001, # stop if objective decrease upon one outer iteration is less than this
min_stepsize::Float64=0.01*stepsize) # stop if stepsize gets this small
stepsize = convert(Float64, stepsize)
return SparseProxGradParams(stepsize, max_iter, inner_iter, abs_tol, min_stepsize)
end
### FITTING
function fit!(glrm::GLRM, params::SparseProxGradParams;
ch::ConvergenceHistory=ConvergenceHistory("SparseProxGradGLRM"),
verbose=true,
kwargs...)
println(params)
### initialization
A = glrm.A # rename these for easier local access
losses = glrm.losses
rx = glrm.rx
ry = glrm.ry
# at any time, glrm.X and glrm.Y will be the best model yet found, while
# X and Y will be the working variables
X = copy(glrm.X); Y = copy(glrm.Y)
k = glrm.k
m,n = size(A)
# check that we didn't initialize to zero (otherwise we will never move)
if norm(Y) == 0
Y = .1*randn(k,n)
end
# step size (will be scaled below to ensure it never exceeds 1/\|g\|_2 or so for any subproblem)
alpha = params.stepsize
# stopping criterion: stop when decrease in objective < tol
tol = params.abs_tol * mapreduce(length,+,glrm.observed_features)
# alternating updates of X and Y
if verbose println("Fitting GLRM") end
update_ch!(ch, 0, objective(glrm; sparse=true))
t = time()
steps_in_a_row = 0
g = zeros(k)
# cache views
ve = [view(X,:,e) for e=1:m]
vf = [view(Y,:,f) for f=1:n]
for i=1:params.max_iter
# STEP 1: X update
for inneri=1:params.inner_iter
for e=1:m # doing this means looping over XY in row-major order, but otherwise we couldn't parallelize over Xᵢs
rmul!(g, 0)# reset gradient to 0
# compute gradient of L with respect to Xᵢ as follows:
# ∇{Xᵢ}L = Σⱼ dLⱼ(XᵢYⱼ)/dXᵢ
for f in glrm.observed_features[e]
# but we have no function dLⱼ/dXᵢ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du
# by chain rule, the result is: Σⱼ (dLⱼ(XᵢYⱼ)/du * Yⱼ), where dLⱼ/du is our grad() function
# our estimate for A[e,f] is given by dot(ve[e],vf[f])
axpy!(grad(losses[f],dot(ve[e],vf[f]),A[e,f]), vf[f], g)
end
# take a proximal gradient step
l = length(glrm.observed_features[e]) + 1
rmul!(g, -alpha/l)
## gradient step: Xᵢ += -(α/l) * ∇{Xᵢ}L
axpy!(1,g,ve[e])
## prox step: Xᵢ = prox_rx(Xᵢ, α/l)
prox!(rx[e],ve[e],alpha/l)
end
end
# STEP 2: Y update
for inneri=1:params.inner_iter
for f=1:n
rmul!(g, 0) # reset gradient to 0
# compute gradient of L with respect to Yⱼ as follows:
# ∇{Yⱼ}L = Σⱼ dLⱼ(XᵢYⱼ)/dYⱼ
for e in glrm.observed_examples[f]
# but we have no function dLⱼ/dYⱼ, only dLⱼ/d(XᵢYⱼ) aka dLⱼ/du
# by chain rule, the result is: Σⱼ dLⱼ(XᵢYⱼ)/du * Xᵢ, where dLⱼ/du is our grad() function
axpy!(grad(losses[f],dot(ve[e],vf[f]),A[e,f]), ve[e], g)
end
# take a proximal gradient step
l = length(glrm.observed_examples[f]) + 1
rmul!(g, -alpha/l)
## gradient step: Yⱼ += -(α/l) * ∇{Yⱼ}L
axpy!(1,g,vf[f])
## prox step: Yⱼ = prox_ryⱼ(Yⱼ, α/l)
prox!(ry[f],vf[f],alpha/l)
end
end
# STEP 3: Check objective
obj = objective(glrm, X, Y; sparse=true)
# record the best X and Y yet found
if obj < ch.objective[end]
t = time() - t
update_ch!(ch, t, obj)
copy!(glrm.X, X); copy!(glrm.Y, Y) # save new best X and Y
alpha = alpha * 1.05
steps_in_a_row = max(1, steps_in_a_row+1)
t = time()
else
# if the objective went up, reduce the step size, and undo the step
alpha = alpha / max(1.5, -steps_in_a_row)
if verbose println("obj went up to $obj; reducing step size to $alpha") end
copy!(X, glrm.X); copy!(Y, glrm.Y) # revert back to last X and Y
steps_in_a_row = min(0, steps_in_a_row-1)
end
# STEP 4: Check stopping criterion
if i>10 && (steps_in_a_row > 3 && ch.objective[end-1] - obj < tol) || alpha <= params.min_stepsize
break
end
if verbose && i%10==0
println("Iteration $i: objective value = $(ch.objective[end])")
end
end
t = time() - t
update_ch!(ch, t, ch.objective[end])
return glrm.X, glrm.Y, ch
end
##############################################################
### copying
##############################################################
import Base.copy
export copy, copy_estimate, GLRM
for T in :[Loss, Regularizer, AbstractGLRM].args
@eval function copy(r::$T)
fieldvals = [getfield(r, f) for f in fieldnames(typeof(r))]
return typeof(r)(fieldvals...)
end
end
# points to all the same problem data as the original input GLRM,
# but copies the estimate of the model parameters
function copy_estimate(g::GLRM)
return GLRM(g.A,g.losses,g.rx,g.ry,g.k,
g.observed_features,g.observed_examples,
copy(g.X),copy(g.Y))
end
# domains are struct, so this is ok
copy(d::Domain) = d
##############################################################
### fill singleton losses and regularizers to the right shapes
##############################################################
# fill an array of length n with copies of the object foo
fillcopies(foo, n::Int; arraytype=typeof(foo)) = arraytype[copy(foo) for i=1:n]
# singleton loss:
GLRM(A, loss::Loss, rx::Array, ry::Regularizer, k::Int; kwargs...) =
GLRM(A, fillcopies(loss, size(A, 2), arraytype=Loss), rx, fillcopies(ry, size(A, 2), arraytype=Regularizer), k; kwargs...)
GLRM(A, loss::Loss, rx::Regularizer, ry::Array, k::Int; kwargs...) =
GLRM(A, fillcopies(loss, size(A, 2), arraytype=Loss), fillcopies(rx, size(A, 1), arraytype=Regularizer), ry, k; kwargs...)
GLRM(A, loss::Loss, rx::Array, ry::Array, k::Int; kwargs...) =
GLRM(A, fillcopies(loss, size(A, 2), arraytype=Loss), rx, ry, k; kwargs...)
# singleton regularizer on x and/or y:
GLRM(A, losses::Array, rx::Regularizer, ry::Array, k::Int; kwargs...) =
GLRM(A, losses, fillcopies(rx, size(A, 1), arraytype=Regularizer), ry, k::Int; kwargs...)
GLRM(A, losses::Array, rx::Array, ry::Regularizer, k::Int; kwargs...) =
GLRM(A, losses, rx, fillcopies(ry, size(A, 2), arraytype=Regularizer), k::Int; kwargs...)
GLRM(A, losses::Array, rx::Regularizer, ry::Regularizer, k::Int; kwargs...) =
GLRM(A, losses, fillcopies(rx, size(A, 1), arraytype=Regularizer), fillcopies(ry, size(A, 2), arraytype=Regularizer), k::Int; kwargs...)
# singleton everything
GLRM(A, loss::Loss, rx::Regularizer, ry::Regularizer, k::Int; kwargs...) =
GLRM(A, fillcopies(loss, size(A, 2), arraytype=Loss), fillcopies(rx, size(A, 1), arraytype=Regularizer), fillcopies(ry, size(A, 2), arraytype=Regularizer), k::Int; kwargs...)
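# e.g. GLRM(A, QuadLoss(), QuadReg(0.1), QuadReg(0.1), 5) expands the singleton loss
# and regularizers into independent copies, one per column (losses, ry) or row (rx).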
using Base: depwarn
Base.@deprecate GLRM(A::AbstractArray, obs::Array{Tuple{Int, Int}, 1}, args...; kwargs...) GLRM(A, args...; obs = obs, kwargs...)
Base.@deprecate ProxGradParams(s::Number,m::Int,c::Float64,ms::Float64) ProxGradParams(s, max_iter=m, abs_tol=c, min_stepsize=ms)
Base.@deprecate expand_categoricals expand_categoricals!
Base.@deprecate errors(g::GLRM) error_metric(g)
Base.@deprecate quadratic QuadLoss
Base.@deprecate logistic LogisticLoss
Base.@deprecate huber HuberLoss
Base.@deprecate LogLoss LogisticLoss
Base.@deprecate l1 L1Loss
Base.@deprecate poisson PoissonLoss
Base.@deprecate ordinal_hinge OrdinalHingeLoss
Base.@deprecate OrdinalHinge OrdinalHingeLoss
Base.@deprecate WeightedHinge WeightedHingeLoss
Base.@deprecate periodic PeriodicLoss
Base.@deprecate quadreg QuadReg
Base.@deprecate constrained_quadreg QuadConstraint
Base.@deprecate onereg OneReg
Base.@deprecate zeroreg ZeroReg
Base.@deprecate nonnegative NonNegConstraint
Base.@deprecate onesparse OneSparseConstraint
Base.@deprecate unitonesparse UnitOneSparseConstraint
Base.@deprecate simplex SimplexConstraint
Base.@deprecate nonneg_onereg NonNegOneReg
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1315 | module BatchUtils
using ...Concepts
abstract type BatchingStrategy end
abstract type SequentialScan <: BatchingStrategy end
abstract type SampleWithReplacement <: BatchingStrategy end
abstract type SampleWithoutReplacement <: BatchingStrategy end
export BatchFactory,
next,
reset,
has_next,
initialize,
BatchingStrategy,
SequentialScan
mutable struct BatchFactory{T<:Any}
n::Optional{Int64}
batch_size::Optional{Int64}
cur_batch::Optional{Int64}
function BatchFactory{S}(; size::Int64) where S<:BatchingStrategy
return new{S}(nothing, size, nothing)
end
function BatchFactory{S}(n, batch_size) where S<:BatchingStrategy
return new{S}(n, batch_size, 1)
end
end
function initialize(obj::BatchFactory{T}, x::Array{S, 1}) where {T<:BatchingStrategy, S<:Any}
obj.n = length(x)
obj.cur_batch = 1
end
function next(obj::BatchFactory{SequentialScan})
start_pos = obj.batch_size * (obj.cur_batch - 1) + 1
end_pos = min(start_pos + obj.batch_size - 1, obj.n)
obj.cur_batch = obj.cur_batch + 1
return start_pos:end_pos
end
function Base.reset(obj::BatchFactory{SequentialScan})
obj.cur_batch = 1
end
function has_next(obj::BatchFactory{SequentialScan})
return obj.batch_size * (obj.cur_batch - 1) + 1 <= obj.n
end
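# Editor's usage sketch (not part of the original source): sequentially scan a
# vector in batches of 32; `next` returns index ranges such as 1:32, 33:64, ...
function demo_sequential_scan(x::Vector)
  bf = BatchFactory{SequentialScan}(size = 32)
  initialize(bf, x)
  batches = UnitRange{Int64}[]
  while has_next(bf)
    push!(batches, next(bf))
  end
  reset(bf)   # rewind so the factory can be scanned again
  return batches
end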
end # end of BatchUtils
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3670 | using MatrixCompletion.Concepts
import LinearAlgebra
struct RelativeError end
struct AbsoluteError end
# counts the entries of `x` that lie farther than `of` from `at`,
# i.e. the number of entries *outside* the given radius
function within_radius(x; of = 1e-5, at = 0)
    return sum(Int.(abs.(x .- at) .> of))
end
function Concepts.provide(object::RelativeError,x,y;
metric::Any = x -> LinearAlgebra.norm(x,2),
base_metric::Any=metric)
return metric(x-y)/base_metric(y)
end
function Concepts.provide(object::AbsoluteError,x,y;
metric::Any= x-> LinearAlgebra.norm(x,2))
return metric(x - y)
end
function Concepts.provide(object::Diagnostics{Int64})
return "int64"
end
function Concepts.provide(object::Diagnostics{<:Any};
reference::Optional{VecOrMatOfReals}=nothing,
input_data::Optional{VecOrMatOfReals}=nothing)
if isnothing(input_data) && isnothing(reference)
throw(DomainError("[provide(Diagnostics)]: input_data and/or reference variable missing"))
end
return Dict("relative-error[#within-radius(1e-5)]" => Concepts.provide(RelativeError(),
input_data,reference,
metric = x -> within_radius(x),
base_metric = x -> LinearAlgebra.norm(abs.(x) .+ 1 ,0)),
"absolute-error[#within-radius(1e-5)]" => Concepts.provide(AbsoluteError(),
input_data, reference,
metric = x -> within_radius(x)),
"relative-error[#within-radius(1)]" => Concepts.provide(RelativeError(),
input_data,reference,
metric = x -> within_radius(x,of=1),
base_metric = x -> LinearAlgebra.norm(abs.(x) .+ 1 ,0)),
"absolute-error[#within-radius(1)]" => Concepts.provide(AbsoluteError(),
input_data, reference,
metric = x -> within_radius(x,of=1)),
"relative-error[L1]" => Concepts.provide(RelativeError(),
input_data,reference,
metric = x -> LinearAlgebra.norm(x,1)),
"absolute-error[L1]" => Concepts.provide(AbsoluteError(),
input_data, reference,
metric = x -> LinearAlgebra.norm(x,1)),
"relative-error[L2]" => Concepts.provide(RelativeError(),
input_data,reference,
metric = x -> LinearAlgebra.norm(x,2)^2),
"absolute-error[L2]" => Concepts.provide(AbsoluteError(),
input_data,reference,
metric = x -> LinearAlgebra.norm(x,2)^2)
)
end
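# Editor's usage sketch (not part of the original source): comparing a
# prediction against a reference produces the Dict of metrics built above.
function demo_diagnostics()
    yhat = [1.0, 2.0, 3.1]
    y    = [1.0, 2.0, 3.0]
    return provide(Diagnostics{Any}(), input_data = yhat, reference = y)
end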
# function print_diagnostics(object::Diagnostics{<:Any};
# reference::Optional{VecOrMatOfReals}=nothing,
# input_data::Optional{VecOrMatOfReals}=nothing)
# end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4513 | @overload
function Concepts.forward_map(distribution::Union{AbstractPoisson,Type{Val{:Poisson}}},
canonical_parameter::Array{T};
non_canonical_parameter::Union{Array{Float64},Nothing} = nothing,
non_canonical_map = nothing) where T<:Real
if !isnothing(non_canonical_parameter)
## TODO
end
return exp.(canonical_parameter)
end
@overload
function Concepts.forward_map(distribution::Union{AbstractGamma,Type{Val{:Gamma}}},
canonical_parameter::Array{T};
non_canonical_parameter::Union{Array{Float64},Nothing} = nothing,
non_canonical_map = nothing) where T<:Real
if !isnothing(non_canonical_parameter)
## TODO
end
canonical_parameter = 1 ./ exp.(canonical_parameter)
return canonical_parameter
# canonical_parameter = -1 .* abs.(canonical_parameter)
# return -1 ./ canonical_parameter
# return 1 ./ canonical_parameter
end
@overload
function Concepts.forward_map(distribution::Union{AbstractNegativeBinomial,Type{Val{:NegativeBinomial}}},
canonical_parameter::Array{T};
non_canonical_parameter::Union{Array{Float64},Nothing} = nothing,
non_canonical_map = nothing,
r_estimate = nothing) where T<:Real
if !isnothing(non_canonical_parameter)
## TODO
end
# @show(r_estimate)
return r_estimate ./ (exp.(exp.(canonical_parameter)) .- 1)
end
@overload
function Concepts.forward_map(distribution::Union{AbstractBernoulli,Type{Val{:Bernoulli}}},
canonical_parameter::Array{T};
non_canonical_parameter::Union{Array{Float64},Nothing} = nothing,
non_canonical_map = nothing) where T<:Real
if !isnothing(non_canonical_parameter)
## TODO
end
ex = exp.(canonical_parameter)
return ex ./ (1 .+ ex)
# return (Int.(sign.(canonical_parameter)) .+ 1) ./ 2
end
@overload
function Concepts.forward_map(distribution::Union{AbstractGaussian,Type{Val{:Gaussian}}},
canonical_parameter::Array{T};
non_canonical_parameter::Union{Array{Float64},Nothing} = nothing,
non_canonical_map = nothing) where T<:Real
if !isnothing(non_canonical_parameter)
## TODO
end
return canonical_parameter
end
@overload
function Concepts.forward_map(distribution::Symbol,
canonical_parameter::Array{T};
non_canonical_parameter::Union{Array{Float64},Nothing} = nothing,
non_canonical_map = nothing) where T<:Real
return Concepts.forward_map(Val{distribution},canonical_parameter,
non_canonical_map=non_canonical_map,
non_canonical_parameter=non_canonical_parameter)
end
@overload
function Concepts.predict(distribution::Union{AbstractPoisson,Type{Val{:Poisson}}},mean::Any;
custom_prediction_function=nothing)
if !isnothing(custom_prediction_function)
return -1.0
end
return round.(mean)
end
@overload
function Concepts.predict(distribution::Union{AbstractBernoulli,Type{Val{:Bernoulli}}},mean::Any;
custom_prediction_function=nothing)
if !isnothing(custom_prediction_function)
return -1.0
end
return Int.(mean .> 0.5)
end
@overload
function Concepts.predict(distribution::Union{AbstractGaussian,Type{Val{:Gaussian}}},mean::Any;
custom_prediction_function=nothing)
if !isnothing(custom_prediction_function)
return -1.0
end
return mean
end
@overload
function Concepts.predict(distribution::Union{AbstractGamma,Type{Val{:Gamma}}},mean::Any;
custom_prediction_function=nothing)
if !isnothing(custom_prediction_function)
return -1.0
end
return mean
end
@overload
function Concepts.predict(distribution::Union{AbstractNegativeBinomial,Type{Val{:NegativeBinomial}}},mean::Any;
custom_prediction_function=nothing)
if !isnothing(custom_prediction_function)
return -1.0
end
return round.(mean)
end
@overload
const Concepts.predict(obj::Symbol,arg1;custom_prediction_function=nothing) =
Concepts.predict(Val{obj},arg1;custom_prediction_function=custom_prediction_function)
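# Editor's usage sketch (not part of the original source): the Symbol overloads
# above allow a canonical-parameter -> mean -> prediction round trip, e.g. for
# the Poisson link (mean = exp(theta), prediction = rounded mean).
function demo_poisson_link(theta::Array{Float64})
    mu = Concepts.forward_map(:Poisson, theta)
    return Concepts.predict(:Poisson, mu)
end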
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3217 | module FastEigen
export FortranArnoldiMethod,
       NativeLanczos,
       NativeEigen,
       NativeLOBPCG,
       NativeArnoldiMethod,
       KrylovMethods,
       ARPACK
export eigs
import KrylovKit,IterativeSolvers,ArnoldiMethod,Arpack,LinearAlgebra
mutable struct FortranArnoldiMethod
maxiter::Union{Float64,Nothing}
tol::Union{Float64,Nothing}
function FortranArnoldiMethod(;maxiter=nothing,tol=nothing)
instance = new();
instance.maxiter = maxiter;
instance.tol = tol;
return instance;
end
end
mutable struct NativeLanczos
maxiter::Union{Float64,Nothing}
tol::Union{Float64,Nothing}
function NativeLanczos(;maxiter=nothing,tol=nothing)
instance = new();
instance.maxiter = maxiter;
instance.tol = tol;
return instance;
end
end
mutable struct NativeEigen end
mutable struct NativeArnoldiMethod
maxiter::Union{Float64,Nothing}
tol::Union{Float64,Nothing}
function NativeArnoldiMethod(;maxiter=nothing,tol=nothing)
instance = new();
instance.maxiter = maxiter;
instance.tol = tol;
return instance;
end
end
mutable struct NativeLOBPCG
maxiter::Union{Float64,Nothing}
tol::Union{Float64,Nothing}
function NativeLOBPCG(;maxiter=200,tol=nothing)
instance = new();
instance.maxiter = maxiter;
instance.tol = tol;
return instance;
end
end
struct KrylovMethods end
struct ARPACK end
function eigs(algorithm::KrylovMethods, x::Array{Float64, 2};
nev::Int64 = 10,
order::Symbol = :LR,
symmetric::Bool = true,
maxiter::Int64 = 100)
local λ, X
if nev <= 20
λ, X = KrylovKit.eigsolve(x, nev, order;issymmetric = symmetric)
else
λ, X = KrylovKit.eigsolve(x, nev, order;issymmetric = symmetric, krylovdim = nev + 5, maxiter = maxiter)
end
X = hcat(X...)
return λ[1:nev], X[:, 1:nev]
end
function eigs(algorithm::ARPACK, x::Array{Float64, 2};
nev::Int64 = 10,
order::Symbol = :LM,
symmetric::Bool = true,
maxiter::Int64 = 100,
tol = 0.0)
local λ, X
λ, X = Arpack.eigs(x, nev = nev, which = order, tol = tol, maxiter = maxiter)
# @show(λ)
return λ, X
end
function eigs(algorithm::NativeEigen,x::Array{Float64,2};
nev::Integer=6,eigen_vectors::Bool=true, order::Symbol=:LR)
local eigen_val_id;
eigen_decomp = LinearAlgebra.eigen(x);
if order == :LR
eigen_val_id = Base.partialsortperm(eigen_decomp.values,1:nev,rev=true);
elseif order == :SR
eigen_val_id = Base.partialsortperm(eigen_decomp.values,1:nev,rev=false)
end
if eigen_vectors == true
return eigen_decomp.values[eigen_val_id],eigen_decomp.vectors[:,eigen_val_id]
end
return eigen_decomp.values[eigen_val_id];
end
function eigs(algorithm::NativeLOBPCG,x::Array{Float64,2};
nev::Integer=6,eigen_vectors::Bool=true, order::Symbol=:LR)
local eigen_decomp;
if order == :LR
eigen_decomp = IterativeSolvers.lobpcg(x,true, nev);
end
if order == :SR
eigen_decomp = IterativeSolvers.lobpcg(x,false, nev);
end
if eigen_vectors == true
return eigen_decomp.λ, eigen_decomp.X;
end
return eigen_decomp.λ
end
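# Editor's usage sketch (not part of the original source): leading eigenpairs
# of a symmetric matrix using the dense fallback solver defined above.
function demo_native_eigen(n::Int = 50, nev::Int = 5)
    A = randn(n, n)
    S = (A + A') / 2   # symmetrize
    return eigs(NativeEigen(), S; nev = nev, order = :LR)
end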
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4116 | using ..Concepts
using ..ModelFitting
import LinearAlgebra
export IndexTracker
function has_same_dimension(data)
if length(data) == 0
return false
end
first_sz = size(data[1])
for _data in data
if size(_data) != first_sz
return false
end
end
return true
end
mutable struct IndexTracker{T<:Any} <: AbstractTracker
indices::Optional{Dict{T, Array{CartesianIndex{N}, 1}}} where N<:Any
dimension::Optional{Tuple{Vararg{Int64}}}
function IndexTracker{T}() where {T<:Any}
return new(nothing, nothing)
end
function IndexTracker{T}(data::Vararg{Array{T, N}}) where {T<:Any, N<:Any}
if !has_same_dimension(data)
@warn("Data stream of different dimension. Won't construct.")
throw(DimensionMismatch())
end
new_object = new(Dict{T,Array{CartesianIndex{N}, 1}}(), size(data[1]))
for _data in data
disjoint_join(new_object, _data)
end
return new_object
end
end
@overload
function Base.getindex(object::IndexTracker{T}, i::T) where T<:Any
if isnothing(object.indices)
@warn "indices are not constructed."
return nothing
end
return object.indices[i]
end
@overload
function Base.getproperty(object::IndexTracker{T}, sym::Symbol) where T<:Any
if sym == :keys
return collect(keys(object.indices))
elseif sym == :dimension
return getfield(object, :dimension)
elseif sym == :indices
return getfield(object, :indices)
elseif sym == :size
return object.dimension
elseif sym == :dim
return object.dimension
end
end
@overload
function Base.size(object::IndexTracker)
return object.dimension
end
@overload
function Concepts.provide(object::IndexTracker{T}, data::Vararg{Array{T}}) where T<:Any
    return IndexTracker{T}(data...)   # splat: the constructor takes a Vararg of arrays
end
@overload
function Concepts.groupby(obj::IndexTracker{T}, list::Array{T}) where T<:Any
# result = Dict{T, Dict{T, Array{<:CartesianIndex}}}()
result = Dict{T, Any}()
for a_key in collect(unique(obj.keys))
result[a_key] = Dict{T, Array{<:CartesianIndex}}()
for sym in list
result[a_key][sym] = intersect(obj[a_key], obj[sym])
end
end
return result
end
# @overload
# function Base.convert(::Type{Array{<:CartesianIndex}}, x::Union{Array{Int64, 1}, Array{CartesianIndex}})
# if typeof(x) <: Array{Int64,1}
# return [CartesianIndex{1}(_x) for _x in x]
# end
# return x
# end
@overload
function Concepts.type_conversion(::Type{Array{<:CartesianIndex}}, x::Union{Array{Int64, 1}, Array{CartesianIndex{2}, 1}})
if typeof(x) <: Array{Int64,1}
return [CartesianIndex{1}(_x) for _x in x]
end
return x
end
# function disjoint_partition(a::IndexTrakcer{T}, b::Array{T, N}) where {T<:Any, N<:Any}
# if isnothing(a.dimension)
# a.dimension = size(b)
# elseif size(a) != size(b)
# @show(size(a))
# @show(size(b))
# throw(DimensionMismatch())
# end
# if isnothing(a.indices)
# a.indices = Dict{T, Array{CartesianIndex{N}, 1}}()
# end
# if length(intersect(unique(a.keys), unique(b))) > 0
# @warn("New View is not disjoint from the old")
# throw(MethodError())
# end
# for sym in collect(unique(b))
# # a.indices[sym] = convert(Array{<:CartesianIndex} ,findall(x -> x == sym, b))
# a.indices[sym] = type_conversion(Array{<:CartesianIndex}, findall(x -> x == sym, b))
# end
# end
@overload
function Concepts.disjoint_join(a::IndexTracker{T}, b::Array{T, N}) where {T<:Any, N<:Any}
if isnothing(a.dimension)
a.dimension = size(b)
elseif size(a) != size(b)
@show(size(a))
@show(size(b))
throw(DimensionMismatch())
end
if isnothing(a.indices)
a.indices = Dict{T, Array{CartesianIndex{N}, 1}}()
end
if length(intersect(unique(a.keys), unique(b))) > 0
@warn("New View is not disjoint from the old")
throw(MethodError())
end
for sym in collect(unique(b))
# a.indices[sym] = convert(Array{<:CartesianIndex} ,findall(x -> x == sym, b))
a.indices[sym] = type_conversion(Array{<:CartesianIndex}, findall(x -> x == sym, b))
end
end
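# Editor's usage sketch (not part of the original source): an IndexTracker
# records, for every symbol in a matrix, the CartesianIndex positions at which
# it occurs; views joined via `disjoint_join` must not share symbols.
function demo_index_tracker()
    types = [:Gaussian :Bernoulli; :Bernoulli :Gaussian]
    tracker = IndexTracker{Symbol}(types)
    return tracker[:Gaussian]   # CartesianIndex(1, 1) and CartesianIndex(2, 2)
end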
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 39 | module LossFactory
function
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1803 | using Printf
using PrettyTables
const bad_compare_msg(obj,exp,got) = @sprintf("Expected %s to be %s, instead got %s",string(obj),string(exp),string(got))
@overload
function Concepts.check(object::Type{Val{:rank}},of::Matrix{T},is::Optional{Integer}=nothing) where T<:Number
if isnothing(is)
return LinearAlgebra.rank(of)
end
got_rank = LinearAlgebra.rank(of)
if got_rank == is
return true
end
@warn bad_compare_msg(:rank,is,got_rank)
return false
end
@overload
function Concepts.check(object::Type{Val{:dimension}},of::Array{T,2},is::Optional{Tuple}=nothing) where T<:Any
if isnothing(is)
return size(of)
end
got_size = size(of)
if got_size == is
return true
end
@warn bad_compare_msg(:dimension,is,got_size)
return false
end
@overload
function Concepts.check(object::Union{Type{Val{:l2difference}},Type{Val{:l2diff}}},
a::Union{T1,Array{T1}},b::Union{T2,Array{T2}},
against::Optional{S}=nothing) where {T1<:Number,T2<:Number,S<:Real}
if isnothing(against)
return LinearAlgebra.norm(a-b,2)
end
got_diff = LinearAlgebra.norm(a-b,2)
if abs(got_diff - against) < 1e-5
return true
end
@warn bad_compare_msg(:l2difference,against,got_diff)
return false
end
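# Editor's usage sketch (not part of the original source): `check` acts as a
# query when the expected value is omitted and as a boolean assertion otherwise.
function demo_check()
    M = LinearAlgebra.diagm(0 => [1.0, 2.0, 0.0])
    r  = Concepts.check(Val{:rank}, M)               # query: returns 2
    ok = Concepts.check(Val{:dimension}, M, (3, 3))  # assertion: returns true
    return r, ok
end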
@overload
function Base.zeros(like::Array{T}) where T<:Any
return zeros(size(like))
end
# This is necessary due to compiler bug??
# @overload
# const Concepts.check(object::Symbol,arg1) = Concepts.check(Val{object},arg1)
# @overload
# const Concepts.check(object::Symbol,arg1,arg2) = Concepts.check(Val{object},arg1,arg2)
# @overload
# const Concepts.check(object::Symbol,arg1,arg2,arg3) = Concepts.check(Val{object},arg1,arg2,arg3)
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 26 | module PreProcessing
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1958 | module PrettyPrinter
using Printf
export append_both_ends,
toprule,
bottomrule,
table_header,
add_row
function append_both_ends(str::String, token::String)
return token * str * token
end
function Base.similar(str::String, token::Char)
return token^length(str)
end
function toprule(col_name::Array{String}; io::IO = Base.stdout)
new_rule = map(x -> Base.similar(append_both_ends(x, ""), '-'), col_name)
push!(new_rule, "")
@printf(io, "%s\n", foldl((x, y) -> x * "+" *y, new_rule, init=""))
# @printf("%s\n", new_rule2)
end
function bottomrule(col_name::Array{String}; io::IO = Base.stdout)
new_rule = map(x -> Base.similar(append_both_ends(x, ""), '-'), col_name)
push!(new_rule, "")
@printf(io, "%s\n", foldl((x, y) -> x * "+" *y, new_rule, init=""))
# @printf("%s\n", new_rule2)
end
function header_column(names::Array{String}; io::IO = Base.stdout)
transformed_names = Base.similar(names)
map!(x -> append_both_ends(x, ""), transformed_names, names)
push!(transformed_names, "")
@printf(io, "%s\n", foldl((x, y) -> x * "|" * y, transformed_names, init=""))
end
function ensure_width(w::Int64, str::String)
    if length(str) >= w
        return str
    end
    # pad both sides with roughly (w - length(str)) / 2 spaces each
    return append_both_ends(str, Base.repeat(" ", trunc(Int64, (w - length(str)) / 2) + 1))
end
function table_header(col_names::Array{String}; io::IO = Base.stdout)
# copy_col_names = map(x -> ensure_width(7, x), col_names)
copy_col_names = col_names
toprule(copy_col_names, io = io)
header_column(copy_col_names, io = io)
bottomrule(copy_col_names, io = io)
end
function add_row(header::Array{String}; data::Array{String}, io::IO = Base.stdout)
# transformed_data = collect(zip(header, data))
transformed_data = map(x -> rpad(append_both_ends(x[2], ""), length(append_both_ends(x[1], "")), " "), collect(zip(header, data)))
push!(transformed_data, "")
@printf(io, "%s\n", foldl((x, y) -> x * "|" * y, transformed_data, init=""))
end
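# Editor's usage sketch (not part of the original source): render a small
# ASCII table; column widths come from the (padded) header strings.
function demo_table(io::IO = Base.stdout)
    header = ["iter   ", "loss    "]
    table_header(header, io = io)
    add_row(header, data = ["1", "0.532"], io = io)
    add_row(header, data = ["2", "0.481"], io = io)
    bottomrule(header, io = io)
end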
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4596 | using ..Concepts
import Random
import Distributions
import StatsBase   # used below to subsample columns when padding to full width
struct FixedRankMatrix{T<:UnivariateDistributions} <: AbstractFixedRankMatrix
dist::T
rank::Optional{Integer}
draw
@abstract_instance
function FixedRankMatrix{T}() where T<:UnivariateDistributions
#@debug "abstract constructor of FixedRankMatrix"
return new{T}()
end
function FixedRankMatrix{T}(dist::T;rank::Optional{Integer}=nothing) where T<:UnivariateDistributions
return new{T}(dist,rank)
end
end
const FixedRankMatrix(dist::T;rank::Optional{Integer}=nothing) where T<:UnivariateDistributions =
begin
isnothing(rank) ? FixedRankMatrix{T}(dist) : FixedRankMatrix{T}(dist,rank=rank)
end
@overload
function Concepts.provide(object::FixedRankMatrix{T};row::Integer=10,col::Integer=10) where T<:UnivariateDistributions
return rand(object,row,col)
end
function GaussianMatrix(row::Integer,col::Integer;
rank::Optional{Integer}=nothing,
μ::T=0, σ::T=1) where T<:Real
if isnothing(rank)
return rand(Distributions.Gaussian(μ,σ),row,col)
end
return rand(FixedRankMatrix(Distributions.Gaussian(μ,σ),rank=rank),row,col)
end
function PoissonMatrix(row::Integer,col::Integer;
                       rank::Optional{Integer}=nothing,
                       λ = 5)
if isnothing(rank)
return rand(Distributions.Poisson(λ),row,col)
end
return rand(FixedRankMatrix(Distributions.Poisson(λ),rank=rank),row,col)
end
function BernoulliMatrix(row::Integer,col::Integer;
                         rank::Optional{Integer}=nothing,
                         p = 0.5)
if isnothing(rank)
return rand(Distributions.Bernoulli(p),row,col)
end
return rand(FixedRankMatrix(Distributions.Bernoulli(p),rank=rank),row,col)
end
function GammaMatrix(row::Integer,col::Integer;
                     rank::Optional{Integer}=nothing,
                     α=5,θ=0.5)
if isnothing(rank)
return rand(Distributions.Gamma(α,θ),row,col)
end
return rand(FixedRankMatrix(Distributions.Gamma(α,θ),rank=rank),row,col)
end
@overload
function Random.rand(object::FixedRankMatrix{T},row::Integer,col::Integer) where T<:UnivariateDistributions
    used_rank = nothing
    if isnothing(object.rank)
        @warn "Rank is not specified. Using formula: rank = ⌊0.3 * (row ∧ col)⌋"
        used_rank = floor(Int, 0.3 * min(row, col))
    elseif object.rank > min(row, col)
        # a feasible rank can never exceed min(row, col)
        used_rank = min(row, col)
    else
        used_rank = object.rank
    end
    ensure_feasible(row, col, used_rank)
    base_matrix = Random.rand(object.dist, row, used_rank)
    # pad with duplicated columns so the result is row × col but keeps rank `used_rank`
    redundant_matrix = base_matrix[:, StatsBase.sample(1:used_rank, col - used_rank)]
    return hcat(base_matrix, redundant_matrix) * 1.0
end
@overload
function Random.rand(mixed_dists::Vector{Tuple{T,I,I}}) where {T<:FixedRankMatrix{<:UnivariateDistributions}, I<: Integer}
#ensure_feasible(mixed_dists)
# TODO: Make a ensure feasible function
gen_mat = mapreduce(x -> rand(x[1],x[2],x[3]),(x,y)->hcat(x,y),mixed_dists);
return gen_mat;
end
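# Editor's usage sketch (not part of the original source): draw a 100x100
# mixed-type matrix whose Gaussian and Bernoulli blocks each have rank 5.
function demo_mixed_fixed_rank()
    return rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = 5), 100, 60),
                 (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 5), 100, 40)])
end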
function ensure_feasible(row::T,col::T,rank::T) where T<:Integer
if col <0 || row <0
throw(DomainError("The dimension of the matrix should be positive integers."))
end
if rank <= 0 || rank > min(row,col)
throw(DomainError("The rank of the matrix should be a positive integer less than min(row,col)"))
end
end
"""
Generate a random matrix of given rank with distribution of 'dist' of given size (rol,col) from user input.
Precondition(s):
1. The distribution has to be an element from the "Distributions.jl" and of subtype "Univariate".
"""
# function rand(dist::T,row::I,col::I;target_rank::I) where {T<:UnivariateDistributions,I<:Integer}
# ensure_feasible(row,col,target_rank);
# base_matrix = Random.rand(dist,row,target_rank)
# redundant_matrix = base_matrix[:,StatsBase.sample(1:target_rank,col-target_rank)];
# return hcat(base_matrix,redundant_matrix) * 1.0;
# end
"""
Generate a random matrix of consisting multiple types of distributions.
Precondition(s):
1. length(mixed_dists) has to be >= 1.
"""
# function rand(mixed_dists::Vector{Tuple{T,Pair{I,I},I}}) where {T<:UnivariateDistributions,I<:Integer}
# _ensure_feasible(mixed_dists)
# gen_mat = mapreduce(x -> rand(x[1],x[2].first,x[2].second;target_rank = x[3]),(x,y)->hcat(x,y),mixed_dists);
# return gen_mat;
# end
# end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2987 | # import ..Concepts:AbstractSamplingModels,
# BernoulliModel,
# VecOrMatOfNumbers,
# provide
using ..Concepts
import StatsBase
struct Sampler{T<:AbstractSamplingModels}
model::T
draw
function Sampler{BernoulliModel}()
# abstract type constructor
return new{BernoulliModel}()
end
function Sampler{UniformModel}()
# abstract type constructor
return new{UniformModel}()
end
function Sampler{NonUniformModel}()
# abstract type constructor
return new{NonUniformModel}()
end
function Sampler{BernoulliModel}(model::BernoulliModel)
draw = begin
function(x::VecOrMatOf{Number})
if isa(x,Vector)
n = length(x)
mask = [rand(Distributions.Bernoulli(model.rate)) == 1 ? 1 : missing for i in 1:n]
return mask .* x
end
n,m = size(x);
mask = [rand(Distributions.Bernoulli(model.rate)) == 1 ? 1 : missing for i in 1:n,j in 1:m]
return mask .* x
end
end
return new{BernoulliModel}(model,draw)
end
function Sampler{UniformModel}(model::UniformModel)
draw = begin
function(x::VecOrMatOf{T}) where T<:Any
sampled_object = nothing
if isa(x,Vector)
n = length(x)
mask = [CartesianIndex(i) for i in StatsBase.sample(1:n, floor(Int, n * model.rate))]   # floor: n * rate need not be an integer
# sampled_object = convert(MaybeMissing{T},Array{Missing}(undef,n))
sampled_object = type_conversion(MaybeMissing{T},Array{Missing}(undef,n))
for i in mask
sampled_object[i] = x[i]
end
else
row,col = size(x)
mask = [CartesianIndex(StatsBase.sample(1:row),StatsBase.sample(1:col)) for i in 1:floor(Int, row * col * model.rate)]
# display(mask)
# sampled_object = convert(MaybeMissing{T},Array{Missing,2}(undef,row,col))
sampled_object = type_conversion(MaybeMissing{T},Array{Missing,2}(undef,row,col))
# display(sampled_object)
# display(x)
for i in mask
sampled_object[i] = x[i]
end
end
return sampled_object
end
end
return new(model,draw)
end
end
const Sampler(model::BernoulliModel) = Sampler{BernoulliModel}(model)
const Sampler(model::UniformModel) = Sampler{UniformModel}(model)
@overload
function Concepts.provide(object::Sampler{Concepts.BernoulliModel};rate)
return Sampler(BernoulliModel(rate))
end
@overload
function Concepts.provide(object::Sampler{Concepts.UniformModel};rate)
return Sampler(UniformModel(rate))
end
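# Editor's usage sketch (not part of the original source): keep each entry of a
# matrix with probability 0.8 and replace the rest with `missing`.
function demo_bernoulli_sampling(x::Matrix{Float64})
    sampler = Sampler(BernoulliModel(0.8))
    return sampler.draw(x)
end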
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 429 | module Utilities
using ..Concepts
import Distributions
import LinearAlgebra
# export ErrorMatrix,
# relative_error,
# total_error
include("./Misc.jl")
include("./Diagnostics.jl")
include("./ExponentialFamily.jl")
include("./FastEigen.jl")
include("./RandomMatrices.jl")
include("./Indexing.jl")
include("./Sampling.jl")
include("./BatchUtils.jl")
include("./PrettyPrinter.jl")
# include("./TestModule.jl")
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4941 | using Test
import Random
import Distributions
import StatsBase
import MatrixCompletion.Utilities.Indexing:_check_bernoulli, _check_gaussian,_check_poisson
bernoulliMatrixTest1 = Random.rand(Distributions.Bernoulli(0.5),100);
nonBernoulliMatrixTest1 = Random.rand(Distributions.Uniform(1,10),100);
poissonMatrixTest1 = Random.rand(Distributions.Poisson(3),100);
nonPoissonMatrixTest1 = Random.rand(Distributions.Uniform(1,10),100);
poissonBernoulliDifferenceTest1 = Random.rand(Distributions.Poisson(3),100);
poissonBernoulliDifferenceTest2 = Random.rand(Distributions.Bernoulli(0.4),100);
@test _check_bernoulli(bernoulliMatrixTest1) == true
@test _check_bernoulli(nonBernoulliMatrixTest1) == false
@test _check_poisson(poissonMatrixTest1) == true
@test _check_poisson(nonBernoulliMatrixTest1) == false
@test _check_bernoulli(poissonBernoulliDifferenceTest1) == false
@test _check_poisson(poissonBernoulliDifferenceTest1) == true
@test _check_poisson(poissonBernoulliDifferenceTest2) == false
@test _check_bernoulli(poissonBernoulliDifferenceTest2) == true
import MatrixCompletion.Utilities.Indexing:construct_type_matrix,construct_index_tracker,DIST_FLAGS
import MatrixCompletion.Utilities.Indexing:Bernoulli,
Poisson,
Gaussian,
Gamma,
NegativeBinomial
# test 1: bernoulli only
type_matrix_test_input1 = Random.rand(Distributions.Bernoulli(0.5),5,5)
type_matrix_test_expected1 = Array{Union{DIST_FLAGS,Missing}}(undef,5,5)
fill!(type_matrix_test_expected1,Bernoulli)
@test construct_type_matrix(type_matrix_test_input1) == type_matrix_test_expected1
construct_type_matrix(type_matrix_test_input1)
# test 2: poisson only
type_matrix_test_input2 = Random.rand(Distributions.Poisson(5),5,5)
type_matrix_test_expected2 = Array{Union{DIST_FLAGS,Missing}}(undef,5,5)
fill!(type_matrix_test_expected2,Poisson)
@test construct_type_matrix(type_matrix_test_input2) == type_matrix_test_expected2
# test 3: gaussian only
type_matrix_test_input3 = Random.rand(Distributions.Gaussian(0,1),5,5)
type_matrix_test_expected3 = Array{Union{DIST_FLAGS,Missing}}(undef,5,5)
fill!(type_matrix_test_expected3,Gaussian)
@test construct_type_matrix(type_matrix_test_input3) == type_matrix_test_expected3
import MatrixCompletion.Utilities.RandomMatrices.rand
# test4
type_matrix_test_input4 = rand([(Distributions.Bernoulli(0.5),50=>25,10),(Distributions.Gaussian(5,10),50=>25,10)])
type_matrix_test_expected4_part1 = Array{Union{DIST_FLAGS,Missing}}(undef,50,25)
fill!(type_matrix_test_expected4_part1,Bernoulli)
type_matrix_test_expected4_part2 = Array{Union{DIST_FLAGS,Missing}}(undef,50,25)
fill!(type_matrix_test_expected4_part2,Gaussian)
type_matrix_test_expected4 = hcat(type_matrix_test_expected4_part1,type_matrix_test_expected4_part2)
@test construct_type_matrix(type_matrix_test_input4) == type_matrix_test_expected4
import MatrixCompletion.Utilities.Indexing:IndexTracker
import MatrixCompletion.Utilities.Indexing:construct_index_tracker
# INDEX TRACKER TEST 1
id_tracker_test_input1 = construct_type_matrix(Random.rand(Distributions.Gaussian(0,1),10,10))
id_tracker_test_output1 = construct_index_tracker(input_type_matrix = id_tracker_test_input1)
id_tracker_test_expect1_gaussian = [CartesianIndex(i,j) for i in 1:10 for j in 1:10]
@test sort(id_tracker_test_expect1_gaussian) == sort(id_tracker_test_output1.Gaussian)
@test isempty(id_tracker_test_output1.Gamma)
@test isempty(id_tracker_test_output1.Bernoulli)
@test isempty(id_tracker_test_output1.Poisson)
@test isempty(id_tracker_test_output1.NegativeBinomial)
@test isempty(id_tracker_test_output1.Missing)
# INDEX TRACKER TEST 2
id_tracker_test_input2 = construct_type_matrix(rand([(Distributions.Bernoulli(0.5), 100=>25, 10),
(Distributions.Gaussian(5,10), 100=>25, 10),
(Distributions.Poisson(10), 100=>25, 10),
(Distributions.Gaussian(0,1), 100=>25, 10)]))
id_tracker_test_output2 = construct_index_tracker(input_type_matrix = id_tracker_test_input2)
id_tracker_test_expect2_gaussian = [CartesianIndex(i,j) for i in 1:100 for j in [26:50;76:100]]
id_tracker_test_expect2_bernoulli = [CartesianIndex(i,j) for i in 1:100 for j in 1:25]
id_tracker_test_expect2_poisson = [CartesianIndex(i,j) for i in 1:100 for j in 51:75]
@test sort(id_tracker_test_output2.Gaussian) == sort(id_tracker_test_expect2_gaussian)
@test sort(id_tracker_test_output2.Bernoulli) == sort(id_tracker_test_expect2_bernoulli)
@test sort(id_tracker_test_output2.Poisson) == sort(id_tracker_test_expect2_poisson)
@test isempty(id_tracker_test_output2.Missing)
@test isempty(id_tracker_test_output2.Gamma)
@test isempty(id_tracker_test_output2.NegativeBinomial)
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4830 | using MatrixCompletion
import Pkg
macro ensure_package(name::String)
if haskey(Pkg.installed(), name) == false
Pkg.add(name)
end
end
@ensure_package("TimerOutputs")
@ensure_package("HDF5")
@ensure_package("JSON")
@ensure_package("DataFrames")
@ensure_package("Distributions")
using TimerOutputs
using Test, Printf
using HDF5
using JSON
using DataFrames
import Distributions, Random
import Serialization
function unit_test_train_subloss(dist = Poisson();
gradient_eval = Losses.provide(Loss{Poisson}()),
input_distribution = Distributions.Poisson(5),
input_size = 500,
ρ = 0,
step_size = 0.1,
max_iter = 100)
y = rand(input_distribution, input_size) * 1.0
mle_x = train(gradient_eval,
fx = rand(input_size),
y = y,
c = zeros(input_size),
ρ = ρ,
iter = max_iter,
γ = step_size);
prediction = predict(dist,forward_map(dist,mle_x))
return provide(Diagnostics{Poisson()}(),
input_data=prediction, reference=y)
end
# struct MatrixCompletionModel end
# function predict(model::MatrixCompletionModel;
# completed_matrix, type_tracker)
# predicted_matrix = similar(completed_matrix)
# for dist in keys(type_tracker.indices)
# idx = type_tracker[convert(Symbol,dist)]
# predicted_matrix[idx] .= predict(dist, forward_map(dist, completed_matrix[idx]))
# end
# return predicted_matrix
# end
# function Base.convert(::Type{T}
function Base.truncate(::Type{Dict{String, Number}}, object::Dict{String, T}) where T<:Any
return convert(Dict{String, Number},
filter(x -> typeof(x.second) <: Number, object))
end
function log_simulation_result(dist::ExponentialFamily, completed_matrix, truth_matrix, type_tracker, tracker; io = Base.stdout)
predicted_matrix = similar(completed_matrix)
predicted_matrix[type_tracker[convert(Symbol,dist)]] .= predict(dist,
forward_map(dist,
completed_matrix[type_tracker[convert(Symbol, dist)]]))
summary_missing_only = provide(Diagnostics{Any}(),
reference = truth_matrix[tracker[convert(Symbol, dist)][:Missing]],
input_data = predicted_matrix[tracker[convert(Symbol, dist)][:Missing]])
@printf(io, "\nSummary on %s (Only Missing)\n%s\n", string(convert(Symbol, dist)), repeat("-", 80))
show(io, MIME("text/plain"), summary_missing_only)
print(io, "\n\n")
summary_all = provide(Diagnostics{Any}(),
reference = truth_matrix[type_tracker[convert(Symbol, dist)]],
input_data = predicted_matrix[type_tracker[convert(Symbol, dist)]])
@printf(io, "\nSummary on %s (Missing && Observed)\n%s\n", string(convert(Symbol, dist)), repeat("-", 80))
show(io, MIME("text/plain"), summary_all)
print(io, "\n\n")
# show(io, MIME("text/plain"), timer)
# close(io)
end
function Base.convert(::Type{Array}, object::Tuple{T, T}) where T
return [object[1],object[2]]
end
function Base.convert(::Type{Array}, object::Array{Tuple{T, T}}) where T<:Real
    converted = Array{T, 2}(undef, length(object), 2)
    for i in 1:length(object)   # each tuple becomes one row of the matrix
        converted[i, :] .= convert(Array, object[i])
    end
    return converted
end
function Serialization.serialize(object::T) where T<:Number
return object
end
function Serialization.serialize(object::Array{T}) where T<:Real
return object
end
function Serialization.serialize(object::Array{T}) where T<:CartesianIndex
return convert(Array, convert.(Tuple, object))
end
function Serialization.serialize(object::Dict)
return JSON.json(object)
end
function Serialization.serialize(object::Tuple)
return object
end
function pickle(filepath::String, data::AbstractDict)
h5open(filepath, "w") do file
for (k, v) in data
write(file, String(k), Serialization.serialize(v))
end
end
end
function pickle(filepath::String, vars...)
h5open(filepath, "w") do file
for v in vars
write(file, v.first, Serialization.serialize(v.second))
end
end
end
function read_pickled(filepath::String, var_name)
c = h5open(filepath, "r") do file
read(file, var_name);
end;
return c
end
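# Editor's usage sketch (not part of the original source; the temporary file
# name is arbitrary): round-trip a matrix and a scalar through HDF5.
function demo_pickle(tmpfile::String = tempname() * ".h5")
    pickle(tmpfile, "truth" => randn(3, 3), "rank" => 5)
    return read_pickled(tmpfile, "truth")
end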
# const GLOBAL_SIMULATION_RESULTS_DIR = "/home/jasonsun0310/datavolume/matrix_completion_simulation_result/"
const GLOBAL_SIMULATION_RESULTS_DIR = "/home/jasonsun/mcdata/"
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 9648 | using Test,Printf,LinearAlgebra
import Distributions
using MatrixCompletion
# import Random,Distributions
# import MatrixCompletion.Losses
# import MatrixCompletion.Losses:train,SGD,train_logistic
# import MatrixCompletion.Convex.ADMM:complete
# import MatrixCompletion.Utilities.RandomMatrices:rand
# import MatrixCompletion.Utilities.Sampling:sample,BernoulliModel
# import MatrixCompletion.Utilities.Indexing:construct_index_tracker,construct_type_matrix
# import MatrixCompletion.Utilities.Indexing:Bernoulli,Gaussian,Poisson,Gamma,NegativeBinomial,Missing,DIST_FLAGS
# import MatrixCompletion.Convex.ADMM:MatrixCompletionModel,
# predict,
# _get_column_distribution_type,
# _get_complete_type_matrix,
# _predict_bernoulli,
# _predict_poisson,
# _predict_gamma,
# _predict_negative_binomial,
# _predict_gaussian
# using MatrixCompletion.Utilities
# function test_train_logistic(;size = 1000,ρ = 0.1,γ = 0.2,maxIter = 20)
# y = Int.(Random.bitrand(size));
# mle_x = train(Losses.Logistic(), Random.rand(size), y, zeros(size), ρ, iter = maxIter, γ = γ);
# @test sum((Int.(sign.(mle_x)) .+ 1) / 2 .== y)
# size > 0.99
# end
# test_train_logistic() #
# function test_train_logistic_optimized(f;size = 1000,ρ = 0.1,γ = 0.2,maxIter = 20)
# y = Int.(Random.bitrand(size));
# mle_x = train_logistic(Random.rand(size), y, zeros(size), ρ, iter = maxIter, γ = γ);
# @test sum((Int.(sign.(mle_x)) .+ 1) / 2 .== y) / size > 0.99
# end
# function test_train_logistic_sgd(size = 3000 * 9000, ρ = 0.1, γ = 0.2, ep = 0)
# y = Int.(Random.bitrand(size));
# mle_x = train(SGD(), Losses.Logistic(), Random.rand(size), y, zeros(size), ρ, epoch = ep, γ = γ, num_of_batch = 20);
# print(sum((Int.(sign.(mle_x)) .+ 1) / 2 .== y) / size);
# @test sum((Int.(sign.(mle_x)) .+ 1) / 2 .== y) / size > 0.99
# end
# function unit_test_train_poisson(;sz = 500,ρ = 0,step_size = 0.1,maxIter = 100)
# # sz = 1000
# # max_iter = 200
# # γ = 0.1
# # ρ = 0
# y = Random.rand(Distributions.Poisson(10), sz) * 1.0
# mle_x = train(Losses.Poisson(), Random.rand(sz), y, zeros(sz), ρ, iter = max_iter, γ = step_size);
# recoveredX = round.(exp.(mle_x));
# errRate = sum(abs.(recoveredX .- y) .> 1) / sz;
# # @test 1 - errRate >= 0.99
# return 1- errRate;
# end
# function POISSON_SMALL_TEST_SET_LOOSE()
# @test unit_test_train_poisson(sz=1000,ρ=0,step_size=0.1,maxIter=200) > 0.9
# @test unit_test_train_poisson(sz=3000,ρ=0,step_size=0.1,maxIter=200) > 0.9
# @test unit_test_train_poisson(sz=5000,ρ=0,step_size=0.1,maxIter=200) > 0.9
# @test unit_test_train_poisson(sz=1000,ρ=0.1,step_size=0.1,maxIter=200) > 0.9
# @test unit_test_train_poisson(sz=3000,ρ=0.1,step_size=0.1,maxIter=200) > 0.9
# @test unit_test_train_poisson(sz=5000,ρ=0.1,step_size=0.1,maxIter=200) > 0.9
# end
# function test_train_gamma(;size = 500,ρ = 0.05,γ = 0.1,maxIter = 100)
# end
# function test_train_negative_binomial(;size = 500,ρ = 0.05,γ = 0.1,maxIter = 100)
# end
# function test_complete_type_matrix()
# test1_input = Array{Union{Missing,DIST_FLAGS},2}(undef, 10, 10)
# test1_input[:,1:2] .= Ref(Gaussian);
# test1_input[:,3:4] .= Ref(Bernoulli);
# test1_input[:,5:6] .= Ref(Gamma);
# test1_input[:,7:8] .= Ref(Poisson);
# test1_input[:,9:10] .= Ref(NegativeBinomial);
# test1_expect = deepcopy(test1_input);
# test1_input[diagind(test1_input)] .= missing;
# test1_output = _get_complete_type_matrix(test1_input);
# @test test1_output == test1_expect;
# test2_input = Array{DIST_FLAGS,2}(undef, 10, 10)
# test2_input[:,1:2] .= Ref(Gaussian);
# test2_input[:,3:4] .= Ref(Bernoulli);
# test2_input[:,5:6] .= Ref(Gamma);
# test2_input[:,7:8] .= Ref(Poisson);
# test2_input[:,9:10] .= Ref(NegativeBinomial);
# test2_expect = deepcopy(test2_input);
# test2_output = _get_complete_type_matrix(test2_input);
# @test test2_output == test2_expect;
# end
# function test_predict_bernoulli()
# size = 500;ρ = 0.1;γ = 0.1;maxIter = 500;
# y = Int.(Random.bitrand(size));
# mle_x = train(Losses.Logistic(), Random.rand(size), y, zeros(size), ρ, iter = maxIter, γ = γ);
# @test sum(_predict_bernoulli(mle_x) .== y) / size > 0.99
# end
# test_predict_bernoulli()
using MatrixCompletion
# function unit_test_admm(;input_matrix, = rand(Distributions.Gaussian(0,1),500,500,3),
# distribution_type_matrix = provide()
# sampling_model = BernoulliModel(),
# sampling_rate = 0.8,
# max_iter_inner_gradient_descent = 3,
# max_iter_admm = 200,
# stop_tol_admm = 1e-5,
# debug_mode = false,
# use_auto_diff = true,
# λ = 5e-1,
# μ = 5e-4,
# σ = 0.3,
# τ = 1.618)
# end
function accuracyImputedBinaryPart(;truth::Array{Float64,2} = nothing, completedMatrix::Array{Float64,2} = nothing)
typeM = construct_type_matrix(truth);
# binaryColumns = find(x->x==BINARY,typeM[1,:]);
binaryColumns = findall(x->x == Bernoulli, typeM[1,:]);
imputedBinaryPart = (sign.(completedMatrix[:,binaryColumns]) .+ 1) / 2
return sum(Int.(truth[:,binaryColumns] .== imputedBinaryPart)) / (length(binaryColumns) * size(truth)[1]);
end
function accuracyImputedContinuousPart(;truth::Array{Float64,2} = nothing,completedMatrix::Array{Float64,2} = nothing)
typeM = construct_type_matrix(truth);
continuousColumns = findall(x->x == Gaussian, typeM[1,:])
imputedContinuousPart = completedMatrix[:,continuousColumns];
imputedContinuousPart - truth[:,continuousColumns]
return norm(imputedContinuousPart - truth[:,continuousColumns])^2 / norm(truth[:,continuousColumns])^2
end
function test_admm_with_autodiff_smallinput(;gd_iter = 3,dbg = false)
admm_test_matrix1 = rand([(Distributions.Bernoulli(0.7), 100 => 50, 3),(Distributions.Gaussian(3, 1), 100 => 50, 3)])
admm_test_matrix_missing1 = sample(BernoulliModel(), x = admm_test_matrix1, rate = 0.8)
@time admm_test_matrix_output_1 = complete(A = admm_test_matrix_missing1, maxiter = 200, use_autodiff = true, gd_iter = gd_iter, debug_mode = dbg);
gaussian_acc = accuracyImputedContinuousPart(truth = admm_test_matrix1, completedMatrix = admm_test_matrix_output_1)
bernoulli_acc = accuracyImputedBinaryPart(truth = admm_test_matrix1, completedMatrix = admm_test_matrix_output_1)
@printf("gaussian acc: %f\n", gaussian_acc)
@printf("bernoulli acc: %f\n",bernoulli_acc)
end
function test_admm_without_autodiff_smallinput(;gd_iter = 3,dbg = false)
admm_test_matrix1 = rand([(Distributions.Bernoulli(0.7), 100 => 50, 3),(Distributions.Gaussian(3, 1), 100 => 50, 3)])
admm_test_matrix_missing1 = sample(BernoulliModel(), x = admm_test_matrix1, rate = 0.8)
@time admm_test_matrix_output_1 = complete(A = admm_test_matrix_missing1, maxiter = 200, use_autodiff = false, gd_iter = gd_iter, debug_mode = dbg)
gaussian_acc = accuracyImputedContinuousPart(truth = admm_test_matrix1, completedMatrix = admm_test_matrix_output_1)
bernoulli_acc = accuracyImputedBinaryPart(truth = admm_test_matrix1, completedMatrix = admm_test_matrix_output_1)
@printf("gaussian acc: %f\n", gaussian_acc)
@printf("bernoulli acc: %f\n",bernoulli_acc)
end
function test_admm_without_autodiff_largeinput(;gd_iter = 3,dbg = false)
admm_test_matrix1 = rand([(Distributions.Bernoulli(0.7), 6000 => 3000, 10),(Distributions.Gaussian(3, 1), 6000 => 3000, 10)])
admm_test_matrix_missing1 = sample(BernoulliModel(), x = admm_test_matrix1, rate = 0.8)
@time admm_test_matrix_output_1 = complete(A = admm_test_matrix_missing1, maxiter = 200, use_autodiff = false, gd_iter = gd_iter, debug_mode = dbg)
gaussian_acc = accuracyImputedContinuousPart(truth = admm_test_matrix1, completedMatrix = admm_test_matrix_output_1)
bernoulli_acc = accuracyImputedBinaryPart(truth = admm_test_matrix1, completedMatrix = admm_test_matrix_output_1)
end
#
#
#
# admm_test_matrix2 = rand([(Distributions.Bernoulli(0.7),5000=>2500,100),(Distributions.Gaussian(3,1),5000=>2500,100)])
# admm_test_matrix_missing2 = sample(BernoulliModel(),x = admm_test_matrix2,rate = 0.8)
#
# a = construct_type_matrix(admm_test_matrix2)
# construct_index_trakcer(input_type_matrix=a)
# admm_test_matrix_output_2 = complete(A = admm_test_matrix_missing2)
# accuracyImputedContinuousPart(truth=admm_test_matrix2,completedMatrix = admm_test_matrix_output_2)
# accuracyImputedBinaryPart(truth=admm_test_matrix2,completedMatrix = admm_test_matrix_output_2)
#
#
#
# #
# # _get_column_distribution_type(type_mat[:,1])
# # _get_column_distribution_type(type_mat[:,51])
# # type_mat[:,90] .= Ref(_get_column_distribution_type(type_mat[:,51]))
#
#
# complete_type_mat = _get_complete_type_matrix(type_mat)
# complete_type_mat[:,90]
# predict(x=ot,obs=type_mat)
#
#
# MatrixCompletionModel(observed=admm_test_matrix_missing1,completed=ot,type_matrix=type)
#
#
# test_train_logistic()
# test_train_poisson()
# test_complete_type_matrix()
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1616 |
# include("admm_test.jl")
# @time test_train_logistic()
# @time test_train_logistic_optimized()
# @time test_train_logistic(size=1000*1000)
# @time test_train_logistic_optimized(size=1000*1000)
# function test_admm_without_autodiff_smallinput(;gd_iter = 3,dbg = false)
# admm_test_matrix1 = rand([(Distributions.Bernoulli(0.7), 100 => 50, 3),(Distributions.Gaussian(3, 1), 100 => 50, 3)])
# admm_test_matrix_missing1 = sample(BernoulliModel(), x = admm_test_matrix1, rate = 0.8)
# @time admm_test_matrix_output_1 = complete(A = admm_test_matrix_missing1, maxiter = 200, use_autodiff = false, gd_iter = gd_iter, debug_mode = dbg)
# gaussian_acc = accuracyImputedContinuousPart(truth = admm_test_matrix1, completedMatrix = admm_test_matrix_output_1)
# bernoulli_acc = accuracyImputedBinaryPart(truth = admm_test_matrix1, completedMatrix = admm_test_matrix_output_1)
# @printf("gaussian acc: %f\n", gaussian_acc)
# @printf("bernoulli acc: %f\n",bernoulli_acc)
# end
@testset "$(format("ADMM Algorithm: Small Input[Gaussian + Bernoulli]"))" begin
let
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = 5), 200, 100),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 5), 200, 100)])
sample_model = provide(Sampler{BernoulliModel}, rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
end
end
# test_admm_with_autodiff_smallinput(gd_iter=3,dbg=true)
# test_admm_without_autodiff_smallinput(gd_iter=3)
# test_admm_without_autodiff_largeinput(gd_iter=3,dbg=true)
#@time POISSON_SMALL_TEST_SET_LOOSE()
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 206 |
function test_scope()
local eigen_v;
for i = 1:5
eigen_v = 1;
end
return eigen_v
end
function test_hello()
print("hello")
end
function test_emacs()
print("hello")
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 8506 | module Tst
include("abstract_unittest_functions.jl")
using Test,TimerOutputs,Printf
using HDF5
using JSON
using DataFrames
import Distributions, Random
import Serialization
using MatrixCompletion
# legacy code to be refactored soon!!
# const to = TimerOutput()
#==============================================================================#
# TEST OPTIONS #
#==============================================================================#
const TEST_OPTION_PRINT_TIMER = false
const TEST_OPTION_SMALL_INPUT = false
const TEST_OPTION_MEDIUM_INPUT = false
const TEST_OPTION_LARGE_INPUT = false
const TEST_OPTION_USE_AUTOGRAD = false
const TEST_OPTION_TEST_R_WRAPPER = false
#==============================================================================#
# SUBMODULE FLAGS #
#==============================================================================#
const FLAG_TEST_CONCEPTS = false
const FLAG_TEST_SAMPLING = false
const FLAG_TEST_MISC = false
const FLAG_TEST_RANDOM_OBJECTS = false
const FLAG_TEST_DIAGNOSTICS = false
const FLAG_TEST_EXPONENTIAL_FAMILY = false
const FLAG_TEST_INDEXING_TOOLS = false
const FLAG_TEST_SPARSE_EIGEN = false
const FLAG_TEST_BETTER_MGF = false
const FLAG_TEST_ESTIMATOR_MLE = false
const FLAG_TEST_ESTIMATOR_MOM = false
const FLAG_TEST_MODEL_FITTING = false
const FLAG_TEST_LOSS_OPTIMIZER_POISSON = false
const FLAG_TEST_LOSS_OPTIMIZER_BERNOULLI = false
const FLAG_TEST_LOSS_OPTIMIZER_GAMMA = false
const FLAG_TEST_LOSS_OPTIMIZER_GAUSSIAN = false
const FLAG_TEST_LOSS_OPTIMIZER_NEGATIVE_BINOMIAL = false
const FLAG_TEST_LOSS_OPTIMIZER_MULTINOMIAL = false
const FLAG_TEST_ALGO_ADMM = false
const FLAG_TEST_LIB_MATH = false
const FLAG_TEST_PRETTY_PRINTER = false
const FLAG_TEST_UTILITY_BATCHUTILS = false
const FLAG_TEST_SGD_BERNOULLI = false
const FLAG_TEST_SGD_GAMMA = false
const FLAG_TEST_CHAINED_ADMM = true
const FLAG_TEST_ALGO_ALM = false
# TODO
const FLAG_TEST_ALGO_ADMM_PARALLELL = false
const FLAG_TEST_ALGO_SVT = false
const FLAG_TEST_ALGO_ONEBIT = false
const FLAG_TEST_ALGO_OPTSPACE = false
#==============================================================================#
# SIMULATION FLAGS #
#==============================================================================#
const FLAG_SIMULATION_ADMM_GAMMA = false
const FLAG_SIMULATION_ADMM_BERNOULLI = false
const FLAG_SIMULATION_ADMM_GAUSSIAN = false
const FLAG_SIMULATION_ADMM_POISSON = false
const FLAG_SIMULATION_ADMM_GAUSSIAN_BERNOULLI = false
const FLAG_SIMULATION_ADMM_NEGATIVE_BINOMIAL = false
const FLAG_SIMULATION_ADMM_MIXED = false
#==============================================================================#
# VISUALIZATION FLAGS #
#==============================================================================#
const FLAG_VISUAL_RANDOM_OBJECTS = false
#==============================================================================#
# SIMULATION SCRIPTS #
#==============================================================================#
FLAG_SIMULATION_ADMM_GAMMA ?
include("simulation_runner_gamma.jl") : @info @sprintf("Skipped: Simulation[vary rank] Gamma")
FLAG_SIMULATION_ADMM_BERNOULLI ?
include("simulation_runner_bernoulli.jl") : @info @sprintf("Skipped: Simulation[vary rank] Bernoulli")
FLAG_SIMULATION_ADMM_GAUSSIAN ?
include("simulation_runner_gaussian.jl") : @info @sprintf("Skipped: Simulation[vary rank] Gaussian")
FLAG_SIMULATION_ADMM_POISSON ?
include("simulation_runner_poisson.jl") : @info @sprintf("Skipped: Simulation[vary rank] Poisson")
FLAG_SIMULATION_ADMM_NEGATIVE_BINOMIAL ?
include("simulation_runner_negbin.jl") : @info @sprintf("Skipped: Simulation[vary rank] NegativeBinomial")
FLAG_SIMULATION_ADMM_GAUSSIAN_BERNOULLI ?
include("simulation_runner_gaussian_bernoulli.jl") : @info @sprintf("Skipped: Simulation Gaussian + Bernoulli")
FLAG_SIMULATION_ADMM_MIXED ?
include("simulation_runner_mixed.jl") : @info @sprintf("Skipped: Simulation Mixed")
#==============================================================================#
# TEST SCRIPTS #
#==============================================================================#
FLAG_TEST_MISC ?
include("test_runner_misc.jl") : @info @sprintf("Skipped: Miscellaneous Test\n")
FLAG_TEST_RANDOM_OBJECTS ?
include("test_runner_random_objects.jl") : @info @sprintf("Skipped: Random Objects Test\n")
FLAG_TEST_SAMPLING ?
include("test_runner_sampling.jl") : @info @sprintf("Skipped: Sampling Test\n")
FLAG_TEST_SPARSE_EIGEN ?
include("test_runner_sparse_eigen.jl") : @info @sprintf("Skipped: Sparse Eigen Test\n")
FLAG_TEST_INDEXING_TOOLS ?
include("test_runner_indexing.jl") : @info @sprintf("Skipped: Indexing Tracker Test\n")
FLAG_TEST_CONCEPTS ?
include("test_runner_concepts.jl") : @info @sprintf("Skipped: Concepts Test\n")
FLAG_TEST_DIAGNOSTICS ?
include("test_runner_diagnostics.jl") : @info @sprintf("Skipped: Diagnostics Test\n")
FLAG_TEST_EXPONENTIAL_FAMILY ?
include("test_runner_exponential_family.jl") : @info @sprintf("Skipped: Exponential Family Test\n")
FLAG_TEST_BETTER_MGF ?
include("test_runner_better_mgf.jl") : @info @sprintf("Skipped: MGF Test\n")
FLAG_TEST_ESTIMATOR_MLE ?
include("test_runner_estimator_mle.jl") : @info @sprintf("Skipped: MLE Test\n")
FLAG_TEST_ESTIMATOR_MOM ?
include("test_runner_estimator_mom.jl") : @info @sprintf("Skipped: MOM Test\n")
FLAG_TEST_MODEL_FITTING ?
include("test_runner_model_fitting.jl") : @info @sprintf("Skipped: Model Fitting Test\n")
FLAG_TEST_LOSS_OPTIMIZER_POISSON ?
include("test_runner_poisson_loss.jl") : @info @sprintf("Skipped: Poisson Loss Test\n")
FLAG_TEST_LOSS_OPTIMIZER_BERNOULLI ?
include("test_runner_bernoulli_loss.jl") : @info @sprintf("Skipped: Bernoulli Loss Test\n")
FLAG_TEST_LOSS_OPTIMIZER_GAMMA ?
include("test_runner_gamma_loss.jl") : @info @sprintf("Skipped: Gamma Loss Test\n")
FLAG_TEST_LOSS_OPTIMIZER_NEGATIVE_BINOMIAL ?
include("test_runner_negative_binomial_loss.jl") : @info @sprintf("Skipped: Bernoulli Loss Test\n")
FLAG_TEST_ALGO_ADMM ?
include("test_runner_admm_small_input.jl") : @info @sprintf("Skipped: ADMM Small Input Test\n")
FLAG_TEST_LIB_MATH ?
include("test_runner_lib_math.jl") : @info @sprintf("Skipped: Math Library Test\n")
FLAG_TEST_PRETTY_PRINTER ?
include("test_runner_pretty_printer.jl") : @info @sprintf("Skipped: Pretty Printer\n")
FLAG_TEST_UTILITY_BATCHUTILS ?
include("test_runner_batch_utils.jl") : @info @sprintf("Skipped: Utility[Batch Utils]")
FLAG_TEST_SGD_BERNOULLI ?
include("test_runner_sgd_bernoulli.jl") : @info @sprintf("Skipped: SGD[Bernoulli]")
FLAG_TEST_SGD_GAMMA ?
include("test_runner_sgd_gamma.jl") : @info @sprintf("Skipped: SGD[Gamma]")
FLAG_TEST_ALGO_ALM ?
include("test_runner_algo_alm.jl") : @info @sprintf("Skipped: ALGO[ALM]")
FLAG_TEST_CHAINED_ADMM ?
include("test_runner_chained_admm.jl") : @info @sprintf("Skipped: ALGO[Chained ADMM]")
#==============================================================================#
# VISUAL SCRIPTS #
#==============================================================================#
FLAG_VISUAL_RANDOM_OBJECTS ?
include("visual_random_objects.jl") : nothing
if TEST_OPTION_PRINT_TIMER
    println()
    println(to)   # requires the commented-out `to = TimerOutput()` above to be restored
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 6058 | include("abstract_unittest_functions.jl")
import MatrixCompletion.Utilities.FastEigen:ARPACK
@info("Simulation: Vary Missing [Mixed, Small]")
let
Random.seed!(65536)
ROW = 500
COL = 500
# for input_rank in union(1,collect(10:10:100))
# for input_sample in union(1, collect(5:5:99))
# try
# @printf("small case: rank = %d | sample = %d%%\n", input_rank, input_sample)
input_rank = 10
input_sample = 80
timer = TimerOutput()
# RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
# "mixed/small(500x500)(vary_missing)/" *
# "rank" * string(input_rank) * "/" *
# "sample" * string(input_sample) * "/"
# LOG_FILE_NAME = "io.log"
# DATA_FILE_NAME = "saved_variables.h5"
# LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
# DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
# Base.Filesystem.mkpath(RESULTS_DIR)
# io = open(LOG_FILE_PATH, "w")
io = stdout
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(10, 5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.NegativeBinomial(6, 0.8), rank = input_rank), 500, 100)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix[:, 1:100] .= :Gaussian
manual_type_matrix[:, 101:200] .= :Bernoulli
manual_type_matrix[:, 201:300] .= :Gamma
manual_type_matrix[:, 301:400] .= :Poisson
manual_type_matrix[:, 401:500] .= :NegativeBinomial
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>6, :p=>0.8))
@timeit timer "ARPACK" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = input_rank * 10 + 1,
io = io,
type_assignment = manual_type_matrix,
eigen_solver = ARPACK())
end
@timeit timer "KrylovKit" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = input_rank * 10 + 1,
io = io,
type_assignment = manual_type_matrix,
eigen_solver = KrylovMethods())
end
@timeit timer "FullEigen" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker,
estimators = user_input_estimators)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
  # pickling is disabled in this stdout-debugging configuration: DATA_FILE_PATH
  # is defined by the directory setup that is commented out above
  # pickle(DATA_FILE_PATH,
  #        "missing_idx"      => type_tracker[:Missing],
  #        "completed_matrix" => completed_matrix,
  #        "predicted_matrix" => predicted_matrix,
  #        "truth_matrix"     => truth_matrix,
  #        "summary"          => summary_object)
  print(io, JSON.json(summary_object, 4))
  print(io, timer)
  # close(io)   # only when `io` is the log file rather than stdout
  #     end
  #   catch
  #     @printf("ERROR!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
  #   end
  # end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3519 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [Bernoulli, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in union(1, collect(25:25:200))
for input_sample in union(1, collect(5:5:99))
try
@printf("small case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"bernoulli/small(400x400)(vary_missing)/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
# io = stdout
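      # Data generation: draw a rank-constrained Bernoulli(0.5) truth matrix,
      # then observe each entry independently with probability
      # input_sample / 100; `complete` must recover the unobserved entries.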
truth_matrix = rand([(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Bernoulli
@timeit timer "Bernoulli(400x400)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
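      # A minimal sketch for inspecting a saved run afterwards, assuming the
      # `pickle` helper writes each key/value pair as an HDF5 dataset (adjust
      # if your build of the test utilities stores them differently):
      #
      #   using HDF5
      #   summary_loaded = h5read(DATA_FILE_PATH, "summary")
      #   missing_idx    = h5read(DATA_FILE_PATH, "missing_idx")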
    catch
      @printf("ERROR!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2885 |
# include("abstract_unittest_functions.jl")
@info("Simulation: Vary Rank [Bernoulli, Medium]")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
for input_rank in union(1, collect(10:10:300), 480, 490, 500)
# for input_rank in collect(300:10:500)
@printf("medium case: rank = %d\n", input_rank)
dist = Bernoulli()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "bernoulli/medium(2000x2000)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Bernoulli
@timeit timer "Bernoulli(2000x2000)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = input_rank,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
# log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2993 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Rank [Bernoulli, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in collect(1:400)
try
@printf("small case: rank = %d\n", input_rank)
dist = Bernoulli()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "bernoulli/p=0.9/small_400x400_vary_rank_sample80/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Bernoulli(0.9), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Bernoulli
@timeit timer "Bernoulli(400x400)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
      log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker, tracker, io = io)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
catch
@printf("Error! small case: rank = %d\n", input_rank)
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3100 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [Gamma, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in collect(25:25:400)
for input_sample in union(collect(40:5:99))
# try
@printf("small case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"gamma/small_400x400_vary_missing/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
# io = stdout
truth_matrix = rand([(FixedRankMatrix(Distributions.Gamma(10,0.5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gamma
@timeit timer "Gamma(400x400)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
# catch
# @printf("ERROR!!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
# end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2666 | @info("Simulation: Vary Rank [Gamma, Medium]")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
for input_rank in union(1, collect(10:10:500))
@printf("medium case: rank = %d\n", input_rank)
# dist = Gamma()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "gamma/medium(2000x2000)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gamma
@timeit timer "Gamma(2000x2000)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = input_rank,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2644 | @info("Simulation: Vary Rank [Gamma, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in collect(2:400)
@printf("small case: rank = %d\n", input_rank)
    # dist = Gamma()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "gamma/small(400x400)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gamma
@timeit timer "Gamma(400x400)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3126 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [Gaussian, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in collect(25:25:400)
for input_sample in union(collect(40:5:99))
# try
@printf("small case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"gaussian/mu=0_sigma=1/small_400x400_vary_missing/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
# io = stdout
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gaussian
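      # type_assignment tells `complete` which distribution to assume for each
      # entry; here every entry is treated as Gaussian.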
@timeit timer "Gaussian(400x400)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
# catch
# @printf("ERROR!!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
# end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4541 | include("abstract_unittest_functions.jl")
@testset "$(format("ADMM Algorithm: Small Input Simulation [Gaussian 400 x 400]"))" begin
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in 1:400
      # input_rank = 100  # debug override; uncomment to pin the loop to a single rank
@printf("small case: rank = %d\n", input_rank)
dist = Gaussian()
timer = TimerOutput()
# RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "gaussian/small(400x400)(vary_rank)/" * "rank" * string(input_rank) * "/"
# LOG_FILE_NAME = "io.log"
# DATA_FILE_NAME = "saved_variables.h5"
# LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
# DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
# Base.Filesystem.mkpath(RESULTS_DIR)
# io = open(LOG_FILE_PATH, "w")
io = stdout
_truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(10, 1), rank = input_rank), ROW, COL)])
truth_matrix = _truth_matrix .- 10
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gaussian
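      # The truth matrix is Gaussian(10, 1) shifted to mean zero before
      # completion; predictions are shifted back by +10 when compared against
      # the unshifted _truth_matrix. The solver is then run a second time with
      # closed_form = true to compare the closed-form Gaussian update against
      # the default gradient-descent update (gd_iter steps).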
@timeit timer "Gaussian(400x400)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix .+ 10,
truth_matrix = _truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
summary_object[:Gaussian]
@timeit timer "Gaussian(400x400)" * "closed" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix,
closed_form = true)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
summary_object[:Gaussian]
      # DATA_FILE_PATH is commented out above; re-enable it together with
      # this block to persist the run to disk.
      # pickle(DATA_FILE_PATH,
      #        "missing_idx"      => type_tracker[:Missing],
      #        "completed_matrix" => completed_matrix,
      #        "predicted_matrix" => predicted_matrix,
      #        "truth_matrix"     => truth_matrix,
      #        "summary"          => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
      # close(io)  # io is stdout here; closing it would break later output
end
end
# @info("Already Completed Simulation[Gaussian vary rank][SMALL]")
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3139 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [Gaussian, Medium]")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
for input_rank in union(200, 500)
for input_sample in union(collect(50:5:99))
# try
@printf("medium case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"gaussian/mu=0_sigma=1/medium_2000x2000_vary_missing/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
# io = stdout
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gaussian
@timeit timer "Gaussian(2000x2000)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = input_rank * 2,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
# catch
# @printf("ERROR!!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
# end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2721 | include("abstract_unittest_functions.jl")
@info("Gaussian(0,1) Medium")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
for input_rank in union(1, collect(10:10:500))
@printf("medium case: rank = %d\n", input_rank)
dist = Gaussian()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "gaussian/mu=0_sigma=1/medium_2000x2000_vary_rank_sample=80/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gaussian
@timeit timer "Gaussian(2000x2000)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = input_rank * 2,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2673 | include("abstract_unittest_functions.jl")
@info("Gaussian(0,1) Small")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in 1:400
@printf("small case: rank = %d\n", input_rank)
dist = Gaussian()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "gaussian/mu=0_sigma=1/small_400x400_vary_rank/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gaussian
@timeit timer "Gaussian(400x400)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
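      # project_rank = nothing falls back to a full eigen-decomposition
      # (cf. the "FullEigen" block in the mixed-data solver benchmark), which
      # is affordable at this 400 x 400 size.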
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4281 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [Mixed, Medium]")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
# for input_rank in union(1,collect(10:10:100))
for input_rank in union(80)
for input_sample in union(collect(50:5:99))
# try
@printf("medium case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"mixed/medium(2000x2000)(vary_missing)_standardized/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = input_rank), 2000, 400),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), 2000, 400),
(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), 2000, 400),
(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), 2000, 400),
(FixedRankMatrix(Distributions.NegativeBinomial(6, 0.8), rank = input_rank), 2000, 400)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix[:, 1:400] .= :Gaussian
manual_type_matrix[:, 401:800] .= :Bernoulli
manual_type_matrix[:, 801:1200] .= :Gamma
manual_type_matrix[:, 1201:1600] .= :Poisson
manual_type_matrix[:, 1601:2000] .= :NegativeBinomial
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>6, :p=>0.8))
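      # The NegativeBinomial columns need externally supplied (r, p)
      # parameters; the same estimator dictionary is passed to both
      # `complete` and `predict` below.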
@timeit timer "Mixed(2000x2000)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = input_rank * 10 + 1,
io = io,
type_assignment = manual_type_matrix,
closed_form_update = true)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker,
estimators = user_input_estimators)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
# end
# catch
# @printf("ERROR!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
# end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4137 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [Mixed, Small]")
let
Random.seed!(65536)
ROW = 500
COL = 500
for input_rank in union(1, collect(10:10:100))
for input_sample in union(collect(50:5:99))
try
@printf("small case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"mixed/small_500x500_vary_missing_standarized_lanzcos/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.NegativeBinomial(6, 0.8), rank = input_rank), 500, 100)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix[:, 1:100] .= :Gaussian
manual_type_matrix[:, 101:200] .= :Bernoulli
manual_type_matrix[:, 201:300] .= :Gamma
manual_type_matrix[:, 301:400] .= :Poisson
manual_type_matrix[:, 401:500] .= :NegativeBinomial
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>6, :p=>0.8))
@timeit timer "Mixed(500x500)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = input_rank * 10,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker,
estimators = user_input_estimators)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
# end
catch
@printf("ERROR!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3710 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Rank [Mixed, Medium]")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
for input_rank in union(1, 10:10:50)
@printf("medium case: rank = %d\n", input_rank)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "mixed/medium(2000x2000)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = input_rank), 2000, 400),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), 2000, 400),
(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), 2000, 400),
(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), 2000, 400),
(FixedRankMatrix(Distributions.NegativeBinomial(6, 0.8), rank = input_rank), 2000, 400)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix[:, 1:400] .= :Gaussian
manual_type_matrix[:, 401:800] .= :Bernoulli
manual_type_matrix[:, 801:1200] .= :Gamma
manual_type_matrix[:, 1201:1600] .= :Poisson
manual_type_matrix[:, 1601:2000] .= :NegativeBinomial
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>6, :p=>0.8))
@timeit timer "Mixed(2000x2000)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = input_rank * 10 + 1,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker,
estimators = user_input_estimators)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3692 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Rank [Mixed, Small]")
let
Random.seed!(65536)
ROW = 500
COL = 500
for input_rank in 1:50
@printf("small case: rank = %d\n", input_rank)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "mixed/small_500x500_vary_rank_sample80_lanzcos/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(0, 1), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), 500, 100),
(FixedRankMatrix(Distributions.NegativeBinomial(6, 0.8), rank = input_rank), 500, 100)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix[:, 1:100] .= :Gaussian
manual_type_matrix[:, 101:200] .= :Bernoulli
manual_type_matrix[:, 201:300] .= :Gamma
manual_type_matrix[:, 301:400] .= :Poisson
manual_type_matrix[:, 401:500] .= :NegativeBinomial
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>6, :p=>0.8))
@timeit timer "Mixed(500x500)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = input_rank * 10 + 1,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker,
estimators = user_input_estimators)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3436 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [NegBin, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in collect(25:25:400)
for input_sample in union(collect(40:5:99))
# try
@printf("small case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"negbin/small_400x400_vary_missing/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
# io = stdout
truth_matrix = rand([(FixedRankMatrix(Distributions.NegativeBinomial(6,0.8), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :NegativeBinomial
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>6, :p=>0.8))
@timeit timer "NegBin(400x400)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
user_input_estimators = user_input_estimators,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker,
estimators = user_input_estimators)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
# catch
# @printf("ERROR!!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
# end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3137 | @info("Simulation: Vary Rank [NegativeBinomial, Medium]")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
# for input_rank in collect(1:400)
for input_rank in union(1, collect(10:10:500))
@printf("medium case: rank = %d\n", input_rank)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "negbin/medium(2000x2000)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.NegativeBinomial(6, 0.5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :NegativeBinomial
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>6, :p=>0.8))
@timeit timer "Bernoulli(400x400)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = input_rank,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker,
estimators = user_input_estimators)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
# log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3153 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Rank [NegativeBinomial, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in collect(172:400)
@printf("small case: rank = %d\n", input_rank)
    # dist = NegativeBinomial()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "negbin/small(400x400)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.NegativeBinomial(6, 0.5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :NegativeBinomial
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>6, :p=>0.8))
@timeit timer "NegativeBinomial(400x400)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
user_input_estimators = user_input_estimators,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker,
estimators = user_input_estimators)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
# log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3123 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [Poisson, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in union(collect(25:25:400))
for input_sample in union(collect(5:5:99))
try
@printf("small case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"poisson/small(400x400)(vary_missing)/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
# io = stdout
truth_matrix = rand([(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Poisson
@timeit timer "Poisson(400x400)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
      catch
        @printf("ERROR!!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3129 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Missing [Poisson, Medium]")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
for input_rank in union(40, 100)
for input_sample in union(collect(50:5:99))
try
@printf("medium case: rank = %d | sample = %d%%\n", input_rank, input_sample)
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"poisson/medium_2000x2000__vary_missing/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
# io = stdout
truth_matrix = rand([(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Poisson
@timeit timer "Poisson(2000x2000)" * "| rank=" * string(input_rank) * "| sample=" * string(input_sample) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = input_rank * 2,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
      catch
        @printf("ERROR!!!! rank = %d | sample = %d%%\n", input_rank, input_sample)
end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2670 | @info("Simulation: Vary Rank [Poisson, Medium]")
let
Random.seed!(65536)
ROW = 2000
COL = 2000
for input_rank in union(1, collect(10:10:500))
@printf("medium case: rank = %d\n", input_rank)
dist = Poisson()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "poisson/medium(2000x2000)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Poisson
@timeit timer "Poisson(2000x2000)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = input_rank,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2666 | @info("Simulation: Vary Rank [Poisson, Small]")
let
Random.seed!(65536)
  ROW = 400
  COL = 400
  for input_rank in union(1, collect(10:10:400))
    @printf("small case: rank = %d\n", input_rank)
dist = Poisson()
timer = TimerOutput()
    RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "poisson/small(400x400)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
    input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Poisson
@timeit timer "Poisson(2000x2000)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2796 | include("abstract_unittest_functions.jl")
@info("Simulation: Vary Rank [Poisson, Small]")
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in collect(1:400)
@printf("small case: rank = %d\n", input_rank)
    # input_rank = 100  # debug override; uncomment to pin the loop to a single rank
dist = Poisson()
timer = TimerOutput()
# RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "poisson/small(400x400)(vary_rank)/" * "rank" * string(input_rank) * "/"
# LOG_FILE_NAME = "io.log"
# DATA_FILE_NAME = "saved_variables.h5"
# LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
# DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
# Base.Filesystem.mkpath(RESULTS_DIR)
# io = open(LOG_FILE_PATH, "w")
io = Base.stdout
truth_matrix = rand([(FixedRankMatrix(Distributions.Poisson(3), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Poisson
@timeit timer "Poisson(400x400)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.2,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
# summary_object[:Poisson][:MissingOnly]
    # DATA_FILE_PATH is commented out above; re-enable it together with
    # this block to persist the run to disk.
    # pickle(DATA_FILE_PATH,
    #        "missing_idx"      => type_tracker[:Missing],
    #        "completed_matrix" => completed_matrix,
    #        "predicted_matrix" => predicted_matrix,
    #        "truth_matrix"     => truth_matrix,
    #        "summary"          => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
    # close(io)  # io is stdout here; closing it would break later output
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 8203 | const FLAG_SIMULATION_BERNOULLI_VARY_RANK_SMALL = false
const FLAG_SIMULATION_BERNOULLI_VARY_RANK_MEDIUM = true
const FLAG_SIMULATION_BERNOULLI_VARY_RANK_LARGE = false
const FLAG_SIMULATION_BERNOULLI_VARY_MISSING_PERCENTAGE_SMALL = false
const FLAG_SIMULATION_BERNOULLI_VARY_MISSING_PERCENTAGE_MEDIUM = false
const FLAG_SIMULATION_BERNOULLI_VARY_MISSING_PERCENTAGE_LARGE = false
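# Each flag above gates whether the corresponding simulation script is
# included (and therefore run) below; currently only the medium vary-rank
# Bernoulli case is enabled.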
FLAG_SIMULATION_BERNOULLI_VARY_RANK_SMALL ?
include("simulation_bernoulli_vary_rank_small.jl") : nothing
FLAG_SIMULATION_BERNOULLI_VARY_RANK_MEDIUM ?
include("simulation_bernoulli_vary_rank_medium.jl") : nothing
FLAG_SIMULATION_BERNOULLI_VARY_RANK_LARGE ?
include("simulation_bernoulli_vary_rank_large.jl") : nothing
# @testset "$(format("ADMM Algorithm: Small Input Simulation [Bernoulli 400 x 400]"))" begin
# if SIMULATION_STATUS_BERNOULLI_VARY_RANK_SMALL == false
# let
# Random.seed!(65536)
# ROW = 400
# COL = 400
# for input_rank in 1:2:400
# @printf("small case: rank = %d\n", input_rank)
# dist = Bernoulli()
# timer = TimerOutput()
# Base.Filesystem.mkpath("./test_result/bernoulli/small(400x400)")
# io = open("./test_result/bernoulli/small(400x400)/rank"*string(input_rank)*".log", "w")
# truth_matrix = rand([(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), ROW, COL)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_matrix = Array{Symbol}(undef, ROW, COL)
# manual_type_matrix .= :Bernoulli
# @timeit timer "Bernoulli(400x400)" * "| rank="*string(input_rank) begin
# completed_matrix, type_tracker, tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = nothing,
# io = io,
# type_assignment = manual_type_matrix)
# end
# log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
# show(io, MIME("text/plain"), timer)
# end
# end
# end
# end
# @testset "$(format("ADMM Algorithm: Medium Input Simulation [Bernoulli 2000 x 2000]"))" begin
# if SIMULATION_STATUS_BERNOULLI_VARY_RANK_MEDIUM == false
# let
# Random.seed!(65536)
# ROW = 2000
# COL = 2000
# # for input_rank in union(1, collect(10:10:500))
# for input_rank in collect(300:10:500)
# @printf("medium case: rank = %d\n", input_rank)
# dist = Bernoulli()
# timer = TimerOutput()
# RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "bernoulli/medium(2000x2000)(vary_rank)/" * "rank" * string(input_rank) * "/"
# LOG_FILE_NAME = "io.log"
# DATA_FILE_NAME = "saved_variables.h5"
# LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
# DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
# Base.Filesystem.mkpath(RESULTS_DIR)
# io = open(LOG_FILE_PATH, "w")
# truth_matrix = rand([(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), ROW, COL)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_matrix = Array{Symbol}(undef, ROW, COL)
# manual_type_matrix .= :Bernoulli
# @timeit timer "Bernoulli(2000x2000)" * "| rank=" * string(input_rank) begin
# completed_matrix, type_tracker, tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = input_rank,
# io = io,
# type_assignment = manual_type_matrix)
# end
# predicted_matrix = predict(MatrixCompletionModel(),
# completed_matrix = completed_matrix,
# type_tracker = type_tracker)
# summary_object = summary(MatrixCompletionModel(),
# predicted_matrix = predicted_matrix,
# truth_matrix = truth_matrix,
# type_tracker = type_tracker,
# tracker = tracker)
# pickle(DATA_FILE_PATH,
# "missing_idx" => type_tracker[:Missing],
# "completed_matrix" => completed_matrix,
# "predicted_matrix" => predicted_matrix,
# "truth_matrix" => truth_matrix,
# "summary" => summary_object)
# log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
# print(io, JSON.json(summary_object, 4))
# print(io, timer)
# close(io)
# end
# end
# else
# @info("Already Completed Simualtion[Bernoulli vary rank][MEDIUM]")
# end
# end
# @testset "$(format("ADMM Algorithm: Medium Input Simulation [Bernoulli 2000 x 2000]"))" begin
# if SIMULATION_STATUS_BERNOULLI_VARY_RANK_MEDIUM == false
# let
# Random.seed!(65536)
# ROW = 100
# COL = 100
# for input_rank in union(1, collect(10:10:500))
# @printf("medium case: rank = %d\n", input_rank)
# dist = Bernoulli()
# timer = TimerOutput()
# # Base.Filesystem.mkpath("./test_result/bernoulli/medium(2000x2000)")
# # io = open("./test_result/bernoulli/medium(2000x2000)/rank"*string(input_rank)*".log", "w")
# io = Base.stdout
# truth_matrix = rand([(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), ROW, COL)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_matrix = Array{Symbol}(undef, ROW, COL)
# manual_type_matrix .= :Bernoulli
# @timeit timer "Bernoulli(2000x2000)" * "| rank="*string(input_rank) begin
# completed_matrix, type_tracker, tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = input_rank,
# io = io,
# type_assignment = manual_type_matrix)
# end
# log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
# show(io, MIME("text/plain"), timer)
# end
# end
# end
# end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 6984 | using MatrixCompletion
const FLAG_SIMULATION_GAMMA_VARY_RANK_SMALL = false
const FLAG_SIMULATION_GAMMA_VARY_RANK_MEDIUM = true
const FLAG_SIMULATION_GAMMA_VARY_RANK_LARGE = false
const FLAG_SIMULATION_GAMMA_VARY_MISSING_PERCENTAGE_SMALL = false
const FLAG_SIMULATION_GAMMA_VARY_MISSING_PERCENTAGE_MEDIUM = false
const FLAG_SIMULATION_GAMMA_VARY_MISSING_PERCENTAGE_LARGE = false
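# As above, each flag gates an include below; only the medium vary-rank Gamma
# simulation is currently enabled. The commented @testset blocks that follow
# are earlier versions of these runs, kept for reference.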
FLAG_SIMULATION_GAMMA_VARY_RANK_SMALL ?
include("simulation_gamma_vary_rank_small.jl") : nothing
FLAG_SIMULATION_GAMMA_VARY_RANK_MEDIUM ?
include("simulation_gamma_vary_rank_medium.jl") : nothing
# @testset "$(format("ADMM Algorithm: Small Input Simulation [Gamma 400 x 400]"))" begin
# if SIMULATION_STATUS_GAMMA_VARY_MISSING_PERCENTAGE_LARGE == false
# @info("Running Simulation: [Gamma vary rank][SMALL]")
# let
# Random.seed!(65536)
# ROW = 400
# COL = 400
# for input_rank in 1:2:400
# @printf("small case: rank = %d\n", input_rank)
# dist = Gamma()
# timer = TimerOutput()
# Base.Filesystem.mkpath("./test_result/gamma/small(400x400)")
# io = open("./test_result/gamma/small(400x400)/rank"*string(input_rank)*".log", "w")
# # io = Base.stdout
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), ROW, COL)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_matrix = Array{Symbol}(undef, ROW, COL)
# manual_type_matrix .= :Gamma
# @timeit timer "Gamma(400x400)" * "| rank="*string(input_rank) begin
# completed_matrix, type_tracker, tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = nothing,
# io = io,
# type_assignment = manual_type_matrix)
# end
# log_simulation_result(Gamma(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
# show(io, MIME("text/plain"), timer)
# end
# end
# else
# @info("Already Completed Simualtion[Gamma vary rank][SMALL]")
# end
# end
# @testset "$(format("ADMM Algorithm: Medium Input Simulation [Gamma 2000 x 2000]"))" begin
# if SIMULATION_STATUS_GAMMA_VARY_RANK_MEDIUM == false
# @info("Running Simulation: [Gamma vary rank][MEDIUM]")
# let
# Random.seed!(65536)
# ROW = 2000
# COL = 2000
# for input_rank in collect(400:10:500)
# @printf("medium case: rank = %d\n", input_rank)
# dist = Gamma()
# timer = TimerOutput()
# Base.Filesystem.mkpath("./test_result/gamma/medium(2000x2000)")
# io = open("./test_result/gamma/medium(2000x2000)/rank"*string(input_rank)*".log", "w")
# # io = Base.stdout
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), ROW, COL)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_matrix = Array{Symbol}(undef, ROW, COL)
# manual_type_matrix .= :Gamma
# @timeit timer "Gamma(2000x2000)" * "| rank="*string(input_rank) begin
# completed_matrix, type_tracker, tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = input_rank,
# io = io,
# type_assignment = manual_type_matrix)
# end
# log_simulation_result(Gamma(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
# show(io, MIME("text/plain"), timer)
# end
# end
# else
# @info("Already Completed Simualtion[Gamma vary rank][MEDIUM]")
# end
# end
# @testset "$(format("ADMM Algorithm: Large Input Simulation [Gamma 4000 x 4000]"))" begin
# let
# Random.seed!(65536)
#     ROW = 4000
#     COL = 4000
# for input_rank in union(1, collect(10:20:1000))
# @printf("medium case: rank = %d\n", input_rank)
# dist = Gamma()
# timer = TimerOutput()
#       Base.Filesystem.mkpath("./test_result/gamma/large(4000x4000)")
#       io = open("./test_result/gamma/large(4000x4000)/rank"*string(input_rank)*".log", "w")
# # io = Base.stdout
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gamma(10, 0.5), rank = input_rank), ROW, COL)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_matrix = Array{Symbol}(undef, ROW, COL)
# manual_type_matrix .= :Gamma
# @timeit timer "Gamma(2000x2000)" * "| rank="*string(input_rank) begin
# completed_matrix, type_tracker, tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = nothing,
# io = io,
# type_assignment = manual_type_matrix)
# end
# log_simulation_result(Gamma(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
# show(io, MIME("text/plain"), timer)
# end
# end
# end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 6541 | const SIMULATION_STATUS_GAUSSIAN_VARY_RANK_SMALL = true
const SIMULATION_STATUS_GAUSSIAN_VARY_RANK_MEDIUM = false
const SIMULATION_STATUS_GAUSSIAN_VARY_RANK_LARGE = nothing
const SIMULATION_STATUS_GAUSSIAN_VARY_MISSING_PERCENTAGE_SMALL = false
const SIMULATION_STATUS_GAUSSIAN_VARY_MISSING_PERCENTAGE_MEDIUM = false
const SIMULATION_STATUS_GAUSSIAN_VARY_MISSING_PERCENTAGE_LARGE = nothing
@testset "$(format("ADMM Algorithm: Small Input Simulation [Gaussian 400 x 400]"))" begin
if SIMULATION_STATUS_GAUSSIAN_VARY_RANK_SMALL == true
let
Random.seed!(65536)
ROW = 400
COL = 400
for input_rank in 1:400
@printf("small case: rank = %d\n", input_rank)
dist = Gaussian()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "gaussian/small(400x400)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(10, 5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gaussian
@timeit timer "Gaussian(400x400)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = nothing,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
else
@info("Already Completed Simualtion[Gaussian vary rank][SMALL]")
end
end
@testset "$(format("ADMM Algorithm: Medium Input Simulation [Gaussian 2000 x 2000]"))" begin
if SIMULATION_STATUS_GAUSSIAN_VARY_RANK_MEDIUM == true
let
Random.seed!(65536)
ROW = 2000
COL = 2000
for input_rank in union(1, collect(10:10:500))
@printf("medium case: rank = %d\n", input_rank)
dist = Gaussian()
timer = TimerOutput()
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR * "gaussian/medium(2000x2000)(vary_rank)/" * "rank" * string(input_rank) * "/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(10, 5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Gaussian
@timeit timer "Gaussian(2000x2000)" * "| rank=" * string(input_rank) begin
completed_matrix, type_tracker, tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = input_rank,
io = io,
type_assignment = manual_type_matrix)
end
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
print(io, timer)
close(io)
end
end
else
@info("Already Completed Simualtion[Gaussian vary rank][MEDIUM]")
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4114 | import Distributions
using MatrixCompletion
import Random
using TimerOutputs
using Printf # @printf is used below; normally brought in by the test runner
using Test   # @testset/@test when this file is run on its own
const to = TimerOutput()
function log_simulation_result(dist::ExponentialFamily, completed_matrix, truth_matrix, type_tracker; io = Base.stdout)
predicted = predict(dist, forward_map(dist,
completed_matrix[type_tracker[convert(Symbol, dist)]]))
truth = truth_matrix[type_tracker[convert(Symbol, dist)]]
summary = provide(Diagnostics{Any}(),
reference = truth,
input_data = predicted)
@printf(io, "\nSummary about %s\n%s\n", string(convert(Symbol, dist)), repeat("-", 80))
show(io, MIME("text/plain"), summary)
print(io, "\n")
end
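# Note on the helper above: `forward_map(dist, ...)` maps the completed
# natural-parameter entries back to the mean scale (presumably the inverse
# link), `predict(dist, ...)` converts the result to the observation scale
# (e.g. thresholding for Bernoulli), and `provide(Diagnostics{Any}(), ...)`
# summarizes the error against the held-out truth.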
@testset "$(format("ADMM Algorithm: Small Input[Gaussian + Bernoulli]"))" begin
let
Random.seed!(65536)
for i in 1:200
@printf("small case: rank = %d\n", i)
io = open("./test_result/gaussian_bernoulli/small/rank"*string(2*i)*".txt", "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = i), 400, 200),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = i), 400, 200)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
@timeit to "Gaussian + Bernoulli" completed_matrix, type_tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = 2 * i,
io = io)
log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker, io = io)
log_simulation_result(Gaussian(), completed_matrix, truth_matrix, type_tracker, io = io)
show(io, MIME("text/plain"), to)
close(io)
end
end
end
@testset "$(format("ADMM Algorithm: Medium Input[Gaussian + Bernoulli]"))" begin
let
Random.seed!(65536)
for i in 1:10:500
@printf("medium case: rank = %d\n", i)
io = open("./test_result/gaussian_bernoulli/small/rank"*string(2*i)*".txt", "w")
truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = i), 2000, 1000),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = i), 2000, 1000)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
@timeit to "Gaussian + Bernoulli" completed_matrix, type_tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
project_rank = 2 * i,
io = io)
log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker, io = io)
log_simulation_result(Gaussian(), completed_matrix, truth_matrix, type_tracker, io = io)
show(io, MIME("text/plain"), to)
close(io)
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1787 |
# @testset "$(format("ADMM Algorithm: Small Input[Gaussian + Bernoulli + Poisson + Gamma + NegativeBinomial]"))" begin
# let
# Random.seed!(65536)
# for i in 1:200
# @printf("small case: rank = %d\n", i)
# io = open("./test_result/gaussian_bernoulli/small/rank"*string(2*i)*".txt", "w")
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = i), 400, 200),
# (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = i), 400, 200)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# @timeit to "Gaussian + Bernoulli" completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = 2 * i,
# io = io)
# log_simulation_result(Bernoulli(), completed_matrix, truth_matrix, type_tracker, io = io)
# log_simulation_result(Gaussian(), completed_matrix, truth_matrix, type_tracker, io = io)
# show(io, MIME("text/plain"), to)
# close(io)
# end
# end
# end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 615 | const FLAG_SIMULATION_MIXED_VARY_RANK_SMALL = false
const FLAG_SIMULATION_MIXED_VARY_RANK_MEDIUM = true
const FLAG_SIMULATION_MIXED_VARY_RANK_LARGE = false
const FLAG_SIMULATION_MIXED_VARY_MISSING_PERCENTAGE_SMALL = false
const FLAG_SIMULATION_MIXED_VARY_MISSING_PERCENTAGE_MEDIUM = false
const FLAG_SIMULATION_MIXED_VARY_MISSING_PERCENTAGE_LARGE = false
FLAG_SIMULATION_MIXED_VARY_RANK_SMALL ?
include("simulation_mixed_vary_rank_small.jl") : nothing
FLAG_SIMULATION_MIXED_VARY_RANK_MEDIUM ?
include("simulation_mixed_vary_rank_medium.jl") : nothing
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 825 | const FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_RANK_SMALL = false
const FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_RANK_MEDIUM = true
const FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_RANK_LARGE = false
const FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_MISSING_PERCENTAGE_SMALL = false
const FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_MISSING_PERCENTAGE_MEDIUM = false
const FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_MISSING_PERCENTAGE_LARGE = false
FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_RANK_SMALL ?
include("simulation_negbin_vary_rank_small.jl") : nothing
FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_RANK_MEDIUM ?
include("simulation_negbin_vary_rank_medium.jl") : nothing
FLAG_SIMULATION_NEGATIVE_BINOMIAL_VARY_RANK_LARGE ?
include("simulation_negbin_vary_rank_large.jl") : nothing
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4663 | const FLAG_SIMULATION_POISSON_VARY_RANK_SMALL = false
const FLAG_SIMULATION_POISSON_VARY_RANK_MEDIUM = true
const FLAG_SIMULATION_POISSON_VARY_RANK_LARGE = false
const FLAG_SIMULATION_POISSON_VARY_MISSING_PERCENTAGE_SMALL = false
const FLAG_SIMULATION_POISSON_VARY_MISSING_PERCENTAGE_MEDIUM = false
const FLAG_SIMULATION_POISSON_VARY_MISSING_PERCENTAGE_LARGE = false
FLAG_SIMULATION_POISSON_VARY_RANK_SMALL ?
include("simulation_poisson_vary_rank_small.jl") : nothing
FLAG_SIMULATION_POISSON_VARY_RANK_MEDIUM ?
include("simulation_poisson_vary_rank_medium.jl") : nothing
# @testset "$(format("ADMM Algorithm: Small Input Simulation [Poisson 400 x 400]"))" begin
# if SIMULATION_STATUS_POISSON_VARY_RANK_SMALL == false
# let
# Random.seed!(65536)
# ROW = 400
# COL = 400
# for input_rank in 1:2:400
# @printf("small case: rank = %d\n", input_rank)
# dist = Poisson()
# timer = TimerOutput()
# Base.Filesystem.mkpath("./test_result/poisson/small(400x400)")
# io = open("./test_result/poisson/small(400x400)/rank"*string(input_rank)*".log", "w")
# truth_matrix = rand([(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), ROW, COL)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_matrix = Array{Symbol}(undef, ROW, COL)
# manual_type_matrix .= :Poisson
# @timeit timer "Poisson(400x400)" * "| rank="*string(input_rank) begin
# completed_matrix, type_tracker, tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = nothing,
# io = io,
# type_assignment = manual_type_matrix)
# end
# log_simulation_result(Poisson(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
# show(io, MIME("text/plain"), timer)
# end
# end
# end
# end
# @testset "$(format("ADMM Algorithm: Medium Input Simulation [Poisson 2000 x 2000]"))" begin
# if SIMULATION_STATUS_POISSON_VARY_RANK_MEDIUM == false
# let
# Random.seed!(65536)
# ROW = 2000
# COL = 2000
# for input_rank in union(1, collect(10:10:500))
# @printf("medium case: rank = %d\n", input_rank)
# dist = Poisson()
# timer = TimerOutput()
# Base.Filesystem.mkpath("./test_result/poisson/medium(2000x2000)")
# io = open("./test_result/poisson/medium(2000x2000)/rank"*string(input_rank)*".log", "w")
# truth_matrix = rand([(FixedRankMatrix(Distributions.Poisson(5), rank = input_rank), ROW, COL)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_matrix = Array{Symbol}(undef, ROW, COL)
# manual_type_matrix .= :Poisson
# @timeit timer "Poisson(2000x2000)" * "| rank="*string(input_rank) begin
# completed_matrix, type_tracker, tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# project_rank = input_rank,
# io = io,
# type_assignment = manual_type_matrix)
# end
# log_simulation_result(Poisson(), completed_matrix, truth_matrix, type_tracker,tracker, io = io)
# show(io, MIME("text/plain"), timer)
# end
# end
# end
# end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1206 | using MatrixCompletion.Utilities.FastEigen
using IterativeSolvers
using LinearAlgebra
using Test
# Generate a dense random matrix and symmetrize it: A + A' is symmetric.
function create_symmetric_matrix(n)
  a = rand(n,n)*5
  return a+a'
end
# Reference answer via dense eigendecomposition: keep the k largest
# eigenvalues and the corresponding eigenvectors.
function correct_output_sparseeigen(input,k)
eigen_dcp = LinearAlgebra.eigen(input);
eigen_val = eigen_dcp.values;
eigen_vec = eigen_dcp.vectors;
first_k_idx = Base.sortperm(eigen_val,rev=true)[1:k];
return eigen_val[first_k_idx],eigen_vec[:,first_k_idx];
end
function get_projection(v,e)
return e * Diagonal(v) * e';
end
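# Note: `get_projection` rebuilds E * Diagonal(v) * E' from k eigenpairs.
# `test_native_eigen` below compares two eigensolvers through this rank-k
# reconstruction rather than entrywise, since eigenvectors are only determined
# up to sign (and up to rotation within repeated eigenvalues).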
function test_native_eigen(;dim=500,nev=20,repeat=5)
for i = 1:repeat
input = create_symmetric_matrix(dim);
@time λ,X = eigs(NativeEigen(),input;nev=nev);
λ0,X0 = correct_output_sparseeigen(input,nev);
@test norm(get_projection(λ,X) - get_projection(λ0,X0),2)<1e-3;
end
end
function test_lobpcg_wrapper(;dim=1000,nev=20,repeat=5)
input = create_symmetric_matrix(dim);
for i = 1:repeat
@time λ,X = eigs(NativeLOBPCG(),input;nev=nev);
end
end
function test_lobpcg_via_import(;dim=1000,nev=20,repeat=5)
input = create_symmetric_matrix(dim);
  for i = 1:repeat
@time lobpcg(input,true,nev);
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 122 | # Load the helpers whether we run from the package root or from test/.
isfile("sparse_eigen_test.jl") ? include("sparse_eigen_test.jl") : include("./test/sparse_eigen_test.jl")
test_native_eigen()
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 5555 | import Distributions
using MatrixCompletion
import Random
using TimerOutputs
using Printf # @printf is used below; normally brought in by the test runner
using Test   # @testset when this file is run on its own
const to = TimerOutput()
@testset "$(format("ADMM Algorithm: Small Input[Negative Binomial User Input]"))" begin
let
for i = 5:2:200
@printf("[Small] Currently doing rank %d\n", i)
Random.seed!(65536)
io = open("./test_result/negbin/small/rank"*string(i)*".txt", "w")
r_input = 6
input_size = 200
input_rank = i
truth_matrix = rand([(FixedRankMatrix(Distributions.NegativeBinomial(r_input, 0.8),
rank = input_rank),
input_size,
input_size)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>r_input, :p=>0.8))
manual_type_input = Array{Symbol}(undef, input_size, input_size)
manual_type_input .= :NegativeBinomial
# display(input_matrix)
@timeit to "neg small" completed_matrix, type_tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
debug_mode = false,
type_assignment = manual_type_input,
user_input_estimators = user_input_estimators,
project_rank = input_rank,
io = io)
# display(completed_matrix[1:10, 1:10])
predicted_negative_binomial = predict(NegativeBinomial(),
forward_map(NegativeBinomial(),
completed_matrix[type_tracker[:NegativeBinomial]],
r_estimate = r_input))
truth_negative_binomial = truth_matrix[type_tracker[:NegativeBinomial]]
summary_negative_binomial = provide(Diagnostics{Any}(),
reference = truth_negative_binomial,
input_data = predicted_negative_binomial)
show(io, MIME("text/plain"), summary_negative_binomial)
print(io, "\n")
show(io, MIME("text/plain"), to)
close(io)
end
end
end
@testset "$(format("ADMM Algorithm: Medium Input[Negative Binomial User Input]"))" begin
let
for i = 5:20:500
@printf("[Medium] Currently doing rank %d\n ", i)
Random.seed!(65536)
io = open("./test_result/negbin/medium/rank"*string(i)*".txt", "w")
# io = stdout
r_input = 6
input_size = 2000
input_rank = i
truth_matrix = rand([(FixedRankMatrix(Distributions.NegativeBinomial(r_input, 0.8),
rank = input_rank),
input_size,
input_size)])
sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
input_matrix = sample_model.draw(truth_matrix)
user_input_estimators = Dict(:NegativeBinomial=> Dict(:r=>r_input, :p=>0.8))
manual_type_input = Array{Symbol}(undef, input_size, input_size)
manual_type_input .= :NegativeBinomial
# display(input_matrix)
@timeit to "neg medium" completed_matrix, type_tracker = complete(A = input_matrix,
maxiter = 200,
ρ = 0.3,
use_autodiff = true,
gd_iter = 3,
debug_mode = false,
type_assignment = manual_type_input,
user_input_estimators = user_input_estimators,
project_rank = input_rank,
io = io)
predicted_negative_binomial = predict(NegativeBinomial(),
forward_map(NegativeBinomial(),
completed_matrix[type_tracker[:NegativeBinomial]],
r_estimate = r_input))
truth_negative_binomial = truth_matrix[type_tracker[:NegativeBinomial]]
summary_negative_binomial = provide(Diagnostics{Any}(),
reference = truth_negative_binomial,
input_data = predicted_negative_binomial)
show(io, MIME("text/plain"), summary_negative_binomial)
print(io, "\n")
show(io, MIME("text/plain"), to)
close(io)
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 864 |
@testset "$(format("Concept: MaybeMissing[Conversion][VecOrMat]"))" begin
let
tc1 = [1,2,3,4]
output1 = convert(MaybeMissing{Number},tc1)
@test isa(output1,Array{MaybeMissing{Number}}) == true
@test output1 == tc1
#see(output1)
output2 = convert(MaybeMissing{Float64},tc1)
@test isa(output2, Array{MaybeMissing{Float64}}) == true
#display(output2)
# test array of missing
tc2 = [missing, missing, missing]
output3 = convert(MaybeMissing{Float64},tc2)
@test isa(output3,Array{MaybeMissing{Float64}}) == true
output3[1] = 1.0
@test output3[1] == 1.0 && ismissing(output3[2]) && ismissing(output3[3])
@test isa(convert(MaybeMissing{Float64},[missing missing;missing missing]),
Array{MaybeMissing{Float64}}) == true
end
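  # Base-only sketch of the same behavior (assumption: MaybeMissing{T} plays
  # the role of Union{Missing,T}); the conversions above mirror these:
  let
    v = convert(Vector{Union{Missing,Float64}}, [1, 2, 3, 4])
    @test v == [1.0, 2.0, 3.0, 4.0]
    w = Vector{Union{Missing,Float64}}(missing, 3)
    w[1] = 1.0
    @test w[1] == 1.0 && ismissing(w[2]) && ismissing(w[3])
  end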
end | MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3516 | @testset "$(_gen("Optimizer: Gamma Loss [Small][Forgiving][Native]"))" begin
@timeit to "Optimizer: Gamma Loss [Small][Forgiving][Native]" begin
@test unit_test_train_subloss(AbstractGamma(),
gradient_eval = Loss{AbstractGamma}(),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(AbstractGamma(),
gradient_eval = Loss{AbstractGamma}(),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(AbstractGamma(),
gradient_eval = Loss{AbstractGamma}(),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(AbstractGamma(),
gradient_eval = Loss{AbstractGamma}(),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 1000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(AbstractGamma(),
gradient_eval = Loss{AbstractGamma}(),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 3000,
ρ = 0.2,
                                  step_size = 0.1,
                                  max_iter = 100) > 0.9
@test unit_test_train_subloss(AbstractGamma(),
gradient_eval = Loss{AbstractGamma}(),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100) > 0.9
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 5479 |
@testset "$(_gen("Optimizer: Poisson Loss [Small][Forgiving][AutoGrad]"))" begin
@timeit to "Optimizer: Poisson Loss [Small][Forgiving][AutoGrad]" begin
@test unit_test_train_subloss(gradient_eval = provide(Loss{AbstractPoisson}()),
input_distribution = Distributions.Poisson(10),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = provide(Loss{AbstractPoisson}()),
input_distribution = Distributions.Poisson(10),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = provide(Loss{AbstractPoisson}()),
input_distribution = Distributions.Poisson(10),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = provide(Loss{AbstractPoisson}()),
input_distribution = Distributions.Poisson(10),
input_size = 1000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = provide(Loss{AbstractPoisson}()),
input_distribution = Distributions.Poisson(10),
input_size = 3000,
ρ = 0.2,
                                  step_size = 0.1,
                                  max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = provide(Loss{AbstractPoisson}()),
input_distribution = Distributions.Poisson(10),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100) > 0.9
end
end
@testset "$(_gen("Optimizer: Poisson Loss [Small][Forgiving][Native]"))" begin
@timeit to "Optimizer: Poisson Loss [Small][Forgiving][Native]" begin
@test unit_test_train_subloss(gradient_eval = Loss{AbstractPoisson}(),
input_distribution = Distributions.Poisson(10),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = Loss{AbstractPoisson}(),
input_distribution = Distributions.Poisson(10),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = Loss{AbstractPoisson}(),
input_distribution = Distributions.Poisson(10),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = Loss{AbstractPoisson}(),
input_distribution = Distributions.Poisson(10),
input_size = 1000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = Loss{AbstractPoisson}(),
input_distribution = Distributions.Poisson(10),
input_size = 3000,
ρ = 0.2,
                                  step_size = 0.1,
                                  max_iter = 100) > 0.9
@test unit_test_train_subloss(gradient_eval = Loss{AbstractPoisson}(),
input_distribution = Distributions.Poisson(10),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100) > 0.9
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1487 | @testset "$(format("Sampling: UniformModel[VecOrMat]"))" begin
#==================== Vector Case ====================#
let
tc1 = rand(10000)
output = Sampler(UniformModel(0.5)).draw(tc1)
@test count(x->ismissing(x),output) > 1000
@test count(x->!ismissing(x),output) > 1000
# stronger
tc2 = rand(10000)
output = Sampler(UniformModel(0.1)).draw(tc2)
@test count(x->ismissing(x),output) >1000
@test count(x->!ismissing(x),output) <1000
end
#==================== Matrix Case ====================#
let
@test 4 <= count(x -> !ismissing(x),Sampler(UniformModel(0.1)).draw(ones(10,10))) <= 10
@test 10 < count(x -> ismissing(x), Sampler(UniformModel(0.1)).draw(ones(10,10)))
end
#==================== Factory Mode ====================#
let
sampler = provide(Sampler{UniformModel}(),rate = 0.5)
tc1 = rand(10000)
output = sampler.draw(tc1)
@test count(x->ismissing(x),output) > 1000
@test count(x->!ismissing(x),output) > 1000
# stronger
tc2 = rand(10000)
sampler2 = provide(Sampler{UniformModel}(),rate = 0.1)
output = sampler2.draw(tc2)
@test count(x->ismissing(x),output) >1000
@test count(x->!ismissing(x),output) <=1000
@test 4 <= count(x -> !ismissing(x),sampler2.draw(ones(10,10))) <= 10
@test 10 < count(x -> ismissing(x),sampler2.draw(ones(10,10)))
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 23164 | # include("admm_test.jl")
# test_admm_with_autodiff_smallinput(gd_iter=3,dbg=true)
# test_admm_without_autodiff_smallinput(gd_iter=3)
#test_admm_without_autodiff_largeinput(gd_iter=3,dbg=true)
using LinearAlgebra
# @testset "$(format("ADMM Algorithm: Small Input[Gaussian + Bernoulli]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 5), 200, 100),
# (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 5), 200, 100)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# display(input_matrix)
# @time completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false)
# predicted_bernoulli = predict(Bernoulli(),
# forward_map(Bernoulli(), completed_matrix[type_tracker[:Bernoulli]]))
# truth_bernoulli = truth_matrix[type_tracker[:Bernoulli]]
# summary_bernoulli = provide(Diagnostics{Any}(),
# reference = truth_bernoulli,
# input_data = predicted_bernoulli)
# display(summary_bernoulli)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# display(summary_gaussian)
# end
# end
# @testset "$(format("ADMM Algorithm: Small Input[Gaussian + Poisson]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 5), 200, 100),
# (FixedRankMatrix(Distributions.Poisson(10), rank = 10), 200, 100)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# display(input_matrix)
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 5,
# debug_mode = false)
# predicted_poisson = predict(Poisson(),
# forward_map(Poisson(), completed_matrix[type_tracker[:Poisson]]))
# truth_poisson = truth_matrix[type_tracker[:Poisson]]
# summary_poisson = provide(Diagnostics{Any}(),
# reference = truth_poisson,
# input_data = predicted_poisson)
# display(summary_poisson)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# display(summary_gaussian)
# end
# end
# @testset "$(format("ADMM Algorithm: Small Input[Gaussian + Poisson + Bernoulli]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 5), 300, 100),
# (FixedRankMatrix(Distributions.Poisson(10), rank = 5), 300, 100),
# (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 5), 300, 100)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# display(input_matrix)
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 5,
# debug_mode = false)
# predicted_poisson = predict(Poisson(),
# forward_map(Poisson(), completed_matrix[type_tracker[:Poisson]]))
# truth_poisson = truth_matrix[type_tracker[:Poisson]]
# summary_poisson = provide(Diagnostics{Any}(),
# reference = truth_poisson,
# input_data = predicted_poisson)
# display(summary_poisson)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# display(summary_gaussian)
# predicted_bernoulli = predict(Bernoulli(),
# forward_map(Bernoulli(), completed_matrix[type_tracker[:Bernoulli]]))
# truth_bernoulli = truth_matrix[type_tracker[:Bernoulli]]
# summary_bernoulli = provide(Diagnostics{Any}(),
# reference = truth_bernoulli,
# input_data = predicted_bernoulli)
# display(summary_bernoulli)
# end
# end
# @testset "$(format("ADMM Algorithm: Medium Input[Gaussian + Poisson + Bernoulli]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 5), 300, 100),
# (FixedRankMatrix(Distributions.Poisson(10), rank = 5), 300, 100),
# (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 5), 300, 100)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# display(input_matrix)
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false)
# predicted_poisson = predict(Poisson(),
# forward_map(Poisson(), completed_matrix[type_tracker[:Poisson]]))
# truth_poisson = truth_matrix[type_tracker[:Poisson]]
# summary_poisson = provide(Diagnostics{Any}(),
# reference = truth_poisson,
# input_data = predicted_poisson)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# predicted_bernoulli = predict(Bernoulli(),
# forward_map(Bernoulli(), completed_matrix[type_tracker[:Bernoulli]]))
# truth_bernoulli = truth_matrix[type_tracker[:Bernoulli]]
# summary_bernoulli = provide(Diagnostics{Any}(),
# reference = truth_bernoulli,
# input_data = predicted_bernoulli)
# @info("Gaussian")
# display(summary_gaussian)
# @info("Poisson")
# display(summary_poisson)
# @info("Bernoulli")
# display(summary_bernoulli)
# end
# end
function relative_l2_error(x, y)
return LinearAlgebra.norm(x - y, 2)^2 / LinearAlgebra.norm(y)^2
end
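# Worked example (illustrative numbers): for y = [3, 4] we have ‖y‖² = 25, and
# for x = [3, 5] the squared error ‖x - y‖² is 1, so the relative error is 0.04.
@assert relative_l2_error([3.0, 5.0], [3.0, 4.0]) ≈ 0.04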
# @testset "$(format("ADMM Algorithm: Small Input[Gaussian + Poisson + Bernoulli + Gamma]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 3), 400, 100),
# (FixedRankMatrix(Distributions.Poisson(10), rank = 3), 400, 100),
# (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 3), 400, 100),
# (FixedRankMatrix(Distributions.Gamma(10, 2), rank = 3), 400, 100)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# display(input_matrix)
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false)
# predicted_poisson = predict(Poisson(),
# forward_map(Poisson(), completed_matrix[type_tracker[:Poisson]]))
# truth_poisson = truth_matrix[type_tracker[:Poisson]]
# summary_poisson = provide(Diagnostics{Any}(),
# reference = truth_poisson,
# input_data = predicted_poisson)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# predicted_gamma = predict(Gamma(),
# forward_map(Gamma(), completed_matrix[type_tracker[:Gamma]]))
# truth_gamma = truth_matrix[type_tracker[:Gamma]]
# summary_gamma = provide(Diagnostics{Any}(),
# reference = truth_gamma,
# input_data = predicted_gamma)
# predicted_bernoulli = predict(Bernoulli(),
# forward_map(Bernoulli(), completed_matrix[type_tracker[:Bernoulli]]))
# truth_bernoulli = truth_matrix[type_tracker[:Bernoulli]]
# summary_bernoulli = provide(Diagnostics{Any}(),
# reference = truth_bernoulli,
# input_data = predicted_bernoulli)
# @info("Gaussian")
# display(summary_gaussian)
# @info("Poisson")
# display(summary_poisson)
# @info("Bernoulli")
# display(summary_bernoulli)
# @info("Gamma")
# display(summary_gamma)
# end
# end
# # 1. rectangular matrices sufficient condition
# # 2. big scale simulation
# # 3. fix the small dimension
# # 4. time performance
# # 5. bernoulli / uniform
# # 6. weighted missing pattern
# @testset "$(format("ADMM Algorithm: Small Input[Gaussian + Poisson + Bernoulli + Gamma]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 4), 400, 15),
# (FixedRankMatrix(Distributions.Poisson(10), rank = 4), 400, 15),
# (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 4), 400, 15),
# (FixedRankMatrix(Distributions.Gamma(10, 2), rank = 4), 400, 15)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# display(input_matrix)
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 10,
# debug_mode = false)
# predicted_poisson = predict(Poisson(),
# forward_map(Poisson(), completed_matrix[type_tracker[:Poisson]]))
# truth_poisson = truth_matrix[type_tracker[:Poisson]]
# summary_poisson = provide(Diagnostics{Any}(),
# reference = truth_poisson,
# input_data = predicted_poisson)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# predicted_gamma = predict(Gamma(),
# forward_map(Gamma(), completed_matrix[type_tracker[:Gamma]]))
# truth_gamma = truth_matrix[type_tracker[:Gamma]]
# summary_gamma = provide(Diagnostics{Any}(),
# reference = truth_gamma,
# input_data = predicted_gamma)
# predicted_bernoulli = predict(Bernoulli(),
# forward_map(Bernoulli(), completed_matrix[type_tracker[:Bernoulli]]))
# truth_bernoulli = truth_matrix[type_tracker[:Bernoulli]]
# summary_bernoulli = provide(Diagnostics{Any}(),
# reference = truth_bernoulli,
# input_data = predicted_bernoulli)
# @info("Gaussian")
# display(summary_gaussian)
# @info("Poisson")
# display(summary_poisson)
# @info("Bernoulli")
# display(summary_bernoulli)
# @info("Gamma")
# display(summary_gamma)
# end
# end
# @testset "$(format("ADMM Algorithm: Medium Input[Gaussian + Poisson + Bernoulli + Gamma]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 3), 1600, 400),
# (FixedRankMatrix(Distributions.Poisson(10), rank = 3), 1600, 400),
# (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 3), 1600, 400),
# (FixedRankMatrix(Distributions.Gamma(10, 2), rank = 3), 1600, 400)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# display(input_matrix)
# manual_type_input = Array{Symbol}(undef, 1600, 1600)
# manual_type_input[:, 1:400] .= :Gaussian
# manual_type_input[:, 401:800] .= :Poisson
# manual_type_input[:, 801:1200] .= :Bernoulli
# manual_type_input[:, 1201:1600] .= :Gamma
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# type_assignment = manual_type_input)
# predicted_poisson = predict(Poisson(),
# forward_map(Poisson(), completed_matrix[type_tracker[:Poisson]]))
# truth_poisson = truth_matrix[type_tracker[:Poisson]]
# summary_poisson = provide(Diagnostics{Any}(),
# reference = truth_poisson,
# input_data = predicted_poisson)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# predicted_gamma = predict(Gamma(),
# forward_map(Gamma(), completed_matrix[type_tracker[:Gamma]]))
# truth_gamma = truth_matrix[type_tracker[:Gamma]]
# summary_gamma = provide(Diagnostics{Any}(),
# reference = truth_gamma,
# input_data = predicted_gamma)
# predicted_bernoulli = predict(Bernoulli(),
# forward_map(Bernoulli(), completed_matrix[type_tracker[:Bernoulli]]))
# truth_bernoulli = truth_matrix[type_tracker[:Bernoulli]]
# summary_bernoulli = provide(Diagnostics{Any}(),
# reference = truth_bernoulli,
# input_data = predicted_bernoulli)
# @info("Gaussian")
# display(summary_gaussian)
# @info("Poisson")
# display(summary_poisson)
# @info("Bernoulli")
# display(summary_bernoulli)
# @info("Gamma")
# display(summary_gamma)
# end
# end
# @testset "$(format("ADMM Algorithm: Small Input[Gamma]"))" begin
# using MatrixCompletion
# import Distributions
# import LinearAlgebra
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gamma(10, 2), rank = 5), 1600, 400)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_input = Array{Symbol}(undef, 1600, 400)
# manual_type_input .= :Gamma
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
# ρ = 0.3,
# use_autodiff = false,
# gd_iter = 10,
# debug_mode = false,
# type_assignment = manual_type_input)
# predicted_matrix = predict(Gamma(), forward_map(Gamma(), completed_matrix))
# error_matrix = abs.(predicted_matrix - truth_matrix)
# total_l2_error = LinearAlgebra.norm(error_matrix, 2)^2
# relative_error = relative_l2_error(predicted_matrix, truth_matrix)
# @info("completed")
# display(completed_matrix[1:10, 1:10])
# @info("predicted")
# display(predicted_matrix[1:10, 1:10])
# @info("truth")
# display(truth_matrix[1:10, 1:10])
# @info("error")
# display(error_matrix[1:10, 1:10])
# @show(relative_error)
# @show(total_l2_error)
# end
# @testset "$(format("ADMM Algorithm: Small Input[Gaussian + Gamma]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 5), 200, 100),
# (FixedRankMatrix(Distributions.Gamma(5, 0.5), rank = 5), 200, 100)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# manual_type_input = Array{Symbol}(undef, 200, 200)
# manual_type_input[:, 1:100] .= :Gaussian
# manual_type_input[:, 101:200] .= :Gamma
# display(input_matrix)
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
#                                                ρ = 0.3,
# use_autodiff = false,
# gd_iter = 3,
# debug_mode = false,
# type_assignment = manual_type_input)
# predicted_gamma = predict(Gamma(),
# forward_map(Gamma(), completed_matrix[type_tracker[:Gamma]]))
# truth_gamma = truth_matrix[type_tracker[:Gamma]]
# display(truth_gamma - predicted_gamma)
# error_matrix = abs.(truth_gamma - predicted_gamma)
# relative_error = LinearAlgebra.norm(error_matrix,2)^2 / LinearAlgebra.norm(truth_gamma) ^ 2
# @show(relative_l2_error(predicted_gamma, truth_gamma))
# # summary_gamma = provide(Diagnostics{Any}(),
# # reference = truth_gamma,
# # input_data = predicted_gamma)
# # display(summary_gamma)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# display(summary_gaussian)
# end
# end
include("./sub_test_runner_admm_negative_binomial.jl")
# include("./sub_test_runner_admm_bernoulli.jl")
# include("./sub_test_runner_admm_gaussian.jl")
# include("./sub_test_runner_admm_poisson.jl")
# include("./sub_test_runner_admm_gamma.jl")
# @testset "$(format("ADMM Algorithm: Small Input[Gaussian + Bernoulli, AutoDiff]"))" begin
# let
# truth_matrix = rand([(FixedRankMatrix(Distributions.Gaussian(5, 10), rank = 2), 200, 100),
# (FixedRankMatrix(Distributions.Bernoulli(0.5), rank = 2), 200, 100)])
# sample_model = provide(Sampler{BernoulliModel}(), rate = 0.8)
# input_matrix = sample_model.draw(truth_matrix)
# display(input_matrix)
# completed_matrix, type_tracker = complete(A = input_matrix,
# maxiter = 200,
#                                                ρ = 0.3,
# use_autodiff = true,
# gd_iter = 3,
# debug_mode = false)
# predicted_bernoulli = predict(Bernoulli(),
# forward_map(Bernoulli(), completed_matrix[type_tracker[:Bernoulli]]))
# truth_bernoulli = truth_matrix[type_tracker[:Bernoulli]]
# summary_bernoulli = provide(Diagnostics{Any}(),
# reference = truth_bernoulli,
# input_data = predicted_bernoulli)
# display(summary_bernoulli)
# predicted_gaussian = predict(Gaussian(),
# forward_map(Gaussian(), completed_matrix[type_tracker[:Gaussian]]))
# truth_gaussian = truth_matrix[type_tracker[:Gaussian]]
# summary_gaussian = provide(Diagnostics{Any}(),
# reference = truth_gaussian,
# input_data = predicted_gaussian)
# display(summary_gaussian)
# end
# end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 6644 | # using MatrixCompletion
const TEST_CHAINED_ALM_SMALL_RANDOM = true
const TEST_ONESHOT_ALM_SMALL_RANDOM = false
import LinearAlgebra: norm
import Random
function run_chained_alm_randomized(;m = nothing, n = nothing, k = nothing,
impute_round = 10, sample_rate = nothing, data = nothing, sampled_data = nothing,
prox_params = nothing,
init_X = nothing, init_Y = nothing)
local truth_matrix
if isnothing(data)
truth_matrix = randn(m, k) * randn(k, n)
else
truth_matrix = data
m, n = Base.size(data)
end
if isnothing(sample_rate) && !isnothing(sampled_data)
input_matrix = sampled_data
else
sample_model = provide(Sampler{BernoulliModel}(), rate = sample_rate / 100)
input_matrix = sample_model.draw(truth_matrix)
end
manual_type_matrix = Array{Symbol}(undef, m, n)
manual_type_matrix .= :Gaussian
imputed, X, Y, tracker = complete(ChainedALM(),
A = input_matrix,
type_assignment = manual_type_matrix,
block_size = Int64(500 * 500 / 10),
rx = MatrixCompletion.LowRankModels.QuadReg(0),
ry = MatrixCompletion.LowRankModels.QuadReg(0),
target_rank = k,
imputation_round = impute_round,
initialX = init_X,
initialY = init_Y,
proximal_params = prox_params)
return truth_matrix, imputed, X, Y, tracker
end
function run_one_shot_alm_randomized(;m = nothing, n = nothing, k = nothing,
sample_rate = nothing, data = nothing, prox_params = nothing,
sampled_data = nothing,
init_X = nothing, init_Y = nothing)
local truth_matrix, input_matrix
if isnothing(data)
truth_matrix = randn(m, k) * randn(k, n)
else
truth_matrix = data
m, n = Base.size(data)
end
if isnothing(sample_rate) && !isnothing(sampled_data)
input_matrix = sampled_data
else
sample_model = provide(Sampler{BernoulliModel}(), rate = sample_rate / 100)
input_matrix = sample_model.draw(truth_matrix)
end
manual_type_matrix = Array{Symbol}(undef, m, n)
manual_type_matrix .= :Gaussian
imputed, X, Y, tracker = complete(OneShotALM(),
A = input_matrix,
type_assignment = manual_type_matrix,
target_rank = k,
initialX = init_X,
initialY = init_Y,
proximal_params = prox_params)
return truth_matrix, imputed, X, Y, tracker
end
function get_diagnostic(A, A_imputed, X, Y, tracker)
ret = Dict{Symbol, Any}()
ret[:L2_total_error] = norm(A[tracker[:Missing][:Total]] - A_imputed[tracker[:Missing][:Total]]) ^ 2
ret[:L2_relative_error] = ret[:L2_total_error] / norm(A[tracker[:Missing][:Total]]) ^ 2
ret[:MissingEntries] = tracker[:Missing][:Total]
ret[:Truth] = A
ret[:Imputed] = Base.convert(Array{Float64, 2}, A_imputed)
ret[:X] = X
ret[:Y] = Y
return ret
end
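# Intended call pattern (sizes illustrative): each runner returns the 5-tuple
# (truth, imputed, X, Y, tracker), which is splatted into `get_diagnostic`:
#   d = get_diagnostic(run_one_shot_alm_randomized(m = 200, n = 200, k = 10,
#                                                  sample_rate = 80)...)
#   d[:L2_relative_error]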
# @testset "$(format("Algorithm: OneShotALM[Randomized, Small]"))" begin
# for i in 1:1
# @test get_diagnostic(run_one_shot_alm_randomized(m = 200, n = 200, k = 10, sample_rate = 80)...)[:L2_relative_error] < 0.05
# end
# end
# @testset "$(format("Algorithm: ChainedALM[Randomized, Small]"))" begin
# for i in 1:1
# @test get_diagnostic(run_chained_alm_randomized(m = 200, n = 200, k = 10, sample_rate = 80)...)[:L2_relative_error] < 0.05
# end
# end
let
Random.seed!(65536)
m = 500
n = 500
for k in collect(10:10:500)
for sample_rate in collect(10:1:99)
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"random_continuous/small_500x500/" *
"rank" * string(k) * "/" *
"sample" * string(sample_rate) * "/"
DATA_FILE_NAME_ONESHOT = "oneshot_saved_variables.h5"
DATA_FILE_PATH_ONESHOT = RESULTS_DIR * DATA_FILE_NAME_ONESHOT
DATA_FILE_NAME_CHAINED = "chained_saved_variables.h5"
DATA_FILE_PATH_CHAINED = RESULTS_DIR * DATA_FILE_NAME_CHAINED
Base.Filesystem.mkpath(RESULTS_DIR)
truth_matrix = randn(m, k) * randn(k, n)
sample_model = provide(Sampler{BernoulliModel}(), rate = sample_rate / 100)
input_matrix = sample_model.draw(truth_matrix)
initX = randn(k, m)
initY = randn(k, n)
param_oneshot = ProxGradParams(max_iter = 200)
param_chained = ProxGradParams(max_iter = 200)
result_oneshot_alm = get_diagnostic(run_one_shot_alm_randomized(data = deepcopy(truth_matrix),
sampled_data = deepcopy(input_matrix),
k = k,
init_X = deepcopy(initX),
init_Y = deepcopy(initY),
prox_params = param_oneshot)...)
result_chained_alm = get_diagnostic(run_chained_alm_randomized(data = deepcopy(truth_matrix),
sampled_data = deepcopy(input_matrix),
impute_round = 5,
init_X = deepcopy(initX),
init_Y = deepcopy(initY),
k = k,
prox_params = param_chained)...)
pickle(DATA_FILE_PATH_ONESHOT, result_oneshot_alm)
pickle(DATA_FILE_PATH_CHAINED, result_chained_alm)
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 682 | using MatrixCompletion.Utilities.BatchUtils
@testset "$(format("BatchUtils: Tables Header"))" begin
let
tc = collect(1:100)
batch = BatchFactory{SequentialScan}(size = 10)
initialize(batch, tc)
expected = [1:10, 11:20, 21:30, 31:40, 41:50, 51:60, 61:70, 71:80, 81:90, 91:100]
ptr = 1
while has_next(batch)
@test tc[next(batch)] == collect(expected[ptr])
ptr += 1
end
end
let
tc = collect(1:100)
batch = BatchFactory{SequentialScan}(size = 64)
initialize(batch, tc)
expected = [1:64, 65:100]
ptr = 1
while has_next(batch)
@test tc[next(batch)] == collect(expected[ptr])
ptr += 1
end
end
end
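# Sanity sketch in plain Base Julia (independent of BatchUtils): the chunk
# boundaries asserted above match what `Iterators.partition` produces.
@assert [collect(r) for r in Iterators.partition(1:100, 64)] == [collect(1:64), collect(65:100)]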
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 14186 | @testset "$(format("Loss: Bernoulli[construction]"))" begin
@test typeof(Loss{Bernoulli}(Bernoulli())) == Loss{Bernoulli}
@test typeof(Loss(Bernoulli())) == Loss{Bernoulli}
@test typeof(Loss(:Bernoulli)) == Loss{Bernoulli}
end
@testset "$(format("Optimizer: Bernoulli Loss [Small][Forgiving][AutoGrad]"))" begin
@timeit to "Optimizer: Bernoulli Loss [Small][Forgiving][AutoGrad]" begin
let
is_admissible(result) = begin
if result["relative-error[#within-radius(1e-5)]"] < 0.1
return true
end
@warn @sprintf("expected %f, got %f", 0.1, result["relative-error[#within-radius(1e-5)]"])
return false
end
@test !isnothing(provide(Loss(Bernoulli())))
@test !isnothing(provide(Loss(:Bernoulli)))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(Bernoulli())),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(Bernoulli())),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(Bernoulli())),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(Bernoulli())),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 1000,
ρ = 0.1,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(Bernoulli())),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 3000,
ρ = 0.1,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(Bernoulli())),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(:Bernoulli)),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(:Bernoulli)),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(:Bernoulli)),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(:Bernoulli)),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 1000,
ρ = 0.1,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(:Bernoulli)),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 3000,
ρ = 0.1,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = provide(Loss(:Bernoulli)),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
end
end
end
@testset "$(format("Optimizer: Bernoulli Loss [Small][Forgiving][Native]"))" begin
@timeit to "Optimizer: Bernoulli Loss [Small][Forgiving][Native]" begin
is_admissible(result) = begin
if result["relative-error[#within-radius(1e-5)]"] < 0.1
return true
end
@warn @sprintf("expected %f, got %f",0.1,result["relative-error[#within-radius(1e-5)]"])
return false
end
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(Bernoulli()),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(Bernoulli()),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(Bernoulli()),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(Bernoulli()),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 1000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(Bernoulli()),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 3000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(Bernoulli()),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(:Bernoulli),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(:Bernoulli),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(:Bernoulli),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(:Bernoulli),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 1000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(:Bernoulli),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 3000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Bernoulli(),gradient_eval = Loss(:Bernoulli),
input_distribution = Distributions.Bernoulli(0.6),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 6191 | import Distributions
@testset "$(format("MGF: construction"))" begin
let
@test typeof(MGF{Poisson}(Poisson())) == MGF{Poisson}
@test typeof(MGF{Poisson}(Poisson();logscale = true)) == MGF{Poisson}
@test typeof(MGF(Poisson())) == MGF{Poisson}
@test typeof(MGF(Poisson(),logscale=false)) == MGF{Poisson}
@test typeof(MGF(:Poisson)) == MGF{Poisson}
#@test_throws UnrecognizedSymbolException MGF(:POisson)
# test the logscale keyword argument with each constructor form
let
tc = MGF{Poisson}(Poisson(),logscale=true)
@test typeof(tc) == MGF{Poisson}
@test tc.OPTION_LOG_SCALE == true
end
let
tc = MGF(Poisson(),logscale=true)
@test typeof(tc) == MGF{Poisson}
@test tc.OPTION_LOG_SCALE == true
end
let
tc = MGF(:Poisson,logscale=true)
@test typeof(tc) == MGF{Poisson}
@test tc.OPTION_LOG_SCALE == true
end
end
end
@testset "$(format("SampleMGF: construction"))" begin
let
@test typeof(SampleMGF()) == SampleMGF
end
# test constructor with keyword argument
let
tc = SampleMGF(logscale = false)
@test typeof(tc) == SampleMGF
@test tc.OPTION_LOG_SCALE == false
end
end
@testset "$(format("MGF: evaluation[Poisson]"))" begin
let
for i = 1:5
t = collect(1:0.05:1.5)
tc_λ = rand() * 3
tc = evaluate(MGF(:Poisson),t,λ=tc_λ)
compare = Distributions.mgf.(Distributions.Poisson(tc_λ),t)
@test check(:l2diff,tc,compare) < 1e-2
tc_log = evaluate(MGF(:Poisson,logscale=true),t,λ=tc_λ)
compare_log = log.(Distributions.mgf.(Distributions.Poisson(tc_λ),t))
@test check(:l2diff,tc_log,compare_log) < 1e-1
end
end
end
@testset "$(format("MGF: evaluation[Gaussian]"))" begin
let
for i = 1:5
t = collect(1:0.05:1.5)
tc_μ = rand() * 5
tc_σ = rand() * 5
tc = evaluate(MGF(:Gaussian),t;μ = tc_μ, σ = tc_σ)
compare = Distributions.mgf.(Distributions.Gaussian(tc_μ,tc_σ),t)
@test check(:l2diff,tc,compare) < 1e-2
tc_log = evaluate(MGF(:Gaussian,logscale=true),t;μ = tc_μ,σ = tc_σ)
compare_log = log.(Distributions.mgf.(Distributions.Gaussian(tc_μ,tc_σ),t))
@test check(:l2diff,tc_log,compare_log) < 1e-2
end
end
end
@testset "$(format("MGF: evaluation[Gamma]"))" begin
let
for i = 1:5
tc_α = rand() * 5
tc_θ = rand() * 5
# ensure support
t = collect(0.01:0.05: (1/tc_θ))
tc = evaluate(MGF(:Gamma),t;α = tc_α, θ = tc_θ)
compare = Distributions.mgf.(Distributions.Gamma(tc_α,tc_θ),t)
@test check(:l2diff,tc,compare) < 1e-2
tc_log = evaluate(MGF(:Gamma,logscale=true),t;α = tc_α,θ = tc_θ)
compare_log = log.(Distributions.mgf.(Distributions.Gamma(tc_α,tc_θ),t))
@test check(:l2diff,tc_log,compare_log) < 1e-2
end
end
end
@testset "$(format("MGF: evaluation[Bernoulli]"))" begin
let
for i = 1:5
tc_p = rand()
# ensure support
t = collect(0.01:0.05: 0.5)
tc = evaluate(MGF(:Bernoulli),t;p = tc_p)
compare = Distributions.mgf.(Distributions.Bernoulli(tc_p),t)
@test check(:l2diff,tc,compare) < 1e-2
tc_log = evaluate(MGF(:Bernoulli,logscale=true),t;p = tc_p)
compare_log = log.(Distributions.mgf.(Distributions.Bernoulli(tc_p),t))
@test check(:l2diff,tc_log,compare_log) < 1e-2
end
end
end
@testset "$(format("MGF: evaluation[NegativeBinomial]"))" begin
let
for i = 1:5
tc_p = rand()
tc_r = rand() * 5
t = collect(0.01:0.05: -log(tc_p))
tc = evaluate(MGF(:NegativeBinomial),t;
p = tc_p, r = tc_r)
compare = Distributions.mgf.(Distributions.NegativeBinomial(tc_r,tc_p),t)
@test check(:l2diff,tc,compare) < 1e-2
tc_log = evaluate(MGF(:NegativeBinomial,logscale=true),t;p = tc_p,r=tc_r)
compare_log = log.(Distributions.mgf.(Distributions.NegativeBinomial(tc_r, tc_p),t))
@test check(:l2diff,tc_log,compare_log) < 1e-2
end
end
end
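# For reference, the closed forms the evaluation tests above compare against
# (standard textbook MGFs, written to mirror the keyword names used in the
# tests; the NegativeBinomial form follows the Distributions.jl parametrization):
closed_form_mgf(::Val{:Poisson}, t; λ) = exp.(λ .* (exp.(t) .- 1))
closed_form_mgf(::Val{:Gaussian}, t; μ, σ) = exp.(μ .* t .+ σ^2 .* t.^2 ./ 2)
closed_form_mgf(::Val{:Gamma}, t; α, θ) = (1 .- θ .* t) .^ (-α)   # needs t < 1/θ
closed_form_mgf(::Val{:Bernoulli}, t; p) = 1 .- p .+ p .* exp.(t)
closed_form_mgf(::Val{:NegativeBinomial}, t; r, p) = (p ./ (1 .- (1 - p) .* exp.(t))) .^ r
# e.g. closed_form_mgf(Val(:Poisson), collect(1:0.05:1.5), λ = 2.0)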
@testset "$(format("SampleMGF: evaluation"))" begin
# There is no way to verify its correctness directly, so we try a few distributions
# and show that as the order increases the approximation becomes more accurate.
let
total_passed = 0
for i = 1:1000
t = collect(0:0.0001:0.001)
sample = rand(Distributions.Gamma(rand(1:100),rand(1:100)),5000)
sample_mgf = evaluate(SampleMGF(),t,data = sample,order = 15)
real_mgf = Distributions.mgf.(Distributions.fit_mle(Distributions.Gamma,sample) ,t)
not_real_mgf = Distributions.mgf.(Distributions.fit_mle(Distributions.Normal,sample),t)
if check(:l2diff,sample_mgf,real_mgf) < check(:l2diff,sample_mgf,not_real_mgf)
total_passed = total_passed + 1
end
end
@test total_passed/1000 > 0.80
end
let
total_passed = 0
for i = 1:1000
t = collect(0:0.01:0.1)
sample = rand(Distributions.Normal(rand(10:100),rand(1:10)),5000)
sample_mgf = evaluate(SampleMGF(),t,data = sample,order = 15)
real_mgf = Distributions.mgf.(Distributions.fit_mle(Distributions.Normal,sample) ,t)
not_real_mgf = nothing
try
not_real_mgf = Distributions.mgf.(Distributions.fit_mle(Distributions.Gamma,sample),t)
catch
continue
end
if check(:l2diff,sample_mgf,real_mgf) < check(:l2diff,sample_mgf,not_real_mgf)
total_passed = total_passed + 1
end
end
@test total_passed/1000 > 0.80
end
end
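# A sketch of what evaluate(SampleMGF(), t, data = ..., order = k) plausibly
# computes — an assumption consistent with the `order` keyword above, not the
# package's actual code: truncate E[exp(tX)] = Σⱼ E[Xʲ] tʲ/j! at j = order,
# plugging in sample moments for E[Xʲ].
function sample_mgf_sketch(t::AbstractVector, data::AbstractVector; order::Int = 15)
    moments = [sum(data .^ j) / length(data) for j in 0:order]   # mⱼ = (1/n) Σᵢ xᵢʲ
    return [sum(moments[j + 1] * s^j / factorial(j) for j in 0:order) for s in t]
end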
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 7079 | @testset "$(format("Chained ADMM: Bernoulli"))" begin
let
Random.seed!(65536)
ROW = 500
COL = 500
for input_rank in collect(80:10:120)
for input_sample in collect(50:5:90)
try
@show(Pair(input_rank, input_sample))
truth_matrix = rand([(FixedRankMatrix(Distributions.Bernoulli(0.5), rank = input_rank), ROW, COL)])
sample_model = provide(Sampler{BernoulliModel}(), rate = input_sample / 100)
input_matrix = sample_model.draw(truth_matrix)
manual_type_matrix = Array{Symbol}(undef, ROW, COL)
manual_type_matrix .= :Bernoulli
let
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"bernoulli/small(" * string(ROW) * "x" * string(COL) * ")" * "(vary_missing_200_iter_max)/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/oneshot/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
completed_matrix, type_tracker, tracker, imputed = complete(OneShotADMM(),
A = input_matrix,
maxiter = 1000,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
io = io,
debug_mode = false,
project_rank = nothing,
type_assignment = manual_type_matrix)
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
close(io)
end
let
RESULTS_DIR = GLOBAL_SIMULATION_RESULTS_DIR *
"bernoulli/small(" * string(ROW) * "x" * string(COL) * ")" * "(vary_missing_200_iter_max)/" *
"rank" * string(input_rank) * "/" *
"sample" * string(input_sample) * "/chained/"
LOG_FILE_NAME = "io.log"
DATA_FILE_NAME = "saved_variables.h5"
LOG_FILE_PATH = RESULTS_DIR * LOG_FILE_NAME
DATA_FILE_PATH = RESULTS_DIR * DATA_FILE_NAME
Base.Filesystem.mkpath(RESULTS_DIR)
io = open(LOG_FILE_PATH, "w")
completed_matrix, type_tracker, tracker, imputed = complete(ChainedADMM(),
A = deepcopy(input_matrix),
maxiter = 200,
ρ = 0.3,
use_autodiff = false,
gd_iter = 3,
imputation_round = 5,
io = io,
debug_mode = false,
project_rank = nothing,
type_assignment = deepcopy(manual_type_matrix))
predicted_matrix = predict(MatrixCompletionModel(),
completed_matrix = completed_matrix,
type_tracker = type_tracker)
summary_object = summary(MatrixCompletionModel(),
predicted_matrix = predicted_matrix,
truth_matrix = truth_matrix,
type_tracker = type_tracker,
tracker = tracker)
pickle(DATA_FILE_PATH,
"missing_idx" => type_tracker[:Missing],
"completed_matrix" => completed_matrix,
"predicted_matrix" => predicted_matrix,
"truth_matrix" => truth_matrix,
"summary" => summary_object)
print(io, JSON.json(summary_object, 4))
close(io)
end
catch
# skip this (rank, sample) configuration on failure
@info("caught an exception; skipping this configuration")
end
end
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 906 | using MatrixCompletion
import LinearAlgebra
function unit_test_lpspace(p)
tc = LpSpace(p)
test_vec = rand(100)
@test tc.p==p && tc.norm(test_vec) == LinearAlgebra.norm(test_vec,tc.p)
end
@testset "$(format("Concepts: LpSpace[Construction]"))" begin
[unit_test_lpspace(i) for i in 1:10]
end
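# A sketch of the structure unit_test_lpspace exercises above (our own name and
# field layout, assumed from the test; the real LpSpace lives in MatrixCompletion):
struct LpSpaceSketch
    p::Real
    norm::Function
    LpSpaceSketch(p) = new(p, x -> LinearAlgebra.norm(x, p))
end
# e.g. v = rand(100); LpSpaceSketch(2).norm(v) == LinearAlgebra.norm(v, 2)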
@testset "$(format("Concepts: Type Convertsion[Symbol->Exponential Family]"))" begin
@test typeof(convert(ExponentialFamily,:Poisson)) == typeof(Poisson())
end
@testset "$(format("Concepts: Comparator[construction]"))" begin
@test typeof(Comparator{Int64}()) == Comparator{Int64}
@test typeof(Comparator(MGF(:Gaussian))) == Comparator{MGF{Gaussian}}
@test typeof(Comparator(MGF)) == Comparator{MGF}
@test typeof(Comparator(:MGF)) == Comparator{MGF}
let
tc = Comparator{MGF}(MGF(),eval_at = [1,2,3])
@test tc.field[:eval_at] == [1,2,3]
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 6619 | using MatrixCompletion
import Distributions
function unit_test_relative_error(metric,input,reference;base_metric = metric)
@test abs(
provide(RelativeError(),input,reference;metric = metric,base_metric=base_metric)
- metric(input-reference)/base_metric(reference)) <1e-5
end
function unit_test_absolute_error(metric,input,reference)
@test abs(provide(AbsoluteError(),input,reference;metric=metric) - metric(input-reference)) <1e-5
end
function unit_test_diagnostics(;input =nothing, reference = nothing)
end
@testset "$(format("Diagnostics: Absolute Error[LpMetric]"))" begin
# test lp norm metric for arrays
[unit_test_absolute_error(metric,rand(100),rand(100)) for metric in [x -> LinearAlgebra.norm(x,p) for p in 1:0.5:10]]
# test lp norm metric for matrices
[unit_test_absolute_error(metric,rand(100,100),rand(100,100)) for metric in [x -> LinearAlgebra.norm(x,p) for p in 1:0.5:10]]
end
@testset "$(format("Diagnostics: Absolute Error[WithinRadius]"))" begin
# test within_radius metric for arrays
let
for i = 1:10
tc = rand(100) .+ 1000
mask = rand(Distributions.Bernoulli(0.6),100)
num_of_non_zeros = sum(mask)
unit_test_absolute_error(x -> within_radius(x), mask,zeros(100))
unit_test_absolute_error(x -> within_radius(x), tc .* mask, tc)
end
end
# test within_radius metric for matrices
let
for i = 1:10
tc = rand(100,100) .+ 1000
mask = rand(Distributions.Bernoulli(0.6),100,100)
num_of_non_zeros = sum(mask)
unit_test_absolute_error(x -> within_radius(x), mask,zeros(100,100))
unit_test_absolute_error(x -> within_radius(x), tc .* mask, tc)
end
end
end
@testset "$(format("Diagnostics: Relative Error[LpMetric]"))" begin
# test lp norm metric for arrays
[unit_test_relative_error(metric,rand(100),rand(100)) for metric in [x -> LinearAlgebra.norm(x,p) for p in 1:0.5:10]]
# test lp norm metric for matrices
[unit_test_relative_error(metric,rand(100,100),rand(100,100)) for metric in [x -> LinearAlgebra.norm(x,p) for p in 1:0.5:10]]
end
@testset "$(format("Diagnostics: Relative Error[WithinRadius]"))" begin
# for arrays
let
for i = 1:10
tc = rand(100) .+ 1000
mask = rand(Distributions.Bernoulli(0.6),100)
num_of_non_zeros = sum(mask)
unit_test_relative_error(x -> within_radius(x), mask,zeros(100);base_metric = x -> LinearAlgebra.norm(mask,0))
unit_test_relative_error(x -> within_radius(x), tc .* mask, tc;base_metric = x -> LinearAlgebra.norm(mask,0))
end
end
# for matrices
let
for i = 1:10
tc = rand(100,100) .+ 1000
mask = rand(Distributions.Bernoulli(0.6),100,100)
num_of_non_zeros = sum(mask)
unit_test_relative_error(x -> within_radius(x), mask,zeros(100,100);base_metric = x -> LinearAlgebra.norm(mask,0))
unit_test_relative_error(x -> within_radius(x), tc .* mask, tc;base_metric = x -> LinearAlgebra.norm(mask,0))
end
end
end
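# A sketch of the metric used above (semantics inferred from the Diagnostics
# construction tests below): within_radius counts the entries of the error
# vector that land OUTSIDE the radius, i.e. the predictions that missed.
within_radius_sketch(x; radius = 1e-5) = sum(Int.(abs.(x) .> radius))
# e.g. within_radius_sketch([0.0, 2.0, 1e-7]) == 1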
@testset "$(format("Diagnostics: Construction[For Arrays]"))" begin
# test dispatcher
@test isa(provide(Diagnostics{Gamma()}(),input_data = [1.1], reference = [1.1]),Dict)
# test arg parser
@test_throws DomainError provide(Diagnostics{Gamma()}())
# test for arrays
let
input_data = [1,1,2,1,0] * 1.0
reference = deepcopy(input_data)
diagnostic = provide(Diagnostics{Gamma()}(),
input_data = input_data,reference = reference);
@test diagnostic["relative-error[#within-radius(1e-5)]"] == sum(Int.(abs.(input_data - reference) .> 1e-5))/length(input_data)
@test diagnostic["absolute-error[#within-radius(1e-5)]"] == sum(Int.(abs.(input_data - reference) .> 1e-5))
@test diagnostic["relative-error[L1]"] == 0
@test diagnostic["relative-error[L2]"] == 0
@test diagnostic["absolute-error[L1]"] == 0
@test diagnostic["absolute-error[L2]"] == 0
@test LinearAlgebra.norm(diagnostic["error-matrix"] - [0,0,0,0,0],2) <1e-5
end
let
input_data = [1,1,3,1,0] * 1.0
reference = [1,0,1,0,0] * 1.0
diagnostic = provide(Diagnostics{Gamma()}(),
input_data = input_data,reference = reference);
@test diagnostic["relative-error[#within-radius(1e-5)]"] == 3/5
@test diagnostic["absolute-error[#within-radius(1e-5)]"] == 3
@test diagnostic["relative-error[L1]"] == 2
@test diagnostic["relative-error[L2]"] == sqrt(6) / sqrt(2)
@test diagnostic["absolute-error[L1]"] == 4
@test diagnostic["absolute-error[L2]"] == sqrt(6)
@test LinearAlgebra.norm(diagnostic["error-matrix"] - [0,1,2,1,0],2) <1e-5
end
end
@testset "$(format("Diagnostics: Construction[For Matrices]"))" begin
# test dispatcher
@test isa(provide(Diagnostics{Gamma()}(),input_data = ones(2,2), reference = ones(2,2)),Dict)
# zeros
let
input_data = rand(10,10)
reference = deepcopy(input_data)
diagnostic = provide(Diagnostics{Gamma()}(),
input_data = input_data, reference = reference);
@test diagnostic["relative-error[#within-radius(1e-5)]"] == 0
@test diagnostic["absolute-error[#within-radius(1e-5)]"] == 0
@test diagnostic["relative-error[L1]"] == 0
@test diagnostic["relative-error[L2]"] == 0
@test diagnostic["absolute-error[L1]"] == 0
@test diagnostic["absolute-error[L2]"] == 0
@test LinearAlgebra.norm(diagnostic["error-matrix"] - zeros(10,10) ,2) <1e-5
end
# given test
let
input_data = [1 0 2;
0 2 0;
0 0 1]
reference = [1 0 0;
0 0 1;
0 0 1]
diagnostic = provide(Diagnostics{Gamma()}(),
input_data = input_data,reference = reference);
@test diagnostic["relative-error[#within-radius(1e-5)]"] == 3/9
@test diagnostic["absolute-error[#within-radius(1e-5)]"] == 3
@test diagnostic["relative-error[L1]"] == 5 /3
@test diagnostic["relative-error[L2]"] == 3/sqrt(3)
@test diagnostic["absolute-error[L1]"] == 5
@test diagnostic["absolute-error[L2]"] == 3
@test LinearAlgebra.norm(diagnostic["error-matrix"] - [0 0 2;0 2 1;0 0 0],2) <1e-5
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1619 | import Distributions
using MatrixCompletion
@testset "$(format("Estimator: MLE[construction]"))" begin
@test typeof(MLE{Gaussian}()) == MLE{Gaussian}
@test typeof(MLE(Gaussian())) == MLE{Gaussian}
@test typeof(MLE(:Gaussian)) == MLE{Gaussian}
end
@testset "$(format("Estimator: MLE[Gaussian]"))" begin
for i = 1:10
input_σ = rand() * 10
input_μ = rand() * 10
tc = rand(Distributions.Gaussian(input_μ,input_σ),1000)
out_1 = Distributions.fit_mle(Distributions.Gaussian,tc)
out_2 = estimator(MLE{Gaussian}(),tc)
@test out_1.μ == out_2[:μ] && out_1.σ == out_2[:σ]
end
end
@testset "$(format("Estimator: MLE[Gamma]"))" begin
for i = 1:10
input_α = rand() * 10
input_θ = rand() * 10
tc = rand(Distributions.Gamma(input_α,input_θ),10000)
out_1 = Distributions.fit_mle(Distributions.Gamma,tc)
out_2 = estimator(MLE{Gamma}(),tc)
@test out_1.α == out_2[:α] && out_1.θ == out_2[:θ]
end
end
@testset "$(format("Estimator: MLE[Poisson]"))" begin
for i = 1:10
input_λ = rand() * 10
tc = rand(Distributions.Poisson(input_λ),10000)
out_1 = sum(tc) / length(tc)
out_2 = estimator(MLE{Poisson}(),tc)
@test check(:l2diff,out_1,out_2[:λ]) < 0.5
end
end
@testset "$(format("Estimator: MLE[Bernoulli]"))" begin
for i = 1:10
input_p = rand()
tc = rand(Distributions.Bernoulli(input_p),10000)
out_1 = sum(tc) / length(tc)
out_2 = estimator(MLE{Bernoulli}(),tc)
@test check(:l2diff,out_1,out_2[:p]) < 0.5
end
end
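# The scalar closed forms these tests pin down (standard MLE results; the helper
# name and Dict-of-symbols shape are ours, mirroring what `estimator` returns):
import Statistics
function mle_sketch(family::Symbol, data)
    m = Statistics.mean(data)
    family == :Gaussian && return Dict(:μ => m, :σ => sqrt(Statistics.mean((data .- m) .^ 2)))
    family == :Poisson && return Dict(:λ => m)     # λ̂ is the sample mean
    family == :Bernoulli && return Dict(:p => m)   # p̂ is the sample mean
    error("Gamma has no closed-form MLE; the test above defers to Distributions.fit_mle")
end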
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 430 | using MatrixCompletion
import Distributions
@testset "$(format("Estimator: MOM[NegativeBinomial]]"))" begin
let
for i in 1:10
p_test = rand()
r_test = rand() * 20
data = rand(Distributions.NegativeBinomial(r_test, p_test), 1000 * 200)
tc = estimator(MOM{NegativeBinomial}(), data)
@test abs(tc[:p] - p_test) / p_test < 0.05
@test abs(tc[:r] - r_test) / r_test < 0.05
end
end
end
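# The method-of-moments solution behind the tolerances above, using the
# Distributions.jl parametrization NegativeBinomial(r, p) with mean r(1−p)/p and
# variance r(1−p)/p². Helper name and the Statistics import are ours:
import Statistics
function mom_negative_binomial_sketch(data)
    m, v = Statistics.mean(data), Statistics.var(data)
    p̂ = m / v             # p̂ = mean / variance
    r̂ = m^2 / (v - m)     # r̂ = mean² / (variance − mean); needs v > m
    return Dict(:p => p̂, :r => r̂)
end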
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 4690 | using MatrixCompletion
@testset "$(format("Exponential Family: forward_map[poisson]"))" begin
let
tc1 = rand(100)
@test check(:l2diff, forward_map(Poisson(),tc1),exp.(tc1),0)
@test check(:l2diff, forward_map(:Poisson,tc1),exp.(tc1),0)
tc2 = rand(100,100)
@test check(:l2diff, forward_map(Poisson(),tc2),exp.(tc2),0)
@test check(:l2diff, forward_map(:Poisson,tc2),exp.(tc2),0)
end
end
@testset "$(format("Exponential Family: forward_map[gamma]"))" begin
let
tc1 = rand(100)
@test check(:l2diff, forward_map(Gamma(),tc1),1 ./ tc1,0)
@test check(:l2diff, forward_map(:Gamma,tc1),1 ./ tc1,0)
tc2 = rand(100,100)
@test check(:l2diff, forward_map(Gamma(),tc2),1 ./ tc2,0)
@test check(:l2diff, forward_map(:Gamma,tc2),1 ./ tc2,0)
end
end
@testset "$(format("Exponential Family: forward_map[gaussian]"))" begin
let
tc1 = rand(100)
@test check(:l2diff, forward_map(Gaussian(),tc1),tc1,0)
@test check(:l2diff, forward_map(:Gaussian,tc1), tc1,0)
tc2 = rand(100,100)
@test check(:l2diff, forward_map(Gaussian(),tc2),tc2,0)
@test check(:l2diff, forward_map(:Gaussian,tc2),tc2,0)
end
end
@testset "$(format("Exponential Family: forward_map[bernoulli]"))" begin
let
logit = (x) -> log.(x./(1 .- x))
tc1 = rand(100)
tc1_logit = logit(tc1)
@test check(:l2diff, forward_map(Bernoulli(),tc1_logit),tc1,0)
@test check(:l2diff, forward_map(:Bernoulli,tc1_logit), tc1,0)
tc2 = rand(100,100)
tc2_logit = logit(tc2)
@test check(:l2diff, forward_map(Bernoulli(),tc2_logit),tc2,0)
@test check(:l2diff, forward_map(:Bernoulli,tc2_logit), tc2,0)
end
end
@testset "$(format("Exponential Family: predict[poisson]"))" begin
let
tc1 = rand(2:20,100)
log_tc1 = log.(tc1)
@test check(:l2diff, predict(Poisson(),forward_map(Poisson(),log_tc1)),tc1,0.0)
@test check(:l2diff, predict(:Poisson,forward_map(:Poisson,log_tc1)),tc1,0.0)
@test predict(:Poisson,[1,1];custom_prediction_function=1) == -1 #
tc2 = rand(2:20,100,100)
log_tc2 = log.(tc2)
@test check(:l2diff, predict(Poisson(),forward_map(Poisson(),log_tc2)),tc2,0.0)
@test check(:l2diff, predict(:Poisson,forward_map(:Poisson,log_tc2)),tc2,0.0)
@test predict(:Poisson,[1 1;1 1];custom_prediction_function=1) == -1 #
end
end
@testset "$(format("Exponential Family: predict[bernoulli]"))" begin
let
logit = (x) -> log.(x./(1 .- x))
tc1 = rand(100)
tc1_int = Int.(tc1 .> 0.5)
logit_tc1 = logit(tc1)
@test check(:l2diff, predict(Bernoulli(), forward_map(Bernoulli(),logit_tc1)), tc1_int, 0.0)
@test check(:l2diff, predict(:Bernoulli, forward_map(:Bernoulli, logit_tc1)), tc1_int, 0.0)
@test predict(:Bernoulli,[1,1];custom_prediction_function=1) == -1 #
tc2 = rand(100,100)
tc2_int = Int.(tc2 .> 0.5)
logit_tc2 = logit(tc2)
@test check(:l2diff, predict(Bernoulli(),forward_map(Bernoulli(),logit_tc2)),tc2_int,0.0)
@test check(:l2diff, predict(:Bernoulli,forward_map(:Bernoulli,logit_tc2)),tc2_int,0.0)
@test predict(:Bernoulli,[1 1;1 1];custom_prediction_function=1) == -1 #
end
end
@testset "$(format("Exponential Family: predict[gaussian]"))" begin
let
tc1 = rand(100)
@test check(:l2diff, predict(Gaussian(),forward_map(Gaussian(),tc1)),tc1,0.0)
@test check(:l2diff, predict(:Gaussian,forward_map(:Gaussian,tc1)),tc1,0.0)
@test predict(:Gaussian,[1,1];custom_prediction_function=1) == -1 #
tc2 = rand(100,100)
@test check(:l2diff, predict(Gaussian(),forward_map(Gaussian(),tc2)),tc2,0.0)
@test check(:l2diff, predict(:Gaussian,forward_map(:Gaussian,tc2)),tc2,0.0)
@test predict(:Gaussian,[1 1;1 1];custom_prediction_function=1) == -1 #
end
end
@testset "$(format("Exponential Family: predict[gaussian]"))" begin
let
tc1 = rand(100)
inv_tc1 = 1 ./ tc1
@test check(:l2diff, predict(Gamma(),forward_map(Gamma(),1 ./ tc1)),tc1,0.0)
@test check(:l2diff, predict(:Gamma,forward_map(:Gamma,1 ./ tc1)),tc1,0.0)
@test predict(:Gamma,[1,1];custom_prediction_function=1) == -1 #
tc2 = rand(100,100)
inv_tc2 = 1 ./ tc2
@test check(:l2diff, predict(Gamma(),forward_map(Gamma(),inv_tc2)),tc2,0.0)
@test check(:l2diff, predict(:Gamma,forward_map(:Gamma,inv_tc2)),tc2,0.0)
@test predict(:Gamma,[1 1;1 1];custom_prediction_function=1) == -1 #
end
end
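# The canonical inverse links pinned down by the tests above, collected in one
# sketch (the package's forward_map dispatches on the family type; this helper
# name is ours):
forward_map_sketch(family::Symbol, x) =
    family == :Poisson   ? exp.(x) :                  # log link
    family == :Gamma     ? 1 ./ x :                   # reciprocal link
    family == :Gaussian  ? x :                        # identity link
    family == :Bernoulli ? 1 ./ (1 .+ exp.(-x)) :     # logit link
    error("unsupported family")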
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 7465 | @testset "$(format("Loss: Gamma[construction]"))" begin
@test typeof(Loss{Gamma}(Gamma())) == Loss{Gamma}
@test typeof(Loss(Gamma())) == Loss{Gamma}
@test typeof(Loss(:Gamma)) == Loss{Gamma}
end
@testset "$(format("Optimizer: Gamma Loss [Small][Forgiving][Native][]"))" begin
@timeit to "Optimizer: Gamma Loss [Small][Forgiving][Native]" begin
let
is_admissible(result) = begin
if result["relative-error[L2]"] < 1000
return true
end
@warn @sprintf("expected %f, got %f",0.1,result["relative-error[L2]"])
return false
end
@test !isnothing(Loss(Gamma()))
@test !isnothing(provide(Loss(:Gamma)))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(Gamma()),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 1000,
ρ = 0,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(Gamma()),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 3000,
ρ = 0,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(Gamma()),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 5000,
ρ = 0,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(Gamma()),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 1000,
ρ = 0.1,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(Gamma()),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 3000,
ρ = 0.1,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(Gamma()) ,
input_distribution = Distributions.Gamma(5,0.5),
input_size = 5000,
ρ = 0.2,
step_size = 0.005,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(:Gamma),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 1000,
ρ = 0,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(:Gamma),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 3000,
ρ = 0,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(:Gamma),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 5000,
ρ = 0,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(:Gamma),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 1000,
ρ = 0.1,
step_size = 0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(:Gamma),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 3000,
ρ = 0.1,
step_size =0.005,
max_iter = 200))
@test is_admissible(unit_test_train_subloss(Gamma(),gradient_eval = Loss(:Gamma),
input_distribution = Distributions.Gamma(5,0.5),
input_size = 5000,
ρ = 0.2,
step_size = 0.005,
max_iter = 200))
end
end
end
@warn "Autograd version of the gamma case is yet to be implemented due to numerical instability (forward pass goes into complex domain...)"
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 3399 | using MatrixCompletion
@testset "$(format("Tracker: IndexTracker[convert]"))" begin
let
data = [1, 2, 3, 4]
tc = convert(Array{<:CartesianIndex}, data)
@test typeof(tc) == Array{CartesianIndex{1}, 1}
end
let
data = rand(5, 5)
tc = convert(Array{<:CartesianIndex}, findall(x -> x < 0.5 , data))
@test typeof(tc) == Array{CartesianIndex{2}, 1}
end
end
@testset "$(format("Tracker: IndexTracker[data stream initialization]"))" begin
let
data1 = [:Gaussian :Bernoulli;
:Gaussian :Bernoulli]
data2 = [:Observed :Observed;
:Missing :Missing]
tc = IndexTracker{Symbol}(data1, data2)
end
let
data1 = [:Gaussian :Bernoulli;
:Gaussian :Bernoulli]
data2 = [:Observed :Observed :Observed;
:Missing :Missing :Missing]
@test_throws DimensionMismatch tc = IndexTracker{Symbol}(data1, data2)
end
end
@testset "$(format("Tracker: IndexTracker[disjoint join]"))" begin
let
tc = IndexTracker{Symbol}([:a, :b])
@test_throws DimensionMismatch disjoint_join(tc, [:a])
@test_throws MethodError disjoint_join(tc, [:a, :c])
end
let
tc = IndexTracker{Symbol}([:a :b; :c :d])
end
end
@testset "$(format("Tracker: IndexTracker[getindex]"))" begin
let
data = [:a,:b]
tc = IndexTracker{Symbol}(data)
@test data[tc[:a]] == data[findall(x -> x == :a, data)]
end
let
data = [:a :b;
:c :d]
tc = IndexTracker{Symbol}(data)
@test data[tc[:a]] == data[findall(x -> x == :a, data)]
@test data[tc[:b]] == data[findall(x -> x == :b, data)]
@test data[tc[:c]] == data[findall(x -> x == :c, data)]
@test data[tc[:d]] == data[findall(x -> x == :d, data)]
end
end
@testset "$(format("Tracker: IndexTracker[symbol]"))" begin
let
tc1 = IndexTracker{Symbol}([:a, :b])
@test typeof(tc1) == IndexTracker{Symbol}
@test size(tc1) == (2, )
@test tc1.dimension == (2, )
end
end
@testset "$(format("Tracker: IndexTracker[groupby]"))" begin
let
data = Array{Symbol}(undef, 20, 20)
data[:, 1:10] .= :Gaussian
data[:, 11:20] .= :Binomial
data2 = Array{Symbol}(undef, 20, 20)
data2[:, 1:5] .= :Observed
data2[:, 10:15] .= :Observed
data2[:, 6:10] .= :Missing
data2[:, 16:20] .= :Missing
tc = IndexTracker{Symbol}(data)
# @show(size(Iterators.flatten(data[:, 1:10])))
@test tc[:Gaussian] == findall(x -> x == :Gaussian, data)
@test tc[:Binomial] == findall(x -> x == :Binomial, data)
disjoint_join(tc, data2)
@test tc[:Observed] == findall(x -> x == :Observed, data2)
@test tc[:Missing] == findall(x -> x == :Missing, data2)
tc2 = groupby(tc, [:Observed, :Missing])
@test tc2[:Gaussian][:Observed] == intersect(findall(x -> x == :Gaussian, data),
findall(x -> x == :Observed, data2))
@test tc2[:Gaussian][:Missing] == intersect(findall(x -> x == :Gaussian, data),
findall(x -> x == :Missing, data2))
@test tc2[:Binomial][:Observed] == intersect(findall(x -> x == :Binomial, data),
findall(x -> x == :Observed, data2))
@test tc2[:Binomial][:Missing] == intersect(findall(x -> x == :Binomial, data),
findall(x -> x == :Missing, data2))
end
end
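# The groupby semantics asserted above reduce to pairwise intersections of the
# two index sets; a standalone sketch (Dict-of-Dicts shape taken from the tests,
# helper name ours):
function groupby_sketch(primary::AbstractArray{Symbol}, secondary::AbstractArray{Symbol})
    out = Dict{Symbol,Dict{Symbol,Any}}()
    for p in unique(primary), s in unique(secondary)
        inner = get!(out, p, Dict{Symbol,Any}())
        inner[s] = intersect(findall(==(p), primary), findall(==(s), secondary))
    end
    return out
end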
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 841 | using MatrixCompletion.MathLib
import LinearAlgebra
function create_symmetric_matrix(n)
a = rand(n,n)*5
return a+a'
end
function correct_output_sparseeigen(input,k)
eigen_dcp = LinearAlgebra.eigen(input);
eigen_val = eigen_dcp.values;
eigen_vec = eigen_dcp.vectors;
first_k_idx = Base.sortperm(eigen_val,rev=true)[1:k];
return eigen_val[first_k_idx],eigen_vec[:,first_k_idx];
end
@testset "$(format("Math Library: Projection[SemidefiniteCone]"))" begin
let
for i in 1:50
data = create_symmetric_matrix(100)
rk = rand(2:30)
tc = project(SemidefiniteCone(rank = rk), data)
λ0, X0 = correct_output_sparseeigen(data, rk)
tc_comp = X0 * LinearAlgebra.Diagonal(λ0) * X0'
@test LinearAlgebra.norm(tc_comp - tc)^2 / LinearAlgebra.norm(tc_comp) ^ 2 < 0.02
end
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2186 | import LinearAlgebra
@testset "$(format("Misc: check[:rank]"))" begin
let
tc = ones(5,5)
@test check(Val{:rank},tc,1)
@test check(:rank,tc,1)
@test check(:rank,tc) == 1
@test check(:rank,tc,2) == false
end
end
@testset "$(format("Misc: check[:dimension]"))" begin
let
tc = rand(5,5)
@test check(:dimension,tc) == (5,5)
@test check(:dimension,tc,(5,5)) == true
@test check(:dimension,tc,(4,5)) == false
end
end
@testset "$(format("Misc: check[:l2difference]"))" begin
# number case
let
for i = 1:5
tc_a = rand() * 10
tc_b = rand() * 10
@test abs(check(:l2difference,tc_a,tc_b) - LinearAlgebra.norm(tc_a-tc_b,2)) < 1e-5
@test check(:l2difference,tc_a,tc_b,LinearAlgebra.norm(tc_a-tc_b,2))
@test abs(check(:l2diff,tc_a,tc_b) - LinearAlgebra.norm(tc_a-tc_b,2)) < 1e-5
@test check(:l2diff,tc_a,tc_b,LinearAlgebra.norm(tc_a-tc_b,2))
end
end
# vector case
let
for i = 1:5
tc_a = rand(10) .* 10
tc_b = rand(10) .* 10
@test abs(check(:l2difference,tc_a,tc_b) - LinearAlgebra.norm(tc_a-tc_b,2)) < 1e-5
@test check(:l2difference,tc_a,tc_b,LinearAlgebra.norm(tc_a-tc_b,2))
@test abs(check(:l2diff,tc_a,tc_b) - LinearAlgebra.norm(tc_a-tc_b,2)) < 1e-5
@test check(:l2diff,tc_a,tc_b,LinearAlgebra.norm(tc_a-tc_b,2))
end
end
# matrix case
let
for i = 1:5
tc_a = rand(10,10) .* 10
tc_b = rand(10,10) .* 10
@test abs(check(:l2difference,tc_a,tc_b) - LinearAlgebra.norm(tc_a-tc_b,2)) < 1e-5
@test check(:l2difference,tc_a,tc_b,LinearAlgebra.norm(tc_a-tc_b,2))
@test abs(check(:l2diff,tc_a,tc_b) - LinearAlgebra.norm(tc_a-tc_b,2)) < 1e-5
@test check(:l2diff,tc_a,tc_b,LinearAlgebra.norm(tc_a-tc_b,2))
end
end
end
@testset "$(format("Misc: zeros"))" begin
let
# matrix case
tc = rand(100,100)
@test check(:l2diff,zeros(tc),zeros(100,100)) < 1e-3
end
end
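# The zeros overload exercised above plausibly reduces to shape-and-eltype
# dispatch; a sketch under that assumption (our own name, not the package's):
zeros_like_sketch(A::AbstractArray) = zeros(eltype(A), size(A))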
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 5784 | import Distributions
# @testset "$(format("Model Fitting: choose[Gaussian v.s. Poisson]"))" begin
# let
# total_passed = 0
# for i = 1:1000
# sample = rand(Distributions.Gamma(rand(1:100),rand(1:100)),5000)
# if choose(Gaussian(),Gamma(),data=sample) == :Gamma
# total_passed = total_passed + 1
# end
# end
# @test total_passed/1000.0 > 0.9
# @info @sprintf("[Success rate][Gamma[α∈(1,100),θ∈(1,100)](true) v.s. Gaussian]: %f\n",total_passed/1000.0)
# end
# let
# total_passed = 0
# for i = 1:1000
# sample = rand(Distributions.Gamma(rand(1:10),rand(1:10)),5000)
# if choose(Gaussian(),Gamma(),data=sample,
# comp=Comparator{MGF}(MGF(),eval_at = collect(0.01:0.001:0.02))) == :Gamma
# total_passed = total_passed + 1
# end
# end
# @test total_passed/1000 > 0.9
# @info @sprintf("[Success rate][Gamma[α∈(1,10),θ∈(1,10)](true) v.s. Gaussian]: %f\n",total_passed/1000.0)
# end
# let
# total_passed = 0
# for i = 1:1000
# sample = rand(Distributions.Gamma(rand(1:10),rand(1:10)),5000)
# if choose(Gaussian(),Gamma(),data=sample,
# comp=Comparator{MGF}(MGF())) == :Gamma
# total_passed = total_passed + 1
# end
# end
# @test total_passed/1000 > 0.9
# @info @sprintf("[Success rate][Gamma[α∈(1,10),θ∈(1,10)](true) v.s. Gaussian]: %f\n",total_passed/1000.0)
# end
# let
# total_passed = 0
# for i = 1:1000
# sample = rand(Distributions.Gamma(rand(1:100),rand(1:100)),5000)
# if choose(Gaussian(),Gamma(),data=sample,
# comp=Comparator{MGF}(MGF())) == :Gamma
# total_passed = total_passed + 1
# end
# end
# @test total_passed/1000 > 0.9
# @info @sprintf("[Success rate][Gamma[α∈(1,100),θ∈(1,100)](true) v.s. Gaussian]: %f\n",total_passed/1000.0)
# end
# let
# total_passed = 0
# for i = 1:1000
# sample = rand(Distributions.Gaussian(rand(50:100),rand(1:5)),5000)
# if choose(Gaussian(),Gamma(),data=sample,
# comp=Comparator{MGF}(MGF())) == :Gaussian
# total_passed = total_passed + 1
# end
# end
# @test total_passed/1000 > 0.9
# @info @sprintf("[Success rate][Gamma[α∈(1,100),θ∈(1,100)] v.s. Gaussian(true)]: %f\n",total_passed/1000.0)
# end
# end
@testset "$(format("Model Fitting: check[continuous/integral]"))" begin
# test the continuous case
[let
tc1 = rand(100) * 100
@test check(:continuous,tc1) == true
@test check(:integral,tc1) == false
tc2 = rand(100,100) * 100
@test check(:continuous,tc2) == true
@test check(:integral,tc2) == false
end for i = 1:5]
# test the integral case
[let
tc1 = rand(1:10,100)
@test check(:continuous,tc1) == false
@test check(:integral,tc1) == true
tc2 = rand(1:10,100,100)
@test check(:continuous,tc2) == false
@test check(:integral,tc2) == true
end for i = 1:5]
end
@testset "$(format("Model Fitting: check[Bernoulli]"))" begin
[let
tc1 = rand(0:1,100)
@test check(:Bernoulli,tc1) == true
@test check(:Bernoulli,tc1 .* 1.0) == true
@test check(:Bernoulli,tc1 .* 1.5) == false
tc2 = rand(0:1,100,100)
@test check(:Bernoulli,tc2) == true
@test check(:Bernoulli,tc2 .* 1.0) == true
@test check(:Bernoulli,tc2 .* 1.5) == false
end for i = 1:5]
end
@testset "$(format("Model Fitting: check[Poisson/NB]"))" begin
let
tc1 = rand(Distributions.Poisson(10),100)
@test choose(:Poisson,:NegativeBinomial;data = tc1) == :Poisson
@test choose(Poisson,NegativeBinomial;data=tc1) == :Poisson
@test choose(Poisson(),NegativeBinomial();data = tc1) == :Poisson
end
end
@testset "$(format("Model Fitting: check[support]"))" begin
let
tc = collect(1:10)
@test check(:support,tc,layout=:flatten) == (1,10)
@test check(:support,collect(-5:5),layout=:flatten) == (-5,5)
end
end
@testset "$(format("Model Fitting: check[Gamma/Gaussian]"))" begin
[let
tc_vec = rand(Distributions.Gaussian(rand()*100,rand()*10),10000)
@test choose(Gaussian,Gamma,data= tc_vec) == :Gaussian
@test choose(Gaussian(),Gamma(),data= tc_vec) == :Gaussian
@test choose(:Gaussian,:Gamma,data= tc_vec) == :Gaussian
tc_mat = rand(Distributions.Gaussian(rand()*100,rand()*10),1000,1000)
@test choose(Gaussian,Gamma,data= tc_mat) == :Gaussian
@test choose(Gaussian(),Gamma(),data= tc_mat) == :Gaussian
@test choose(:Gaussian,:Gamma,data= tc_mat) == :Gaussian
end for i =1:5]
[let
tc_vec = rand(Distributions.Gamma(rand()*10,rand()*10),10000)
@test choose(Gaussian,Gamma,data= tc_vec) == :Gamma
@test choose(Gaussian(),Gamma(),data= tc_vec) == :Gamma
@test choose(:Gaussian,:Gamma,data= tc_vec) == :Gamma
tc_mat = rand(Distributions.Gamma(rand()*10,rand()*10),1000,1000)
@test choose(Gaussian,Gamma,data= tc_mat) == :Gamma
@test choose(Gaussian(),Gamma(),data= tc_mat) == :Gamma
@test choose(:Gaussian,:Gamma,data= tc_mat) == :Gamma
tc_mat = rand(Distributions.Gamma(rand()*100,rand()*100),1000,1000)
@test choose(Gaussian,Gamma,data= tc_mat) == :Gamma
@test choose(Gaussian(),Gamma(),data= tc_mat) == :Gamma
@test choose(:Gaussian,:Gamma,data= tc_mat) == :Gamma
end for i=1:5]
end
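# A sketch of the selection rule the (commented-out) MGF tests above describe and
# the Gamma/Gaussian checks exercise: fit each candidate family by MLE and pick
# the one whose fitted MGF is closest to the empirical MGF on a small grid. All
# names here are ours, and the package's `choose` may differ in detail.
import LinearAlgebra
function choose_sketch(data; eval_at = collect(0.01:0.001:0.02))
    empirical = [sum(exp.(t .* data)) / length(data) for t in eval_at]
    dist_to(D) = LinearAlgebra.norm(Distributions.mgf.(Distributions.fit_mle(D, data), eval_at) .- empirical)
    # assumes positive data when fitting Gamma
    return dist_to(Distributions.Normal) <= dist_to(Distributions.Gamma) ? :Gaussian : :Gamma
end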
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1111 | using MatrixCompletion
import Distributions
@testset "$(format("GD Optimizer: Negative Binomial Loss [Small][Forgiving][Native]"))" begin
let
input_size = 1600*400
y = rand(Distributions.NegativeBinomial(10, 0.6), input_size) * 1.0
mle_x = MatrixCompletion.Losses.negative_binomial_train(fx = rand(input_size),
y = y,
c = zeros(input_size),
ρ = 0,
γ = 0.2,
iter = 200,
verbose = true,
r_estimate = 10)
@show(mle_x[1:20])
prediction = predict(NegativeBinomial(),forward_map(NegativeBinomial(),mle_x,r_estimate=10))
tc = provide(Diagnostics{Poisson()}(),
input_data=prediction, reference=y)
display(tc)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 13924 | @testset "$(format("Loss: Poisson[construction]"))" begin
@test typeof(Loss{Poisson}(Poisson())) == Loss{Poisson}
@test typeof(Loss(Poisson())) == Loss{Poisson}
@test typeof(Loss(:Poisson)) == Loss{Poisson}
end
@testset "$(format("Optimizer: Poisson Loss [Small][Forgiving][AutoGrad]"))" begin
@timeit to "Optimizer: Poisson Loss [Small][Forgiving][AutoGrad]" begin
let
is_admissible(result) = begin
if result["relative-error[#within-radius(1e-5)]"] < 0.1 || result["relative-error[#within-radius(1)]"] <0.1
return true
end
@warn @sprintf("expected %f, got %f",0.1,result["relative-error[#within-radius(1e-5)]"])
return false
end
@test !isnothing(provide(Loss(Poisson())))
@test !isnothing(provide(Loss(:Poisson)))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(Poisson())),
input_distribution = Distributions.Poisson(5),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(Poisson())),
input_distribution = Distributions.Poisson(5),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(Poisson())),
input_distribution = Distributions.Poisson(5),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(Poisson())),
input_distribution = Distributions.Poisson(5),
input_size = 1000,
ρ = 0.1,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(Poisson())),
input_distribution = Distributions.Poisson(5),
input_size = 3000,
ρ = 0.1,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(Poisson())),
input_distribution = Distributions.Poisson(5),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(:Poisson)),
input_distribution = Distributions.Poisson(5),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(:Poisson)),
input_distribution = Distributions.Poisson(5),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(:Poisson)),
input_distribution = Distributions.Poisson(5),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(:Poisson)),
input_distribution = Distributions.Poisson(5),
input_size = 1000,
ρ = 0.1,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(:Poisson)),
input_distribution = Distributions.Poisson(5),
input_size = 3000,
ρ = 0.1,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = provide(Loss(:Poisson)),
input_distribution = Distributions.Poisson(5),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
end
end
end
@testset "$(format("Optimizer: Poisson Loss [Small][Forgiving][Native]"))" begin
@timeit to "Optimizer: Poisson Loss [Small][Forgiving][Native]" begin
is_admissible(result) = begin
if result["relative-error[#within-radius(1e-5)]"] < 0.1 || result["relative-error[#within-radius(1)]"] <0.1
return true
end
@warn @sprintf("expected %f, got %f",0.1,result["relative-error[#within-radius(1e-5)]"])
return false
end
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(Poisson()),
input_distribution = Distributions.Poisson(5),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(Poisson()),
input_distribution = Distributions.Poisson(5),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(Poisson()),
input_distribution = Distributions.Poisson(5),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(Poisson()),
input_distribution = Distributions.Poisson(5),
input_size = 1000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(Poisson()),
input_distribution = Distributions.Poisson(5),
input_size = 3000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(Poisson()),
input_distribution = Distributions.Poisson(5),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(:Poisson),
input_distribution = Distributions.Poisson(5),
input_size = 1000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(:Poisson),
input_distribution = Distributions.Poisson(5),
input_size = 3000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(:Poisson),
input_distribution = Distributions.Poisson(5),
input_size = 5000,
ρ = 0,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(:Poisson),
input_distribution = Distributions.Poisson(5),
input_size = 1000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(:Poisson),
input_distribution = Distributions.Poisson(5),
input_size = 3000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
@test is_admissible(unit_test_train_subloss(gradient_eval = Loss(:Poisson),
input_distribution = Distributions.Poisson(5),
input_size = 5000,
ρ = 0.2,
step_size = 0.1,
max_iter = 100))
end
end
#include("test_impl_poissonloss.jl")
#include("test_impl_gammaloss.jl")
@label END_OF_TEST
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 423 | using MatrixCompletion.Utilities.PrettyPrinter
using Printf
@testset "$(format("PrettyPrinter: Tables Header"))" begin
let
header_list = ["Iter", "R(primal)", " R(dual)", "ℒ(Gaussian)", "ℒ(Bernoulli)", "ℒ(Poisson)", "ℒ(Gamma)", "λ‖diag(Z)‖ᵢ", " μ⟨I, X⟩", " ‖Z₁₂‖ᵢ "]
row = map(x -> @sprintf("%3.2e", x), rand(10))
row[1] = "100"
table_header(header_list)
add_row(header_list, data = row)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2959 | import Distributions
import LinearAlgebra
const VISUAL_RANDOM_STRUCTURE = true
@testset "$(format("Random Structure: FixedRankMatrix[Constructor]"))" begin
# test data type
@test isa(FixedRankMatrix{Distributions.Poisson},DataType) == true
@test isa(FixedRankMatrix{Distributions.Poisson}(),DataType) == false
# test default constructor
@test FixedRankMatrix{Distributions.Poisson}(Distributions.Poisson();rank=4).rank==4
# test shorthand constructor
@test FixedRankMatrix(Distributions.Poisson(5),rank=2).rank == 2
@test typeof(FixedRankMatrix(Distributions.Poisson(5),rank=2).dist) == Distributions.Poisson{Float64}
# test mixed distribution
end
@testset "$(format("Random Structure: FixedRankMatrix[overload: Base.rand]"))" begin
let
tc1 = rand(FixedRankMatrix(Distributions.Gaussian(0,1),rank =3),
10,10)
@test_logs (:warn,"Rank is not specified. Using formula: rank= ⌊0.3 * (row ∧ col)⌋") rand(FixedRankMatrix(Distributions.Gaussian(0,1)), 100,30)
tc2 = rand(FixedRankMatrix(Distributions.Gaussian(0,1)),
100,30)
@test check(:rank,tc1,3)
@test check(:rank,tc2,9)
tc3 = rand([(FixedRankMatrix(Distributions.Gaussian(0,1), rank=2),10,3),
(FixedRankMatrix(Distributions.Poisson(5), rank=2),10,3),
(FixedRankMatrix(Distributions.Bernoulli(0.5), rank=2),10,4)])
@test LinearAlgebra.rank(tc3) == 6
end
end
@testset "$(format("Random Structure: FixedRankMatrix[overload: Concepts.provide]"))" begin
# test provide interface
let
tc = provide(FixedRankMatrix(Distributions.Gaussian(0,1),rank=5),
row = 10,col=10)
@test check(:dimension,tc,(10,10))
@test check(:rank,tc,5)
end
end
@testset "$(format("Random Structure: GaussianMatrix"))" begin
let
tc = GaussianMatrix(20,20,rank=10,μ=0.0,σ=1.0)
@test check(:dimension,tc,(20,20))
@test check(:rank,tc,10)
# test optional rank parameter
tc1 = GaussianMatrix(20,20;μ=0,σ=1)
@test check(:dimension,tc1,(20,20))
@test check(:rank,tc1,20)
end
end
@testset "$(format("Random Structure: PoissonMatrix"))" begin
let
tc = PoissonMatrix(20,20,rank=10,λ=3)
@test check(:dimension,tc,(20,20))
@test check(:rank,tc,10)
# test optional rank parameter
tc1 = PoissonMatrix(20,20;λ=3)
@test check(:dimension,tc1,(20,20))
@test check(:rank,tc1,20)
end
end
@testset "$(format("Random Structure: BernoulliMatrix"))" begin
let
tc = BernoulliMatrix(20,20,rank=10,p=0.5)
@test check(:dimension,tc,(20,20))
@test check(:rank,tc,10)
# test optional rank parameter
tc1 = BernoulliMatrix(20,20;p=0.5)
@test check(:dimension,tc1,(20,20))
@test check(:rank,tc1,20)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2216 | using MatrixCompletion
@testset "$(format("Sampling: BernoulliModel[VecOrMat]"))" begin
sample_bernoulli_model0 = Sampler(BernoulliModel(0.8))
tc1 = sample_bernoulli_model0.draw(ones(5,5))
@test isa(tc1, Array{MaybeMissing{Float64},2}) || isa(tc1, Array{Float64,2})
tc2 = sample_bernoulli_model0.draw([1,2,3,4,5])
@test isa(tc2, Array{MaybeMissing{Int64},1}) || isa(tc2,Array{Int64})
sample_bernoulli_model1= provide(Sampler{BernoulliModel}(),rate = 0.8)
tc3 = sample_bernoulli_model1.draw(ones(5,5))
@test isa(tc3, Array{MaybeMissing{Float64},2}) || isa(tc3, Array{Float64,2})
tc4 = sample_bernoulli_model1.draw([1,2,3,4,5])
@test isa(tc4, Array{MaybeMissing{Int64},1}) || isa(tc4,Array{Int64})
end
@testset "$(format("Sampling: UniformModel[VecOrMat]"))" begin
#==================== Vector Case ====================#
let
tc1 = rand(10000)
output = Sampler(UniformModel(0.5)).draw(tc1)
@test count(x->ismissing(x),output) > 1000
@test count(x->!ismissing(x),output) > 1000
# stronger
tc2 = rand(10000)
output = Sampler(UniformModel(0.1)).draw(tc2)
@test count(x->ismissing(x),output) >1000
@test count(x->!ismissing(x),output) <1000
end
#==================== Matrix Case ====================#
let
@test 4 <= count(x -> !ismissing(x),Sampler(UniformModel(0.1)).draw(ones(10,10))) <= 10
@test 10 < count(x -> ismissing(x), Sampler(UniformModel(0.1)).draw(ones(10,10)))
end
#==================== Factory Mode ====================#
let
sampler = provide(Sampler{UniformModel}(),rate = 0.5)
tc1 = rand(10000)
output = sampler.draw(tc1)
@test count(x->ismissing(x),output) > 1000
@test count(x->!ismissing(x),output) > 1000
# stronger
tc2 = rand(10000)
sampler2 = provide(Sampler{UniformModel}(),rate = 0.1)
output = sampler2.draw(tc2)
@test count(x->ismissing(x),output) >1000
@test count(x->!ismissing(x),output) <=1000
@test 4 <= count(x -> !ismissing(x),sampler2.draw(ones(10,10))) <= 10
@test 10 < count(x -> ismissing(x),sampler2.draw(ones(10,10)))
end
end
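# Both sampling models above act as independent entrywise masks: keep each entry
# with probability `rate`, replace it with `missing` otherwise. A sketch with our
# own name (assuming MaybeMissing{T} is Union{Missing,T} in the package):
import Distributions
function bernoulli_mask_sketch(A::AbstractArray{T}; rate = 0.8) where {T}
    keep = rand(Distributions.Bernoulli(rate), size(A))   # Bool mask
    out = Array{Union{Missing,T}}(missing, size(A))
    out[keep] = A[keep]                                   # copy only the kept entries
    return out
end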
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 1079 | using MatrixCompletion
import Distributions
@testset "$(format("SGD Optimizer: Bernoulli Loss [Small][Forgiving][Native]"))" begin
let
input_size = 500
y = rand(Distributions.Bernoulli(0.6), input_size) * 1.0
mle_x = MatrixCompletion.Losses.sgd_train(Loss{Bernoulli}(),
fx = rand(input_size),
y = y,
c = zeros(input_size),
ρ = 0,
α = 0.2,
ρ₁ = 0.9,
ρ₂ = 0.999,
batch_size = 500,
epoch = 10);
prediction = predict(Bernoulli(),forward_map(Bernoulli(),mle_x))
tc = provide(Diagnostics{Poisson()}(),
input_data=prediction, reference=y)
display(tc)
end
end
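# The hyperparameters above (step size α, ρ₁ = 0.9, ρ₂ = 0.999) match the usual
# Adam defaults, so sgd_train presumably runs an Adam-style update; a one-step
# sketch of that rule under this assumption (helper name ours):
function adam_step!(x, g, m, v, t; α = 0.2, ρ₁ = 0.9, ρ₂ = 0.999, ϵ = 1e-8)
    @. m = ρ₁ * m + (1 - ρ₁) * g        # first-moment estimate
    @. v = ρ₂ * v + (1 - ρ₂) * g^2      # second-moment estimate
    m̂ = m ./ (1 - ρ₁^t)                 # bias corrections
    v̂ = v ./ (1 - ρ₂^t)
    @. x -= α * m̂ / (sqrt(v̂) + ϵ)
    return x
end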
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |
|
[
"MIT"
] | 0.1.0 | 50e804aa999e452ce4bf5edc50493838bd744e09 | code | 2095 |
import LinearAlgebra
function relative_error_l2(a, b)
return LinearAlgebra.norm(a - b)^2 / LinearAlgebra.norm(b)^2
end
# @testset "$(format("SGD Optimizer: Bernoulli Loss [Small][Forgiving][Native]"))" begin
# let
# input_size = 1600* 400
# y = rand(Distributions.Gamma(10, 2), input_size) * 1.0
# mle_x = MatrixCompletion.Losses.sgd_train(Loss{Gamma}(),
# fx = y,
# y = y,
# c = zeros(input_size),
# ρ = 0.2,
# α = 0.2,
# ρ₁ = 0.9,
# ρ₂ = 0.999,
# batch_size = 1600 * 400,
# epoch = 20);
# prediction = predict(Gamma(),forward_map(Gamma(),mle_x))
# # @show(relative_error_l2(prediction, y))
# tc = provide(Diagnostics{Poisson()}(),
# input_data=prediction, reference=y)
# display(tc)
# end
# end
@testset "$(format("GD Optimizer: Gamma Loss [Small][Forgiving][Native]"))" begin
let
input_size = 200 * 200
y = rand(Distributions.Gamma(10, 2), input_size) * 1.0
mle_x = MatrixCompletion.Losses.train(Loss{Gamma}(),
fx = rand(input_size),
y = y,
c = zeros(input_size),
ρ = 0,
γ = 0.2,
iter = 500,
verbose = true)
@show(mle_x[1:20])
prediction = predict(Gamma(),forward_map(Gamma(),mle_x))
tc = provide(Diagnostics{Poisson()}(),
input_data=prediction, reference=y)
display(tc)
end
end
| MatrixCompletion | https://github.com/jasonsun0310/MatrixCompletion.jl.git |