repo (stringlengths 2–91) | file (stringlengths 14–211) | code (stringlengths 0–18.3M) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–1.36M) | extension_type (stringclasses 1 value) |
---|---|---|---|---|---|---|
kge_ecotox_regression | kge_ecotox_regression-main/main.py |
"""
TODO:
- Train embedding model.
- Apply embeddings to data.
- Encode data.
- Train, validate, and test the model.
"""
from autoencoder import create_auto_encoder
from model import create_model, CorrelelatedFeatures, ApproxKerasSVM, coeff_determination
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
from random import shuffle
from collections import defaultdict
import tensorflow as tf
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, LinearRegression, HuberRegressor, BayesianRidge
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, VotingRegressor, BaggingRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.compose import TransformedTargetRegressor
from sklearn.preprocessing import QuantileTransformer, RobustScaler
from sklearn.tree import DecisionTreeRegressor
from itertools import product
from random import choice, choices
from sklearn.pipeline import Pipeline
from tqdm import tqdm
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA,FastICA
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import RFE
from sklearn.metrics import r2_score
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_selection import VarianceThreshold
from sklearn.dummy import DummyRegressor
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score, LeaveOneOut
MAX_ENCODER_EPOCHS = 1000
MAX_EPOCHS = 1000
EPSILON = 1e-10
MODEL = 'ComplEx'
hidden_dim = (128,)
SEED = 42
np.random.seed(SEED)
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
import warnings
warnings.filterwarnings('ignore')
def load_fingerprints(filename):
df = pd.read_csv(filename,index_col='chemical')
l = len(df.iloc[0]['fingerprint'])
out = {}
for c in df.index:
fp = df.loc[c]['fingerprint']
v = [int(f) for f in fp]
out[c] = np.asarray(v)
return out
def load_features(filename):
df = pd.read_csv(filename,index_col='chemical')
df = df.dropna()
columns = df.columns
out = {}
for c in df.index:
v = [df.loc[c][col] for col in columns]
out[c] = np.asarray(v)
return out
def load_one_hot(entities):
all_entities = list(set(entities))
out = {}
for e in entities:
v = np.zeros((len(all_entities),))
v[all_entities.index(e)] = 1
out[e] = np.asarray(v)
return out
def load_embeddings(filename,filename_ids):
df = np.load(filename)
ids = dict(np.load(filename_ids))
return {k:df[int(ids[k])] for k in ids}
def load_data(filename,filter_chemicals=None, filter_species=None):
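    # Load experiment rows, optionally filter chemicals/species, drop studies longer than
    # 14 days (24*14 hours), average duplicate (chemical, species) pairs, and return
    # log-concentration targets plus standardised study-duration features per pair.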
df = pd.read_csv(filename)
X,y = [],[]
if filter_chemicals:
to_drop = set(df.chemical) - filter_chemicals
for c in to_drop:
df = df.drop(df[df.chemical == c].index)
if filter_species:
to_drop = set(df.species) - filter_species
for s in to_drop:
df = df.drop(df[df.species == s].index)
df = df.drop(df[df.study_duration > 24*14].index)
df = df.groupby(['chemical','species'],as_index=False).mean()
X = list(zip(df['chemical'],df['species']))
y = np.log(df.concentration+EPSILON)
tmp = np.asarray(df.study_duration).reshape((-1,1))
mms = StandardScaler()
tmp = mms.fit_transform(tmp)
experimental_features = dict(zip(X,tmp.reshape(-1,1)))
y = np.asarray(y).reshape((-1,1))
#y = MinMaxScaler().fit_transform(y)
return X, y, experimental_features
def data_split(X,Y,restrictions=None,method = 1, variant = 1, prop=0.33):
"""
C_x - chemical set
S_x - species set
t,v - training,validation
1. C_t \cap C_v == Ø and S_t \cap S_v != Ø,
2. C_t \cap C_v == Ø and S_t \cap S_v == Ø,
3. C_t \cap C_v != Ø and S_t \cap S_v != Ø,
4. C_t \cap C_v != Ø and S_t \cap S_v == Ø,
Variants where C_t \cap C_v != Ø (same for S_x):
1. C_t == C_v
2. |C_t \cap C_v| < |C_t \cup C_v|
Restrictions:
    Restriction of a set, e.g. s_1 \in S_v and |S_v|=1: {'S_v':{'content':{s_1},'max_len':1}}
"""
C_t,C_v,S_t,S_v=map(set,[[]]*4)
    restrictions = {**{'C_t':{},'C_v':{},'S_t':{},'S_v':{}}, **(restrictions or {})}
def filter_restrictions(C_t,C_v,S_t,S_v):
for _set,_inv_set,k in zip([C_t,C_v,S_t,S_v],[C_v,C_t,S_v,S_t],['C_t','C_v','S_t','S_v']):
if k in restrictions:
if 'content' in restrictions[k]:
_set |= restrictions[k]['content']
if 'not content' in restrictions[k]:
_set -= restrictions[k]['not content']
if 'max_len' in restrictions[k]:
while restrictions[k]['max_len'] < len(_set):
entity = choice(list(_set))
if not ('content' in restrictions[k] and entity in restrictions[k]['content']):
_set.remove(entity)
return C_t,C_v,S_t,S_v
def check_restrictions(C_t,C_v,S_t,S_v):
for _set,k,inv_k in zip([C_t,C_v,S_t,S_v],['C_t','C_v','S_t','S_v'],['C_v','C_t','S_v','S_t']):
if k in restrictions:
if 'content' in restrictions[k] and 'not content' in restrictions[k]:
try:
assert len(restrictions[k]['content'].intersection(restrictions[k]['not content'])) < 1
except AssertionError:
raise AssertionError('Set %s content conflict.' % k)
if 'content' in restrictions[k] and 'max_len' in restrictions[k]:
try:
assert len(restrictions[k]['content']) <= restrictions[k]['max_len']
except AssertionError:
raise AssertionError('Set %s content is longer than max length' % k)
if ((method == 1 and 'C' in k) or (method == 4 and 'S' in k) or method == 2) and 'content' in restrictions[inv_k]:
try:
assert restrictions[k]['content'].intersection(restrictions[inv_k]['content']) == set()
except AssertionError:
raise AssertionError('Intersection in %s content is not allowed in method %s.' % ('chemical' if method==1 else 'species',str(method)))
if method == 3 and 'content' in restrictions[inv_k]:
try:
assert restrictions[k]['content'].intersection(restrictions[inv_k]['content']) == set()
except AssertionError:
raise AssertionError('Intersection in set content is not allowed in method 3.')
C,S = map(set,zip(*X))
if method == 1:
C_t,C_v = train_test_split(list(C),test_size=prop)
if variant == 1:
S_t,S_v = S, S
else:
S_t = choices(list(S),k=int((1-prop)*len(S)))
S_v = choices(list(S),k=int(prop*len(S)))
if method == 2:
S_t,S_v = train_test_split(list(S),test_size=prop)
C_t,C_v = train_test_split(list(C),test_size=prop)
if method == 3:
X_t, X_v = train_test_split(X,test_size=prop)
C_t,S_t = map(set,zip(*X_t))
C_v,S_v = map(set,zip(*X_v))
if method == 4:
S_t,S_v = train_test_split(list(S),test_size=prop)
if variant == 1:
C_t,C_v = C, C
else:
C_t = choices(list(C),k=int((1-prop)*len(C)))
C_v = choices(list(C),k=int(prop*len(C)))
C_t,C_v,S_t,S_v = map(set,[C_t,C_v,S_t,S_v])
C_t,C_v,S_t,S_v = filter_restrictions(C_t,C_v,S_t,S_v)
if method == 1: C_t -= C_v
if method == 2:
C_t -= C_v
S_t -= S_v
if method == 4: S_t -= S_v
if method == 1:
assert C_t.intersection(C_v) == set()
if variant == 1:
S_t = S_v
assert S_t == S_v
else:
assert len(S_t.intersection(S_v)) < len(S_t.union(S_v))
if method == 2:
assert C_t.intersection(C_v) == set() and S_t.intersection(S_v) == set()
if method == 3:
assert len(C_t.intersection(C_v)) > 0 and len(S_t.intersection(S_v)) > 0
if method == 4:
assert S_t.intersection(S_v) == set()
if variant == 1:
C_t = C_v
assert C_t == C_v
else:
assert len(C_t.intersection(C_v)) < len(C_t.union(C_v))
check_restrictions(C_t,C_v,S_t,S_v)
Xtr = []
Xte = []
ytr = []
yte = []
for x,y in zip(X,Y):
c,s = x
if c in C_t and s in S_t:
Xtr.append(x)
ytr.append(y)
if c in C_v and s in S_v:
Xte.append(x)
yte.append(y)
return Xtr,Xte,ytr,yte
class FilterFingerprints:
def __init__(self):
pass
def fit(self,X):
idx = []
for i,a in enumerate(X.T):
if len(np.unique(a)) > 1:
idx.append(i)
self.idx = idx
def transform(self,X):
if len(X.shape) > 1:
return X[:,self.idx]
else:
return X[self.idx]
def fit_transform(self,X):
self.fit(X)
return self.transform(X)
def compile_model(model):
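    # NOTE: `R2` is not defined or imported in this file; an R-squared Keras metric
    # (e.g. tensorflow_addons' RSquare) is presumably expected to be in scope here.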
model.compile(optimizer='adagrad',loss='log_cosh',metrics=['mae','mse',R2(name='r2')])
import math
def lcm(a, b):
return abs(a*b) // math.gcd(a, b)
def combine(Xs):
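    # Repeat each array along axis 0 until they all reach a common length (the largest
    # pairwise LCM of the input lengths), then concatenate them feature-wise (axis=1).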
n = map(len,Xs)
l = max(*map(lambda x: lcm(len(x[0]),len(x[1])),product(Xs,Xs)))
r = [l//a for a in n]
tmp = []
for X,a in zip(Xs,r):
tmp.append(np.repeat(X,a,axis=0))
return np.concatenate(tmp,axis=1)
def list_duplicates(seq):
tally = defaultdict(list)
for i,item in enumerate(seq):
tally[item].append(i)
return ((key,locs) for key,locs in tally.items() if len(locs)>1)
def run_model(C_t,C_v,S_t,S_v,y,
experimental_features,
fingerprints,
chemical_embedding,
species_embedding,
chemical_features,
merge_species=False):
"""
    Take four classes of chemicals, two pairs of siblings, and test these on one or two species; combine siblings, then combine cousins, and observe the performance drop. Repeat on the species side.
    Repeat with embeddings for chemicals and species and expect the same performance at lower levels, but improved performance over the baseline at higher levels.
"""
"""
5-fold validation
+ 1-fold test set
"""
keys = set(y.keys())
keys_t = keys.intersection(set(product(C_t,S_t)))
keys_v = keys.intersection(set(product(C_v,S_v)))
ytr,yte = map(lambda x:np.asarray([y[i] for i in x]),[keys_t,keys_v])
if len(yte) < 1 or len(ytr) < 1:
return None,None,None
fingerprints_train,fingerprints_test = map(lambda x:np.asarray([fingerprints[i] for i,_ in x]),[keys_t,keys_v])
chemical_embedding_train,chemical_embedding_test = map(lambda x:np.asarray([chemical_embedding[i] for i,_ in x]),[keys_t,keys_v])
chemical_features_train,chemical_features_test = map(lambda x:np.asarray([chemical_features[i] for i,_ in x]),[keys_t,keys_v])
species_embedding_train,species_embedding_test = map(lambda x:np.asarray([species_embedding[i] for _,i in x]),[keys_t,keys_v])
experimental_features_train,experimental_features_test = map(lambda x:np.asarray([experimental_features[i] for i in x]),[keys_t,keys_v])
species_one_hot_encoder = OneHotEncoder(sparse=False)
sp_t = set(list(zip(*keys_t))[1])
sp_v = set(list(zip(*keys_v))[1])
sp = np.asarray(list(sp_t|sp_v)).reshape((-1,1))
species_one_hot_encoder.fit(sp)
species_one_hot_train,species_one_hot_test = map(lambda x:species_one_hot_encoder.transform(np.asarray(list(zip(*x))[1]).reshape((-1,1))),[keys_t,keys_v])
if merge_species:
for array in [species_embedding_train,species_one_hot_train,ytr]:
for elem,loc in list_duplicates([c for c,_ in keys_t]): #i.e. mean where c is the same
array[loc] = np.mean(array[loc])
for array in [species_embedding_test,species_one_hot_test,yte]:
for elem,loc in list_duplicates([c for c,_ in keys_v]):
array[loc] = np.mean(array[loc])
n_tr = ytr.shape[1]
n_te = yte.shape[1]
train_1 = combine([fingerprints_train,chemical_features_train,species_one_hot_train,experimental_features_train,ytr])
train_2 = combine([fingerprints_train,chemical_features_train,species_embedding_train,chemical_embedding_train,experimental_features_train,ytr])
test_1 = combine([fingerprints_test,chemical_features_test,species_one_hot_test,experimental_features_test,yte])
test_2 = combine([fingerprints_test,chemical_features_test,species_embedding_test,chemical_embedding_test,experimental_features_test,yte])
Xtr_1,ytr = train_1[:,:-n_tr],train_1[:,-n_tr:]
Xtr_2,ytr = train_2[:,:-n_tr],train_2[:,-n_tr:]
Xte_1,yte = test_1[:,:-n_te],test_1[:,-n_te:]
Xte_2,yte = test_2[:,:-n_te],test_2[:,-n_te:]
res1 = np.zeros(yte.ravel().shape)
res2 = np.zeros(yte.ravel().shape)
params = {'n_neighbors':[2,5,10,25,50,100],
'weights':['uniform','distance']}
n = min(len(ytr),5)
FOLDS = 10
for Xtr,Xte,res in zip([Xtr_1,Xtr_2],[Xte_1,Xte_2],[res1,res2]):
for _ in range(FOLDS):
regr = AdaBoostRegressor(n_estimators=10,loss='square')
regr.fit(Xtr,ytr.ravel())
res += regr.predict(Xte)/FOLDS
return res1,res2,yte
from SPARQLWrapper import SPARQLWrapper, JSON
sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
sparql.setReturnFormat(JSON)
def get_species_name(ncbi_id):
q = """
select ?label where {
?s wdt:P685 "%s" ;
wdt:P225 ?label .
}
""" % ncbi_id
sparql.setQuery(q)
try:
results = sparql.query().convert()
for result in results["results"]["bindings"]:
out = result["label"]["value"]
return out
except:
return ncbi_id
def encode_fingerprints(fingerprints_all):
fingerprint_encoder, fingerprint_ae = create_auto_encoder(input_size=len(fingerprints_all[0]),dense_layers=(128,),noise=0.1)
fingerprint_ae.compile(optimizer='adagrad',loss='binary_crossentropy')
fingerprint_ae.fit(fingerprints_all,fingerprints_all,
epochs=MAX_ENCODER_EPOCHS,
callbacks=[EarlyStopping('loss',min_delta=1e-5)],
verbose=0)
return fingerprint_encoder.predict(fingerprints_all)
from sklearn.cluster import KMeans
# function returns WSS score for k values from 1 to kmax
def calculate_WSS(points, kmax):
sse = []
for k in range(1, kmax+1):
kmeans = KMeans(n_clusters = k).fit(points)
centroids = kmeans.cluster_centers_
pred_clusters = kmeans.predict(points)
curr_sse = 0
# calculate square of Euclidean distance of each point from its cluster center and add to current WSS
for i in range(len(points)):
curr_center = centroids[pred_clusters[i]]
curr_sse += (points[i, 0] - curr_center[0]) ** 2 + (points[i, 1] - curr_center[1]) ** 2
sse.append(curr_sse)
return sse
def define_chemical_clusters(fingerprints,k=15,use_pca=True):
if not isinstance(fingerprints,list):
fingerprints = [fingerprints]
keys = set.intersection(*[set(f.keys()) for f in fingerprints])
array = np.concatenate([np.asarray([v[k] for k in keys]) for v in fingerprints],axis=1)
if use_pca:
array = PCA(2).fit_transform(array)
if k < 0:
sse = calculate_WSS(array,25)
k = np.argmin(sse) + 1
plt.plot(sse)
plt.show()
clusters = defaultdict(set)
kmeans = KMeans(n_clusters = k).fit(array)
cp = kmeans.predict(array)
for k,v in zip(keys,cp):
clusters[v].add(k)
return clusters, kmeans.cluster_centers_
def merge_closest(clusters,cluster_centers,ord=2):
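    # Find the two closest cluster centres under the given norm, replace one with their
    # mean, delete the other, and merge the corresponding member sets.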
dist = {}
for i,cc1 in enumerate(cluster_centers):
for j,cc2 in enumerate(cluster_centers):
if i == j: continue
dist[(i,j)] = np.linalg.norm(cc1-cc2,ord=ord)
if len(dist) > 1:
merge,_ = sorted(dist.items(),key=lambda x:x[1])[0]
else:
merge = (i,j)
k1,k2 = merge
cluster_centers[k1] = np.mean([cluster_centers[k1],cluster_centers[k2]],axis=0)
cluster_centers = np.delete(cluster_centers,k2,axis=0)
clusters[k1] |= clusters[k2]
clusters.pop(k2,None)
return clusters, cluster_centers
def filter_data(X,Y,C_t,C_v,S_t,S_v):
Xtr,Xte,ytr,yte = [],[],[],[]
for x,y in zip(X,Y):
c,s = x
if c in C_t and s in S_t:
Xtr.append(x)
ytr.append(y)
if c in C_v and s in S_v:
Xte.append(x)
yte.append(y)
return Xtr,Xte,ytr,yte
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/media/erik/Mass/Dropbox/NIVA_GITLAB/pySMIfp')
from smiles_fingerprints import smiles_fingerprint
def load_smiles_fingerprints():
q = """
select ?chembl ?smiles where {
?c wdt:P233 ?smiles ;
wdt:P592 ?chembl .
}
"""
converter = {}
sparql.setQuery(q)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
ch = result["chembl"]["value"]
smi = result['smiles']['value']
smifp = smiles_fingerprint(smi)
converter['http://rdf.ebi.ac.uk/resource/chembl/molecule/'+ch] = smifp
return converter
def save_smiles_fingerprints(fp,filename='data/smiles_fingerprints.csv'):
a = {}
for i in range(len(smiles_fingerprint('C'))):
a['sig%s'%str(i)] = [array[i] for _,array in fp.items()]
df = pd.DataFrame(data={'chemical':list(fp.keys()),**a})
df.to_csv(filename)
def read_smiles_fingerprints(filename):
df = pd.read_csv(filename)
cols = [c for c in df.columns if 'sig' in c]
chemicals = df['chemical'].values
arrays = df[cols].values
return dict(zip(chemicals,np.asarray(arrays)))
def chemical_similarities(fingerprints):
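    # Pairwise Sørensen-Dice similarity between binary fingerprints:
    # sim(A, B) = 2|A ∩ B| / (|A| + |B|).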
keys = fingerprints.keys()
array = np.asarray([i for k,i in fingerprints.items()])
sim = []
for a in array:
v = a @ array.T
w = np.sum(a) + np.sum(array,axis=1)
sim_score = 2*v/w
sim.append(sim_score)
return {k:s for k,s in zip(keys,sim)}
def main():
"""
organic = obo['CHEBI_50860']
inorganic = obo['CHEBI_24835']
"""
model = 'ComplEx'
g1_parts = [[0],[0,1],[0,1,2]]
g2_parts = [[0],[0,1]]
p = list(product(g1_parts,g2_parts))
p += [p[-1]]
ul = (False,False)
f1,f2=[],[]
for g1p,g2p,in p:
for lit,gp,fs,name in zip([*ul],[g1p,g2p],[f1,f2],['_chemical_','_taxonomy_']):
fs.append(model+name+str(hash((lit,*gp))))
if (g1p,g2p) == p[-1]:
ul = (True,True)
organic_chemicals = set()
inorganic_chemicals = set()
salts = set()
for i in range(1,10):
df = pd.read_csv('./data/chemical_group_%s.csv' % str(i),index_col='parent')
try:
organic_chemicals |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_50860','children'].split(','))
except:
pass
try:
inorganic_chemicals |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_24835','children'].split(','))
except:
pass
try:
salts |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_24866','children'].split(','))
except:
pass
print('Num organic chemicals',len(organic_chemicals))
print('Num inorganic chemicals',len(inorganic_chemicals))
print('Num salts',len(salts))
C = organic_chemicals
try:
smiles_fingerprints = read_smiles_fingerprints('./data/smiles_fingerprints.csv')
except FileNotFoundError:
smiles_fingerprints = load_smiles_fingerprints()
save_smiles_fingerprints(smiles_fingerprints,'./data/smiles_fingerprints.csv')
mms = MinMaxScaler().fit_transform(np.asarray([smiles_fingerprints[k] for k in smiles_fingerprints]))
smiles_fingerprints = dict(zip(smiles_fingerprints,mms))
X,Y,experimental_features = load_data('./data/experiments.csv',filter_chemicals=None, filter_species=None)
pubchem_fingerprints = load_fingerprints('./data/chemicals_fingerprints.csv')
Y = {k:y for k,y in zip(X,Y)}
pubchem_fingerprints = chemical_similarities(pubchem_fingerprints)
chemical_embedding = load_embeddings('./data/embeddings/%s_entity_embeddings.npy' % f1[0],
'./data/embeddings/%s_entity_ids.npy' % f1[0])
species_embedding = load_embeddings('./data/embeddings/%s_entity_embeddings.npy' % f2[0],
'./data/embeddings/%s_entity_ids.npy' % f2[0])
chemical_features = load_features('./data/chemicals_features.csv')
chemical_features = dict(zip(chemical_features,MinMaxScaler().fit_transform(np.asarray([chemical_features[k] for k in chemical_features]))))
for cf in [QuantileTransformer(n_quantiles=100,output_distribution='normal')]:
chemical_embedding = dict(zip(chemical_embedding,cf.fit_transform(np.asarray([chemical_embedding[k] for k in chemical_embedding]))))
for cf in [QuantileTransformer(n_quantiles=100,output_distribution='normal')]:
species_embedding = dict(zip(species_embedding,cf.fit_transform(np.asarray([species_embedding[k] for k in species_embedding]))))
species_divisions = defaultdict(set)
for k in range(1,2):
df = pd.read_csv('./data/species_groups_%s.csv' % str(k), index_col='parent')
for s in df.index:
species_divisions[s] |= set(df.loc[s,'children'].split(','))
species_divisions = dict(filter(lambda x:len(x[1])>5,species_divisions.items()))
#for k in species_divisions:
#print(get_species_name(k.split('/')[-1]))
#species_divisions = defaultdict(set)
#df = pd.read_csv('./data/species_divisions.csv', index_col='parent')
#for s in df.index:
#species_divisions[s] |= set(df.loc[s,'children'].split(','))
C = set.intersection(*map(lambda k:set(k.keys()),[smiles_fingerprints,pubchem_fingerprints,chemical_features,chemical_embedding]))
for d in [smiles_fingerprints,pubchem_fingerprints,chemical_embedding,chemical_features]:
for c in set(d.keys()):
if not c in C:
d.pop(c,None)
n = 7
clusters, cluster_centers = define_chemical_clusters([smiles_fingerprints],k=max(-1,n),use_pca=False)
print(*map(lambda x:len(x[1]),clusters.items()))
data = {}
all_runs = {}
TOP_K = 10
while True:
for C,S in tqdm(product(clusters,species_divisions),total=len(clusters)*len(species_divisions)):
k = [C,S]
C = list(clusters[C])
S = species_divisions[S]
k[1] = get_species_name(k[1].split('/')[-1])
loo = LeaveOneOut()
predictions = []
y_true = []
for train_index, test_index in loo.split(C):
C_t = [C[i] for i in train_index]
C_v = [C[i] for i in test_index]
r1,r2,yte = run_model(C_t,C_v,S,S,Y,
experimental_features,
pubchem_fingerprints,
chemical_embedding,
species_embedding,
chemical_features,
merge_species=True)
if r1 is None and r2 is None: continue
r1 = np.mean(r1)
r2 = np.mean(r2)
y_true.append(np.mean(yte))
predictions.append((r1,r2))
y_true, predictions = map(np.asarray,[y_true,predictions])
if len(predictions) < 10: continue
try:
if len(predictions.shape) < 2:
predictions = np.expand_dims(predictions,axis=1)
rsq_1 = r2_score(y_true,predictions[:,0])
rsq_2 = r2_score(y_true,predictions[:,1])
all_runs[tuple(k)] = (rsq_1,rsq_2)
except ValueError:
pass
all_runs = dict(sorted(all_runs.items(),key=lambda x: sum(x[1])/2,reverse=True))
print(all_runs)
data[len(cluster_centers)] = all_runs
if len(cluster_centers) > 0:
clusters, cluster_centers = merge_closest(clusters,cluster_centers)
for k in list(all_runs.keys())[:TOP_K]:
_,s = k
species_divisions.pop(k,None)
else:
break
pd.to_pickle(data,'chemical_cluster_merging.pkl')
exit()
ks = set()
for k in species_divisions:
S = species_divisions[k]
still_true = True
for k_c in clusters:
C = clusters[k_c]
Xtr,Xte,ytr,yte = filter_data(X,Y,C,C,S,S)
if count(Xtr,Xte) > 100: ks.add(k)
for k in tqdm(ks):
n=6
clusters, cluster_centers = define_chemical_clusters([smiles_fingerprints],k=max(-1,n))
S = species_divisions[k]
sn = get_species_name(k.split('/')[-1])
results = defaultdict(list)
i = 0
while True:
k_c = sorted(clusters,key=lambda x:len(clusters[x]),reverse=True)[0]
C_t = clusters[k_c]
if len(C_t) < 1: continue
C_t,C_v = train_test_split(list(C_t),test_size=0.25)
S_t = S
S_v = S
Xtr,Xte,ytr,yte = filter_data(X,Y,C_t,C_v,S_t,S_v)
try:
assert count(Xtr,Xte) > 20
r1,r2 = run_model(Xtr,
Xte,
ytr,
yte,
experimental_features,
pubchem_fingerprints,
chemical_embedding,
species_embedding,
chemical_features,
merge_species=True)
except AssertionError:
r1,r2 = float('nan'), float('nan')
except np.AxisError:
r1,r2 = float('nan'), float('nan')
results[i].append((r1,r2))
clusters, cluster_centers = merge_closest(clusters,cluster_centers)
if len(cluster_centers) < 1:
break
i += 1
v0 = [[v[0] for v in results[k]] for k in results]
v1 = [[v[1] for v in results[k]] for k in results]
fig, ax = plt.subplots()
for x,color,ran in zip([v0,v1],['red','green'],[np.arange(0,len(v0)*2,2),np.arange(1,len(v1)*2,2)]):
mins = [np.nanmin(a) for a in x]
maxes = [np.nanmax(a) for a in x]
means = [np.nanmean(a) for a in x]
std = [np.nanstd(a) for a in x]
mins,maxes,means,std = map(np.asarray,[mins,maxes,means,std])
ax.bar(ran,maxes,width=0.5,color=color)
#plt.ylim(-1,1)
ax.set_xticks(np.arange(0.5,len(v0)*2,2))
ax.set_xticklabels(('%s Clusters' % str(abs(i)) for i in range(-n,0)))
plt.savefig('./plots/chemical_clusters_taxon_%s.png' % sn)
exit()
#def tqdm(x,**params):
#return x
for filter_chemicals,string,TOP_K in tqdm(zip([inorganic_chemicals | salts],['organic'],[4]),total=1,desc='Chemical Groups'):
#if string=='organic': continue
for division in tqdm(S_v,total=len(S_v),desc='Divisions'):
if not len(S_v[division]) > 1: continue
model_params={'encode':False,'train_ae_fingerprints':False,'train_ae_species':False}
results = [[]]*TOP_K
f = lambda _s: sum([1 for c,s in X if (s == _s and c in C-filter_chemicals)])
tmp_division = list(sorted(S_v[division],key=f,reverse=True))[:TOP_K]
for i,s_v in tqdm(enumerate(tmp_division),desc='Species in division %s' % division,leave=False,total=len(tmp_division)):
C_restriction = {'C_v':{'not content':filter_chemicals},'C_t':{'not content':filter_chemicals}}
configs = []
#Method 1
configs.append((1, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
configs.append((1, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
#Method 2
configs.append((2, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
#Method 3
configs.append((3, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
configs.append((3, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
#Method 4
configs.append((4, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
configs.append((4, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
tmp_res = np.zeros((len(configs),2))
for j,config in tqdm(enumerate(configs),total=len(configs),leave=False,desc='Configs'):
m,v,res = config
r1_tmp = []
r2_tmp = []
for _ in range(10):
tf.keras.backend.clear_session()
prop = 0.3
Xtr,Xte,ytr,yte = data_split(X,Y,restrictions={**res,**C_restriction},method=m,variant=v,prop=prop)
try:
r1,r2 = run_model(Xtr,
Xte,
ytr,
yte,
experimental_features,
fingerprints,
chemical_embedding,
species_embedding,
model_params=model_params)
except:
r1,r2=0,0
r1_tmp.append(r1)
r2_tmp.append(r2)
tmp_res[j,0] = np.mean(r1_tmp)
tmp_res[j,1] = np.mean(r2_tmp)
results[i] = tmp_res
fig, axs = plt.subplots(1,len(results),figsize=(40, 10))
for i,ax in enumerate(axs):
ms = results[i]
baseline = ms[:,0]
over = ms[:,1]
baseline = np.nan_to_num(baseline, nan=0.0,posinf=0.0, neginf=0.0)
over = np.nan_to_num(over, nan=0.0,posinf=0.0, neginf=0.0)
width = 0.4
ax.bar(np.arange(0,len(baseline)*2,2),baseline,width,color='red')
ax.bar(np.arange(1,len(baseline)*2,2),over,width,color='green')
ax.set_title(get_species_name(tmp_division[i].split('/')[-1]))
ax.set_xticks(np.arange(0.5,len(baseline)*2,2))
ax.set_xticklabels((str(i) for i in range(len(configs))))
ax.set_ylim(0,max(*over,*baseline)+0.1)
plt.savefig('plots/division_%s_%s.png' % (division,string))
if __name__ == '__main__':
main()
| 32,681 | 35.394209 | 169 | py |
kge_ecotox_regression | kge_ecotox_regression-main/train_rdf2vec.py |
from pyrdf2vec.graphs import KG
from pyrdf2vec.samplers import UniformSampler
from pyrdf2vec.walkers import RandomWalker
from pyrdf2vec import RDF2VecTransformer
import pandas as pd
from rdflib import Graph, URIRef
import numpy as np
from main import load_data
import rdflib
d = './data/embeddings/'
pdf = [pd.read_csv('./data/chemicals_%s.csv' % str(i)) for i in range(3)]
kg1 = pd.concat(pdf)
kg2 = pd.read_csv('./data/taxonomy.csv')
X, _, _ = load_data('./data/experiments.csv')  # load_data returns (X, y, experimental_features)
entities1 = list(set(map(rdflib.URIRef,list(zip(*X))[0])))
entities2 = list(set(map(rdflib.URIRef,list(zip(*X))[1])))
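# For each knowledge graph (chemicals, taxonomy): rebuild an rdflib Graph from the triples,
# serialize it to Turtle, and train RDF2Vec embeddings for the query entities via random walks.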
for kg,kg_name,entities in zip([kg1,kg2],['chemical','taxonomy'],[entities1,entities2]):
g = Graph()
for t in zip(kg['subject'],kg['predicate'],kg['object']):
g.add(tuple(map(rdflib.URIRef,t)))
g.serialize('tmp.ttl',format='ttl')
kg = KG(location="tmp.ttl",file_type='ttl')
walkers = [RandomWalker(4, 5, UniformSampler())]
transformer = RDF2VecTransformer(walkers=walkers)
embeddings = transformer.fit_transform(kg,entities)
np.save(d + 'rdf2vec_%s_entity_embeddings.csv' % kg_name, embeddings)
np.save(d + 'rdf2vec_%s_entity_ids.csv' % kg_name, np.asarray(list(enumerate(entities))))
| 1,266 | 27.795455 | 93 | py |
kge_ecotox_regression | kge_ecotox_regression-main/embedding_model.py | from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Input, Embedding, Dense, Dropout, Conv2D, Flatten, Concatenate, Multiply
import tensorflow as tf
def min_distance_loss(w,epsilon=1.0):
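    # Pairwise squared Euclidean distances between embedding rows,
    # D_ij = ||w_i||^2 - 2 w_i.w_j + ||w_j||^2, with epsilon added on the diagonal so a
    # point is not counted against itself; the loss counts pairs closer than epsilon,
    # divided by the embedding dimension.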
r = tf.reduce_sum(w*w, 1)
r = tf.reshape(r, [-1, 1])
D = r - 2*tf.matmul(w, tf.transpose(w)) + tf.transpose(r)
D = D + tf.linalg.diag(epsilon * tf.ones(D.shape[0]))
return tf.reduce_sum(tf.where(D<epsilon,1.0,0.0))/tf.cast(w.shape[1],tf.float32)
def TransE(entities,relations,dim=200,bias=1,lamb=1,norm_size=0.0,mdl=0.0):
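    # TransE scoring: score(h, r, t) = bias - ||h + r - t||_2, trained with a hinge loss
    # (margin `lamb`) plus L2 and minimum-distance regularisation of the entity embeddings.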
inp = Input((3,))
inp_label = Input(())
s,p,o = tf.unstack(inp,axis=-1)
entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
score = bias - tf.norm(h+r-t, ord=2, axis=-1)
loss = lamb - inp_label * score
loss = tf.where(loss>0,loss,0) + \
norm_size * tf.norm(entity_embedding.weights[0],ord=2)**2 + \
min_distance_loss(entity_embedding.weights[0]) * mdl
model = Model(inputs=[inp,inp_label],outputs=score)
model.add_loss(loss)
model.compile(optimizer='adam',loss=None)
return model
def DistMult(entities,relations,dim=200,norm_size=0.0,mdl=0.0):
inp = Input((3,))
inp_label = Input(())
s,p,o = tf.unstack(inp,axis=-1)
entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
score = tf.keras.layers.Activation('linear')(tf.reduce_sum(h*r*t,axis=-1))
model = Model(inputs=[inp,inp_label],outputs=score)
loss = lambda true,pred: tf.reduce_sum(tf.math.log(1+tf.math.exp(-true*pred))) + \
norm_size * tf.norm(entity_embedding.weights[0],ord=2)**2 + \
min_distance_loss(entity_embedding.weights[0],mdl) * mdl
model.compile(optimizer='adam',loss=loss)
return model
def ComplEx(entities,relations,dim=200,norm_size=0.0,mdl=0.0):
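    # ComplEx scoring: embeddings are split into real/imaginary halves and scored as
    # Re(<r, h, conj(t)>); trained with a logistic loss plus the same regularisers as above.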
inp = Input((3,))
inp_label = Input(())
s,p,o = tf.unstack(inp,axis=-1)
entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
h_real,h_img = tf.split(h,2,axis=-1)
r_real,r_img = tf.split(r,2,axis=-1)
t_real,t_img = tf.split(t,2,axis=-1)
score = tf.reduce_sum(r_real*h_real*t_real,axis=-1) + \
tf.reduce_sum(r_real*h_img*t_img,axis=-1) + \
tf.reduce_sum(r_img*h_real*t_img,axis=-1) - \
tf.reduce_sum(r_img*h_img*t_real,axis=-1)
model = Model(inputs=[inp,inp_label],outputs=score)
loss = lambda true,pred: tf.reduce_sum(tf.math.log(1+tf.math.exp(-true*pred))) + \
norm_size * tf.norm(entity_embedding.weights[0],ord=2)**2 + \
min_distance_loss(entity_embedding.weights[0]) * mdl
model.compile(optimizer='adam',loss=loss)
return model
def ConvE(entities,relations):
dim = 200
inp = Input((3,))
inp_label = Input(())
s,p,o = tf.unstack(inp,axis=-1)
entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
h = tf.reshape(h,(-1,20,10,1))
r = tf.reshape(r,(-1,20,10,1))
x = Concatenate(axis=2)([h,r])
x = Conv2D(16,(5,5),activation='relu')(x)
x = Dropout(0.2)(x)
x = Conv2D(16,(3,3),activation='relu')(x)
x = Dropout(0.2)(x)
x = Flatten()(x)
x = Dense(dim)(x)
x = Multiply()([x,t])
x = Dense(1,activation='sigmoid')(x)
model = Model(inputs=[inp,inp_label],outputs=x)
model.compile(optimizer='adam',loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.05))
return model
| 4,177 | 31.897638 | 108 | py |
kge_ecotox_regression | kge_ecotox_regression-main/pretrained_embedding_models.py |
import sys
import os
from itertools import product
from KGEkeras import DistMult, HolE, TransE, HAKE, ConvE, ComplEx, ConvR, RotatE, pRotatE, ConvKB, CosinE
from kerastuner import RandomSearch, HyperParameters, Objective, Hyperband, BayesianOptimization
from random import choice
from collections import defaultdict
from tensorflow.keras.losses import binary_crossentropy,hinge,mean_squared_error
from tensorflow.keras import Input
from tensorflow.keras import Model
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, Callback, TerminateOnNaN, ReduceLROnPlateau
from sklearn.metrics.cluster import completeness_score
from tensorflow.keras.optimizers import Adam
import json
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from KGEkeras import loss_function_lookup
from lib.utils import generate_negative, oversample_data, load_data
from tqdm import tqdm
import string
import random
from random import choices
from lib.hptuner import HPTuner
import pickle
try:
from tensorflow_addons.callbacks import TimeStopping
except:
pass
from rdflib import Graph, URIRef, Literal, Namespace
from KGEkeras import LiteralConverter
from sklearn.decomposition import PCA
SECONDS_PER_TRAIL = 600
SECONDS_TO_TERMINATE = 3600
SEARCH_MAX_EPOCHS = 10
MAX_EPOCHS = 200
MIN_EPOCHS = 50
MAX_TRIALS = 20
PATIENCE = 10
EPSILON = 10e-7
models = {
#'DistMult':DistMult,
#'TransE':TransE,
#'HolE':HolE,
'ComplEx':ComplEx,
#'HAKE':HAKE,
#'pRotatE':pRotatE,
#'RotatE':RotatE,
#'ConvE':ConvE,
#'ConvKB':ConvKB,
}
class DataGenerator(tf.keras.utils.Sequence):
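    # Keras Sequence that, for every batch, pairs a slice of positive triples with
    # `ns` generated negative triples (targets are None: the loss is built into the model).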
def __init__(self, kg, ns=10, batch_size=32, shuffle=True):
self.batch_size = min(batch_size,len(kg))
self.kg = kg
self.ns = ns
self.num_e = len(set([s for s,_,_ in kg])|set([o for _,_,o in kg]))
self.shuffle = shuffle
self.indices = list(range(len(kg)))
self.on_epoch_end()
def __len__(self):
return len(self.kg) // self.batch_size
def __getitem__(self, index):
index = self.index[index * self.batch_size:(index + 1) * self.batch_size]
batch = [self.indices[k] for k in index]
X, y = self.__get_data(batch)
return X, y
def on_epoch_end(self):
self.index = np.arange(len(self.indices))
if self.shuffle == True:
np.random.shuffle(self.index)
def __get_data(self, batch):
tmp_kg = np.asarray([self.kg[i] for i in batch])
negative_kg = generate_negative(tmp_kg,N=self.num_e,negative=self.ns)
X = oversample_data(kgs=[tmp_kg,negative_kg])
return X, None
def build_model(hp):
params = hp.copy()
params['e_dim'] = params['dim']
params['r_dim'] = params['dim']
params['name'] = 'embedding_model'
embedding_model = models[params['embedding_model']]
embedding_model = embedding_model(**params)
triple = Input((3,))
ftriple = Input((3,))
inputs = [triple, ftriple]
score = embedding_model(triple)
fscore = embedding_model(ftriple)
loss_function = loss_function_lookup(params['loss_function'])
loss = loss_function(score,fscore,params['margin'] or 1, 1)
model = Model(inputs=inputs, outputs=loss)
model.add_loss(loss)
model.compile(optimizer=Adam(learning_rate=ExponentialDecay(params['learning_rate'],decay_steps=100000,decay_rate=0.96)),
loss=None)
return model
def optimize_model(model, kg, lit=False, name='name', hp=None):
if lit:
lc = LiteralConverter(kg)
literals = lc.fit_transform()
kg = lc.g
literals = PCA(min(len(literals[0]),100)).fit_transform(literals)
else:
literals = None
kg -= [(s,p,o) for s,p,o in kg if isinstance(o,Literal)]
entities = set(kg.subjects()) | set(kg.objects())
relations = set(kg.predicates())
me = {k:i for i,k in enumerate(entities)}
mr = {k:i for i,k in enumerate(relations)}
kg = list(map(lambda x: (me[x[0]],mr[x[1]],me[x[2]]), kg))
bs = 512
kg = np.asarray(kg)
model_name = model
N = len(me)
M = len(mr)
hptuner = HPTuner(runs=MAX_TRIALS, objectiv_direction='min')
hptuner.add_value_hp('gamma',0,21)
hptuner.add_value_hp('dim',100,401,dtype=int)
hptuner.add_value_hp('negative_samples',10,101,dtype=int)
hptuner.add_value_hp('margin',1,11,dtype=int)
hptuner.add_list_hp('loss_function',['pairwize_hinge','pairwize_logistic','pointwize_hinge','pointwize_logistic'],exhaustive=True)
hptuner.add_fixed_hp('embedding_model',model)
hptuner.add_fixed_hp('dp',0.2)
hptuner.add_fixed_hp('hidden_dp',0.2)
hptuner.add_fixed_hp('num_entities',N)
hptuner.add_fixed_hp('num_relations',M)
if hp:
for k,i in hp.items():
hptuner.add_fixed_hp(k,i)
hptuner.add_fixed_hp('num_entities',N)
hptuner.add_fixed_hp('num_relations',M)
hptuner.add_fixed_hp('learning_rate',0.001)
hptuner.add_fixed_hp('regularization',0.001)
if lit:
hptuner.add_fixed_hp('literals',literals)
hptuner.add_fixed_hp('literal_activation','tanh')
if hp:
hptuner.next_hp_config()
hptuner.add_result(0.0)
with tqdm(total=hptuner.runs, desc='Trials') as pbar:
while hptuner.is_active and hp is None:
hp = hptuner.next_hp_config()
model = build_model(hp)
tr_gen = DataGenerator(kg, batch_size=bs, shuffle=True, ns=hp['negative_samples'])
hist = model.fit(tr_gen,epochs=SEARCH_MAX_EPOCHS,verbose=2, callbacks=[EarlyStopping('loss'),TerminateOnNaN()])
score = hist.history['loss'][-1]/hist.history['loss'][0]
hptuner.add_result(score)
tf.keras.backend.clear_session()
pbar.update(1)
hp = hptuner.best_config()
#if hp is None:
#with open('./pretrained_hp/%s%s_kg.json' % (model_name,name), 'w') as fp:
#json.dump(hp, fp)
model = build_model(hp)
tr_gen = DataGenerator(kg, batch_size=bs, shuffle=True, ns=hp['negative_samples'])
hist = model.fit(tr_gen,epochs=MAX_EPOCHS, verbose=2, callbacks=[EarlyStopping('loss',patience=PATIENCE), TerminateOnNaN()])
if np.isnan(hist.history['loss'][-1]):
print(model_name,'nan loss.')
return optimize_model(model_name,kg,lit,name,None)
for l in model.layers:
if isinstance(l,models[model_name]):
m = l.name
m, W1, W2 = model, model.get_layer(m).entity_embedding.get_weights()[0], model.get_layer(m).relational_embedding.get_weights()[0]
m.save_weights('pretrained_models/model/'+name)
np.save(name+'_entity_embeddings.npy', W1)
np.save(name+'_entity_ids.npy',np.asarray(list(zip(entities,range(len(entities))))))
np.save(name+'_relational_embeddings.npy', W2)
np.save(name+'_relation_ids.npy',np.asarray(list(zip(relations,range(len(relations))))))
def main():
d = './data/embeddings/'
use_literals = product([False,True],[False,True])
g1_parts = [[0],[0,1],[0,1,2]]
g2_parts = [[0],[0,1]]
p = list(product(g1_parts,g2_parts))
p += [p[-1]]
ul = (False,False)
for g1p,g2p in tqdm(p):
g1,g2 = Graph(),Graph()
for i in g1p:
g = Graph()
g.load('./data/chemicals_%s.ttl' % str(i),format='ttl')
g1 += g
for i in g2p:
g = Graph()
g.load('./data/taxonomy_%s.ttl' % str(i),format='ttl')
g2 += g
for lit,gp,kg,name in zip([*ul],[g1p,g2p],[g1,g2],['_chemical_','_taxonomy_']):
#hp_file = '../KGE-CEP/pretrained_hp/%s%s_kg.json' % (model,name)
hp = {'e_dim':100,
'negative_samples':10,
'loss_function':'pairwize_logistic'}
model = 'ComplEx'
f = d+model+name+str(hash((lit,*gp)))
optimize_model(model,kg,lit,name=f,hp=hp)
tf.keras.backend.clear_session()
if (g1p,g2p) == p[-1]:
ul = (True,True)
if __name__ == '__main__':
main()
| 8,625 | 30.140794 | 134 | py |
kge_ecotox_regression | kge_ecotox_regression-main/create_data.py |
"""
TODO:
- Load LC50 data from ECOTOX.
- Take the median per chemical-species pair.
- Define chemical groups.
- Export files per chemical group and each species.
- For all chemicals and species, export relevant KGs.
"""
from tera.DataAggregation import Taxonomy, Effects, Traits
from tera.DataAccess import EffectsAPI
from tera.DataIntegration import DownloadedWikidata, LogMapMapping
from tera.utils import strip_namespace, unit_conversion
from tqdm import tqdm
from rdflib import Graph, URIRef, Literal, BNode, Namespace
from rdflib.namespace import RDFS, RDF
cco = Namespace('http://rdf.ebi.ac.uk/terms/chembl#')
skos = Namespace('http://www.w3.org/2004/02/skos/core#')
obo = Namespace('http://purl.obolibrary.org/obo/')
import pandas as pd
from collections import defaultdict
import pubchempy as pcp
import numpy as np
def get_subgraph(to_visit, graph, backtracking=0):
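    # Iteratively collect the outgoing triples of every node reachable from `to_visit`
    # (queueing non-literal objects); with backtracking > 0, also recurse over the
    # incoming subjects of the collected nodes.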
out = Graph()
visited = set()
while to_visit:
curr = to_visit.pop()
visited.add(curr)
tmp = set(graph.triples((curr,None,None)))
for t in tmp:
out.add(t)
to_visit |= set([o for _,_,o in tmp if not isinstance(o,Literal)])
to_visit -= visited
if backtracking > 0:
tmp = set()
for s in set([s for s,_,_ in out]):
tmp |= set(graph.subjects(object=s))
for t in out:
graph.remove(t)
return out + get_subgraph(tmp, graph, backtracking-1)
return out
def load_endpoint_data():
ed = Effects(directory='../ecotox_data/',verbose=False)
species_mapping = LogMapMapping(filename='./data/final_mappings.txt')
chemicals_mappings = DownloadedWikidata(filename='./data/cas_to_chembl.csv')
species_mapping.load()
chemicals_mappings.load()
ncbi_namespace = Namespace('https://www.ncbi.nlm.nih.gov/taxonomy/')
species_mapping = [(ed.namespace['taxon/'+k],ncbi_namespace['taxon/'+i.pop(0)]) for k,i in species_mapping.mappings.items()]
ed.replace(species_mapping)
chembl_namespace = Namespace('http://rdf.ebi.ac.uk/resource/chembl/molecule/')
chemicals_mappings = [(ed.namespace['cas/'+k],chembl_namespace[i.pop(0)]) for k,i in chemicals_mappings.mappings.items()]
ed.replace(chemicals_mappings)
endpoints = EffectsAPI(dataobject=ed, verbose=True).get_endpoint(c=None, s=None)
d = defaultdict(list)
for c,s,cc,cu,ep,ef,sd,sdu in endpoints:
try:
sd = float(sd)
except:
continue
if 'day' in str(sdu).lower():
sd *= 24
elif 'week' in str(sdu).lower():
sd *= (7*24)
elif 'hour' in str(sdu).lower():
sd *= 1
else:
continue
if ('LC50' in str(ep) or 'LD50' in str(ep) or ('EC50' in str(ep) and 'MOR' in str(ef))) and ('ncbi' in str(s) and 'chembl' in str(c)):
try:
factor = unit_conversion(str(cu),'http://qudt.org/vocab/unit#MilligramPerLitre')
except:
factor = 0
if factor > 0:
cc = float(cc)
cc = cc*factor
d['chemical'].append(str(c))
d['species'].append(str(s))
d['concentration'].append(cc)
d['study_duration'].append(sd)
df = pd.DataFrame(data=d)
df.to_csv('./data/experiments.csv')
def fingerprints():
df = pd.read_csv('./data/experiments.csv')
mapping = DownloadedWikidata(filename='./data/chembl_to_cid.csv')
to_look_for = mapping.convert(set(df['chemical']),reverse=False,strip=True)
to_look_for = set([URIRef('http://rdf.ncbi.nlm.nih.gov/pubchem/compound/CID'+str(i)) for k,i in to_look_for.items() if i != 'no mapping'])
out = []
fp = []
for c,c2 in tqdm(zip(to_look_for,set(df['chemical'])),total=len(to_look_for)):
try:
compound = pcp.Compound.from_cid(int(c.split('CID')[-1]))
fp.append(bin(int(compound.fingerprint,16))[2:])
out.append(c2)
except:
pass
df = pd.DataFrame(data={'chemical':out,'fingerprint':fp})
df.to_csv('./data/chemicals_fingerprints.csv')
def chemical_features():
df = pd.read_csv('./data/experiments.csv')
mapping = DownloadedWikidata(filename='./data/chembl_to_cid.csv')
to_look_for = mapping.convert(set(df['chemical']),reverse=False,strip=True)
to_look_for = set([URIRef('http://rdf.ncbi.nlm.nih.gov/pubchem/compound/CID'+str(i)) for k,i in to_look_for.items() if i != 'no mapping'])
out = defaultdict(list)
fp = []
fs = ['xlogp','exact_mass','tpsa','complexity','charge']
for c,c2 in tqdm(zip(to_look_for,set(df['chemical'])),total=len(to_look_for)):
try:
compound = pcp.Compound.from_cid(int(c.split('CID')[-1]))
tmp = compound.to_dict(fs)
for k in tmp:
out[k].append(tmp[k])
out['chemical'].append(c2)
except:
pass
df = pd.DataFrame(data=out)
df.to_csv('./data/chemicals_features.csv')
def load_chemical_groups():
"""
Split (in)organic
CHEBI:50860 (organic)
CHEBI:24835 (inorganic)
"""
df = pd.read_csv('./data/experiments.csv')
chemicals = df['chemical']
graph = Graph()
graph.parse('../chembl/chembl_26.0_molecule_chebi_ls.ttl',format='ttl')
mapping = defaultdict()
for c in chemicals:
c = URIRef(c)
m = list(graph.objects(subject=c,predicate=skos['exactMatch']))
if m:
mapping[c]=m.pop(0)
chebi_graph = Graph()
chebi_graph.parse('../chebi/chebi.ttl',format='ttl')
chebi_graph = replace(chebi_graph,{i:k for k,i in mapping.items()})
for steps in range(1,20):
out = defaultdict(set)
desendents=set()
for c in map(lambda x:URIRef(x), chemicals):
p = ' / '.join('<'+r+'>' for r in [RDFS.subClassOf]*steps)
qres = chebi_graph.query(
"""SELECT DISTINCT ?parent
WHERE {
<%s> %s ?parent
}""" % (str(c),p))
for r in qres:
desendents.add((c,r[-1]))
for c,p in desendents:
out[p].add(c)
df = pd.DataFrame(data={'parent':list(out.keys()),'children':[','.join(out[k]) for k in out]})
df.to_csv('./data/chemical_group_%s.csv' % str(steps))
def load_species_groups():
df = pd.read_csv('./data/experiments.csv')
t = Graph()
t.load('./data/taxonomy_0.ttl',format='ttl')
species = set(df['species'])
for steps in range(0,20):
out_taxon = defaultdict(set)
out_division = defaultdict(set)
desendents = set()
for c in map(URIRef,species):
p = ' / '.join('<'+r+'>' for r in [RDF.type,*[RDFS.subClassOf]*steps])
qres = t.query(
"""SELECT DISTINCT ?parent
WHERE {
<%s> %s ?parent
}""" % (str(c),p))
for r in qres:
desendents.add((c,r[-1]))
for c,p in desendents:
if 'division' in str(p):
out_division[p].add(c)
else:
out_taxon[p].add(c)
df = pd.DataFrame(data={'parent':list(out_division.keys()),'children':[','.join(out_division[k]) for k in out_division]})
df.to_csv('./data/species_divisions.csv')
df = pd.DataFrame(data={'parent':list(out_taxon.keys()),'children':[','.join(out_taxon[k]) for k in out_taxon]})
df.to_csv('./data/species_groups_%s.csv' % str(steps))
def replace(graph,mapping):
for s,p,o in graph:
if s in mapping:
graph.remove((s,p,o))
graph.add((mapping[s],p,o))
if o in mapping:
graph.remove((s,p,o))
graph.add((s,p,mapping[o]))
if p in mapping:
graph.remove((s,p,o))
graph.add((s,mapping[p],o))
return graph
def load_chemical_graph():
df = pd.read_csv('./data/experiments.csv')
mapping = DownloadedWikidata(filename='./data/chembl_to_mesh.csv')
mapping.load()
mapping = {URIRef('http://id.nlm.nih.gov/mesh/'+i.pop(0)):URIRef('http://rdf.ebi.ac.uk/resource/chembl/molecule/'+k) for k,i in mapping.mappings.items()}
mesh_graph = Graph()
mesh_graph.parse('../mesh/mesh.nt',format='nt')
mesh_graph = replace(mesh_graph,mapping)
graph = Graph()
graph.parse('../chembl/chembl_26.0_molecule_chebi_ls.ttl',format='ttl')
mapping = defaultdict()
for c in df['chemical']:
c = URIRef(c)
m = list(graph.objects(subject=c,predicate=skos['exactMatch']))
if m:
mapping[c]=m.pop(0)
chebi_graph = Graph()
chebi_graph.parse('../chebi/chebi.ttl',format='ttl')
chebi_graph = replace(chebi_graph,{i:k for k,i in mapping.items()})
chembl_graph = Graph()
for f in [#'../chembl/chembl_26.0_molecule.ttl',
'../chembl/chembl_26.0_molhierarchy.ttl',
'../chembl/chembl_26.0_target.ttl',
'../chembl/chembl_26.0_targetrel.ttl',
'../chembl/chembl_26.0_moa.ttl']:
chembl_graph.parse(f,format='ttl')
for i,g in enumerate([mesh_graph,chebi_graph,chembl_graph]):
graph = get_subgraph(set([URIRef(a) for a in set(df['chemical'])]), g, backtracking=0)
graph.serialize('./data/chemicals_%s.ttl' % str(i),format='ttl')
def load_taxonomy_graph():
df = pd.read_csv('./data/experiments.csv')
t = Taxonomy(directory='../taxdump/', verbose=True, taxon_namespace='http://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=Info&id=')
ne = DownloadedWikidata(filename='./data/ncbi_to_eol.csv', verbose=False)
n = list(set(t.graph.subjects(predicate=t.namespace['rank'],
object=t.namespace['rank/species'])))
tr = Traits(directory='../eol/', verbose=True)
conv = ne.convert(n, strip=True)
converted = [(tr.namespace[i],k) for k,i in conv.items() if i != 'no mapping']
tr.replace(converted)
for i,g in enumerate([t.graph,tr.graph]):
tmp = set([URIRef(a) for a in set(df['species'])])
graph = get_subgraph(tmp, g, backtracking=0)
graph.serialize('./data/taxonomy_%s.ttl' % str(i),format='ttl')
if __name__ == '__main__':
#load_endpoint_data()
#fingerprints()
#chemical_features()
#load_species_groups()
#load_chemical_groups()
#load_chemical_graph()
load_taxonomy_graph()
| 10,807 | 30.510204 | 157 | py |
kge_ecotox_regression | kge_ecotox_regression-main/autoencoder.py |
from tensorflow.keras.layers import Dense, GaussianNoise, Input, LayerNormalization
from tensorflow.keras.models import Model
from tensorflow import keras
def create_auto_encoder(input_size, dense_layers = (10,), noise=0):
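    # Denoising autoencoder: optional Gaussian input noise, a stack of ReLU encoder
    # layers, a mirrored ReLU decoder and a sigmoid reconstruction of the input.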
autoencoder = keras.Sequential()
if noise > 0:
autoencoder.add(GaussianNoise(noise))
for l in dense_layers:
autoencoder.add(Dense(l,activation='relu'))
encoder = autoencoder
for l in dense_layers[::-1]:
autoencoder.add(Dense(l,activation='relu'))
autoencoder.add(Dense(input_size,activation='sigmoid'))
return encoder, autoencoder
| 613 | 33.111111 | 83 | py |
lepard | lepard-main/main.py | import os, torch, json, argparse, shutil
from easydict import EasyDict as edict
import yaml
from datasets.dataloader import get_dataloader, get_datasets
from models.pipeline import Pipeline
from lib.utils import setup_seed
from lib.tester import get_trainer
from models.loss import MatchMotionLoss
from lib.tictok import Timers
from configs.models import architectures
from torch import optim
setup_seed(0)
def join(loader, node):
seq = loader.construct_sequence(node)
return '_'.join([str(i) for i in seq])
yaml.add_constructor('!join', join)
if __name__ == '__main__':
# load configs
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str, help= 'Path to the config file.')
args = parser.parse_args()
with open(args.config,'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
config['snapshot_dir'] = 'snapshot/%s/%s' % (config['dataset']+config['folder'], config['exp_dir'])
config['tboard_dir'] = 'snapshot/%s/%s/tensorboard' % (config['dataset']+config['folder'], config['exp_dir'])
config['save_dir'] = 'snapshot/%s/%s/checkpoints' % (config['dataset']+config['folder'], config['exp_dir'])
config = edict(config)
os.makedirs(config.snapshot_dir, exist_ok=True)
os.makedirs(config.save_dir, exist_ok=True)
os.makedirs(config.tboard_dir, exist_ok=True)
if config.gpu_mode:
config.device = torch.device("cuda:0")
else:
config.device = torch.device('cpu')
    # back up the code for this experiment into the snapshot directory
if config.mode == 'train':
os.system(f'cp -r models {config.snapshot_dir}')
os.system(f'cp -r configs {config.snapshot_dir}')
os.system(f'cp -r cpp_wrappers {config.snapshot_dir}')
os.system(f'cp -r datasets {config.snapshot_dir}')
os.system(f'cp -r kernels {config.snapshot_dir}')
os.system(f'cp -r lib {config.snapshot_dir}')
shutil.copy2('main.py',config.snapshot_dir)
# model initialization
config.kpfcn_config.architecture = architectures[config.dataset]
config.model = Pipeline(config)
# config.model = KPFCNN(config)
# create optimizer
if config.optimizer == 'SGD':
config.optimizer = optim.SGD(
config.model.parameters(),
lr=config.lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
)
elif config.optimizer == 'ADAM':
config.optimizer = optim.Adam(
config.model.parameters(),
lr=config.lr,
betas=(0.9, 0.999),
weight_decay=config.weight_decay,
)
#create learning rate scheduler
if 'overfit' in config.exp_dir :
config.scheduler = optim.lr_scheduler.MultiStepLR(
config.optimizer,
milestones=[config.max_epoch-1], # fix lr during overfitting
gamma=0.1,
last_epoch=-1)
else:
config.scheduler = optim.lr_scheduler.ExponentialLR(
config.optimizer,
gamma=config.scheduler_gamma,
)
config.timers = Timers()
# create dataset and dataloader
train_set, val_set, test_set = get_datasets(config)
config.train_loader, neighborhood_limits = get_dataloader(train_set,config,shuffle=True)
config.val_loader, _ = get_dataloader(val_set, config, shuffle=False, neighborhood_limits=neighborhood_limits)
config.test_loader, _ = get_dataloader(test_set, config, shuffle=False, neighborhood_limits=neighborhood_limits)
# config.desc_loss = MetricLoss(config)
config.desc_loss = MatchMotionLoss (config['train_loss'])
trainer = get_trainer(config)
if(config.mode=='train'):
trainer.train()
else:
trainer.test()
| 3,723 | 32.54955 | 116 | py |
lepard | lepard-main/models/matching.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.position_encoding import VolumetricPositionEncoding as VolPE
def log_optimal_transport(scores, alpha, iters, src_mask, tgt_mask ):
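    # Differentiable optimal transport in log-space: the score matrix is padded with a
    # learned "dustbin" row/column for unmatched points and normalised with Sinkhorn
    # iterations (masked rows/columns are excluded from the marginals via ms/ns).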
b, m, n = scores.shape
if src_mask is None:
ms = m
ns = n
else :
ms = src_mask.sum(dim=1, keepdim=True)
ns = tgt_mask.sum(dim=1, keepdim=True)
bins0 = alpha.expand(b, m, 1)
bins1 = alpha.expand(b, 1, n)
alpha = alpha.expand(b, 1, 1)
Z = torch.cat([torch.cat([scores, bins0], -1),
torch.cat([bins1, alpha], -1)], 1)
norm = - (ms + ns).log() # [b, 1]
log_mu = torch.cat([norm .repeat(1, m), ns.log() + norm], dim=1)
log_nu = torch.cat([norm.repeat(1, n), ms.log() + norm], dim=1)
u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu)
for _ in range(iters):
u = log_mu - torch.logsumexp( Z + v.unsqueeze(1), dim=2)
v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1)
Z= Z + u.unsqueeze(2) + v.unsqueeze(1)
Z = Z - norm.view(-1,1,1)
return Z
class Matching(nn.Module):
def __init__(self, config):
super().__init__()
self.match_type = config['match_type']
self.confidence_threshold = config['confidence_threshold']
d_model = config['feature_dim']
self.src_proj = nn.Linear(d_model, d_model, bias=False)
self.tgt_proj = nn.Linear(d_model, d_model, bias=False)
self.entangled= config['entangled']
if self.match_type == "dual_softmax":
self.temperature = config['dsmax_temperature']
elif self.match_type == 'sinkhorn':
#sinkhorn algorithm
self.skh_init_bin_score = config['skh_init_bin_score']
self.skh_iters = config['skh_iters']
self.skh_prefilter = config['skh_prefilter']
self.bin_score = nn.Parameter(
torch.tensor( self.skh_init_bin_score, requires_grad=True))
else:
raise NotImplementedError()
@staticmethod
@torch.no_grad()
def get_match( conf_matrix, thr, mutual=True):
mask = conf_matrix > thr
#mutual nearest
if mutual:
mask = mask \
* (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
* (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
#find all valid coarse matches
index = (mask==True).nonzero()
b_ind, src_ind, tgt_ind = index[:,0], index[:,1], index[:,2]
mconf = conf_matrix[b_ind, src_ind, tgt_ind]
return index, mconf, mask
@staticmethod
@torch.no_grad()
def get_topk_match( conf_matrix, thr, mutual=True):
mask = conf_matrix > thr
#mutual nearest
if mutual:
mask = mask \
* (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
* (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
#find all valid coarse matches
index = (mask==True).nonzero()
b_ind, src_ind, tgt_ind = index[:,0], index[:,1], index[:,2]
mconf = conf_matrix[b_ind, src_ind, tgt_ind]
return index, mconf, mask
def forward(self, src_feats, tgt_feats, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type="rotary"):
'''
@param src_feats: [B, S, C]
@param tgt_feats: [B, T, C]
@param src_mask: [B, S]
@param tgt_mask: [B, T]
@return:
'''
src_feats = self.src_proj(src_feats)
        tgt_feats = self.tgt_proj(tgt_feats)
data["src_feats_nopos"] = src_feats
data["tgt_feats_nopos"] = tgt_feats
if not self.entangled :
src_feats = VolPE.embed_pos(pe_type, src_feats, src_pe)
tgt_feats = VolPE.embed_pos(pe_type, tgt_feats, tgt_pe)
data["src_feats"] = src_feats
data["tgt_feats"] = tgt_feats
src_feats, tgt_feats = map(lambda feat: feat / feat.shape[-1] ** .5,
[src_feats, tgt_feats])
if self.match_type == "dual_softmax":
# dual softmax matching
sim_matrix_1 = torch.einsum("bsc,btc->bst", src_feats, tgt_feats) / self.temperature
if src_mask is not None:
sim_matrix_2 = sim_matrix_1.clone()
sim_matrix_1.masked_fill_(~src_mask[:, :, None], float('-inf'))
sim_matrix_2.masked_fill_(~tgt_mask[:, None, :], float('-inf'))
conf_matrix = F.softmax(sim_matrix_1, 1) * F.softmax(sim_matrix_2, 2)
else :
conf_matrix = F.softmax(sim_matrix_1, 1) * F.softmax(sim_matrix_1, 2)
elif self.match_type == "sinkhorn" :
#optimal transport sinkhoron
sim_matrix = torch.einsum("bsc,btc->bst", src_feats, tgt_feats)
if src_mask is not None:
sim_matrix.masked_fill_(
~(src_mask[..., None] * tgt_mask[:, None]).bool(), float('-inf'))
log_assign_matrix = log_optimal_transport( sim_matrix, self.bin_score, self.skh_iters, src_mask, tgt_mask)
assign_matrix = log_assign_matrix.exp()
conf_matrix = assign_matrix[:, :-1, :-1].contiguous()
coarse_match, _, _ = self.get_match(conf_matrix, self.confidence_threshold)
return conf_matrix, coarse_match
| 5,412 | 29.931429 | 118 | py |
lepard | lepard-main/models/loss.py | import torch
import torch.nn as nn
import numpy as np
import open3d as o3d
from lib.benchmark_utils import to_o3d_pcd
from lib.visualization import *
import nibabel.quaternions as nq
from sklearn.metrics import precision_recall_fscore_support
from datasets.utils import blend_scene_flow, multual_nn_correspondence, knn_point_np
from models.matching import Matching as CM
def ransac_pose_estimation(src_pcd, tgt_pcd, corrs, distance_threshold=0.05, ransac_n=3):
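    # Estimate a rigid transform from putative correspondences with Open3D's
    # correspondence-based RANSAC (legacy `o3d.registration` API).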
src_pcd = to_o3d_pcd(src_pcd)
tgt_pcd = to_o3d_pcd(tgt_pcd)
corrs = o3d.utility.Vector2iVector(np.array(corrs).T)
result_ransac = o3d.registration.registration_ransac_based_on_correspondence(
source=src_pcd, target=tgt_pcd, corres=corrs,
max_correspondence_distance=distance_threshold,
estimation_method=o3d.registration.TransformationEstimationPointToPoint(False),
ransac_n=ransac_n,
criteria=o3d.registration.RANSACConvergenceCriteria(50000, 1000))
return result_ransac.transformation
def computeTransformationErr(trans, info):
"""
    Compute the transformation error as an approximation of the RMSE of corresponding points.
    More information at http://redwood-data.org/indoor/registration.html
    Args:
        trans (numpy array): transformation matrices [n,4,4]
        info (numpy array): covariance matrices of the gt transformation parameters [n,4,4]
Returns:
p (float): transformation error
"""
t = trans[:3, 3]
r = trans[:3, :3]
q = nq.mat2quat(r)
er = np.concatenate([t, q[1:]], axis=0)
p = er.reshape(1, 6) @ info @ er.reshape(6, 1) / info[0, 0]
return p.item()
class MatchMotionLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.focal_alpha = config['focal_alpha']
self.focal_gamma = config['focal_gamma']
self.pos_w = config['pos_weight']
self.neg_w = config['neg_weight']
self.mot_w = config['motion_weight']
self.mat_w = config['match_weight']
self.motion_loss_type = config['motion_loss_type']
self.match_type = config['match_type']
self.positioning_type = config['positioning_type']
self.registration_threshold = config['registration_threshold']
self.confidence_threshold_metric = config['confidence_threshold_metric']
self.inlier_thr = config['inlier_thr']
self.fmr_thr = config['fmr_thr']
self.mutual_nearest = config['mutual_nearest']
self.dataset = config['dataset']
def forward(self, data):
loss_info = {}
loss = self.ge_coarse_loss(data, loss_info)
loss_info.update({ 'loss': loss })
return loss_info
def ge_coarse_loss(self, data, loss_info, eval_metric=False):
if self.dataset == "4dmatch":
s2t_flow = torch.zeros_like(data['s_pcd'])
for i, cflow in enumerate(data['coarse_flow']):
s2t_flow[i][: len(cflow)] = cflow
loss = 0.
src_mask = data['src_mask']
tgt_mask = data['tgt_mask']
conf_matrix_pred = data['conf_matrix_pred']
match_gt = data['coarse_matches']
R_s2t_gt = data['batched_rot']
t_s2t_gt = data['batched_trn']
#get the overlap mask, for dense motion loss
s_overlap_mask = torch.zeros_like(src_mask).bool()
for bi, corr in enumerate (match_gt):
s_overlap_mask[bi][ corr[0] ] = True
# compute focal loss
c_weight = (src_mask[:, :, None] * tgt_mask[:, None, :]).float()
conf_matrix_gt = self.match_2_conf_matrix(match_gt, conf_matrix_pred)
data['conf_matrix_gt'] = conf_matrix_gt
focal_coarse = self.compute_correspondence_loss(conf_matrix_pred, conf_matrix_gt, weight=c_weight)
recall, precision = self.compute_match_recall( conf_matrix_gt, data['coarse_match_pred'])
loss_info.update( { "focal_coarse": focal_coarse, "recall_coarse": recall, "precision_coarse": precision } )
loss = loss + self.mat_w * focal_coarse
if recall > 0.01 and self.mot_w > 0:
R_s2t_pred = data["R_s2t_pred"]
t_s2t_pred = data["t_s2t_pred"]
            # compute predicted flow; note that for 4DMatch, R_pred/t_pred give the best rigid fit of the deformation
src_pcd_wrapped_pred = (torch.matmul(R_s2t_pred, data['s_pcd'].transpose(1, 2)) + t_s2t_pred).transpose(1, 2)
sflow_pred = src_pcd_wrapped_pred - data['s_pcd']
if self.dataset == '4dmatch':
spcd_deformed = data['s_pcd'] + s2t_flow
src_pcd_wrapped_gt = (torch.matmul(R_s2t_gt, spcd_deformed.transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
else : # 3dmatch
src_pcd_wrapped_gt = (torch.matmul(R_s2t_gt, data['s_pcd'].transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
sflow_gt = src_pcd_wrapped_gt - data['s_pcd']
e1 = torch.sum(torch.abs(sflow_pred - sflow_gt), 2)
e1 = e1[s_overlap_mask] # [data['src_mask']]
l1_loss = torch.mean(e1)
loss = loss + self.mot_w * l1_loss
#
# if eval_metric :
#
# match_pred, _, _ = CM.get_match(data['conf_matrix_pred'], thr=self.confidence_threshold_metric, mutual=self.mutual_nearest)
#
# '''Inlier Ratio (IR)'''
# ir = self.compute_inlier_ratio(match_pred, data, self.inlier_thr,
# s2t_flow=s2t_flow if self.dataset == "4dmatch" else None)
# loss_info.update({"Inlier Ratio": ir.mean()})
#
# if self.dataset == '3dmatch':
#
# '''Feature Matching Recall (FMR)'''
# fmr = (ir > self.fmr_thr).float().sum() / len(ir)
# loss_info.update({"Feature Matching Recall": fmr})
#
# '''Registration Recall (RR)'''
# rot_, trn_ = self.ransac_regist_coarse(data['s_pcd'], data['t_pcd'], src_mask, tgt_mask , match_pred)
# rot, trn = rot_.to(data['s_pcd']) , trn_.to(data['s_pcd'])
# rr = self.compute_registration_recall(rot, trn, data, self.registration_threshold)
# loss_info.update({'Registration_Recall': rr})
if self.positioning_type == "procrustes":
for layer_ind in data["position_layers"]:
# compute focal loss
rpe_conf_matrix = data["position_layers"][layer_ind]["conf_matrix"]
focal_rpe = self.compute_correspondence_loss(rpe_conf_matrix, conf_matrix_gt, weight=c_weight)
recall, precision = self.compute_match_recall(conf_matrix_gt,
data["position_layers"][layer_ind]['match_pred'])
# loss_info.update({'focal_layer_%d' % layer_ind: focal_rpe, 'recall_layer_%d' % layer_ind: recall,
# 'precision_layer_%d' % layer_ind: precision})
loss = loss + self.mat_w * focal_rpe
if recall >0.01 and self.mot_w > 0:
R_s2t_pred = data["position_layers"][layer_ind]["R_s2t_pred"]
t_s2t_pred = data["position_layers"][layer_ind]["t_s2t_pred"]
src_pcd_wrapped_pred = (torch.matmul(R_s2t_pred, data['s_pcd'].transpose(1, 2)) + t_s2t_pred).transpose(1, 2)
sflow_pred = src_pcd_wrapped_pred - data['s_pcd']
if self.dataset == '4dmatch':
spcd_deformed = data['s_pcd'] + s2t_flow
src_pcd_wrapped_gt = ( torch.matmul(R_s2t_gt, spcd_deformed.transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
else: # 3dmatch
src_pcd_wrapped_gt = ( torch.matmul(R_s2t_gt, data['s_pcd'].transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
sflow_gt = src_pcd_wrapped_gt - data['s_pcd']
e1 = torch.sum(torch.abs(sflow_pred - sflow_gt), 2) #[data['src_mask']]
e1 = e1[s_overlap_mask] # [data['src_mask']]
l1_loss = torch.mean(e1)
loss = loss + self.mot_w * l1_loss
return loss
@staticmethod
def compute_nrfmr(match_pred, data, recall_thr=0.04):
s_pcd, t_pcd = data['s_pcd'], data['t_pcd']
s_pcd_raw = data['src_pcd_list']
sflow_list = data['sflow_list']
metric_index_list = data['metric_index_list']
batched_rot = data['batched_rot'] # B,3,3
batched_trn = data['batched_trn']
nrfmr = 0.
for i in range(len(s_pcd_raw)):
# use the match prediction as the motion anchor
match_pred_i = match_pred[match_pred[:, 0] == i]
s_id, t_id = match_pred_i[:, 1], match_pred_i[:, 2]
s_pcd_matched = s_pcd[i][s_id]
t_pcd_matched = t_pcd[i][t_id]
motion_pred = t_pcd_matched - s_pcd_matched
if len(s_pcd_matched) >= 3 :
# get the wrapped metric points
metric_index = metric_index_list[i]
sflow = sflow_list[i]
s_pcd_raw_i = s_pcd_raw[i]
metric_pcd = s_pcd_raw_i[metric_index]
metric_sflow = sflow[metric_index]
metric_pcd_deformed = metric_pcd + metric_sflow
metric_pcd_wrapped_gt = (torch.matmul(batched_rot[i], metric_pcd_deformed.T) + batched_trn[i]).T
# blend the motion for metric points
try:
metric_motion_pred, valid_mask = MatchMotionLoss.blend_anchor_motion(
metric_pcd.cpu().numpy(), s_pcd_matched.cpu().numpy(), motion_pred.cpu().numpy(), knn=3,
search_radius=0.1)
metric_pcd_wrapped_pred = metric_pcd + torch.from_numpy(metric_motion_pred).to(metric_pcd)
dist = torch.sqrt(torch.sum((metric_pcd_wrapped_pred - metric_pcd_wrapped_gt) ** 2, dim=1))
r = (dist < recall_thr).float().sum() / len(dist)
except :
r = 0
nrfmr = nrfmr + r
debug = False
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.013
metric_pcd_wrapped_gt = metric_pcd_wrapped_gt.cpu()
metric_pcd_wrapped_pred = metric_pcd_wrapped_pred.cpu()
err = metric_pcd_wrapped_pred - metric_pcd_wrapped_gt
mlab.points3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2],
scale_factor=scale_factor, color=c_pink)
mlab.points3d(metric_pcd_wrapped_pred[:, 0], metric_pcd_wrapped_pred[:, 1],
metric_pcd_wrapped_pred[:, 2], scale_factor=scale_factor, color=c_blue)
mlab.quiver3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2],
err[:, 0], err[:, 1], err[:, 2],
scale_factor=1, mode='2ddash', line_width=1.)
mlab.show()
nrfmr = nrfmr / len(s_pcd_raw)
return nrfmr
@staticmethod
def blend_anchor_motion(query_loc, reference_loc, reference_flow, knn=3, search_radius=0.1):
'''approximate flow on query points
        this function assumes query points are sub- or un-sampled from reference locations
@param query_loc:[m,3]
@param reference_loc:[n,3]
@param reference_flow:[n,3]
@param knn:
@return:
blended_flow:[m,3]
'''
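        # Descriptive note: the flow at each query point is the inverse-distance-weighted
        # average of the flows of its `knn` nearest reference points; neighbors farther than
        # `search_radius` get their distance set to 1e+10 so their weight is ~0, and the
        # returned mask is True for queries with at least one in-range neighbor.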
dists, idx = knn_point_np(knn, reference_loc, query_loc)
dists[dists < 1e-10] = 1e-10
mask = dists > search_radius
dists[mask] = 1e+10
weight = 1.0 / dists
weight = weight / np.sum(weight, -1, keepdims=True) # [B,N,3]
blended_flow = np.sum(reference_flow[idx] * weight.reshape([-1, knn, 1]), axis=1, keepdims=False)
mask = mask.sum(axis=1) < 3
return blended_flow, mask
def compute_correspondence_loss(self, conf, conf_gt, weight=None):
'''
@param conf: [B, L, S]
@param conf_gt: [B, L, S]
@param weight: [B, L, S]
@return:
'''
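        # Descriptive note: both branches below are focal-loss terms,
        #   positives: -alpha * (1 - p)^gamma * log(p)
        #   negatives: -alpha * p^gamma * log(1 - p)
        # with p clamped to [1e-6, 1 - 1e-6]; the dual_softmax branch supervises only the
        # positive entries, while the sinkhorn branch also penalises negatives.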
pos_mask = conf_gt == 1
neg_mask = conf_gt == 0
pos_w, neg_w = self.pos_w, self.neg_w
        # corner case: if either mask is empty, assign one dummy entry with zero weight
if not pos_mask.any():
pos_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
pos_w = 0.
if not neg_mask.any():
neg_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
neg_w = 0.
# focal loss
conf = torch.clamp(conf, 1e-6, 1 - 1e-6)
alpha = self.focal_alpha
gamma = self.focal_gamma
if self.match_type == "dual_softmax":
pos_conf = conf[pos_mask]
loss_pos = - alpha * torch.pow(1 - pos_conf, gamma) * pos_conf.log()
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss = pos_w * loss_pos.mean()
return loss
elif self.match_type == "sinkhorn":
# no supervision on dustbin row & column.
loss_pos = - alpha * torch.pow(1 - conf[pos_mask], gamma) * (conf[pos_mask]).log()
loss_neg = - alpha * torch.pow(conf[neg_mask], gamma) * (1 - conf[neg_mask]).log()
loss = pos_w * loss_pos.mean() + neg_w * loss_neg.mean()
return loss
def match_2_conf_matrix(self, matches_gt, matrix_pred):
matrix_gt = torch.zeros_like(matrix_pred)
for b, match in enumerate (matches_gt) :
matrix_gt [ b][ match[0], match[1] ] = 1
return matrix_gt
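    # Shape note: `matches_gt` is a per-batch list of index pairs (match[0] = source indices,
    # match[1] = target indices); the returned matrix has a 1 at every ground-truth
    # (source, target) cell and 0 elsewhere, with the same shape as `matrix_pred`.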
@staticmethod
def compute_match_recall(conf_matrix_gt, match_pred) : #, s_pcd, t_pcd, search_radius=0.3):
'''
@param conf_matrix_gt:
@param match_pred:
@return:
'''
pred_matrix = torch.zeros_like(conf_matrix_gt)
b_ind, src_ind, tgt_ind = match_pred[:, 0], match_pred[:, 1], match_pred[:, 2]
pred_matrix[b_ind, src_ind, tgt_ind] = 1.
true_positive = (pred_matrix == conf_matrix_gt) * conf_matrix_gt
recall = true_positive.sum() / conf_matrix_gt.sum()
precision = true_positive.sum() / max(len(match_pred), 1)
return recall, precision
@staticmethod
def ransac_regist_coarse(batched_src_pcd, batched_tgt_pcd, src_mask, tgt_mask, match_pred ):
s_len = src_mask.sum(dim=1).int()
t_len = tgt_mask.sum(dim=1).int()
bsize = len(batched_src_pcd)
batched_src_pcd = MatchMotionLoss.tensor2numpy( batched_src_pcd)
batched_tgt_pcd = MatchMotionLoss.tensor2numpy( batched_tgt_pcd)
match_pred = MatchMotionLoss.tensor2numpy(match_pred)
rot = []
trn = []
for i in range(bsize):
s_pcd = batched_src_pcd[i][:s_len[i]]
t_pcd = batched_tgt_pcd[i][:t_len[i]]
pair_i = match_pred[:, 0] == i
n_pts = pair_i.sum()
if n_pts < 3 :
rot.append(torch.eye(3))
trn.append(torch.zeros((3,1)))
continue
ind = match_pred[pair_i]
s_ind, t_ind = ind[:, 1], ind[:, 2]
pose = ransac_pose_estimation(s_pcd, t_pcd, [s_ind, t_ind], distance_threshold=0.05)
pose = pose.copy()
rot.append(torch.from_numpy(pose[:3,:3]))
trn.append(torch.from_numpy(pose[:3,3:]))
return torch.stack(rot, dim=0 ), torch.stack(trn , dim=0)#ndarray
@staticmethod
def compute_inlier_ratio(match_pred, data, inlier_thr, s2t_flow=None):
s_pcd, t_pcd = data['s_pcd'], data['t_pcd'] #B,N,3
batched_rot = data['batched_rot'] #B,3,3
batched_trn = data['batched_trn']
if s2t_flow is not None: # 4dmatch
s_pcd_deformed = s_pcd + s2t_flow
s_pcd_wrapped = (torch.matmul(batched_rot, s_pcd_deformed.transpose(1, 2)) + batched_trn).transpose(1,2)
else: # 3dmatch
s_pcd_wrapped = (torch.matmul(batched_rot, s_pcd.transpose(1, 2)) + batched_trn).transpose(1,2)
s_pcd_matched = s_pcd_wrapped [match_pred[:,0], match_pred[:,1]]
t_pcd_matched = t_pcd [match_pred[:,0], match_pred[:,2]]
inlier = torch.sum( (s_pcd_matched - t_pcd_matched)**2 , dim= 1) < inlier_thr**2
bsize = len(s_pcd)
IR=[]
for i in range(bsize):
pair_i = match_pred[:, 0] == i
n_match = pair_i.sum()
inlier_i = inlier[pair_i]
n_inlier = inlier_i.sum().float()
if n_match <3:
IR.append( n_match.float()*0)
else :
IR.append(n_inlier/n_match)
return torch.stack(IR, dim=0)
@staticmethod
def compute_registration_recall(R_est, t_est, data, thr=0.2):
bs = len(R_est)
success = 0.
if data['gt_cov'] is not None:
err2 = thr ** 2
gt = np.zeros( (bs, 4, 4))
gt[:, -1,-1] = 1
gt[:, :3, :3] = data['batched_rot'].cpu().numpy()
gt[:, :3, 3:] = data['batched_trn'].cpu().numpy()
pred = np.zeros((bs, 4, 4))
pred[:, -1, -1] = 1
pred[:, :3, :3] = R_est.detach().cpu().numpy()
pred[:, :3, 3:] = t_est.detach().cpu().numpy()
for i in range(bs):
p = computeTransformationErr( np.linalg.inv(gt[i]) @ pred[i], data['gt_cov'][i])
if p <= err2:
success += 1
rr = success / bs
return rr
else :
return 0.
@staticmethod
def tensor2numpy(tensor):
if tensor.requires_grad:
tensor=tensor.detach()
return tensor.cpu().numpy() | 18,271 | 38.042735 | 137 | py |
lepard | lepard-main/models/position_encoding.py | import math
import torch
from torch import nn
class VolumetricPositionEncoding(nn.Module):
def __init__(self, config):
super().__init__()
self.feature_dim = config.feature_dim
self.vol_bnds = config.vol_bnds
self.voxel_size = config.voxel_size
self.vol_origin = self.vol_bnds[0]
self.pe_type = config.pe_type
def voxelize(self, xyz):
'''
@param xyz: B,N,3
@return: B,N,3
'''
if type ( self.vol_origin ) == list :
self.vol_origin = torch.FloatTensor(self.vol_origin ).view(1, 1, -1).to( xyz.device )
return (xyz - self.vol_origin) / self.voxel_size
@staticmethod
def embed_rotary(x, cos, sin):
'''
@param x: [B,N,d]
@param cos: [B,N,d] [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
@param sin: [B,N,d] [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
@return:
'''
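        # Descriptive note: consecutive channel pairs (x_{2i}, x_{2i+1}) are treated as 2-D
        # vectors and rotated by the angle encoded in (cos, sin):
        #   [x_{2i}, x_{2i+1}] -> [x_{2i}*cos - x_{2i+1}*sin, x_{2i+1}*cos + x_{2i}*sin]
        # i.e. the rotary position embedding (RoPE) construction.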
x2 = torch.stack([-x[..., 1::2], x[..., ::2]], dim=-1).reshape_as(x).contiguous()
x = x * cos + x2 * sin
return x
@staticmethod
def embed_pos(pe_type, x, pe):
""" combine feature and position code
"""
if pe_type == 'rotary':
return VolumetricPositionEncoding.embed_rotary(x, pe[..., 0], pe[..., 1])
elif pe_type == 'sinusoidal':
return x + pe
else:
raise KeyError()
def forward(self, XYZ):
'''
@param XYZ: [B,N,3]
@return:
'''
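        # Descriptive note: points are first converted to voxel coordinates, then each of the
        # x/y/z axes gets feature_dim/3 channels of sin/cos codes at geometrically spaced
        # frequencies (the Transformer sinusoidal scheme applied per axis); for the 'rotary'
        # type the codes are returned as stacked (cos, sin) pairs instead of being added.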
bsize, npoint, _ = XYZ.shape
vox = self.voxelize( XYZ)
x_position, y_position, z_position = vox[..., 0:1], vox[...,1:2], vox[...,2:3]
div_term = torch.exp( torch.arange(0, self.feature_dim // 3, 2, dtype=torch.float, device=XYZ.device) * (-math.log(10000.0) / (self.feature_dim // 3)))
div_term = div_term.view( 1,1, -1) # [1, 1, d//6]
sinx = torch.sin(x_position * div_term) # [B, N, d//6]
cosx = torch.cos(x_position * div_term)
siny = torch.sin(y_position * div_term)
cosy = torch.cos(y_position * div_term)
sinz = torch.sin(z_position * div_term)
cosz = torch.cos(z_position * div_term)
if self.pe_type == 'sinusoidal' :
position_code = torch.cat( [ sinx, cosx, siny, cosy, sinz, cosz] , dim=-1 )
elif self.pe_type == "rotary" :
# sin/cos [θ0,θ1,θ2......θd/6-1] -> sin/cos [θ0,θ0,θ1,θ1,θ2,θ2......θd/6-1,θd/6-1]
sinx, cosx, siny, cosy, sinz, cosz = map( lambda feat:torch.stack([feat, feat], dim=-1).view(bsize, npoint, -1),
[ sinx, cosx, siny, cosy, sinz, cosz] )
sin_pos = torch.cat([sinx,siny,sinz], dim=-1)
cos_pos = torch.cat([cosx,cosy,cosz], dim=-1)
position_code = torch.stack( [cos_pos, sin_pos] , dim=-1)
else:
raise KeyError()
if position_code.requires_grad:
position_code = position_code.detach()
return position_code | 2,989 | 33.367816 | 160 | py |
lepard | lepard-main/models/backbone.py | from models.blocks import *
import torch.nn.functional as F
import numpy as np
class KPFCN(nn.Module):
def __init__(self, config):
super(KPFCN, self).__init__()
############
# Parameters
############
layer = 0
r = config.first_subsampling_dl * config.conv_radius
in_dim = config.in_feats_dim
out_dim = config.first_feats_dim
#####################
# List Encoder blocks
#####################
self.encoder_blocks = nn.ModuleList()
self.encoder_skip_dims = []
self.encoder_skips = []
# Loop over consecutive blocks
for block_i, block in enumerate(config.architecture):
# Check equivariance
if ('equivariant' in block) and (not out_dim % 3 == 0):
                raise ValueError('Equivariant block but features dimension is not a multiple of 3')
# Detect change to next layer for skip connection
if np.any([tmp in block for tmp in ['pool', 'strided', 'upsample', 'global']]):
self.encoder_skips.append(block_i)
self.encoder_skip_dims.append(in_dim)
# Detect upsampling block to stop
if 'upsample' in block:
break
            # Instantiate the block module for this layer
self.encoder_blocks.append(block_decider(block,
r,
in_dim,
out_dim,
layer,
config))
# Update dimension of input from output
if 'simple' in block:
in_dim = out_dim // 2
else:
in_dim = out_dim
# Detect change to a subsampled layer
if 'pool' in block or 'strided' in block:
# Update radius and feature dimension for next layer
layer += 1
r *= 2
out_dim *= 2
#####################
# bottleneck output & input layer
self.coarse_out = nn.Conv1d(in_dim//2, config.coarse_feature_dim, kernel_size=1, bias=True)
coarse_in_dim = config.coarse_feature_dim
self.coarse_in = nn.Conv1d(coarse_in_dim, in_dim//2, kernel_size=1, bias=True)
#####################
# List Decoder blocks
#####################
# Save all block operations in a list of modules
self.decoder_blocks = nn.ModuleList()
self.decoder_concats = []
# Find first upsampling block
start_i = 0
for block_i, block in enumerate(config.architecture):
if 'upsample' in block:
start_i = block_i
break
# Loop over consecutive blocks
for block_i, block in enumerate(config.architecture[start_i:]):
# Add dimension of skip connection concat
if block_i > 0 and 'upsample' in config.architecture[start_i + block_i - 1]:
in_dim += self.encoder_skip_dims[layer]
self.decoder_concats.append(block_i)
            # Instantiate the block module for this layer
self.decoder_blocks.append(block_decider(block,
r,
in_dim,
out_dim,
layer,
config))
# Update dimension of input from output
in_dim = out_dim
# Detect change to a subsampled layer
if 'upsample' in block:
# Update radius and feature dimension for next layer
layer -= 1
r *= 0.5
out_dim = out_dim // 2
#####################
# fine output layer
#####################
fine_feature_dim = config.fine_feature_dim
self.fine_out = nn.Conv1d(out_dim, fine_feature_dim, kernel_size=1, bias=True)
def forward(self, batch, phase = 'encode'):
# Get input features
if phase == 'coarse' :
x = batch['features'].clone().detach()
# 1. joint encoder part
self.skip_x = []
for block_i, block_op in enumerate(self.encoder_blocks):
if block_i in self.encoder_skips:
self.skip_x.append(x)
x = block_op(x, batch) # [N,C]
for block_i, block_op in enumerate(self.decoder_blocks):
if block_i in self.decoder_concats:
x = torch.cat([x, self.skip_x.pop()], dim=1)
x = block_op(x, batch)
if block_i == 1 :
coarse_feats = x.transpose(0,1).unsqueeze(0) #[B, C, N]
coarse_feats = self.coarse_out(coarse_feats) #[B, C, N]
coarse_feats = coarse_feats.transpose(1,2).squeeze(0)
return coarse_feats #[N,C2]
#
# elif phase == "fine":
#
# coarse_feats = batch['coarse_feats']
# coarse_feats = coarse_feats.transpose(0,1).unsqueeze(0)
# coarse_feats = self.coarse_in(coarse_feats)
# x = coarse_feats.transpose(1,2).squeeze(0)
#
#
# for block_i, block_op in enumerate(self.decoder_blocks):
# if block_i > 1 :
# if block_i in self.decoder_concats:
# x = torch.cat([x, self.skip_x.pop()], dim=1)
# x = block_op(x, batch)
#
# fine_feats = x.transpose(0, 1).unsqueeze(0) # [1, C, N]
# fine_feats = self.fine_out(fine_feats) # [1, C, N]
# fine_feats = fine_feats.transpose(1, 2).squeeze(0)
#
# return fine_feats | 6,033 | 36.018405 | 100 | py |
lepard | lepard-main/models/transformer.py | import copy
import math
import torch
from torch import nn
from torch.nn import Module, Dropout
from models.position_encoding import VolumetricPositionEncoding as VolPE
from models.matching import Matching
from models.procrustes import SoftProcrustesLayer
import numpy as np
import random
from scipy.spatial.transform import Rotation
class GeometryAttentionLayer(nn.Module):
def __init__(self, config):
super(GeometryAttentionLayer, self).__init__()
d_model = config['feature_dim']
nhead = config['n_head']
self.dim = d_model // nhead
self.nhead = nhead
self.pe_type = config['pe_type']
# multi-head attention
self.q_proj = nn.Linear(d_model, d_model, bias=False)
self.k_proj = nn.Linear(d_model, d_model, bias=False)
self.v_proj = nn.Linear(d_model, d_model, bias=False)
# self.attention = Attention() #LinearAttention() if attention == 'linear' else FullAttention()
self.merge = nn.Linear(d_model, d_model, bias=False)
# feed-forward network
self.mlp = nn.Sequential(
nn.Linear(d_model*2, d_model*2, bias=False),
nn.ReLU(True),
nn.Linear(d_model*2, d_model, bias=False),
)
# norm and dropout
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, x, source, x_pe, source_pe, x_mask=None, source_mask=None):
bs = x.size(0)
q, k, v = x, source, source
qp, kvp = x_pe, source_pe
q_mask, kv_mask = x_mask, source_mask
if self.pe_type == 'sinusoidal':
#w(x+p), attention is all you need : https://arxiv.org/abs/1706.03762
            if qp is not None:  # disentangled
q = q + qp
k = k + kvp
qw = self.q_proj(q).view(bs, -1, self.nhead, self.dim) # [N, L, (H, D)]
kw = self.k_proj(k).view(bs, -1, self.nhead, self.dim) # [N, S, (H, D)]
vw = self.v_proj(v).view(bs, -1, self.nhead, self.dim)
elif self.pe_type == 'rotary':
#Rwx roformer : https://arxiv.org/abs/2104.09864
qw = self.q_proj(q)
kw = self.k_proj(k)
vw = self.v_proj(v)
            if qp is not None:  # disentangled
q_cos, q_sin = qp[...,0] ,qp[...,1]
k_cos, k_sin = kvp[...,0],kvp[...,1]
qw = VolPE.embed_rotary(qw, q_cos, q_sin)
kw = VolPE.embed_rotary(kw, k_cos, k_sin)
qw = qw.view(bs, -1, self.nhead, self.dim)
kw = kw.view(bs, -1, self.nhead, self.dim)
vw = vw.view(bs, -1, self.nhead, self.dim)
else:
raise KeyError()
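        # Descriptive note: with rotary codes, rotating q and k each by their own position
        # makes the q.k dot product depend only on the relative position of the two points
        # (the RoPE property), so no explicit relative-position term is needed.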
# attention
a = torch.einsum("nlhd,nshd->nlsh", qw, kw)
if kv_mask is not None:
a.masked_fill_( q_mask[:, :, None, None] * (~kv_mask[:, None, :, None]), float('-inf'))
a = a / qw.size(3) **0.5
a = torch.softmax(a, dim=2)
o = torch.einsum("nlsh,nshd->nlhd", a, vw).contiguous() # [N, L, (H, D)]
message = self.merge(o.view(bs, -1, self.nhead*self.dim)) # [N, L, C]
message = self.norm1(message)
# feed-forward network
message = self.mlp(torch.cat([x, message], dim=2))
message = self.norm2(message)
e = x + message
return e
class RepositioningTransformer(nn.Module):
def __init__(self, config):
super(RepositioningTransformer, self).__init__()
self.d_model = config['feature_dim']
self.nhead = config['n_head']
self.layer_types = config['layer_types']
self.positioning_type = config['positioning_type']
self.pe_type =config['pe_type']
self.entangled= config['entangled']
self.positional_encoding = VolPE(config)
encoder_layer = GeometryAttentionLayer (config)
self.layers = nn.ModuleList()
for l_type in self.layer_types:
if l_type in ['self','cross']:
self.layers.append( copy.deepcopy(encoder_layer))
elif l_type == "positioning":
if self.positioning_type == 'procrustes':
positioning_layer = nn.ModuleList()
positioning_layer.append( Matching(config['feature_matching']))
positioning_layer.append( SoftProcrustesLayer(config['procrustes']) )
self.layers.append(positioning_layer)
elif self.positioning_type in ['oracle', 'randSO3']:
self.layers.append( None)
else :
raise KeyError(self.positioning_type + " undefined positional encoding type")
else:
raise KeyError()
self._reset_parameters()
def forward(self, src_feat, tgt_feat, s_pcd, t_pcd, src_mask, tgt_mask, data, T = None, timers = None):
self.timers = timers
assert self.d_model == src_feat.size(2), "the feature number of src and transformer must be equal"
if T is not None:
R, t = T
src_pcd_wrapped = (torch.matmul(R, s_pcd.transpose(1, 2)) + t).transpose(1, 2)
tgt_pcd_wrapped = t_pcd
else:
src_pcd_wrapped = s_pcd
tgt_pcd_wrapped = t_pcd
src_pe = self.positional_encoding( src_pcd_wrapped)
tgt_pe = self.positional_encoding( tgt_pcd_wrapped)
if not self.entangled:
position_layer = 0
data.update({"position_layers":{}})
for layer, name in zip(self.layers, self.layer_types) :
if name == 'self':
if self.timers: self.timers.tic('self atten')
src_feat = layer(src_feat, src_feat, src_pe, src_pe, src_mask, src_mask,)
tgt_feat = layer(tgt_feat, tgt_feat, tgt_pe, tgt_pe, tgt_mask, tgt_mask)
if self.timers: self.timers.toc('self atten')
elif name == 'cross':
if self.timers: self.timers.tic('cross atten')
src_feat = layer(src_feat, tgt_feat, src_pe, tgt_pe, src_mask, tgt_mask)
tgt_feat = layer(tgt_feat, src_feat, tgt_pe, src_pe, tgt_mask, src_mask)
if self.timers: self.timers.toc('cross atten')
elif name =='positioning':
if self.positioning_type == 'procrustes':
conf_matrix, match_pred = layer[0](src_feat, tgt_feat, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type=self.pe_type)
position_layer += 1
data["position_layers"][position_layer] = {"conf_matrix": conf_matrix, "match_pred": match_pred}
if self.timers: self.timers.tic('procrustes_layer')
R, t, R_forwd, t_forwd, condition, solution_mask = layer[1] (conf_matrix, s_pcd, t_pcd, src_mask, tgt_mask)
if self.timers: self.timers.toc('procrustes_layer')
data["position_layers"][position_layer].update({
"R_s2t_pred": R,"t_s2t_pred": t, "solution_mask": solution_mask, "condition": condition})
src_pcd_wrapped = (torch.matmul(R_forwd, s_pcd.transpose(1, 2)) + t_forwd).transpose(1, 2)
tgt_pcd_wrapped = t_pcd
src_pe = self.positional_encoding(src_pcd_wrapped)
tgt_pe = self.positional_encoding(tgt_pcd_wrapped)
elif self.positioning_type == 'randSO3':
src_pcd_wrapped = self.rand_rot_pcd( s_pcd, src_mask)
tgt_pcd_wrapped = t_pcd
src_pe = self.positional_encoding(src_pcd_wrapped)
tgt_pe = self.positional_encoding(tgt_pcd_wrapped)
elif self.positioning_type == 'oracle':
#Note R,t ground truth is only available for computing oracle position encoding
rot_gt = data['batched_rot']
trn_gt = data['batched_trn']
src_pcd_wrapped = (torch.matmul(rot_gt, s_pcd.transpose(1, 2)) + trn_gt).transpose(1, 2)
tgt_pcd_wrapped = t_pcd
src_pe = self.positional_encoding(src_pcd_wrapped)
tgt_pe = self.positional_encoding(tgt_pcd_wrapped)
else:
raise KeyError(self.positioning_type + " undefined positional encoding type")
else :
raise KeyError
return src_feat, tgt_feat, src_pe, tgt_pe
        else :   # pos. fea. entangled
position_layer = 0
data.update({"position_layers":{}})
src_feat = VolPE.embed_pos(self.pe_type, src_feat, src_pe)
tgt_feat = VolPE.embed_pos(self.pe_type, tgt_feat, tgt_pe)
for layer, name in zip(self.layers, self.layer_types):
if name == 'self':
if self.timers: self.timers.tic('self atten')
src_feat = layer(src_feat, src_feat, None, None, src_mask, src_mask, )
tgt_feat = layer(tgt_feat, tgt_feat, None, None, tgt_mask, tgt_mask)
if self.timers: self.timers.toc('self atten')
elif name == 'cross':
if self.timers: self.timers.tic('cross atten')
src_feat = layer(src_feat, tgt_feat, None, None, src_mask, tgt_mask)
tgt_feat = layer(tgt_feat, src_feat, None, None, tgt_mask, src_mask)
if self.timers: self.timers.toc('cross atten')
elif name == 'positioning':
pass
return src_feat, tgt_feat, src_pe, tgt_pe
def rand_rot_pcd (self, pcd, mask):
'''
@param pcd: B, N, 3
@param mask: B, N
@return:
'''
pcd[~mask]=0.
N = mask.shape[1]
n_points = mask.sum(dim=1, keepdim=True).view(-1,1,1)
bs = pcd.shape[0]
euler_ab = np.random.rand(bs, 3) * np.pi * 2 # anglez, angley, anglex
rand_rot = torch.from_numpy( Rotation.from_euler('zyx', euler_ab).as_matrix() ).to(pcd)
pcd_u = pcd.mean(dim=1, keepdim=True) * N / n_points
pcd_centered = pcd - pcd_u
pcd_rand_rot = torch.matmul( rand_rot, pcd_centered.transpose(1,2) ).transpose(1,2) + pcd_u
return pcd_rand_rot
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p) | 10,666 | 36.559859 | 142 | py |
lepard | lepard-main/models/__init__.py | 0 | 0 | 0 | py |
|
lepard | lepard-main/models/procrustes.py | import torch
import torch.nn as nn
def topk(data, num_topk):
sort, idx = data.sort(descending=True)
return sort[:num_topk], idx[:num_topk]
class SoftProcrustesLayer(nn.Module):
def __init__(self, config):
super(SoftProcrustesLayer, self).__init__()
self.sample_rate = config.sample_rate
self.max_condition_num= config.max_condition_num
@staticmethod
def batch_weighted_procrustes( X, Y, w, eps=0.0001):
'''
@param X: source frame [B, N,3]
@param Y: target frame [B, N,3]
@param w: weights [B, N,1]
@param eps:
@return:
'''
# https://ieeexplore.ieee.org/document/88573
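        # Descriptive note (weighted Procrustes/Kabsch): normalise the weights, subtract the
        # weighted centroids of X and Y, build the weighted covariance Sxy, take its SVD, and
        # form R = U * diag(1, 1, det(U)det(V)) * V^T with t = mean_Y - R * mean_X;
        # `condition` is the ratio of largest to smallest singular value, used downstream to
        # reject ill-conditioned solutions.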
bsize = X.shape[0]
device = X.device
W1 = torch.abs(w).sum(dim=1, keepdim=True)
w_norm = w / (W1 + eps)
mean_X = (w_norm * X).sum(dim=1, keepdim=True)
mean_Y = (w_norm * Y).sum(dim=1, keepdim=True)
Sxy = torch.matmul( (Y - mean_Y).transpose(1,2), w_norm * (X - mean_X) )
Sxy = Sxy.cpu().double()
U, D, V = Sxy.svd() # small SVD runs faster on cpu
condition = D.max(dim=1)[0] / D.min(dim=1)[0]
S = torch.eye(3)[None].repeat(bsize,1,1).double()
UV_det = U.det() * V.det()
S[:, 2:3, 2:3] = UV_det.view(-1, 1,1)
svT = torch.matmul( S, V.transpose(1,2) )
R = torch.matmul( U, svT).float().to(device)
t = mean_Y.transpose(1,2) - torch.matmul( R, mean_X.transpose(1,2) )
return R, t, condition
def forward(self, conf_matrix, src_pcd, tgt_pcd, src_mask, tgt_mask):
'''
@param conf_matrix:
@param src_pcd:
@param tgt_pcd:
@param src_mask:
@param tgt_mask:
@return:
'''
bsize, N, M = conf_matrix.shape
# subsample correspondence
src_len = src_mask.sum(dim=1)
tgt_len = tgt_mask.sum(dim=1)
entry_max, _ = torch.stack([src_len,tgt_len], dim=0).max(dim=0)
entry_max = (entry_max * self.sample_rate).int()
sample_n_points = entry_max.float().mean().int() #entry_max.max()
conf, idx = conf_matrix.view(bsize, -1).sort(descending=True,dim=1)
w = conf [:, :sample_n_points]
idx= idx[:, :sample_n_points]
idx_src = idx//M #torch.div(idx, M, rounding_mode='trunc')
idx_tgt = idx%M
b_index = torch.arange(bsize).view(-1, 1).repeat((1, sample_n_points)).view(-1)
src_pcd_sampled = src_pcd[b_index, idx_src.view(-1)].view(bsize, sample_n_points, -1)
tgt_pcd_sampled = tgt_pcd[b_index, idx_tgt.view(-1)].view(bsize, sample_n_points, -1)
w_mask = torch.arange(sample_n_points).view(1,-1).repeat(bsize,1).to(w)
w_mask = w_mask < entry_max[:,None]
w[~w_mask] = 0.
# solve
try :
R, t, condition = self.batch_weighted_procrustes(src_pcd_sampled, tgt_pcd_sampled, w[...,None])
except: # fail to get valid solution, this usually happens at the early stage of training
R = torch.eye(3)[None].repeat(bsize,1,1).type_as(conf_matrix)
t = torch.zeros(3, 1)[None].repeat(bsize,1,1).type_as(conf_matrix)
condition = torch.zeros(bsize).type_as(conf_matrix)
        # filter unreliable solutions with the condition number
solution_mask = condition < self.max_condition_num
R_forwd = R.clone()
t_forwd = t.clone()
R_forwd[~solution_mask] = torch.eye(3).type_as(R)
t_forwd[~solution_mask] = torch.zeros(3, 1).type_as(R)
return R, t, R_forwd, t_forwd, condition, solution_mask | 3,591 | 37.623656 | 107 | py |
lepard | lepard-main/models/pipeline.py | from models.blocks import *
from models.backbone import KPFCN
from models.transformer import RepositioningTransformer
from models.matching import Matching
from models.procrustes import SoftProcrustesLayer
class Pipeline(nn.Module):
def __init__(self, config):
super(Pipeline, self).__init__()
self.config = config
self.backbone = KPFCN(config['kpfcn_config'])
self.pe_type = config['coarse_transformer']['pe_type']
self.positioning_type = config['coarse_transformer']['positioning_type']
self.coarse_transformer = RepositioningTransformer(config['coarse_transformer'])
self.coarse_matching = Matching(config['coarse_matching'])
self.soft_procrustes = SoftProcrustesLayer(config['coarse_transformer']['procrustes'])
def forward(self, data, timers=None):
self.timers = timers
if self.timers: self.timers.tic('kpfcn backbone encode')
coarse_feats = self.backbone(data, phase="coarse")
if self.timers: self.timers.toc('kpfcn backbone encode')
if self.timers: self.timers.tic('coarse_preprocess')
src_feats, tgt_feats, s_pcd, t_pcd, src_mask, tgt_mask = self.split_feats (coarse_feats, data)
data.update({ 's_pcd': s_pcd, 't_pcd': t_pcd })
if self.timers: self.timers.toc('coarse_preprocess')
if self.timers: self.timers.tic('coarse feature transformer')
src_feats, tgt_feats, src_pe, tgt_pe = self.coarse_transformer(src_feats, tgt_feats, s_pcd, t_pcd, src_mask, tgt_mask, data, timers=timers)
if self.timers: self.timers.toc('coarse feature transformer')
if self.timers: self.timers.tic('match feature coarse')
conf_matrix_pred, coarse_match_pred = self.coarse_matching(src_feats, tgt_feats, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type = self.pe_type)
data.update({'conf_matrix_pred': conf_matrix_pred, 'coarse_match_pred': coarse_match_pred })
if self.timers: self.timers.toc('match feature coarse')
if self.timers: self.timers.tic('procrustes_layer')
R, t, _, _, _, _ = self.soft_procrustes(conf_matrix_pred, s_pcd, t_pcd, src_mask, tgt_mask)
data.update({"R_s2t_pred": R, "t_s2t_pred": t})
if self.timers: self.timers.toc('procrustes_layer')
return data
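    # Illustrative call sketch (hedged; `batch` preparation is done by the dataloader's
    # collate function and is not shown here):
    #   data = pipeline(batch)
    #   # forward() adds 's_pcd', 't_pcd', 'conf_matrix_pred', 'coarse_match_pred',
    #   # 'R_s2t_pred' and 't_s2t_pred' to the input dict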
def split_feats(self, geo_feats, data):
pcd = data['points'][self.config['kpfcn_config']['coarse_level']]
src_mask = data['src_mask']
tgt_mask = data['tgt_mask']
src_ind_coarse_split = data[ 'src_ind_coarse_split']
tgt_ind_coarse_split = data['tgt_ind_coarse_split']
src_ind_coarse = data['src_ind_coarse']
tgt_ind_coarse = data['tgt_ind_coarse']
b_size, src_pts_max = src_mask.shape
tgt_pts_max = tgt_mask.shape[1]
src_feats = torch.zeros([b_size * src_pts_max, geo_feats.shape[-1]]).type_as(geo_feats)
tgt_feats = torch.zeros([b_size * tgt_pts_max, geo_feats.shape[-1]]).type_as(geo_feats)
src_pcd = torch.zeros([b_size * src_pts_max, 3]).type_as(pcd)
tgt_pcd = torch.zeros([b_size * tgt_pts_max, 3]).type_as(pcd)
src_feats[src_ind_coarse_split] = geo_feats[src_ind_coarse]
tgt_feats[tgt_ind_coarse_split] = geo_feats[tgt_ind_coarse]
src_pcd[src_ind_coarse_split] = pcd[src_ind_coarse]
tgt_pcd[tgt_ind_coarse_split] = pcd[tgt_ind_coarse]
return src_feats.view( b_size , src_pts_max , -1), \
tgt_feats.view( b_size , tgt_pts_max , -1), \
src_pcd.view( b_size , src_pts_max , -1), \
tgt_pcd.view( b_size , tgt_pts_max , -1), \
src_mask, \
tgt_mask | 3,685 | 43.95122 | 154 | py |
lepard | lepard-main/models/blocks.py | import time
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.init import kaiming_uniform_
from kernels.kernel_points import load_kernels
# from lib.ply import write_ply
def gather(x, idx, method=2):
"""
    Implementation of a custom gather operation for a faster backward pass.
:param x: input with shape [N, D_1, ... D_d]
:param idx: indexing with shape [n_1, ..., n_m]
:param method: Choice of the method
:return: x[idx] with shape [n_1, ..., n_m, D_1, ... D_d]
"""
if method == 0:
return x[idx]
elif method == 1:
x = x.unsqueeze(1)
x = x.expand((-1, idx.shape[-1], -1))
idx = idx.unsqueeze(2)
idx = idx.expand((-1, -1, x.shape[-1]))
return x.gather(0, idx)
elif method == 2:
for i, ni in enumerate(idx.size()[1:]):
x = x.unsqueeze(i+1)
new_s = list(x.size())
new_s[i+1] = ni
x = x.expand(new_s)
n = len(idx.size())
for i, di in enumerate(x.size()[n:]):
idx = idx.unsqueeze(i+n)
new_s = list(idx.size())
new_s[i+n] = di
idx = idx.expand(new_s)
return x.gather(0, idx)
else:
        raise ValueError('Unknown method')
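# Example (illustrative): with x of shape [N, D] and a 2-D idx of shape [n1, n2], every
# method returns x[idx] of shape [n1, n2, D]; methods 1 and 2 only differ in how the gather
# is expressed so that the backward pass is cheaper.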
def radius_gaussian(sq_r, sig, eps=1e-9):
"""
Compute a radius gaussian (gaussian of distance)
    :param sq_r: input radii [dn, ..., d1, d0]
:param sig: extents of gaussians [d1, d0] or [d0] or float
:return: gaussian of sq_r [dn, ..., d1, d0]
"""
return torch.exp(-sq_r / (2 * sig**2 + eps))
def closest_pool(x, inds):
"""
Pools features from the closest neighbors. WARNING: this function assumes the neighbors are ordered.
:param x: [n1, d] features matrix
:param inds: [n2, max_num] Only the first column is used for pooling
:return: [n2, d] pooled features matrix
"""
# Add a last row with minimum features for shadow pools
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get features for each pooling location [n2, d]
return gather(x, inds[:, 0])
def max_pool(x, inds):
"""
Pools features with the maximum values.
:param x: [n1, d] features matrix
:param inds: [n2, max_num] pooling indices
:return: [n2, d] pooled features matrix
"""
# Add a last row with minimum features for shadow pools
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get all features for each pooling location [n2, max_num, d]
pool_features = gather(x, inds)
# Pool the maximum [n2, d]
max_features, _ = torch.max(pool_features, 1)
return max_features
def global_average(x, batch_lengths):
"""
Block performing a global average over batch pooling
:param x: [N, D] input features
:param batch_lengths: [B] list of batch lengths
:return: [B, D] averaged features
"""
# Loop over the clouds of the batch
averaged_features = []
i0 = 0
for b_i, length in enumerate(batch_lengths):
# Average features for each batch cloud
averaged_features.append(torch.mean(x[i0:i0 + length], dim=0))
# Increment for next cloud
i0 += length
# Average features in each batch
return torch.stack(averaged_features)
# ----------------------------------------------------------------------------------------------------------------------
#
# KPConv class
# \******************/
#
class KPConv(nn.Module):
def __init__(self, kernel_size, p_dim, in_channels, out_channels, KP_extent, radius,
fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum',
deformable=False, modulated=False):
"""
Initialize parameters for KPConvDeformable.
:param kernel_size: Number of kernel points.
:param p_dim: dimension of the point space.
:param in_channels: dimension of input features.
:param out_channels: dimension of output features.
:param KP_extent: influence radius of each kernel point.
:param radius: radius used for kernel point init. Even for deformable, use the config.conv_radius
:param fixed_kernel_points: fix position of certain kernel points ('none', 'center' or 'verticals').
:param KP_influence: influence function of the kernel points ('constant', 'linear', 'gaussian').
:param aggregation_mode: choose to sum influences, or only keep the closest ('closest', 'sum').
:param deformable: choose deformable or not
:param modulated: choose if kernel weights are modulated in addition to deformed
"""
super(KPConv, self).__init__()
# Save parameters
self.K = kernel_size
self.p_dim = p_dim
self.in_channels = in_channels
self.out_channels = out_channels
self.radius = radius
self.KP_extent = KP_extent
self.fixed_kernel_points = fixed_kernel_points
self.KP_influence = KP_influence
self.aggregation_mode = aggregation_mode
self.deformable = deformable
self.modulated = modulated
# Running variable containing deformed KP distance to input points. (used in regularization loss)
self.min_d2 = None
self.deformed_KP = None
self.offset_features = None
# Initialize weights
self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),
requires_grad=True)
# Initiate weights for offsets
if deformable:
if modulated:
self.offset_dim = (self.p_dim + 1) * self.K
else:
self.offset_dim = self.p_dim * self.K
self.offset_conv = KPConv(self.K,
self.p_dim,
self.in_channels,
self.offset_dim,
KP_extent,
radius,
fixed_kernel_points=fixed_kernel_points,
KP_influence=KP_influence,
aggregation_mode=aggregation_mode)
self.offset_bias = Parameter(torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True)
else:
self.offset_dim = None
self.offset_conv = None
self.offset_bias = None
# Reset parameters
self.reset_parameters()
# Initialize kernel points
self.kernel_points = self.init_KP()
return
def reset_parameters(self):
kaiming_uniform_(self.weights, a=math.sqrt(5))
if self.deformable:
nn.init.zeros_(self.offset_bias)
return
def init_KP(self):
"""
Initialize the kernel point positions in a sphere
:return: the tensor of kernel points
"""
        # Create one kernel-point disposition (as a numpy array); the distance to the center is set by the KP extent
K_points_numpy = load_kernels(self.radius,
self.K,
dimension=self.p_dim,
fixed=self.fixed_kernel_points)
return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32),
requires_grad=False)
def forward(self, q_pts, s_pts, neighb_inds, x):
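        # Shape note: q_pts [n_q, 3] query points, s_pts [n_s, 3] support points,
        # neighb_inds [n_q, max_nb] neighbor indices into s_pts (shadow neighbors point to
        # the fake point appended below), x [n_s, in_channels] input features;
        # returns features of shape [n_q, out_channels].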
###################
# Offset generation
###################
if self.deformable:
# Get offsets with a KPConv that only takes part of the features
self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias
if self.modulated:
# Get offset (in normalized scale) from features
unscaled_offsets = self.offset_features[:, :self.p_dim * self.K]
unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim)
# Get modulations
modulations = 2 * torch.sigmoid(self.offset_features[:, self.p_dim * self.K:])
else:
# Get offset (in normalized scale) from features
unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim)
# No modulations
modulations = None
# Rescale offset for this layer
offsets = unscaled_offsets * self.KP_extent
else:
offsets = None
modulations = None
######################
# Deformed convolution
######################
# Add a fake point in the last row for shadow neighbors
s_pts = torch.cat((s_pts, torch.zeros_like(s_pts[:1, :]) + 1e6), 0)
# Get neighbor points [n_points, n_neighbors, dim]
neighbors = s_pts[neighb_inds, :]
# Center every neighborhood
neighbors = neighbors - q_pts.unsqueeze(1)
# Apply offsets to kernel points [n_points, n_kpoints, dim]
if self.deformable:
self.deformed_KP = offsets + self.kernel_points
deformed_K_points = self.deformed_KP.unsqueeze(1)
else:
deformed_K_points = self.kernel_points
# Get all difference matrices [n_points, n_neighbors, n_kpoints, dim]
neighbors.unsqueeze_(2)
differences = neighbors - deformed_K_points
# Get the square distances [n_points, n_neighbors, n_kpoints]
sq_distances = torch.sum(differences ** 2, dim=3)
# Optimization by ignoring points outside a deformed KP range
if self.deformable:
# Save distances for loss
self.min_d2, _ = torch.min(sq_distances, dim=1)
# Boolean of the neighbors in range of a kernel point [n_points, n_neighbors]
in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32)
# New value of max neighbors
new_max_neighb = torch.max(torch.sum(in_range, dim=1))
# For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb]
neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1)
# Gather new neighbor indices [n_points, new_max_neighb]
new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False)
# Gather new distances to KP [n_points, new_max_neighb, n_kpoints]
neighb_row_inds.unsqueeze_(2)
neighb_row_inds = neighb_row_inds.expand(-1, -1, self.K)
sq_distances = sq_distances.gather(1, neighb_row_inds, sparse_grad=False)
# New shadow neighbors have to point to the last shadow point
new_neighb_inds *= neighb_row_bool
new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1)
else:
new_neighb_inds = neighb_inds
# Get Kernel point influences [n_points, n_kpoints, n_neighbors]
if self.KP_influence == 'constant':
# Every point get an influence of 1.
all_weights = torch.ones_like(sq_distances)
all_weights = torch.transpose(all_weights, 1, 2)
elif self.KP_influence == 'linear':
# Influence decrease linearly with the distance, and get to zero when d = KP_extent.
all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0)
all_weights = torch.transpose(all_weights, 1, 2)
elif self.KP_influence == 'gaussian':
# Influence in gaussian of the distance.
sigma = self.KP_extent * 0.3
all_weights = radius_gaussian(sq_distances, sigma)
all_weights = torch.transpose(all_weights, 1, 2)
else:
raise ValueError('Unknown influence function type (config.KP_influence)')
# In case of closest mode, only the closest KP can influence each point
if self.aggregation_mode == 'closest':
neighbors_1nn = torch.argmin(sq_distances, dim=2)
all_weights *= torch.transpose(nn.functional.one_hot(neighbors_1nn, self.K), 1, 2)
elif self.aggregation_mode != 'sum':
raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
# Add a zero feature for shadow neighbors
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
neighb_x = gather(x, new_neighb_inds)
# Apply distance weights [n_points, n_kpoints, in_fdim]
weighted_features = torch.matmul(all_weights, neighb_x)
# Apply modulations
if self.deformable and self.modulated:
weighted_features *= modulations.unsqueeze(2)
# Apply network weights [n_kpoints, n_points, out_fdim]
weighted_features = weighted_features.permute((1, 0, 2))
kernel_outputs = torch.matmul(weighted_features, self.weights)
# Convolution sum [n_points, out_fdim]
# return torch.sum(kernel_outputs, dim=0)
output_features = torch.sum(kernel_outputs, dim=0, keepdim=False)
# normalization term.
neighbor_features_sum = torch.sum(neighb_x, dim=-1)
neighbor_num = torch.sum(torch.gt(neighbor_features_sum, 0.0), dim=-1)
neighbor_num = torch.max(neighbor_num, torch.ones_like(neighbor_num))
output_features = output_features / neighbor_num.unsqueeze(1)
return output_features
def __repr__(self):
return 'KPConv(radius: {:.2f}, extent: {:.2f}, in_feat: {:d}, out_feat: {:d})'.format(self.radius, self.KP_extent,
self.in_channels,
self.out_channels)
# ----------------------------------------------------------------------------------------------------------------------
#
# Complex blocks
# \********************/
#
def block_decider(block_name,
radius,
in_dim,
out_dim,
layer_ind,
config):
if block_name == 'unary':
return UnaryBlock(in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum)
elif block_name in ['simple',
'simple_deformable',
'simple_invariant',
'simple_equivariant',
'simple_strided',
'simple_deformable_strided',
'simple_invariant_strided',
'simple_equivariant_strided']:
return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
elif block_name in ['resnetb',
'resnetb_invariant',
'resnetb_equivariant',
'resnetb_deformable',
'resnetb_strided',
'resnetb_deformable_strided',
'resnetb_equivariant_strided',
'resnetb_invariant_strided']:
return ResnetBottleneckBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
elif block_name == 'max_pool' or block_name == 'max_pool_wide':
return MaxPoolBlock(layer_ind)
elif block_name == 'global_average':
return GlobalAverageBlock()
elif block_name == 'nearest_upsample':
return NearestUpsampleBlock(layer_ind)
else:
raise ValueError('Unknown block name in the architecture definition : ' + block_name)
class BatchNormBlock(nn.Module):
def __init__(self, in_dim, use_bn, bn_momentum):
"""
Initialize a batch normalization block. If network does not use batch normalization, replace with biases.
:param in_dim: dimension input features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(BatchNormBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.in_dim = in_dim
if self.use_bn:
#self.batch_norm = nn.BatchNorm1d(in_dim, momentum=bn_momentum)
self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
else:
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True)
return
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x):
if self.use_bn:
x = x.unsqueeze(2)
x = x.transpose(0, 2)
x = self.batch_norm(x)
x = x.transpose(0, 2)
return x.squeeze()
else:
return x + self.bias
def __repr__(self):
return 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'.format(self.in_dim,
self.bn_momentum,
str(not self.use_bn))
class UnaryBlock(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard unary block with its ReLU and BatchNorm.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(UnaryBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.no_relu = no_relu
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum)
if not no_relu:
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, x, batch=None):
x = self.mlp(x)
x = self.batch_norm(x)
if not self.no_relu:
x = self.leaky_relu(x)
return x
def __repr__(self):
return 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'.format(self.in_dim,
self.out_dim,
str(self.use_bn),
str(not self.no_relu))
class LastUnaryBlock(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard last_unary block without BN, ReLU.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(LastUnaryBlock, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
return
def forward(self, x, batch=None):
x = self.mlp(x)
return x
def __repr__(self):
return 'LastUnaryBlock(in_feat: {:d}, out_feat: {:d})'.format(self.in_dim,
self.out_dim)
class SimpleBlock(nn.Module):
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
"""
Initialize a simple convolution block with its ReLU and BatchNorm.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param radius: current radius of convolution
:param config: parameters
"""
super(SimpleBlock, self).__init__()
# get KP_extent from current radius
current_extent = radius * config.KP_extent / config.conv_radius
# Get other parameters
self.bn_momentum = config.batch_norm_momentum
self.use_bn = config.use_batch_norm
self.layer_ind = layer_ind
self.block_name = block_name
self.in_dim = in_dim
self.out_dim = out_dim
# Define the KPConv class
self.KPConv = KPConv(config.num_kernel_points,
config.in_points_dim,
in_dim,
out_dim // 2,
current_extent,
radius,
fixed_kernel_points=config.fixed_kernel_points,
KP_influence=config.KP_influence,
aggregation_mode=config.aggregation_mode,
deformable='deform' in block_name,
modulated=config.modulated)
        # Other operations
self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum)
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, x, batch):
if 'strided' in self.block_name:
q_pts = batch['points'][self.layer_ind + 1]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['pools'][self.layer_ind]
else:
q_pts = batch['points'][self.layer_ind]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['neighbors'][self.layer_ind]
x = self.KPConv(q_pts, s_pts, neighb_inds, x)
return self.leaky_relu(self.batch_norm(x))
class ResnetBottleneckBlock(nn.Module):
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
"""
Initialize a resnet bottleneck block.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param radius: current radius of convolution
:param config: parameters
"""
super(ResnetBottleneckBlock, self).__init__()
# get KP_extent from current radius
current_extent = radius * config.KP_extent / config.conv_radius
# Get other parameters
self.bn_momentum = config.batch_norm_momentum
self.use_bn = config.use_batch_norm
self.block_name = block_name
self.layer_ind = layer_ind
self.in_dim = in_dim
self.out_dim = out_dim
# First downscaling mlp
if in_dim != out_dim // 4:
self.unary1 = UnaryBlock(in_dim, out_dim // 4, self.use_bn, self.bn_momentum)
else:
self.unary1 = nn.Identity()
# KPConv block
self.KPConv = KPConv(config.num_kernel_points,
config.in_points_dim,
out_dim // 4,
out_dim // 4,
current_extent,
radius,
fixed_kernel_points=config.fixed_kernel_points,
KP_influence=config.KP_influence,
aggregation_mode=config.aggregation_mode,
deformable='deform' in block_name,
modulated=config.modulated)
self.batch_norm_conv = BatchNormBlock(out_dim // 4, self.use_bn, self.bn_momentum)
# Second upscaling mlp
self.unary2 = UnaryBlock(out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
        # Optional shortcut mlp
if in_dim != out_dim:
self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
else:
self.unary_shortcut = nn.Identity()
# Other operations
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, features, batch):
if 'strided' in self.block_name:
q_pts = batch['points'][self.layer_ind + 1]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['pools'][self.layer_ind]
else:
q_pts = batch['points'][self.layer_ind]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['neighbors'][self.layer_ind]
# First downscaling mlp
x = self.unary1(features)
# Convolution
x = self.KPConv(q_pts, s_pts, neighb_inds, x)
x = self.leaky_relu(self.batch_norm_conv(x))
# Second upscaling mlp
x = self.unary2(x)
# Shortcut
if 'strided' in self.block_name:
shortcut = max_pool(features, neighb_inds)
else:
shortcut = features
shortcut = self.unary_shortcut(shortcut)
return self.leaky_relu(x + shortcut)
class GlobalAverageBlock(nn.Module):
def __init__(self):
"""
        Initialize a global average pooling block.
"""
super(GlobalAverageBlock, self).__init__()
return
def forward(self, x, batch):
return global_average(x, batch['stack_lengths'][-1])
class NearestUpsampleBlock(nn.Module):
def __init__(self, layer_ind):
"""
        Initialize a nearest-neighbor upsampling block.
"""
super(NearestUpsampleBlock, self).__init__()
self.layer_ind = layer_ind
return
def forward(self, x, batch):
return closest_pool(x, batch['upsamples'][self.layer_ind - 1])
def __repr__(self):
return 'NearestUpsampleBlock(layer: {:d} -> {:d})'.format(self.layer_ind,
self.layer_ind - 1)
class MaxPoolBlock(nn.Module):
def __init__(self, layer_ind):
"""
        Initialize a max pooling block.
"""
super(MaxPoolBlock, self).__init__()
self.layer_ind = layer_ind
return
def forward(self, x, batch):
return max_pool(x, batch['pools'][self.layer_ind + 1])
| 26,090 | 35.956091 | 122 | py |
lepard | lepard-main/cpp_wrappers/cpp_neighbors/setup.py | from distutils.core import setup, Extension
import numpy.distutils.misc_util
# Adding OpenCV to project
# ************************
# Adding sources of the project
# *****************************
SOURCES = ["../cpp_utils/cloud/cloud.cpp",
"neighbors/neighbors.cpp",
"wrapper.cpp"]
module = Extension(name="radius_neighbors",
sources=SOURCES,
extra_compile_args=['-std=c++11',
'-D_GLIBCXX_USE_CXX11_ABI=0'])
setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
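# Typical build command for this extension (run from this directory), e.g.:
#   python3 setup.py build_ext --inplace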
| 619 | 20.37931 | 92 | py |
lepard | lepard-main/cpp_wrappers/cpp_subsampling/setup.py | from distutils.core import setup, Extension
import numpy.distutils.misc_util
# Adding OpenCV to project
# ************************
# Adding sources of the project
# *****************************
SOURCES = ["../cpp_utils/cloud/cloud.cpp",
"grid_subsampling/grid_subsampling.cpp",
"wrapper.cpp"]
module = Extension(name="grid_subsampling",
sources=SOURCES,
extra_compile_args=['-std=c++11',
'-D_GLIBCXX_USE_CXX11_ABI=0'])
setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
| 633 | 20.862069 | 92 | py |
lepard | lepard-main/datasets/_4dmatch.py | import os, sys, glob, torch
# sys.path.append("../")
[sys.path.append(i) for i in ['.', '..']]
import numpy as np
import torch
import random
from scipy.spatial.transform import Rotation
from torch.utils.data import Dataset
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, KDTree_corr
from lib.utils import load_obj
HMN_intrin = np.array( [443, 256, 443, 250 ])
cam_intrin = np.array( [443, 256, 443, 250 ])
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, get_correspondences
class _4DMatch(Dataset):
def __init__(self, config, split, data_augmentation=True):
super(_4DMatch, self).__init__()
assert split in ['train','val','test']
if 'overfit' in config.exp_dir:
d_slice = config.batch_size
else :
d_slice = None
self.entries = self.read_entries( config.split[split] , config.data_root, d_slice=d_slice )
self.base_dir = config.data_root
self.data_augmentation = data_augmentation
self.config = config
self.rot_factor = 1.
self.augment_noise = config.augment_noise
self.max_points = 30000
self.overlap_radius = 0.0375
self.cache = {}
self.cache_size = 30000
def read_entries (self, split, data_root, d_slice=None, shuffle= False):
entries = glob.glob(os.path.join(data_root, split, "*/*.npz"))
if shuffle:
random.shuffle(entries)
if d_slice:
return entries[:d_slice]
return entries
def __len__(self):
return len(self.entries )
def __getitem__(self, index, debug=False):
if index in self.cache:
entry = self.cache[index]
else :
entry = np.load(self.entries[index])
if len(self.cache) < self.cache_size:
self.cache[index] = entry
# get transformation
rot = entry['rot']
trans = entry['trans']
s2t_flow = entry['s2t_flow']
src_pcd = entry['s_pc']
tgt_pcd = entry['t_pc']
correspondences = entry['correspondences'] # obtained with search radius 0.015 m
src_pcd_deformed = src_pcd + s2t_flow
if "metric_index" in entry:
metric_index = entry['metric_index'].squeeze()
else:
metric_index = None
# if we get too many points, we do some downsampling
if (src_pcd.shape[0] > self.max_points):
idx = np.random.permutation(src_pcd.shape[0])[:self.max_points]
src_pcd = src_pcd[idx]
if (tgt_pcd.shape[0] > self.max_points):
idx = np.random.permutation(tgt_pcd.shape[0])[:self.max_points]
tgt_pcd = tgt_pcd[idx]
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.013
src_wrapped = (np.matmul( rot, src_pcd_deformed.T ) + trans ).T
mlab.points3d(src_wrapped[:, 0], src_wrapped[:, 1], src_wrapped[:, 2], scale_factor=scale_factor, color=c_pink)
mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.show()
# add gaussian noise
if self.data_augmentation:
# rotate the point cloud
euler_ab = np.random.rand(3) * np.pi * 2 / self.rot_factor # anglez, angley, anglex
rot_ab = Rotation.from_euler('zyx', euler_ab).as_matrix()
if (np.random.rand(1)[0] > 0.5):
src_pcd = np.matmul(rot_ab, src_pcd.T).T
src_pcd_deformed = np.matmul(rot_ab, src_pcd_deformed.T).T
rot = np.matmul(rot, rot_ab.T)
else:
tgt_pcd = np.matmul(rot_ab, tgt_pcd.T).T
rot = np.matmul(rot_ab, rot)
trans = np.matmul(rot_ab, trans)
src_pcd += (np.random.rand(src_pcd.shape[0], 3) - 0.5) * self.augment_noise
tgt_pcd += (np.random.rand(tgt_pcd.shape[0], 3) - 0.5) * self.augment_noise
s2t_flow = src_pcd_deformed - src_pcd
if debug:
# wrapp_src = (np.matmul(rot, src_pcd.T)+ trans).T
src_wrapped = (np.matmul( rot, src_pcd_deformed.T ) + trans ).T
mlab.points3d(src_wrapped[:, 0], src_wrapped[:, 1], src_wrapped[:, 2], scale_factor=scale_factor, color=c_red)
mlab.points3d(tgt_pcd[:, 0], tgt_pcd[:, 1], tgt_pcd[:, 2], scale_factor=scale_factor, color=c_blue)
mlab.show()
if (trans.ndim == 1):
trans = trans[:, None]
src_feats = np.ones_like(src_pcd[:, :1]).astype(np.float32)
tgt_feats = np.ones_like(tgt_pcd[:, :1]).astype(np.float32)
rot = rot.astype(np.float32)
trans = trans.astype(np.float32)
#R * ( Ps + flow ) + t = Pt
return src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trans, s2t_flow, metric_index
if __name__ == '__main__':
from lib.utils import load_config
from easydict import EasyDict as edict
from lib.tictok import Timers
import yaml
def join(loader, node):
seq = loader.construct_sequence(node)
return '_'.join([str(i) for i in seq])
yaml.add_constructor('!join', join)
config = "/home/liyang/workspace/Regformer/configs/train/4dmatch.yaml"
with open(config,'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
config = edict(config)
config.timers=Timers()
D = _4DMatch(config, "test")
for i in range (len(D)):
try:
if i%1000 == 0 :
print (i,"/",len(D))
D.__getitem__(i, debug=True)
except:
# print(i, "/", len(D))
            pass

lepard | lepard-main/datasets/dataloader.py
import numpy as np
from functools import partial
import torch
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
import cpp_wrappers.cpp_neighbors.radius_neighbors as cpp_neighbors
from datasets._3dmatch import _3DMatch
from datasets._4dmatch import _4DMatch
from datasets.utils import blend_scene_flow, multual_nn_correspondence
from lib.visualization import *
from torch.utils.data import DataLoader
def batch_grid_subsampling_kpconv(points, batches_len, features=None, labels=None, sampleDl=0.1, max_p=0, verbose=0, random_grid_orient=True):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
"""
if (features is None) and (labels is None):
s_points, s_len = cpp_subsampling.subsample_batch(points,
batches_len,
sampleDl=sampleDl,
max_p=max_p,
verbose=verbose)
return torch.from_numpy(s_points), torch.from_numpy(s_len)
elif (labels is None):
s_points, s_len, s_features = cpp_subsampling.subsample_batch(points,
batches_len,
features=features,
sampleDl=sampleDl,
max_p=max_p,
verbose=verbose)
return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_features)
elif (features is None):
s_points, s_len, s_labels = cpp_subsampling.subsample_batch(points,
batches_len,
classes=labels,
sampleDl=sampleDl,
max_p=max_p,
verbose=verbose)
return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_labels)
else:
s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(points,
batches_len,
features=features,
classes=labels,
sampleDl=sampleDl,
max_p=max_p,
verbose=verbose)
return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_features), torch.from_numpy(s_labels)
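# Example (illustrative sketch, not part of the original file): pooling two stacked
# clouds with a 5 cm grid. `pts` is assumed to be an (N, 3) float32 array stacking both
# clouds and `lens` holds their lengths, e.g. np.array([N1, N2], dtype=np.int32):
#   s_pts, s_len = batch_grid_subsampling_kpconv(pts, lens, sampleDl=0.05)
#   # s_pts stacks the barycenters of the occupied grid cells, s_len the new per-cloud lengths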
def batch_neighbors_kpconv(queries, supports, q_batches, s_batches, radius, max_neighbors):
"""
Computes neighbors for a batch of queries and supports, apply radius search
:param queries: (N1, 3) the query points
:param supports: (N2, 3) the support points
:param q_batches: (B) the list of lengths of batch elements in queries
    :param s_batches: (B) the list of lengths of batch elements in supports
:param radius: float32
:return: neighbors indices
"""
neighbors = cpp_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=radius)
if max_neighbors > 0:
return torch.from_numpy(neighbors[:, :max_neighbors])
else:
return torch.from_numpy(neighbors)
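# Note (added for clarity): each row of the returned matrix holds the support indices found
# within `radius` of one query point; in the KPConv convention, rows with fewer than
# `max_neighbors` hits are padded with an out-of-range "shadow" index (the number of support
# points), which is what calibrate_neighbors() below relies on when counting valid neighbors.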
def collate_fn_3dmatch(list_data, config, neighborhood_limits ):
batched_points_list = []
batched_features_list = []
batched_lengths_list = []
correspondences_list = []
src_pcd_list = []
tgt_pcd_list = []
batched_rot = []
batched_trn = []
gt_cov_list = []
for ind, ( src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trn, gt_cov) in enumerate(list_data):
correspondences_list.append(correspondences )
src_pcd_list.append(torch.from_numpy(src_pcd) )
tgt_pcd_list.append(torch.from_numpy(tgt_pcd) )
batched_points_list.append(src_pcd)
batched_points_list.append(tgt_pcd)
batched_features_list.append(src_feats)
batched_features_list.append(tgt_feats)
batched_lengths_list.append(len(src_pcd))
batched_lengths_list.append(len(tgt_pcd))
batched_rot.append( torch.from_numpy(rot).float())
batched_trn.append( torch.from_numpy(trn).float())
gt_cov_list.append(gt_cov)
gt_cov_list = None if gt_cov_list[0] is None \
else np.stack(gt_cov_list, axis=0)
# if timers: cnter['collate_load_batch'] = time.time() - st
batched_features = torch.from_numpy(np.concatenate(batched_features_list, axis=0))
batched_points = torch.from_numpy(np.concatenate(batched_points_list, axis=0))
batched_lengths = torch.from_numpy(np.array(batched_lengths_list)).int()
batched_rot = torch.stack(batched_rot,dim=0)
batched_trn = torch.stack(batched_trn,dim=0)
# Starting radius of convolutions
r_normal = config.first_subsampling_dl * config.conv_radius
# Starting layer
layer_blocks = []
layer = 0
# Lists of inputs
input_points = []
input_neighbors = []
input_pools = []
input_upsamples = []
input_batches_len = []
    # construct kpfcn indices
for block_i, block in enumerate(config.architecture):
# Stop when meeting a global pooling or upsampling
if 'global' in block or 'upsample' in block:
break
# Get all blocks of the layer
if not ('pool' in block or 'strided' in block):
layer_blocks += [block]
if block_i < len(config.architecture) - 1 and not ('upsample' in config.architecture[block_i + 1]):
continue
# Convolution neighbors indices
# *****************************
if layer_blocks:
# Convolutions are done in this layer, compute the neighbors with the good radius
if np.any(['deformable' in blck for blck in layer_blocks[:-1]]):
r = r_normal * config.deform_radius / config.conv_radius
else:
r = r_normal
conv_i = batch_neighbors_kpconv(batched_points, batched_points, batched_lengths, batched_lengths, r,
neighborhood_limits[layer])
else:
            # This layer only performs pooling, no neighbors required
conv_i = torch.zeros((0, 1), dtype=torch.int64)
# Pooling neighbors indices
# *************************
# If end of layer is a pooling operation
if 'pool' in block or 'strided' in block:
# New subsampling length
dl = 2 * r_normal / config.conv_radius
# Subsampled points
pool_p, pool_b = batch_grid_subsampling_kpconv(batched_points, batched_lengths, sampleDl=dl)
# Radius of pooled neighbors
if 'deformable' in block:
r = r_normal * config.deform_radius / config.conv_radius
else:
r = r_normal
# Subsample indices
pool_i = batch_neighbors_kpconv(pool_p, batched_points, pool_b, batched_lengths, r,
neighborhood_limits[layer])
# Upsample indices (with the radius of the next layer to keep wanted density)
up_i = batch_neighbors_kpconv(batched_points, pool_p, batched_lengths, pool_b, 2 * r,
neighborhood_limits[layer])
else:
# No pooling in the end of this layer, no pooling indices required
pool_i = torch.zeros((0, 1), dtype=torch.int64)
pool_p = torch.zeros((0, 3), dtype=torch.float32)
pool_b = torch.zeros((0,), dtype=torch.int64)
up_i = torch.zeros((0, 1), dtype=torch.int64)
# Updating input lists
input_points += [batched_points.float()]
input_neighbors += [conv_i.long()]
input_pools += [pool_i.long()]
input_upsamples += [up_i.long()]
input_batches_len += [batched_lengths]
# New points for next layer
batched_points = pool_p
batched_lengths = pool_b
# Update radius and reset blocks
r_normal *= 2
layer += 1
layer_blocks = []
    # coarse information
coarse_level = config.coarse_level
pts_num_coarse = input_batches_len[coarse_level].view(-1, 2)
b_size = pts_num_coarse.shape[0]
src_pts_max, tgt_pts_max = pts_num_coarse.amax(dim=0)
coarse_pcd = input_points[coarse_level] # .numpy()
coarse_matches= []
src_ind_coarse_split= [] # src_feats shape :[b_size * src_pts_max]
src_ind_coarse = []
tgt_ind_coarse_split= []
tgt_ind_coarse = []
accumu = 0
src_mask = torch.zeros([b_size, src_pts_max], dtype=torch.bool)
tgt_mask = torch.zeros([b_size, tgt_pts_max], dtype=torch.bool)
#grid subsample fine level points for differentiable matching
fine_pts, fine_length = batch_grid_subsampling_kpconv(input_points[0], input_batches_len[0], sampleDl=dl*0.5*0.85)
fine_ind = batch_neighbors_kpconv(fine_pts, input_points[0], fine_length, input_batches_len[0], dl*0.5*0.85, 1).squeeze().long()
for entry_id, cnt in enumerate( pts_num_coarse ): #input_batches_len[-1].numpy().reshape(-1,2)) :
n_s_pts, n_t_pts = cnt
        '''split mask for bottleneck feats'''
src_mask[entry_id][:n_s_pts] = 1
tgt_mask[entry_id][:n_t_pts] = 1
'''split indices of bottleneck feats'''
src_ind_coarse_split.append( torch.arange( n_s_pts ) + entry_id * src_pts_max )
tgt_ind_coarse_split.append( torch.arange( n_t_pts ) + entry_id * tgt_pts_max )
src_ind_coarse.append( torch.arange( n_s_pts ) + accumu )
tgt_ind_coarse.append( torch.arange( n_t_pts ) + accumu + n_s_pts )
'''get match at coarse level'''
c_src_pcd = coarse_pcd[accumu : accumu + n_s_pts]
c_tgt_pcd = coarse_pcd[accumu + n_s_pts: accumu + n_s_pts + n_t_pts]
s_pc_wrapped = (torch.matmul( batched_rot[entry_id], c_src_pcd.T ) + batched_trn [entry_id]).T
coarse_match_gt = torch.from_numpy( multual_nn_correspondence(s_pc_wrapped.numpy(), c_tgt_pcd.numpy(), search_radius=config['coarse_match_radius']) )# 0.1m scaled
coarse_matches.append(coarse_match_gt)
accumu = accumu + n_s_pts + n_t_pts
vis=False # for debug
if vis :
viz_coarse_nn_correspondence_mayavi(c_src_pcd, c_tgt_pcd, coarse_match_gt, scale_factor=0.04)
vis=False # for debug
if vis :
pass
import mayavi.mlab as mlab
# src_nei_valid = src_nei_mask[coarse_match_gt[0]].view(-1)
# tgt_nei_valid = tgt_nei_mask[coarse_match_gt[1]].view(-1)
#
# f_src_pcd = src_m_nei_pts.view(-1, 3)[src_nei_valid]
# f_tgt_pcd = tgt_m_nei_pts.view(-1,3)[tgt_nei_valid]
#
# mlab.points3d(f_src_pcd[:, 0], f_src_pcd[:, 1], f_src_pcd[:, 2], scale_factor=0.02,color=c_gray1)
# mlab.points3d(f_tgt_pcd[:, 0], f_tgt_pcd[:, 1], f_tgt_pcd[:, 2], scale_factor=0.02,color=c_gray2)
#
# src_m_nn_pts =src_m_nn_pts.view(-1, 3)
# src_m_nn_pts_wrapped = src_m_nn_pts_wrapped.view(-1,3)
# tgt_m_nn_pts = tgt_m_nei_pts [ torch.arange(tgt_m_nei_pts.shape[0]), nni.view(-1), ... ]
# mlab.points3d(src_m_nn_pts[:, 0], src_m_nn_pts[:, 1], src_m_nn_pts[:, 2], scale_factor=0.04,color=c_red)
# mlab.points3d(src_m_nn_pts_wrapped[:, 0], src_m_nn_pts_wrapped[:, 1], src_m_nn_pts_wrapped[:, 2], scale_factor=0.04,color=c_red)
# mlab.points3d(tgt_m_nn_pts[:, 0], tgt_m_nn_pts[:, 1], tgt_m_nn_pts[:, 2], scale_factor=0.04 ,color=c_blue)
# mlab.show()
# viz_coarse_nn_correspondence_mayavi(c_src_pcd, c_tgt_pcd, coarse_match_gt,
# f_src_pcd=src_m_nei_pts.view(-1,3)[src_nei_valid],
# f_tgt_pcd=tgt_m_nei_pts.view(-1,3)[tgt_nei_valid], scale_factor=0.08)
src_ind_coarse_split = torch.cat(src_ind_coarse_split)
tgt_ind_coarse_split = torch.cat(tgt_ind_coarse_split)
src_ind_coarse = torch.cat(src_ind_coarse)
tgt_ind_coarse = torch.cat(tgt_ind_coarse)
dict_inputs = {
'src_pcd_list': src_pcd_list,
'tgt_pcd_list': tgt_pcd_list,
'points': input_points,
'neighbors': input_neighbors,
'pools': input_pools,
'upsamples': input_upsamples,
'features': batched_features.float(),
'stack_lengths': input_batches_len,
'coarse_matches': coarse_matches,
'src_mask': src_mask,
'tgt_mask': tgt_mask,
'src_ind_coarse_split': src_ind_coarse_split,
'tgt_ind_coarse_split': tgt_ind_coarse_split,
'src_ind_coarse': src_ind_coarse,
'tgt_ind_coarse': tgt_ind_coarse,
'batched_rot': batched_rot,
'batched_trn': batched_trn,
'gt_cov': gt_cov_list,
#for refine
'correspondences_list': correspondences_list,
'fine_ind': fine_ind,
'fine_pts': fine_pts,
'fine_length': fine_length
}
return dict_inputs
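# Summary sketch (added for clarity): in the dict returned above, 'points', 'neighbors',
# 'pools', 'upsamples' and 'stack_lengths' are per-layer lists forming the KPFCN pyramid;
# 'coarse_matches' holds the ground-truth coarse-level correspondences, and the
# *_ind_coarse / *_mask entries map the stacked bottleneck features back to padded
# [B, max_pts] tensors.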
def collate_fn_4dmatch(list_data, config, neighborhood_limits ):
batched_points_list = []
batched_features_list = []
batched_lengths_list = []
correspondences_list = []
src_pcd_list = []
tgt_pcd_list = []
batched_rot = []
batched_trn = []
sflow_list = []
metric_index_list = [] #for feature matching recall computation
for ind, ( src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trn, s2t_flow, metric_index) in enumerate(list_data):
correspondences_list.append(correspondences )
src_pcd_list.append(torch.from_numpy(src_pcd) )
tgt_pcd_list.append(torch.from_numpy(tgt_pcd) )
batched_points_list.append(src_pcd)
batched_points_list.append(tgt_pcd)
batched_features_list.append(src_feats)
batched_features_list.append(tgt_feats)
batched_lengths_list.append(len(src_pcd))
batched_lengths_list.append(len(tgt_pcd))
batched_rot.append( torch.from_numpy(rot).float())
batched_trn.append( torch.from_numpy(trn).float())
# gt_cov_list.append(gt_cov)
sflow_list.append( torch.from_numpy(s2t_flow).float() )
if metric_index is None:
metric_index_list = None
else :
metric_index_list.append ( torch.from_numpy(metric_index))
# if timers: cnter['collate_load_batch'] = time.time() - st
batched_features = torch.from_numpy(np.concatenate(batched_features_list, axis=0))
batched_points = torch.from_numpy(np.concatenate(batched_points_list, axis=0))
batched_lengths = torch.from_numpy(np.array(batched_lengths_list)).int()
batched_rot = torch.stack(batched_rot,dim=0)
batched_trn = torch.stack(batched_trn,dim=0)
# Starting radius of convolutions
r_normal = config.first_subsampling_dl * config.conv_radius
# Starting layer
layer_blocks = []
layer = 0
# Lists of inputs
input_points = []
input_neighbors = []
input_pools = []
input_upsamples = []
input_batches_len = []
    # construct kpfcn indices
for block_i, block in enumerate(config.architecture):
# Stop when meeting a global pooling or upsampling
if 'global' in block or 'upsample' in block:
break
# Get all blocks of the layer
if not ('pool' in block or 'strided' in block):
layer_blocks += [block]
if block_i < len(config.architecture) - 1 and not ('upsample' in config.architecture[block_i + 1]):
continue
# Convolution neighbors indices
# *****************************
if layer_blocks:
# Convolutions are done in this layer, compute the neighbors with the good radius
if np.any(['deformable' in blck for blck in layer_blocks[:-1]]):
r = r_normal * config.deform_radius / config.conv_radius
else:
r = r_normal
conv_i = batch_neighbors_kpconv(batched_points, batched_points, batched_lengths, batched_lengths, r,
neighborhood_limits[layer])
else:
            # This layer only performs pooling, no neighbors required
conv_i = torch.zeros((0, 1), dtype=torch.int64)
# Pooling neighbors indices
# *************************
# If end of layer is a pooling operation
if 'pool' in block or 'strided' in block:
# New subsampling length
dl = 2 * r_normal / config.conv_radius
# Subsampled points
pool_p, pool_b = batch_grid_subsampling_kpconv(batched_points, batched_lengths, sampleDl=dl)
# Radius of pooled neighbors
if 'deformable' in block:
r = r_normal * config.deform_radius / config.conv_radius
else:
r = r_normal
# Subsample indices
pool_i = batch_neighbors_kpconv(pool_p, batched_points, pool_b, batched_lengths, r,
neighborhood_limits[layer])
# Upsample indices (with the radius of the next layer to keep wanted density)
up_i = batch_neighbors_kpconv(batched_points, pool_p, batched_lengths, pool_b, 2 * r,
neighborhood_limits[layer])
else:
# No pooling in the end of this layer, no pooling indices required
pool_i = torch.zeros((0, 1), dtype=torch.int64)
pool_p = torch.zeros((0, 3), dtype=torch.float32)
pool_b = torch.zeros((0,), dtype=torch.int64)
up_i = torch.zeros((0, 1), dtype=torch.int64)
# Updating input lists
input_points += [batched_points.float()]
input_neighbors += [conv_i.long()]
input_pools += [pool_i.long()]
input_upsamples += [up_i.long()]
input_batches_len += [batched_lengths]
# New points for next layer
batched_points = pool_p
batched_lengths = pool_b
# Update radius and reset blocks
r_normal *= 2
layer += 1
layer_blocks = []
    # coarse information
coarse_level = config.coarse_level
pts_num_coarse = input_batches_len[coarse_level].view(-1, 2)
b_size = pts_num_coarse.shape[0]
src_pts_max, tgt_pts_max = pts_num_coarse.amax(dim=0)
coarse_pcd = input_points[coarse_level] # .numpy()
coarse_matches= []
coarse_flow = []
src_ind_coarse_split= [] # src_feats shape :[b_size * src_pts_max]
src_ind_coarse = []
tgt_ind_coarse_split= []
tgt_ind_coarse = []
accumu = 0
src_mask = torch.zeros([b_size, src_pts_max], dtype=torch.bool)
tgt_mask = torch.zeros([b_size, tgt_pts_max], dtype=torch.bool)
for entry_id, cnt in enumerate( pts_num_coarse ): #input_batches_len[-1].numpy().reshape(-1,2)) :
n_s_pts, n_t_pts = cnt
        '''split mask for bottleneck feats'''
src_mask[entry_id][:n_s_pts] = 1
tgt_mask[entry_id][:n_t_pts] = 1
'''split indices of bottleneck feats'''
src_ind_coarse_split.append( torch.arange( n_s_pts ) + entry_id * src_pts_max )
tgt_ind_coarse_split.append( torch.arange( n_t_pts ) + entry_id * tgt_pts_max )
src_ind_coarse.append( torch.arange( n_s_pts ) + accumu )
tgt_ind_coarse.append( torch.arange( n_t_pts ) + accumu + n_s_pts )
'''get match at coarse level'''
c_src_pcd_np = coarse_pcd[accumu : accumu + n_s_pts].numpy()
c_tgt_pcd_np = coarse_pcd[accumu + n_s_pts: accumu + n_s_pts + n_t_pts].numpy()
#interpolate flow
f_src_pcd = batched_points_list[entry_id * 2]
c_flow = blend_scene_flow( c_src_pcd_np, f_src_pcd, sflow_list[entry_id].numpy(), knn=3)
c_src_pcd_deformed = c_src_pcd_np + c_flow
s_pc_wrapped = (np.matmul( batched_rot[entry_id].numpy(), c_src_pcd_deformed.T ) + batched_trn [entry_id].numpy()).T
coarse_match_gt = torch.from_numpy( multual_nn_correspondence(s_pc_wrapped , c_tgt_pcd_np , search_radius=config['coarse_match_radius']) )# 0.1m scaled
coarse_matches.append(coarse_match_gt)
coarse_flow.append(torch.from_numpy(c_flow) )
accumu = accumu + n_s_pts + n_t_pts
vis=False # for debug
if vis :
viz_coarse_nn_correspondence_mayavi(c_src_pcd_np, c_tgt_pcd_np, coarse_match_gt, scale_factor=0.02)
src_ind_coarse_split = torch.cat(src_ind_coarse_split)
tgt_ind_coarse_split = torch.cat(tgt_ind_coarse_split)
src_ind_coarse = torch.cat(src_ind_coarse)
tgt_ind_coarse = torch.cat(tgt_ind_coarse)
dict_inputs = {
'src_pcd_list': src_pcd_list,
'tgt_pcd_list': tgt_pcd_list,
'points': input_points,
'neighbors': input_neighbors,
'pools': input_pools,
'upsamples': input_upsamples,
'features': batched_features.float(),
'stack_lengths': input_batches_len,
'coarse_matches': coarse_matches,
'coarse_flow' : coarse_flow,
'src_mask': src_mask,
'tgt_mask': tgt_mask,
'src_ind_coarse_split': src_ind_coarse_split,
'tgt_ind_coarse_split': tgt_ind_coarse_split,
'src_ind_coarse': src_ind_coarse,
'tgt_ind_coarse': tgt_ind_coarse,
'batched_rot': batched_rot,
'batched_trn': batched_trn,
'sflow_list': sflow_list,
"metric_index_list": metric_index_list
}
return dict_inputs
def calibrate_neighbors(dataset, config, collate_fn, keep_ratio=0.8, samples_threshold=2000):
# From config parameter, compute higher bound of neighbors number in a neighborhood
hist_n = int(np.ceil(4 / 3 * np.pi * (config.deform_radius + 1) ** 3))
neighb_hists = np.zeros((config.num_layers, hist_n), dtype=np.int32)
# Get histogram of neighborhood sizes i in 1 epoch max.
for i in range(len(dataset)):
batched_input = collate_fn([dataset[i]], config, neighborhood_limits=[hist_n] * 5)
# update histogram
counts = [torch.sum(neighb_mat < neighb_mat.shape[0], dim=1).numpy() for neighb_mat in batched_input['neighbors']]
hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts]
neighb_hists += np.vstack(hists)
# if timer.total_time - last_display > 0.1:
# last_display = timer.total_time
# print(f"Calib Neighbors {i:08d}: timings {timer.total_time:4.2f}s")
if np.min(np.sum(neighb_hists, axis=1)) > samples_threshold:
break
cumsum = np.cumsum(neighb_hists.T, axis=0)
percentiles = np.sum(cumsum < (keep_ratio * cumsum[hist_n - 1, :]), axis=0)
neighborhood_limits = percentiles
print('\n')
return neighborhood_limits
def get_datasets(config):
if (config.dataset == '3dmatch'):
train_set = _3DMatch(config, 'train', data_augmentation=True)
val_set = _3DMatch(config, 'val', data_augmentation=False)
test_set = _3DMatch(config, 'test', data_augmentation=False)
elif(config.dataset == '4dmatch'):
train_set = _4DMatch(config, 'train', data_augmentation=True)
val_set = _4DMatch(config, 'val', data_augmentation=False)
test_set = _4DMatch(config, 'test', data_augmentation=False)
else:
raise NotImplementedError
return train_set, val_set, test_set
def get_dataloader(dataset, config, shuffle=True, neighborhood_limits=None):
if config.dataset=='4dmatch':
collate_fn = collate_fn_4dmatch
elif config.dataset == '3dmatch':
collate_fn = collate_fn_3dmatch
else:
raise NotImplementedError()
if neighborhood_limits is None:
neighborhood_limits = calibrate_neighbors(dataset, config['kpfcn_config'], collate_fn=collate_fn)
print("neighborhood:", neighborhood_limits)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=config['batch_size'],
shuffle=shuffle,
num_workers=config['num_workers'],
collate_fn=partial(collate_fn, config=config['kpfcn_config'], neighborhood_limits=neighborhood_limits ),
drop_last=False
)
return dataloader, neighborhood_limits
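# Example (illustrative sketch, not part of the original file):
#   train_set, val_set, test_set = get_datasets(config)
#   train_loader, limits = get_dataloader(train_set, config, shuffle=True)
#   val_loader, _ = get_dataloader(val_set, config, shuffle=False, neighborhood_limits=limits)
#   # reusing `limits` skips the neighborhood calibration pass for the other splits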
if __name__ == '__main__':
pass
lepard | lepard-main/datasets/utils.py
import numpy as np
# from lib.benchmark_utils import to_o3d_pcd, KDTree_corr
def partition_arg_topK(matrix, K, axis=0):
""" find index of K smallest entries along a axis
perform topK based on np.argpartition
:param matrix: to be sorted
:param K: select and sort the top K items
:param axis: 0 or 1. dimension to be sorted.
:return:
"""
a_part = np.argpartition(matrix, K, axis=axis)
if axis == 0:
row_index = np.arange(matrix.shape[1 - axis])
a_sec_argsort_K = np.argsort(matrix[a_part[0:K, :], row_index], axis=axis)
return a_part[0:K, :][a_sec_argsort_K, row_index]
else:
column_index = np.arange(matrix.shape[1 - axis])[:, None]
a_sec_argsort_K = np.argsort(matrix[column_index, a_part[:, 0:K]], axis=axis)
return a_part[:, 0:K][column_index, a_sec_argsort_K]
def knn_point_np(k, reference_pts, query_pts):
'''
:param k: number of k in k-nn search
:param reference_pts: (N, 3) float32 array, input points
:param query_pts: (M, 3) float32 array, query points
:return:
val: (batch_size, npoint, k) float32 array, L2 distances
idx: (batch_size, npoint, k) int32 array, indices to input points
'''
N, _ = reference_pts.shape
M, _ = query_pts.shape
reference_pts = reference_pts.reshape(1, N, -1).repeat(M, axis=0)
query_pts = query_pts.reshape(M, 1, -1).repeat(N, axis=1)
dist = np.sum((reference_pts - query_pts) ** 2, -1)
idx = partition_arg_topK(dist, K=k, axis=1)
val = np.take_along_axis ( dist , idx, axis=1)
return np.sqrt(val), idx
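# Example (illustrative sketch, not part of the original file):
#   ref = np.random.rand(1000, 3); query = np.random.rand(50, 3)
#   dists, idx = knn_point_np(3, ref, query)   # dists, idx: [50, 3], nearest neighbor first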
def blend_scene_flow (query_loc, reference_loc, reference_flow , knn=3) :
'''approximate flow on query points
    this function assumes query points are sub-/un-sampled from reference locations
@param query_loc:[m,3]
@param reference_loc:[n,3]
@param reference_flow:[n,3]
@param knn:
@return:
blended_flow:[m,3]
'''
dists, idx = knn_point_np (knn, reference_loc, query_loc)
dists[dists < 1e-10] = 1e-10
weight = 1.0 / dists
weight = weight / np.sum(weight, -1, keepdims=True) # [B,N,3]
blended_flow = np.sum (reference_flow [idx] * weight.reshape ([-1, knn, 1]), axis=1, keepdims=False)
return blended_flow
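# Example (illustrative sketch, not part of the original file): transfer dense flow
# defined on a full-resolution cloud onto its grid-subsampled version:
#   coarse_flow = blend_scene_flow(coarse_pcd, full_pcd, full_flow, knn=3)
#   # each coarse point gets an inverse-distance weighted average of its 3 nearest flows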
def multual_nn_correspondence(src_pcd_deformed, tgt_pcd, search_radius=0.3, knn=1):
src_idx = np.arange(src_pcd_deformed.shape[0])
s2t_dists, ref_tgt_idx = knn_point_np (knn, tgt_pcd, src_pcd_deformed)
s2t_dists, ref_tgt_idx = s2t_dists[:,0], ref_tgt_idx [:, 0]
valid_distance = s2t_dists < search_radius
_, ref_src_idx = knn_point_np (knn, src_pcd_deformed, tgt_pcd)
    ref_src_idx = ref_src_idx[:, 0]
cycle_src_idx = ref_src_idx [ ref_tgt_idx ]
is_mutual_nn = cycle_src_idx == src_idx
mutual_nn = np.logical_and( is_mutual_nn, valid_distance)
correspondences = np.stack([src_idx [ mutual_nn ], ref_tgt_idx[mutual_nn] ] , axis=0)
    return correspondences

lepard | lepard-main/datasets/__init__.py

lepard | lepard-main/datasets/_3dmatch.py
import os, sys, glob, torch
# sys.path.append("../")
[sys.path.append(i) for i in ['.', '..']]
import numpy as np
import torch
import random
from scipy.spatial.transform import Rotation
from torch.utils.data import Dataset
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, KDTree_corr
from lib.utils import load_obj
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, get_correspondences
class _3DMatch(Dataset):
def __init__(self, config,split, data_augmentation=True):
super(_3DMatch, self).__init__()
assert split in ['train','val','test']
if 'overfit' in config.exp_dir:
d_slice = config.batch_size
else :
d_slice = None
self.infos = self.read_entries( config.split[split] , config.data_root, d_slice=d_slice )
self.base_dir = config.data_root
self.data_augmentation = data_augmentation
self.config = config
self.rot_factor = 1.
self.augment_noise = config.augment_noise
self.max_points = 30000
self.overlap_radius = 0.0375
def read_entries (self, split, data_root, d_slice=None, shuffle= True):
infos = load_obj(split) # we use the split prepared by Predator
if d_slice:
for k, v in infos.items():
infos[k] = v[:d_slice]
return infos
def __len__(self):
return len(self.infos['rot'])
def __getitem__(self, item, debug=False):
# get transformation
rot = self.infos['rot'][item]
trans = self.infos['trans'][item]
if 'gt_cov' in self.infos:
gt_cov = self.infos['gt_cov'][item]
else :
gt_cov = None
# get pointcloud
src_path = os.path.join(self.base_dir, self.infos['src'][item])
tgt_path = os.path.join(self.base_dir, self.infos['tgt'][item])
src_pcd = torch.load(src_path)
tgt_pcd = torch.load(tgt_path)
# if we get too many points, we do some downsampling
if (src_pcd.shape[0] > self.max_points):
idx = np.random.permutation(src_pcd.shape[0])[:self.max_points]
src_pcd = src_pcd[idx]
if (tgt_pcd.shape[0] > self.max_points):
idx = np.random.permutation(tgt_pcd.shape[0])[:self.max_points]
tgt_pcd = tgt_pcd[idx]
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.02
# mlab.points3d(s_pc[ :, 0] , s_pc[ :, 1], s_pc[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.show()
# add gaussian noise
if self.data_augmentation:
# rotate the point cloud
euler_ab = np.random.rand(3) * np.pi * 2 / self.rot_factor # anglez, angley, anglex
rot_ab = Rotation.from_euler('zyx', euler_ab).as_matrix()
if (np.random.rand(1)[0] > 0.5):
src_pcd = np.matmul(rot_ab, src_pcd.T).T
rot = np.matmul(rot, rot_ab.T)
else:
tgt_pcd = np.matmul(rot_ab, tgt_pcd.T).T
rot = np.matmul(rot_ab, rot)
trans = np.matmul(rot_ab, trans)
src_pcd += (np.random.rand(src_pcd.shape[0], 3) - 0.5) * self.augment_noise
tgt_pcd += (np.random.rand(tgt_pcd.shape[0], 3) - 0.5) * self.augment_noise
# get correspondence at fine level
tsfm = to_tsfm(rot, trans)
correspondences = get_correspondences(to_o3d_pcd(src_pcd), to_o3d_pcd(tgt_pcd), tsfm,self.overlap_radius)
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.02
# mlab.points3d(s_pc[ :, 0] , s_pc[ :, 1], s_pc[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.show()
if (trans.ndim == 1):
trans = trans[:, None]
src_feats = np.ones_like(src_pcd[:, :1]).astype(np.float32)
tgt_feats = np.ones_like(tgt_pcd[:, :1]).astype(np.float32)
rot = rot.astype(np.float32)
trans = trans.astype(np.float32)
return src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trans, gt_cov
if __name__ == '__main__':
from lib.utils import load_config
from easydict import EasyDict as edict
from lib.tictok import Timers
import yaml
def join(loader, node):
seq = loader.construct_sequence(node)
return '_'.join([str(i) for i in seq])
yaml.add_constructor('!join', join)
config = "/home/liyang/workspace/Regformer/configs/train/3dmatch.yaml"
with open(config,'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
config = edict(config)
config.timers=Timers()
D = _3DMatch(config, "test")
for i in range (len(D)):
try:
if i%1000 == 0 :
print (i,"/",len(D))
D.__getitem__(i, debug=True)
except:
pass
# print ( D.data_entries[i] )
# print (os.remove(D.data_entries[i]) )
lepard | lepard-main/configs/models.py
architectures = dict()
kpfcn_backbone = [
'simple',
'resnetb',
'resnetb_strided',
'resnetb',
'resnetb',
'resnetb_strided',
'resnetb',
'resnetb',
'resnetb_strided',
'resnetb',
'resnetb',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary'
]
architectures['3dmatch'] = kpfcn_backbone
architectures['4dmatch'] = kpfcn_backbone
lepard | lepard-main/lib/tester.py
from lib.trainer import Trainer
import torch
from tqdm import tqdm
from models.loss import MatchMotionLoss as MML
import numpy as np
from models.matching import Matching as CM
import math
class _3DMatchTester(Trainer):
"""
3DMatch tester
"""
def __init__(self,args):
Trainer.__init__(self, args)
def test(self):
n = 3
afmr = 0.
arr = 0
air = 0
for i in range(n): # combat ransac nondeterministic
thr =0.05
rr, ir, fmr = self.test_thr(thr)
afmr+=fmr
arr+=rr
air+=ir
print( "conf_threshold", thr, "registration recall:", rr, " Inlier rate:", ir, "FMR:", fmr)
print("average registration recall:", arr / n, afmr/n, air/n)
# print ("registration recall:", self.test_thr())
def test_thr(self, conf_threshold=None):
# print('Start to evaluate on test datasets...')
# os.makedirs(f'{self.snapshot_dir}/{self.config.dataset}',exist_ok=True)
num_iter = math.ceil(len(self.loader['test'].dataset) // self.loader['test'].batch_size)
c_loader_iter = self.loader['test'].__iter__()
self.model.eval()
success1 = 0.
IR=0.
FMR=0.
with torch.no_grad():
for idx in tqdm(range(num_iter)): # loop through this epoch
##################################
if self.timers: self.timers.tic('load batch')
                inputs = next(c_loader_iter)
for k, v in inputs.items():
if type(v) == list:
inputs[k] = [item.to(self.device) for item in v]
elif type(v) in [dict, float, type(None), np.ndarray]:
pass
else:
inputs[k] = v.to(self.device)
if self.timers: self.timers.toc('load batch')
##################################
if self.timers: self.timers.tic('forward pass')
data = self.model(inputs, timers=self.timers) # [N1, C1], [N2, C2]
if self.timers: self.timers.toc('forward pass')
match_pred, _, _ = CM.get_match(data['conf_matrix_pred'], thr=conf_threshold, mutual=False)
rot, trn = MML.ransac_regist_coarse(data['s_pcd'], data['t_pcd'], data['src_mask'], data['tgt_mask'], match_pred)
ir = MML.compute_inlier_ratio(match_pred, data, inlier_thr=0.1).mean()
rr1 = MML.compute_registration_recall(rot, trn, data, thr=0.2) # 0.2m
vis = False
if vis:
pcd = data['points'][0].cpu().numpy()
lenth = data['stack_lengths'][0][0]
spcd, tpcd = pcd[:lenth] , pcd[lenth:]
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.02
# mlab.points3d(s_pc[ :, 0] , s_pc[ :, 1], s_pc[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.points3d(spcd[:, 0], spcd[:, 1], spcd[:, 2], scale_factor=scale_factor,
color=c_red)
mlab.points3d(tpcd[:, 0], tpcd[:, 1], tpcd[:, 2], scale_factor=scale_factor,
color=c_blue)
mlab.show()
spcd = ( np.matmul(rot, spcd.T) + trn ).T
mlab.points3d(spcd[:, 0], spcd[:, 1], spcd[:, 2], scale_factor=scale_factor,
color=c_red)
mlab.points3d(tpcd[:, 0], tpcd[:, 1], tpcd[:, 2], scale_factor=scale_factor,
color=c_blue)
mlab.show()
bs = len(rot)
assert bs==1
success1 += bs * rr1
IR += bs*ir
FMR += (ir>0.05).float()
recall1 = success1/len(self.loader['test'].dataset)
IRate = IR/len(self.loader['test'].dataset)
FMR = FMR/len(self.loader['test'].dataset)
return recall1, IRate, FMR
def blend_anchor_motion (query_loc, reference_loc, reference_flow , knn=3, search_radius=0.1) :
'''approximate flow on query points
    this function assumes query points are sub- or un-sampled from reference locations
@param query_loc:[m,3]
@param reference_loc:[n,3]
@param reference_flow:[n,3]
@param knn:
@return:
blended_flow:[m,3]
'''
from datasets.utils import knn_point_np
dists, idx = knn_point_np (knn, reference_loc, query_loc)
dists[dists < 1e-10] = 1e-10
mask = dists>search_radius
dists[mask] = 1e+10
weight = 1.0 / dists
weight = weight / np.sum(weight, -1, keepdims=True) # [B,N,3]
blended_flow = np.sum (reference_flow [idx] * weight.reshape ([-1, knn, 1]), axis=1, keepdims=False)
mask = mask.sum(axis=1)<3
return blended_flow, mask
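# Note (added for clarity): blend_anchor_motion is the same inverse-distance blending as
# datasets.utils.blend_scene_flow, except that anchors farther than `search_radius` get an
# effectively zero weight, and the returned mask is False for query points whose k nearest
# anchors all lie beyond that radius.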
def compute_nrfmr( match_pred, data, recall_thr=0.04):
s_pcd, t_pcd = data['s_pcd'], data['t_pcd']
s_pcd_raw = data ['src_pcd_list']
sflow_list = data['sflow_list']
metric_index_list = data['metric_index_list']
batched_rot = data['batched_rot'] # B,3,3
batched_trn = data['batched_trn']
nrfmr = 0.
for i in range ( len(s_pcd_raw)):
# get the metric points' transformed position
metric_index = metric_index_list[i]
sflow = sflow_list[i]
s_pcd_raw_i = s_pcd_raw[i]
metric_pcd = s_pcd_raw_i [ metric_index ]
metric_sflow = sflow [ metric_index ]
metric_pcd_deformed = metric_pcd + metric_sflow
metric_pcd_wrapped_gt = ( torch.matmul( batched_rot[i], metric_pcd_deformed.T) + batched_trn[i] ).T
# use the match prediction as the motion anchor
match_pred_i = match_pred[ match_pred[:, 0] == i ]
s_id , t_id = match_pred_i[:,1], match_pred_i[:,2]
s_pcd_matched= s_pcd[i][s_id]
t_pcd_matched= t_pcd[i][t_id]
motion_pred = t_pcd_matched - s_pcd_matched
metric_motion_pred, valid_mask = blend_anchor_motion(
metric_pcd.cpu().numpy(), s_pcd_matched.cpu().numpy(), motion_pred.cpu().numpy(), knn=3, search_radius=0.1)
metric_pcd_wrapped_pred = metric_pcd + torch.from_numpy(metric_motion_pred).to(metric_pcd)
debug = False
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.013
metric_pcd_wrapped_gt = metric_pcd_wrapped_gt.cpu()
metric_pcd_wrapped_pred = metric_pcd_wrapped_pred.cpu()
err = metric_pcd_wrapped_pred - metric_pcd_wrapped_gt
mlab.points3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2], scale_factor=scale_factor, color=c_pink)
mlab.points3d(metric_pcd_wrapped_pred[ :, 0] , metric_pcd_wrapped_pred[ :, 1], metric_pcd_wrapped_pred[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.quiver3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2], err[:, 0], err[:, 1], err[:, 2],
scale_factor=1, mode='2ddash', line_width=1.)
mlab.show()
dist = torch.sqrt( torch.sum( (metric_pcd_wrapped_pred - metric_pcd_wrapped_gt)**2, dim=1 ) )
r = (dist < recall_thr).float().sum() / len(dist)
nrfmr = nrfmr + r
nrfmr = nrfmr /len(s_pcd_raw)
return nrfmr
class _4DMatchTester(Trainer):
"""
    4DMatch tester
"""
def __init__(self,args):
Trainer.__init__(self, args)
def test(self):
for thr in [ 0.05, 0.1, 0.2]:
# for thr in [ 0.1 ]:
import time
start = time.time()
ir, fmr, nspl = self.test_thr(thr)
print( "conf_threshold", thr, "NFMR:", fmr, " Inlier rate:", ir, "Number sample:", nspl)
print( "time costs:", time.time() - start)
def test_thr(self, conf_threshold=None):
num_iter = math.ceil(len(self.loader['test'].dataset) // self.loader['test'].batch_size)
c_loader_iter = self.loader['test'].__iter__()
self.model.eval()
assert self.loader['test'].batch_size == 1
IR=0.
NR_FMR=0.
inlier_thr = recall_thr = 0.04
n_sample = 0.
with torch.no_grad():
for idx in tqdm(range(num_iter)): # loop through this epoch
##################################
if self.timers: self.timers.tic('load batch')
                inputs = next(c_loader_iter)
for k, v in inputs.items():
if type(v) == list:
inputs[k] = [item.to(self.device) for item in v]
elif type(v) in [ dict, float, type(None), np.ndarray]:
pass
else:
inputs[k] = v.to(self.device)
if self.timers: self.timers.toc('load batch')
##################################
if self.timers: self.timers.tic('forward pass')
data = self.model(inputs, timers=self.timers) # [N1, C1], [N2, C2]
if self.timers: self.timers.toc('forward pass')
match_pred, _, _ = CM.get_match(data['conf_matrix_pred'], thr=conf_threshold, mutual=True)
ir = MML.compute_inlier_ratio(match_pred, data, inlier_thr=inlier_thr, s2t_flow=data['coarse_flow'][0][None] )[0]
nrfmr = compute_nrfmr(match_pred, data, recall_thr=recall_thr)
IR += ir
NR_FMR += nrfmr
n_sample += match_pred.shape[0]
IRate = IR/len(self.loader['test'].dataset)
NR_FMR = NR_FMR/len(self.loader['test'].dataset)
n_sample = n_sample/len(self.loader['test'].dataset)
if self.timers: self.timers.print()
return IRate, NR_FMR, n_sample
def get_trainer(config):
if config.dataset == '3dmatch':
return _3DMatchTester(config)
elif config.dataset == '4dmatch':
return _4DMatchTester(config)
else:
raise NotImplementedError
lepard | lepard-main/lib/visualization.py
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
c_green = (0. / 255., 255. / 255., 0. / 255.)
c_gray1 = (100. / 255., 100. / 255., 100. / 255.)
c_gray2 = (175. / 255., 175. / 255., 175. / 255.)
def viz_flow_mayavi( s_pc,flow = None, s_pc_deformed=None, t_pc=None, scale_factor = 0.02):
import mayavi.mlab as mlab
mlab.points3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2], scale_factor=scale_factor, color=c_red)
if flow is not None:
mlab.quiver3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2],
flow[:, 0], flow[:, 1], flow[:, 2], scale_factor=1)
if t_pc is not None:
mlab.points3d(t_pc[:, 0], t_pc[:, 1], t_pc[:, 2], scale_factor=scale_factor, color=c_blue)
if s_pc_deformed is not None:
mlab.points3d(s_pc_deformed[:, 0], s_pc_deformed[:, 1], s_pc_deformed[:, 2], scale_factor=scale_factor, color=c_green)
mlab.show()
def viz_coarse_nn_correspondence_mayavi(s_pc, t_pc, correspondence, f_src_pcd=None, f_tgt_pcd=None, scale_factor = 0.02):
'''
@param s_pc: [S,3]
@param t_pc: [T,3]
@param correspondence: [2,K]
@param f_src_pcd: [S1,3]
@param f_tgt_pcd: [T1,3]
@param scale_factor:
@return:
'''
import mayavi
import mayavi.mlab as mlab
if f_src_pcd is not None:
mlab.points3d(f_src_pcd[:, 0], f_src_pcd[:, 1], f_src_pcd[:, 2], scale_factor=scale_factor * 0.25, color=c_gray1)
else:
mlab.points3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2], scale_factor=scale_factor*0.75, color=c_gray1)
if f_tgt_pcd is not None:
mlab.points3d(f_tgt_pcd[:, 0], f_tgt_pcd[:, 1], f_tgt_pcd[:, 2], scale_factor=scale_factor * 0.25, color=c_gray2)
else :
mlab.points3d(t_pc[:, 0], t_pc[:, 1], t_pc[:, 2], scale_factor=scale_factor*0.75, color=c_gray2)
s_cpts = s_pc[correspondence[0]]
t_cpts = t_pc[correspondence[1]]
flow = t_cpts-s_cpts
mlab.points3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], scale_factor=scale_factor , color=c_red)
mlab.points3d(t_cpts[:, 0], t_cpts[:, 1], t_cpts[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.quiver3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], flow[:, 0], flow[:, 1], flow[:, 2],
scale_factor=1, mode='2ddash', line_width=1.)
mlab.show()
lepard | lepard-main/lib/timer.py
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0.0
self.sq_sum = 0.0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
self.sq_sum += val ** 2 * n
self.var = self.sq_sum / self.count - self.avg ** 2
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.avg = 0.
def reset(self):
self.total_time = 0
self.calls = 0
self.start_time = 0
self.diff = 0
self.avg = 0
def tic(self):
        # using time.time instead of time.clock because time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.avg = self.total_time / self.calls
if average:
return self.avg
else:
return self.diff
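# Example (illustrative sketch, not part of the original file):
#   timer = Timer(); meter = AverageMeter()
#   timer.tic(); loss = 0.5                  # ... one training step ...
#   step_time = timer.toc(average=False)     # wall-clock time of this step
#   meter.update(loss)                       # meter.avg now tracks the running mean loss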
lepard | lepard-main/lib/benchmark_utils.py
import os,re,sys,json,yaml,random, glob, argparse, torch, pickle
from tqdm import tqdm
import numpy as np
from scipy.spatial.transform import Rotation
import open3d as o3d
_EPS = 1e-7 # To prevent division by zero
def viz_coarse_nn_correspondence_mayavi(s_pc, t_pc, good_c, bad_c, f_src_pcd=None, f_tgt_pcd=None, scale_factor=0.02):
'''
@param s_pc: [S,3]
@param t_pc: [T,3]
    @param good_c: [2,K] inlier correspondences (drawn in green)
    @param bad_c: [2,K] outlier correspondences (drawn in red)
@param f_src_pcd: [S1,3]
@param f_tgt_pcd: [T1,3]
@param scale_factor:
@return:
'''
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 0 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
c_green = (0. / 255., 255. / 255., 0. / 255.)
c_gray1 = (255 / 255., 255 / 255., 125 / 255.)
c_gray2 = (125. / 255., 125. / 255., 255. / 255.)
if f_src_pcd is not None:
mlab.points3d(f_src_pcd[:, 0], f_src_pcd[:, 1], f_src_pcd[:, 2], scale_factor=scale_factor * 0.25,
color=c_gray1)
else:
mlab.points3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2], scale_factor=scale_factor * 0.75, color=c_gray1)
if f_tgt_pcd is not None:
mlab.points3d(f_tgt_pcd[:, 0], f_tgt_pcd[:, 1], f_tgt_pcd[:, 2], scale_factor=scale_factor * 0.25,
color=c_gray2)
else:
mlab.points3d(t_pc[:, 0], t_pc[:, 1], t_pc[:, 2], scale_factor=scale_factor * 0.75, color=c_gray2)
s_cpts_god = s_pc[good_c[0]]
t_cpts_god = t_pc[good_c[1]]
flow_good = t_cpts_god - s_cpts_god
s_cpts_bd = s_pc[bad_c[0]]
t_cpts_bd = t_pc[bad_c[1]]
flow_bad = t_cpts_bd - s_cpts_bd
def match_draw(s_cpts, t_cpts, flow, color):
mlab.points3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], scale_factor=scale_factor * 0.35, color=c_blue)
mlab.points3d(t_cpts[:, 0], t_cpts[:, 1], t_cpts[:, 2], scale_factor=scale_factor * 0.35, color=c_pink)
mlab.quiver3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], flow[:, 0], flow[:, 1], flow[:, 2],
scale_factor=1, mode='2ddash', line_width=1., color=color)
match_draw(s_cpts_god, t_cpts_god, flow_good, c_green)
match_draw(s_cpts_bd, t_cpts_bd, flow_bad, c_red)
mlab.show()
def correspondence_viz(src_raw, tgt_raw, src_pcd, tgt_pcd, corrs, inlier_mask, max=200):
perm = np.random.permutation(corrs.shape[1])
ind = perm[:max]
corrs = corrs[:, ind]
inlier_mask = inlier_mask[ind]
good_c = corrs[:, inlier_mask]
bad_c = corrs[:, ~inlier_mask]
offset = np.array([[1.45, 0, 0]])
# src_pcd = src_pcd + offset
# src_raw = src_raw + offset
tgt_pcd = tgt_pcd + offset
tgt_raw = tgt_raw + offset
viz_coarse_nn_correspondence_mayavi(src_pcd, tgt_pcd, good_c, bad_c, src_raw, tgt_raw, scale_factor=0.07)
def fmr_wrt_distance(data,split,inlier_ratio_threshold=0.05):
"""
calculate feature match recall wrt distance threshold
"""
fmr_wrt_distance =[]
for distance_threshold in range(1,21):
inlier_ratios =[]
distance_threshold /=100.0
for idx in range(data.shape[0]):
inlier_ratio = (data[idx] < distance_threshold).mean()
inlier_ratios.append(inlier_ratio)
fmr = 0
for ele in split:
fmr += (np.array(inlier_ratios[ele[0]:ele[1]]) > inlier_ratio_threshold).mean()
fmr /= 8
fmr_wrt_distance.append(fmr*100)
return fmr_wrt_distance
def fmr_wrt_inlier_ratio(data, split, distance_threshold=0.1):
"""
calculate feature match recall wrt inlier ratio threshold
"""
fmr_wrt_inlier =[]
for inlier_ratio_threshold in range(1,21):
inlier_ratios =[]
inlier_ratio_threshold /=100.0
for idx in range(data.shape[0]):
inlier_ratio = (data[idx] < distance_threshold).mean()
inlier_ratios.append(inlier_ratio)
fmr = 0
for ele in split:
fmr += (np.array(inlier_ratios[ele[0]:ele[1]]) > inlier_ratio_threshold).mean()
fmr /= 8
fmr_wrt_inlier.append(fmr*100)
return fmr_wrt_inlier
def to_tensor(array):
"""
Convert array to tensor
"""
if(not isinstance(array,torch.Tensor)):
return torch.from_numpy(array).float()
else:
return array
def to_array(tensor):
"""
    Convert tensor to array
"""
if(not isinstance(tensor,np.ndarray)):
if(tensor.device == torch.device('cpu')):
return tensor.numpy()
else:
return tensor.cpu().numpy()
else:
return tensor
def to_tsfm(rot,trans):
tsfm = np.eye(4)
tsfm[:3,:3]=rot
tsfm[:3,3]=trans.flatten()
return tsfm
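# Example (illustrative sketch, not part of the original file):
#   tsfm = to_tsfm(rot, trans)                              # rot: [3,3], trans: [3,1] or [3]
#   src_in_tgt_frame = (tsfm[:3, :3] @ src.T + tsfm[:3, 3:4]).T   # apply the 4x4 transform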
def to_o3d_pcd(xyz):
"""
Convert tensor/array to open3d PointCloud
xyz: [N, 3]
"""
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(to_array(xyz))
return pcd
def to_o3d_feats(embedding):
"""
Convert tensor/array to open3d features
embedding: [N, 3]
"""
feats = o3d.registration.Feature()
feats.data = to_array(embedding).T
return feats
def get_correspondences(src_pcd, tgt_pcd, trans, search_voxel_size, K=None):
src_pcd.transform(trans)
correspondences = KDTree_corr ( src_pcd, tgt_pcd, search_voxel_size, K=None)
correspondences = torch.from_numpy(correspondences)
return correspondences
def KDTree_corr ( src_pcd_transformed, tgt_pcd, search_voxel_size, K=None):
pcd_tree = o3d.geometry.KDTreeFlann(tgt_pcd)
correspondences = []
for i, point in enumerate(src_pcd_transformed.points):
[count, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
correspondences.append([i, j])
correspondences = np.array(correspondences)
return correspondences
def get_blue():
"""
Get color blue for rendering
"""
return [0, 0.651, 0.929]
def get_yellow():
"""
Get color yellow for rendering
"""
return [1, 0.706, 0]
def random_sample(pcd, feats, N):
"""
Do random sampling to get exact N points and associated features
pcd: [N,3]
feats: [N,C]
"""
if(isinstance(pcd,torch.Tensor)):
n1 = pcd.size(0)
elif(isinstance(pcd, np.ndarray)):
n1 = pcd.shape[0]
if n1 == N:
return pcd, feats
if n1 > N:
choice = np.random.permutation(n1)[:N]
else:
choice = np.random.choice(n1, N)
return pcd[choice], feats[choice]
def get_angle_deviation(R_pred,R_gt):
"""
    Calculate the angle deviation between two rotation matrices
The rotation error is between [0,180]
Input:
R_pred: [B,3,3]
R_gt : [B,3,3]
Return:
degs: [B]
"""
R=np.matmul(R_pred,R_gt.transpose(0,2,1))
tr=np.trace(R,0,1,2)
rads=np.arccos(np.clip((tr-1)/2,-1,1)) # clip to valid range
degs=rads/np.pi*180
return degs
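# Example (illustrative sketch, not part of the original file):
#   degs = get_angle_deviation(R_pred[None], R_gt[None])   # -> array of shape [1], in degrees
#   rre_ok = degs < 15.0                                    # e.g. a 15-degree recall threshold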
def ransac_pose_estimation(src_pcd, tgt_pcd, src_feat, tgt_feat, mutual = False, distance_threshold = 0.05, ransac_n = 3):
"""
RANSAC pose estimation with two checkers
We follow D3Feat to set ransac_n = 3 for 3DMatch and ransac_n = 4 for KITTI.
For 3DMatch dataset, we observe significant improvement after changing ransac_n from 4 to 3.
"""
if(mutual):
if(torch.cuda.device_count()>=1):
device = torch.device('cuda')
else:
device = torch.device('cpu')
src_feat, tgt_feat = to_tensor(src_feat), to_tensor(tgt_feat)
scores = torch.matmul(src_feat.to(device), tgt_feat.transpose(0,1).to(device)).cpu()
selection = mutual_selection(scores[None,:,:])[0]
row_sel, col_sel = np.where(selection)
corrs = o3d.utility.Vector2iVector(np.array([row_sel,col_sel]).T)
src_pcd = to_o3d_pcd(src_pcd)
tgt_pcd = to_o3d_pcd(tgt_pcd)
result_ransac = o3d.registration.registration_ransac_based_on_correspondence(
source=src_pcd, target=tgt_pcd,corres=corrs,
max_correspondence_distance=distance_threshold,
estimation_method=o3d.registration.TransformationEstimationPointToPoint(False),
ransac_n=4,
criteria=o3d.registration.RANSACConvergenceCriteria(50000, 1000))
else:
src_pcd = to_o3d_pcd(src_pcd)
tgt_pcd = to_o3d_pcd(tgt_pcd)
src_feats = to_o3d_feats(src_feat)
tgt_feats = to_o3d_feats(tgt_feat)
result_ransac = o3d.registration.registration_ransac_based_on_feature_matching(
src_pcd, tgt_pcd, src_feats, tgt_feats,distance_threshold,
o3d.registration.TransformationEstimationPointToPoint(False), ransac_n,
[o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
o3d.registration.RANSACConvergenceCriteria(50000, 1000))
return result_ransac.transformation
def get_inlier_ratio(src_pcd, tgt_pcd, src_feat, tgt_feat, rot, trans, inlier_distance_threshold = 0.1):
"""
Compute inlier ratios with and without mutual check, return both
"""
src_pcd = to_tensor(src_pcd)
tgt_pcd = to_tensor(tgt_pcd)
src_feat = to_tensor(src_feat)
tgt_feat = to_tensor(tgt_feat)
rot, trans = to_tensor(rot), to_tensor(trans)
results =dict()
results['w']=dict()
results['wo']=dict()
if(torch.cuda.device_count()>=1):
device = torch.device('cuda')
else:
device = torch.device('cpu')
src_pcd = (torch.matmul(rot, src_pcd.transpose(0,1)) + trans).transpose(0,1)
scores = torch.matmul(src_feat.to(device), tgt_feat.transpose(0,1).to(device)).cpu()
########################################
# 1. calculate inlier ratios wo mutual check
_, idx = scores.max(-1)
dist = torch.norm(src_pcd- tgt_pcd[idx],dim=1)
results['wo']['distance'] = dist.numpy()
c_inlier_ratio = (dist < inlier_distance_threshold).float().mean()
results['wo']['inlier_ratio'] = c_inlier_ratio
########################################
# 2. calculate inlier ratios w mutual check
selection = mutual_selection(scores[None,:,:])[0]
row_sel, col_sel = np.where(selection)
dist = torch.norm(src_pcd[row_sel]- tgt_pcd[col_sel],dim=1)
results['w']['distance'] = dist.numpy()
c_inlier_ratio = (dist < inlier_distance_threshold).float().mean()
results['w']['inlier_ratio'] = c_inlier_ratio
return results
def mutual_selection(score_mat):
"""
Return a {0,1} matrix, the element is 1 if and only if it's maximum along both row and column
Args: np.array()
score_mat: [B,N,N]
Return:
mutuals: [B,N,N]
"""
score_mat=to_array(score_mat)
if(score_mat.ndim==2):
score_mat=score_mat[None,:,:]
mutuals=np.zeros_like(score_mat)
for i in range(score_mat.shape[0]): # loop through the batch
c_mat=score_mat[i]
flag_row=np.zeros_like(c_mat)
flag_column=np.zeros_like(c_mat)
max_along_row=np.argmax(c_mat,1)[:,None]
max_along_column=np.argmax(c_mat,0)[None,:]
np.put_along_axis(flag_row,max_along_row,1,1)
np.put_along_axis(flag_column,max_along_column,1,0)
        mutuals[i] = (flag_row.astype(bool)) & (flag_column.astype(bool))
    return mutuals.astype(bool)
lepard | lepard-main/lib/utils.py
import os,re,sys,json,yaml,random, argparse, torch, pickle
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import minkowski
_EPS = 1e-7 # To prevent division by zero
class Logger:
def __init__(self, path):
self.path = path
log_path = self.path + '/log'
if os.path.exists(log_path):
os.remove(log_path)
self.fw = open(log_path,'a')
def write(self, text):
self.fw.write(text)
self.fw.flush()
def close(self):
self.fw.close()
def save_obj(obj, path ):
"""
save a dictionary to a pickle file
"""
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load_obj(path):
"""
read a dictionary from a pickle file
"""
with open(path, 'rb') as f:
return pickle.load(f)
def load_config(path):
"""
Loads config file:
Args:
path (str): path to the config file
Returns:
config (dict): dictionary of the configuration parameters, merge sub_dicts
"""
with open(path,'r') as f:
cfg = yaml.safe_load(f)
config = dict()
for key, value in cfg.items():
for k,v in value.items():
config[k] = v
return config
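# Example (illustrative sketch, not part of the original file): a YAML file such as
#   train: {max_epoch: 150, lr: 0.01}
#   dataset: {name: 3dmatch}
# is flattened by load_config into {'max_epoch': 150, 'lr': 0.01, 'name': '3dmatch'},
# i.e. the top-level section names are dropped and their keys merged into one dict.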
def setup_seed(seed):
"""
fix random seed for deterministic training
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def square_distance(src, dst, normalised = False):
"""
Calculate Euclid distance between each two points.
Args:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Returns:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
if(normalised):
dist += 2
else:
dist += torch.sum(src ** 2, dim=-1)[:, :, None]
dist += torch.sum(dst ** 2, dim=-1)[:, None, :]
dist = torch.clamp(dist, min=1e-12, max=None)
return dist
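# Example (illustrative sketch, not part of the original file):
#   src, dst = torch.rand(2, 100, 3), torch.rand(2, 50, 3)
#   d = square_distance(src, dst)             # -> [2, 100, 50] squared distances
#   nn_idx = d.argmin(dim=-1)                 # nearest dst index for every src point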
def validate_gradient(model):
"""
Confirm all the gradients are non-nan and non-inf
"""
for name, param in model.named_parameters():
if param.grad is not None:
if torch.any(torch.isnan(param.grad)):
return False
if torch.any(torch.isinf(param.grad)):
return False
return True
def natural_key(string_):
"""
Sort strings by numbers in the name
"""
    return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]

lepard | lepard-main/lib/ply.py
#
#
# 0===============================0
# | PLY files reader/writer |
# 0===============================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# function to read/write .ply files
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 10/02/2017
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import numpy as np
import sys
# Define PLY types
ply_dtypes = dict([
(b'int8', 'i1'),
(b'char', 'i1'),
(b'uint8', 'u1'),
(b'uchar', 'u1'),
(b'int16', 'i2'),
(b'short', 'i2'),
(b'uint16', 'u2'),
(b'ushort', 'u2'),
(b'int32', 'i4'),
(b'int', 'i4'),
(b'uint32', 'u4'),
(b'uint', 'u4'),
(b'float32', 'f4'),
(b'float', 'f4'),
(b'float64', 'f8'),
(b'double', 'f8')
])
# Numpy reader format
valid_formats = {'ascii': '', 'binary_big_endian': '>',
'binary_little_endian': '<'}
# ----------------------------------------------------------------------------------------------------------------------
#
# Functions
# \***************/
#
def parse_header(plyfile, ext):
# Variables
line = []
properties = []
num_points = None
while b'end_header' not in line and line != b'':
line = plyfile.readline()
if b'element' in line:
line = line.split()
num_points = int(line[2])
elif b'property' in line:
line = line.split()
properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
return num_points, properties
def parse_mesh_header(plyfile, ext):
# Variables
line = []
vertex_properties = []
num_points = None
num_faces = None
current_element = None
while b'end_header' not in line and line != b'':
line = plyfile.readline()
# Find point element
if b'element vertex' in line:
current_element = 'vertex'
line = line.split()
num_points = int(line[2])
elif b'element face' in line:
current_element = 'face'
line = line.split()
num_faces = int(line[2])
elif b'property' in line:
if current_element == 'vertex':
line = line.split()
vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
            elif current_element == 'face':
                if not line.startswith(b'property list uchar int'):
                    raise ValueError('Unsupported faces property : ' + line.decode())
return num_points, num_faces, vertex_properties
def read_ply(filename, triangular_mesh=False):
"""
Read ".ply" files
Parameters
----------
filename : string
the name of the file to read.
Returns
-------
result : array
data stored in the file
Examples
--------
Store data in file
>>> points = np.random.rand(5, 3)
>>> values = np.random.randint(2, size=10)
>>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])
Read the file
>>> data = read_ply('example.ply')
>>> values = data['values']
array([0, 0, 1, 1, 0])
>>> points = np.vstack((data['x'], data['y'], data['z'])).T
array([[ 0.466 0.595 0.324]
[ 0.538 0.407 0.654]
[ 0.850 0.018 0.988]
[ 0.395 0.394 0.363]
[ 0.873 0.996 0.092]])
"""
with open(filename, 'rb') as plyfile:
        # Check that the file starts with 'ply'
        if b'ply' not in plyfile.readline():
            raise ValueError('The file does not start with the word ply')
# get binary_little/big or ascii
fmt = plyfile.readline().split()[1].decode()
if fmt == "ascii":
raise ValueError('The file is not binary')
# get extension for building the numpy dtypes
ext = valid_formats[fmt]
# PointCloud reader vs mesh reader
if triangular_mesh:
# Parse header
num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
# Get point data
vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)
# Get face data
face_properties = [('k', ext + 'u1'),
('v1', ext + 'i4'),
('v2', ext + 'i4'),
('v3', ext + 'i4')]
faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)
# Return vertex data and concatenated faces
faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T
data = [vertex_data, faces]
else:
# Parse header
num_points, properties = parse_header(plyfile, ext)
# Get data
data = np.fromfile(plyfile, dtype=properties, count=num_points)
return data
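# Mesh example (a sketch; 'example_mesh.ply' is a hypothetical binary triangular mesh):
# with triangular_mesh=True the function returns [vertex_data, faces], where faces is
# an integer array of shape [num_faces, 3].
#   vertex_data, faces = read_ply('example_mesh.ply', triangular_mesh=True)
#   points = np.vstack((vertex_data['x'], vertex_data['y'], vertex_data['z'])).T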
def header_properties(field_list, field_names):
# List of lines to write
lines = []
# First line describing element vertex
lines.append('element vertex %d' % field_list[0].shape[0])
# Properties lines
i = 0
for fields in field_list:
for field in fields.T:
lines.append('property %s %s' % (field.dtype.name, field_names[i]))
i += 1
return lines
def write_ply(filename, field_list, field_names, triangular_faces=None):
"""
Write ".ply" files
Parameters
----------
filename : string
the name of the file to which the data is saved. A '.ply' extension will be appended to the
        file name if it does not already have one.
field_list : list, tuple, numpy array
the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a
tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered
as one field.
field_names : list
        the name of each field as a list of strings. Has to be the same length as the number of
fields.
Examples
--------
>>> points = np.random.rand(10, 3)
>>> write_ply('example1.ply', points, ['x', 'y', 'z'])
>>> values = np.random.randint(2, size=10)
>>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values'])
>>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8)
    >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', 'values']
>>> write_ply('example3.ply', [points, colors, values], field_names)
"""
# Format list input to the right form
field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,))
for i, field in enumerate(field_list):
if field.ndim < 2:
field_list[i] = field.reshape(-1, 1)
if field.ndim > 2:
print('fields have more than 2 dimensions')
return False
    # Check that all fields have the same number of points
n_points = [field.shape[0] for field in field_list]
if not np.all(np.equal(n_points, n_points[0])):
print('wrong field dimensions')
return False
    # Check that field_names and field_list have the same number of columns
n_fields = np.sum([field.shape[1] for field in field_list])
if (n_fields != len(field_names)):
print('wrong number of field names')
return False
# Add extension if not there
if not filename.endswith('.ply'):
filename += '.ply'
# open in text mode to write the header
with open(filename, 'w') as plyfile:
# First magical word
header = ['ply']
# Encoding format
header.append('format binary_' + sys.byteorder + '_endian 1.0')
# Points properties description
header.extend(header_properties(field_list, field_names))
        # Add faces if needed
if triangular_faces is not None:
header.append('element face {:d}'.format(triangular_faces.shape[0]))
header.append('property list uchar int vertex_indices')
# End of header
header.append('end_header')
# Write all lines
for line in header:
plyfile.write("%s\n" % line)
# open in binary/append to use tofile
with open(filename, 'ab') as plyfile:
# Create a structured array
i = 0
type_list = []
for fields in field_list:
for field in fields.T:
type_list += [(field_names[i], field.dtype.str)]
i += 1
data = np.empty(field_list[0].shape[0], dtype=type_list)
i = 0
for fields in field_list:
for field in fields.T:
data[field_names[i]] = field
i += 1
data.tofile(plyfile)
if triangular_faces is not None:
triangular_faces = triangular_faces.astype(np.int32)
type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)]
data = np.empty(triangular_faces.shape[0], dtype=type_list)
data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)
data['0'] = triangular_faces[:, 0]
data['1'] = triangular_faces[:, 1]
data['2'] = triangular_faces[:, 2]
data.tofile(plyfile)
return True
def describe_element(name, df):
""" Takes the columns of the dataframe and builds a ply-like description
Parameters
----------
name: str
df: pandas DataFrame
Returns
-------
element: list[str]
"""
property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
element = ['element ' + name + ' ' + str(len(df))]
if name == 'face':
element.append("property list uchar int points_indices")
else:
for i in range(len(df.columns)):
# get first letter of dtype to infer format
f = property_formats[str(df.dtypes[i])[0]]
element.append('property ' + f + ' ' + df.columns.values[i])
return element
| 10,301 | 28.267045 | 120 | py |
lepard | lepard-main/lib/tictok.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import numpy as np
from collections import defaultdict
class Timer(object):
def __init__(self):
self.reset()
def tic(self):
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
def tictoc(self, diff):
self.diff = diff
self.total_time += diff
self.calls += 1
def total(self):
""" return the total amount of time """
return self.total_time
def avg(self):
""" return the average amount of time """
return self.total_time / float(self.calls)
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
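# Usage sketch (not part of the original file): time a code section with tic/toc and
# read back the running average and total.
#   t = Timer()
#   t.tic()
#   run_step()  # hypothetical timed section
#   t.toc()
#   print(t.avg(), t.total())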
class Timers(object):
def __init__(self):
self.timers = defaultdict(Timer)
def tic(self, key):
self.timers[key].tic()
def toc(self, key):
self.timers[key].toc()
def tictoc(self, key, diff):
self.timers[key].tictoc( diff)
def print(self, key=None):
if key is None:
# print all time
for k, v in self.timers.items():
print("{:}: \t average {:.4f}, total {:.4f} ,\t calls {:}".format(k.ljust(30), v.avg(), v.total_time, v.calls))
else:
print("Average time for {:}: {:}".format(key, self.timers[key].avg()))
def get_avg(self, key):
return self.timers[key].avg() | 1,644 | 24.307692 | 130 | py |
lepard | lepard-main/lib/__init__.py | 0 | 0 | 0 | py |
|
lepard | lepard-main/lib/trainer.py | import gc
import os
import torch
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
from tqdm import tqdm
from lib.timer import AverageMeter
from lib.utils import Logger, validate_gradient
from lib.tictok import Timers
class Trainer(object):
def __init__(self, args):
self.config = args
# parameters
self.start_epoch = 1
self.max_epoch = args.max_epoch
self.save_dir = args.save_dir
self.device = args.device
self.verbose = args.verbose
self.model = args.model
self.model = self.model.to(self.device)
self.optimizer = args.optimizer
self.scheduler = args.scheduler
self.scheduler_freq = args.scheduler_freq
self.snapshot_dir = args.snapshot_dir
self.iter_size = args.iter_size
self.verbose_freq = args.verbose_freq // args.batch_size + 1
if 'overfit' in self.config.exp_dir:
self.verbose_freq = 1
self.loss = args.desc_loss
self.best_loss = 1e5
self.best_recall = -1e5
self.summary_writer = SummaryWriter(log_dir=args.tboard_dir)
self.logger = Logger(args.snapshot_dir)
self.logger.write(f'#parameters {sum([x.nelement() for x in self.model.parameters()]) / 1000000.} M\n')
if (args.pretrain != ''):
self._load_pretrain(args.pretrain)
self.loader = dict()
self.loader['train'] = args.train_loader
self.loader['val'] = args.val_loader
self.loader['test'] = args.test_loader
self.timers = args.timers
with open(f'{args.snapshot_dir}/model', 'w') as f:
f.write(str(self.model))
f.close()
def _snapshot(self, epoch, name=None):
state = {
'epoch': epoch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'best_loss': self.best_loss,
'best_recall': self.best_recall
}
if name is None:
filename = os.path.join(self.save_dir, f'model_{epoch}.pth')
else:
filename = os.path.join(self.save_dir, f'model_{name}.pth')
self.logger.write(f"Save model to {filename}\n")
torch.save(state, filename, _use_new_zipfile_serialization=False)
def _load_pretrain(self, resume):
print ("loading pretrained", resume)
if os.path.isfile(resume):
state = torch.load(resume)
self.model.load_state_dict(state['state_dict'])
self.start_epoch = state['epoch']
self.scheduler.load_state_dict(state['scheduler'])
self.optimizer.load_state_dict(state['optimizer'])
self.best_loss = state['best_loss']
self.best_recall = state['best_recall']
self.logger.write(f'Successfully load pretrained model from {resume}!\n')
self.logger.write(f'Current best loss {self.best_loss}\n')
self.logger.write(f'Current best recall {self.best_recall}\n')
else:
raise ValueError(f"=> no checkpoint found at '{resume}'")
def _get_lr(self, group=0):
return self.optimizer.param_groups[group]['lr']
def inference_one_batch(self, inputs, phase):
assert phase in ['train', 'val', 'test']
inputs ['phase'] = phase
if (phase == 'train'):
self.model.train()
if self.timers: self.timers.tic('forward pass')
data = self.model(inputs, timers=self.timers) # [N1, C1], [N2, C2]
if self.timers: self.timers.toc('forward pass')
if self.timers: self.timers.tic('compute loss')
loss_info = self.loss( data)
if self.timers: self.timers.toc('compute loss')
if self.timers: self.timers.tic('backprop')
loss_info['loss'].backward()
if self.timers: self.timers.toc('backprop')
else:
self.model.eval()
with torch.no_grad():
data = self.model(inputs, timers=self.timers) # [N1, C1], [N2, C2]
loss_info = self.loss(data)
return loss_info
def inference_one_epoch(self, epoch, phase):
gc.collect()
assert phase in ['train', 'val', 'test']
# init stats meter
stats_meter = None # self.stats_meter()
num_iter = int(len(self.loader[phase].dataset) // self.loader[phase].batch_size) # drop last incomplete batch
c_loader_iter = self.loader[phase].__iter__()
self.optimizer.zero_grad()
for c_iter in tqdm(range(num_iter)): # loop through this epoch
if self.timers: self.timers.tic('one_iteration')
##################################
if self.timers: self.timers.tic('load batch')
            inputs = next(c_loader_iter)
# for gpu_div_i, _ in enumerate(inputs):
for k, v in inputs.items():
if type(v) == list:
inputs [k] = [item.to(self.device) for item in v]
elif type(v) in [ dict, float, type(None), np.ndarray]:
pass
else:
inputs [k] = v.to(self.device)
if self.timers: self.timers.toc('load batch')
##################################
if self.timers: self.timers.tic('inference_one_batch')
loss_info = self.inference_one_batch(inputs, phase)
if self.timers: self.timers.toc('inference_one_batch')
###################################################
# run optimisation
# if self.timers: self.timers.tic('run optimisation')
if ((c_iter + 1) % self.iter_size == 0 and phase == 'train'):
gradient_valid = validate_gradient(self.model)
if (gradient_valid):
self.optimizer.step()
else:
self.logger.write('gradient not valid\n')
self.optimizer.zero_grad()
# if self.timers: self.timers.toc('run optimisation')
################################
torch.cuda.empty_cache()
if stats_meter is None:
stats_meter = dict()
for key, _ in loss_info.items():
stats_meter[key] = AverageMeter()
for key, value in loss_info.items():
stats_meter[key].update(value)
if phase == 'train' :
if (c_iter + 1) % self.verbose_freq == 0 and self.verbose :
curr_iter = num_iter * (epoch - 1) + c_iter
for key, value in stats_meter.items():
self.summary_writer.add_scalar(f'{phase}/{key}', value.avg, curr_iter)
dump_mess=True
if dump_mess:
message = f'{phase} Epoch: {epoch} [{c_iter + 1:4d}/{num_iter}]'
for key, value in stats_meter.items():
message += f'{key}: {value.avg:.2f}\t'
self.logger.write(message + '\n')
if self.timers: self.timers.toc('one_iteration')
# report evaluation score at end of each epoch
if phase in ['val', 'test']:
for key, value in stats_meter.items():
self.summary_writer.add_scalar(f'{phase}/{key}', value.avg, epoch)
message = f'{phase} Epoch: {epoch}'
for key, value in stats_meter.items():
message += f'{key}: {value.avg:.2f}\t'
self.logger.write(message + '\n')
return stats_meter
def train(self):
print('start training...')
for epoch in range(self.start_epoch, self.max_epoch):
with torch.autograd.set_detect_anomaly(True):
if self.timers: self.timers.tic('run one epoch')
stats_meter = self.inference_one_epoch(epoch, 'train')
if self.timers: self.timers.toc('run one epoch')
self.scheduler.step()
if 'overfit' in self.config.exp_dir :
if stats_meter['loss'].avg < self.best_loss:
self.best_loss = stats_meter['loss'].avg
self._snapshot(epoch, 'best_loss')
if self.timers: self.timers.print()
            else:  # run validation when not overfitting
if self.config.do_valid:
stats_meter = self.inference_one_epoch(epoch, 'val')
if stats_meter['loss'].avg < self.best_loss:
self.best_loss = stats_meter['loss'].avg
self._snapshot(epoch, 'best_loss')
if self.timers: self.timers.print()
        # finished all epochs
        print("Training finished!") | 8,861 | 34.590361 | 117 | py |
lepard | lepard-main/kernels/kernel_points.py |
#
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Functions handling the disposition of kernel points.
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
#
import time
import numpy as np
import matplotlib.pyplot as plt  # used by the verbose plotting branches below
from os import makedirs
from os.path import join, exists
from lib.ply import read_ply, write_ply
# ------------------------------------------------------------------------------------------
#
# Functions
# \***************/
#
#
def create_3D_rotations(axis, angle):
"""
Create rotation matrices from a list of axes and angles. Code from wikipedia on quaternions
:param axis: float32[N, 3]
:param angle: float32[N,]
:return: float32[N, 3, 3]
"""
t1 = np.cos(angle)
t2 = 1 - t1
t3 = axis[:, 0] * axis[:, 0]
t6 = t2 * axis[:, 0]
t7 = t6 * axis[:, 1]
t8 = np.sin(angle)
t9 = t8 * axis[:, 2]
t11 = t6 * axis[:, 2]
t12 = t8 * axis[:, 1]
t15 = axis[:, 1] * axis[:, 1]
t19 = t2 * axis[:, 1] * axis[:, 2]
t20 = t8 * axis[:, 0]
t24 = axis[:, 2] * axis[:, 2]
R = np.stack([t1 + t2 * t3,
t7 - t9,
t11 + t12,
t7 + t9,
t1 + t2 * t15,
t19 - t20,
t11 - t12,
t19 + t20,
t1 + t2 * t24], axis=1)
return np.reshape(R, (-1, 3, 3))
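# Example (a minimal sketch, not part of the original code): a 90 degree rotation about
# the z axis; the result is close to [[0, -1, 0], [1, 0, 0], [0, 0, 1]].
#   axis = np.array([[0.0, 0.0, 1.0]])
#   angle = np.array([np.pi / 2])
#   R = create_3D_rotations(axis, angle)[0]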
def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximation='monte-carlo',
approx_n=5000, max_iter=500, momentum=0.9, verbose=0):
"""
    Creation of kernel points via Lloyd's algorithm. We use an approximation of the algorithm and
    compute the Voronoi cell centers with a discretization of space, since the exact formula is not
    trivial when the cells are bounded by parts of the sphere.
:param radius: Radius of the kernels
    :param num_cells: Number of cells (kernel points) in the Voronoi diagram.
:param dimension: dimension of the space
:param fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
:param approximation: Approximation method for Lloyd's algorithm ('discretization', 'monte-carlo')
    :param approx_n: Number of points used for the approximation.
    :param max_iter: Maximum number of iterations for the algorithm.
:param momentum: Momentum of the low pass filter smoothing kernel point positions
:param verbose: display option
:return: points [num_kernels, num_points, dimension]
"""
#######################
# Parameters definition
#######################
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1.0
#######################
# Kernel initialization
#######################
# Random kernel points (Uniform distribution in a sphere)
kernel_points = np.zeros((0, dimension))
while kernel_points.shape[0] < num_cells:
new_points = np.random.rand(num_cells, dimension) * 2 * radius0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[np.logical_and(d2 < radius0 ** 2, (0.9 * radius0) ** 2 < d2), :]
kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[:3, :] *= 0
kernel_points[1, -1] += 2 * radius0 / 3
kernel_points[2, -1] -= 2 * radius0 / 3
##############################
# Approximation initialization
##############################
# Initialize figure
if verbose > 1:
fig = plt.figure()
    # Initialize discretization if this method is chosen
if approximation == 'discretization':
side_n = int(np.floor(approx_n ** (1. / dimension)))
dl = 2 * radius0 / side_n
coords = np.arange(-radius0 + dl/2, radius0, dl)
if dimension == 2:
x, y = np.meshgrid(coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y))).T
elif dimension == 3:
x, y, z = np.meshgrid(coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z))).T
elif dimension == 4:
x, y, z, t = np.meshgrid(coords, coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T
else:
raise ValueError('Unsupported dimension (max is 4)')
elif approximation == 'monte-carlo':
X = np.zeros((0, dimension))
else:
raise ValueError('Wrong approximation method chosen: "{:s}"'.format(approximation))
# Only points inside the sphere are used
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
#####################
# Kernel optimization
#####################
# Warning if at least one kernel point has no cell
warning = False
# moving vectors of kernel points saved to detect convergence
max_moves = np.zeros((0,))
for iter in range(max_iter):
# In the case of monte-carlo, renew the sampled points
if approximation == 'monte-carlo':
X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
# Get the distances matrix [n_approx, K, dim]
differences = np.expand_dims(X, 1) - kernel_points
sq_distances = np.sum(np.square(differences), axis=2)
# Compute cell centers
cell_inds = np.argmin(sq_distances, axis=1)
centers = []
for c in range(num_cells):
bool_c = (cell_inds == c)
num_c = np.sum(bool_c.astype(np.int32))
if num_c > 0:
centers.append(np.sum(X[bool_c, :], axis=0) / num_c)
else:
warning = True
centers.append(kernel_points[c])
        # Update kernel points with a low-pass filter to smooth the monte-carlo estimate
centers = np.vstack(centers)
moves = (1 - momentum) * (centers - kernel_points)
kernel_points += moves
# Check moves for convergence
max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1)))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[0, :] *= 0
kernel_points[:3, :-1] *= 0
if verbose:
print('iter {:5d} / max move = {:f}'.format(iter, np.max(np.linalg.norm(moves, axis=1))))
if warning:
print('{:}WARNING: at least one point has no cell{:}'.format(bcolors.WARNING, bcolors.ENDC))
if verbose > 1:
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0,
marker='.', cmap=plt.get_cmap('tab20'))
#plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
plt.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
###################
# User verification
###################
# Show the convergence to ask user if this kernel is correct
if verbose:
if dimension == 2:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8])
ax1.plot(max_moves)
ax2.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0,
marker='.', cmap=plt.get_cmap('tab20'))
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
ax2.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
ax2.add_artist(circle)
ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_aspect('equal')
plt.title('Check if kernel is correct.')
plt.draw()
plt.show()
if dimension > 2:
plt.figure()
plt.plot(max_moves)
plt.title('Check if kernel is correct.')
plt.show()
# Rescale kernels with real radius
return kernel_points * radius
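# Usage sketch (hypothetical values, not from the original code): 15 kernel points in a
# sphere of radius 0.05, with the first point fixed at the center.
#   kp = spherical_Lloyd(0.05, 15, dimension=3, fixed='center', verbose=0)   # shape (15, 3)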
def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension=3,
fixed='center', ratio=0.66, verbose=0):
"""
Creation of kernel point via optimization of potentials.
:param radius: Radius of the kernels
:param num_points: points composing kernels
:param num_kernels: number of wanted kernels
:param dimension: dimension of the space
:param fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
:param ratio: ratio of the radius where you want the kernels points to be placed
:param verbose: display option
:return: points [num_kernels, num_points, dimension]
"""
#######################
# Parameters definition
#######################
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1
diameter0 = 2
    # Factor multiplying gradients for moving points (~learning rate)
moving_factor = 1e-2
continuous_moving_decay = 0.9995
# Gradient threshold to stop optimization
thresh = 1e-5
# Gradient clipping value
clip = 0.05 * radius0
#######################
# Kernel initialization
#######################
# Random kernel points
kernel_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
while (kernel_points.shape[0] < num_kernels * num_points):
new_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[d2 < 0.5 * radius0 * radius0, :]
kernel_points = kernel_points[:num_kernels * num_points, :].reshape((num_kernels, num_points, -1))
    # Optional fixing
if fixed == 'center':
kernel_points[:, 0, :] *= 0
if fixed == 'verticals':
kernel_points[:, :3, :] *= 0
kernel_points[:, 1, -1] += 2 * radius0 / 3
kernel_points[:, 2, -1] -= 2 * radius0 / 3
#####################
# Kernel optimization
#####################
# Initialize figure
if verbose>1:
fig = plt.figure()
saved_gradient_norms = np.zeros((10000, num_kernels))
old_gradient_norms = np.zeros((num_kernels, num_points))
for iter in range(10000):
# Compute gradients
# *****************
# Derivative of the sum of potentials of all points
A = np.expand_dims(kernel_points, axis=2)
B = np.expand_dims(kernel_points, axis=1)
interd2 = np.sum(np.power(A - B, 2), axis=-1)
inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3/2) + 1e-6)
inter_grads = np.sum(inter_grads, axis=1)
# Derivative of the radius potential
circle_grads = 10*kernel_points
# All gradients
gradients = inter_grads + circle_grads
if fixed == 'verticals':
gradients[:, 1:3, :-1] = 0
# Stop condition
# **************
# Compute norm of gradients
gradients_norms = np.sqrt(np.sum(np.power(gradients, 2), axis=-1))
saved_gradient_norms[iter, :] = np.max(gradients_norms, axis=1)
        # Stop if gradients of all moving points have stabilized (low gradient diff)
if fixed == 'center' and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:])) < thresh:
break
elif fixed == 'verticals' and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:])) < thresh:
break
elif np.max(np.abs(old_gradient_norms - gradients_norms)) < thresh:
break
old_gradient_norms = gradients_norms
# Move points
# ***********
# Clip gradient to get moving dists
moving_dists = np.minimum(moving_factor * gradients_norms, clip)
# Fix central point
if fixed == 'center':
moving_dists[:, 0] = 0
if fixed == 'verticals':
moving_dists[:, 0] = 0
# Move points
kernel_points -= np.expand_dims(moving_dists, -1) * gradients / np.expand_dims(gradients_norms + 1e-6, -1)
if verbose:
print('iter {:5d} / max grad = {:f}'.format(iter, np.max(gradients_norms[:, 3:])))
if verbose > 1:
plt.clf()
plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], '.')
circle = plt.Circle((0, 0), radius, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius*1.1, radius*1.1))
fig.axes[0].set_ylim((-radius*1.1, radius*1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
print(moving_factor)
# moving factor decay
moving_factor *= continuous_moving_decay
# Rescale radius to fit the wanted ratio of radius
r = np.sqrt(np.sum(np.power(kernel_points, 2), axis=-1))
kernel_points *= ratio / np.mean(r[:, 1:])
# Rescale kernels with real radius
return kernel_points * radius, saved_gradient_norms
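# Usage sketch (hypothetical values, not from the original code): optimize 100 candidate
# kernels of 15 points each and keep the one with the lowest final gradient norm, as
# load_kernels() does below.
#   kps, grad_norms = kernel_point_optimization_debug(1.0, 15, num_kernels=100, fixed='center')
#   best_kernel = kps[np.argmin(grad_norms[-1, :])]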
def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
# Kernel directory
kernel_dir = 'kernels/dispositions'
if not exists(kernel_dir):
makedirs(kernel_dir)
    # Too many points: switch to Lloyd's algorithm
if num_kpoints > 30:
lloyd = True
# Kernel_file
kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension))
# Check if already done
if not exists(kernel_file):
if lloyd:
# Create kernels
kernel_points = spherical_Lloyd(1.0,
num_kpoints,
dimension=dimension,
fixed=fixed,
verbose=0)
else:
# Create kernels
kernel_points, grad_norms = kernel_point_optimization_debug(1.0,
num_kpoints,
num_kernels=100,
dimension=dimension,
fixed=fixed,
verbose=0)
# Find best candidate
best_k = np.argmin(grad_norms[-1, :])
# Save points
kernel_points = kernel_points[best_k, :, :]
write_ply(kernel_file, kernel_points, ['x', 'y', 'z'])
else:
data = read_ply(kernel_file)
kernel_points = np.vstack((data['x'], data['y'], data['z'])).T
    # Random rotations for the kernel
# N.B. 4D random rotations not supported yet
R = np.eye(dimension)
theta = np.random.rand() * 2 * np.pi
if dimension == 2:
if fixed != 'vertical':
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s], [s, c]], dtype=np.float32)
elif dimension == 3:
if fixed != 'vertical':
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
else:
phi = (np.random.rand() - 0.5) * np.pi
            # Create the first vector in cartesian coordinates
u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
# Choose a random rotation angle
alpha = np.random.rand() * 2 * np.pi
# Create the rotation matrix with this vector and angle
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0]
R = R.astype(np.float32)
# Add a small noise
kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape)
# Scale kernels
kernel_points = radius * kernel_points
# Rotate kernels
kernel_points = np.matmul(kernel_points, R)
return kernel_points.astype(np.float32)
| 17,189 | 35.496815 | 120 | py |
sngan.pytorch | sngan.pytorch-master/test.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cfg
import models
from functions import validate
from utils.utils import set_log_dir, create_logger
from utils.inception_score import _init_inception
from utils.fid_score import create_inception_graph, check_or_download_inception
import torch
import os
import numpy as np
from tensorboardX import SummaryWriter
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def main():
args = cfg.parse_args()
torch.cuda.manual_seed(args.random_seed)
assert args.exp_name
assert args.load_path.endswith('.pth')
assert os.path.exists(args.load_path)
args.path_helper = set_log_dir('logs_eval', args.exp_name)
logger = create_logger(args.path_helper['log_path'], phase='test')
# set tf env
_init_inception()
inception_path = check_or_download_inception(None)
create_inception_graph(inception_path)
# import network
gen_net = eval('models.'+args.model+'.Generator')(args=args).cuda()
# fid stat
if args.dataset.lower() == 'cifar10':
fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
else:
raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
assert os.path.exists(fid_stat)
# initial
fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))
# set writer
logger.info(f'=> resuming from {args.load_path}')
checkpoint_file = args.load_path
assert os.path.exists(checkpoint_file)
checkpoint = torch.load(checkpoint_file)
if 'avg_gen_state_dict' in checkpoint:
gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
epoch = checkpoint['epoch']
logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {epoch})')
else:
gen_net.load_state_dict(checkpoint)
logger.info(f'=> loaded checkpoint {checkpoint_file}')
logger.info(args)
writer_dict = {
'writer': SummaryWriter(args.path_helper['log_path']),
'valid_global_steps': 0,
}
inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net, writer_dict)
logger.info(f'Inception score: {inception_score}, FID score: {fid_score}.')
if __name__ == '__main__':
main()
| 2,425 | 29.708861 | 88 | py |
sngan.pytorch | sngan.pytorch-master/functions.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
import os
import numpy as np
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from imageio import imsave
from tqdm import tqdm
from copy import deepcopy
import logging
from utils.inception_score import get_inception_score
from utils.fid_score import calculate_fid_given_paths
logger = logging.getLogger(__name__)
def train(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch,
writer_dict, schedulers=None):
writer = writer_dict['writer']
gen_step = 0
# train mode
gen_net = gen_net.train()
dis_net = dis_net.train()
for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
global_steps = writer_dict['train_global_steps']
        # Real images from the data loader
real_imgs = imgs.type(torch.cuda.FloatTensor)
# Sample noise as generator input
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
# ---------------------
# Train Discriminator
# ---------------------
dis_optimizer.zero_grad()
real_validity = dis_net(real_imgs)
fake_imgs = gen_net(z).detach()
assert fake_imgs.size() == real_imgs.size()
fake_validity = dis_net(fake_imgs)
        # hinge loss for the discriminator
d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - real_validity)) + \
torch.mean(nn.ReLU(inplace=True)(1 + fake_validity))
d_loss.backward()
dis_optimizer.step()
writer.add_scalar('d_loss', d_loss.item(), global_steps)
# -----------------
# Train Generator
# -----------------
if global_steps % args.n_critic == 0:
gen_optimizer.zero_grad()
gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
gen_imgs = gen_net(gen_z)
fake_validity = dis_net(gen_imgs)
# cal loss
g_loss = -torch.mean(fake_validity)
g_loss.backward()
gen_optimizer.step()
# adjust learning rate
if schedulers:
gen_scheduler, dis_scheduler = schedulers
g_lr = gen_scheduler.step(global_steps)
d_lr = dis_scheduler.step(global_steps)
writer.add_scalar('LR/g_lr', g_lr, global_steps)
writer.add_scalar('LR/d_lr', d_lr, global_steps)
            # exponential moving average of the generator weights
for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
avg_p.mul_(0.999).add_(0.001, p.data)
writer.add_scalar('g_loss', g_loss.item(), global_steps)
gen_step += 1
# verbose
if gen_step and iter_idx % args.print_freq == 0:
tqdm.write(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
(epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))
writer_dict['train_global_steps'] = global_steps + 1
def validate(args, fixed_z, fid_stat, gen_net: nn.Module, writer_dict):
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
# eval mode
gen_net = gen_net.eval()
# generate images
sample_imgs = gen_net(fixed_z)
img_grid = make_grid(sample_imgs, nrow=5, normalize=True, scale_each=True)
# get fid and inception score
fid_buffer_dir = os.path.join(args.path_helper['sample_path'], 'fid_buffer')
os.makedirs(fid_buffer_dir)
eval_iter = args.num_eval_imgs // args.eval_batch_size
img_list = list()
for iter_idx in tqdm(range(eval_iter), desc='sample images'):
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))
# Generate a batch of images
gen_imgs = gen_net(z).mul_(127.5).add_(127.5).clamp_(0.0, 255.0).permute(0, 2, 3, 1).to('cpu', torch.uint8).numpy()
for img_idx, img in enumerate(gen_imgs):
file_name = os.path.join(fid_buffer_dir, f'iter{iter_idx}_b{img_idx}.png')
imsave(file_name, img)
img_list.extend(list(gen_imgs))
# get inception score
logger.info('=> calculate inception score')
mean, std = get_inception_score(img_list)
# get fid score
logger.info('=> calculate fid score')
fid_score = calculate_fid_given_paths([fid_buffer_dir, fid_stat], inception_path=None)
os.system('rm -r {}'.format(fid_buffer_dir))
writer.add_image('sampled_images', img_grid, global_steps)
writer.add_scalar('Inception_score/mean', mean, global_steps)
writer.add_scalar('Inception_score/std', std, global_steps)
writer.add_scalar('FID_score', fid_score, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
return mean, fid_score
class LinearLrDecay(object):
def __init__(self, optimizer, start_lr, end_lr, decay_start_step, decay_end_step):
assert start_lr > end_lr
self.optimizer = optimizer
self.delta = (start_lr - end_lr) / (decay_end_step - decay_start_step)
self.decay_start_step = decay_start_step
self.decay_end_step = decay_end_step
self.start_lr = start_lr
self.end_lr = end_lr
def step(self, current_step):
if current_step <= self.decay_start_step:
lr = self.start_lr
elif current_step >= self.decay_end_step:
lr = self.end_lr
else:
lr = self.start_lr - self.delta * (current_step - self.decay_start_step)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
return lr
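# Usage sketch (hypothetical values, not from the original code): decay the generator
# learning rate linearly from 2e-4 to 0 between steps 0 and 100000, calling
# scheduler.step(global_steps) once per training iteration as train() does above.
#   gen_scheduler = LinearLrDecay(gen_optimizer, 2e-4, 0.0, 0, 100000)
#   g_lr = gen_scheduler.step(global_steps)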
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
| 6,022 | 32.837079 | 123 | py |
sngan.pytorch | sngan.pytorch-master/cfg.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--max_epoch',
type=int,
default=200,
help='number of epochs of training')
parser.add_argument(
'--max_iter',
type=int,
default=None,
help='set the max iteration number')
parser.add_argument(
'-gen_bs',
'--gen_batch_size',
type=int,
default=64,
help='size of the batches')
parser.add_argument(
'-dis_bs',
'--dis_batch_size',
type=int,
default=64,
help='size of the batches')
parser.add_argument(
'--g_lr',
type=float,
default=0.0002,
help='adam: gen learning rate')
parser.add_argument(
'--d_lr',
type=float,
default=0.0002,
help='adam: disc learning rate')
parser.add_argument(
'--lr_decay',
action='store_true',
help='learning rate decay or not')
parser.add_argument(
'--beta1',
type=float,
default=0.0,
help='adam: decay of first order momentum of gradient')
parser.add_argument(
'--beta2',
type=float,
default=0.9,
        help='adam: decay of second order momentum of gradient')
parser.add_argument(
'--num_workers',
type=int,
default=8,
help='number of cpu threads to use during batch generation')
parser.add_argument(
'--latent_dim',
type=int,
default=128,
help='dimensionality of the latent space')
parser.add_argument(
'--img_size',
type=int,
default=32,
help='size of each image dimension')
parser.add_argument(
'--channels',
type=int,
default=3,
help='number of image channels')
parser.add_argument(
'--n_critic',
type=int,
default=1,
help='number of training steps for discriminator per iter')
parser.add_argument(
'--val_freq',
type=int,
default=20,
help='interval between each validation')
parser.add_argument(
'--print_freq',
type=int,
default=50,
help='interval between each verbose')
parser.add_argument(
'--load_path',
type=str,
help='The reload model path')
parser.add_argument(
'--exp_name',
type=str,
help='The name of exp')
parser.add_argument(
'--d_spectral_norm',
type=str2bool,
default=False,
help='add spectral_norm on discriminator?')
parser.add_argument(
'--g_spectral_norm',
type=str2bool,
default=False,
help='add spectral_norm on generator?')
parser.add_argument(
'--dataset',
type=str,
default='cifar10',
help='dataset type')
parser.add_argument(
'--data_path',
type=str,
default='./data',
help='The path of data set')
parser.add_argument('--init_type', type=str, default='normal',
choices=['normal', 'orth', 'xavier_uniform', 'false'],
help='The init type')
parser.add_argument('--gf_dim', type=int, default=64,
help='The base channel num of gen')
parser.add_argument('--df_dim', type=int, default=64,
help='The base channel num of disc')
parser.add_argument(
'--model',
type=str,
default='sngan_cifar10',
help='path of model')
parser.add_argument('--eval_batch_size', type=int, default=100)
parser.add_argument('--num_eval_imgs', type=int, default=50000)
parser.add_argument(
'--bottom_width',
type=int,
default=4,
help="the base resolution of the GAN")
parser.add_argument('--random_seed', type=int, default=12345)
opt = parser.parse_args()
return opt
| 4,330 | 27.30719 | 78 | py |
sngan.pytorch | sngan.pytorch-master/datasets.py | import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class ImageDataset(object):
def __init__(self, args):
if args.dataset.lower() == 'cifar10':
Dt = datasets.CIFAR10
transform = transforms.Compose([
transforms.Resize(args.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
args.n_classes = 10
elif args.dataset.lower() == 'stl10':
Dt = datasets.STL10
transform = transforms.Compose([
transforms.Resize(args.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
else:
raise NotImplementedError('Unknown dataset: {}'.format(args.dataset))
if args.dataset.lower() == 'stl10':
self.train = torch.utils.data.DataLoader(
Dt(root=args.data_path, split='train+unlabeled', transform=transform, download=True),
batch_size=args.dis_batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=True)
self.valid = torch.utils.data.DataLoader(
Dt(root=args.data_path, split='test', transform=transform),
batch_size=args.dis_batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
self.test = self.valid
else:
self.train = torch.utils.data.DataLoader(
Dt(root=args.data_path, train=True, transform=transform, download=True),
batch_size=args.dis_batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=True)
self.valid = torch.utils.data.DataLoader(
Dt(root=args.data_path, train=False, transform=transform),
batch_size=args.dis_batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
self.test = self.valid
| 2,115 | 40.490196 | 101 | py |
sngan.pytorch | sngan.pytorch-master/train.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cfg
import models
import datasets
from functions import train, validate, LinearLrDecay, load_params, copy_params
from utils.utils import set_log_dir, save_checkpoint, create_logger
from utils.inception_score import _init_inception
from utils.fid_score import create_inception_graph, check_or_download_inception
import torch
import os
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from tqdm import tqdm
from copy import deepcopy
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def main():
args = cfg.parse_args()
torch.cuda.manual_seed(args.random_seed)
# set tf env
_init_inception()
inception_path = check_or_download_inception(None)
create_inception_graph(inception_path)
# import network
gen_net = eval('models.'+args.model+'.Generator')(args=args).cuda()
dis_net = eval('models.'+args.model+'.Discriminator')(args=args).cuda()
# weight init
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
if args.init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif args.init_type == 'orth':
nn.init.orthogonal_(m.weight.data)
elif args.init_type == 'xavier_uniform':
nn.init.xavier_uniform(m.weight.data, 1.)
else:
                raise NotImplementedError('{} unknown initial type'.format(args.init_type))
elif classname.find('BatchNorm2d') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
gen_net.apply(weights_init)
dis_net.apply(weights_init)
# set optimizer
gen_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, gen_net.parameters()),
args.g_lr, (args.beta1, args.beta2))
dis_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, dis_net.parameters()),
args.d_lr, (args.beta1, args.beta2))
gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0, args.max_iter * args.n_critic)
dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0, args.max_iter * args.n_critic)
# set up data_loader
dataset = datasets.ImageDataset(args)
train_loader = dataset.train
# fid stat
if args.dataset.lower() == 'cifar10':
fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
elif args.dataset.lower() == 'stl10':
fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
else:
raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
assert os.path.exists(fid_stat)
# epoch number for dis_net
args.max_epoch = args.max_epoch * args.n_critic
if args.max_iter:
args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))
# initial
fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))
gen_avg_param = copy_params(gen_net)
start_epoch = 0
best_fid = 1e4
# set writer
if args.load_path:
print(f'=> resuming from {args.load_path}')
assert os.path.exists(args.load_path)
checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint.pth')
assert os.path.exists(checkpoint_file)
checkpoint = torch.load(checkpoint_file)
start_epoch = checkpoint['epoch']
best_fid = checkpoint['best_fid']
gen_net.load_state_dict(checkpoint['gen_state_dict'])
dis_net.load_state_dict(checkpoint['dis_state_dict'])
gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
avg_gen_net = deepcopy(gen_net)
avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
gen_avg_param = copy_params(avg_gen_net)
del avg_gen_net
args.path_helper = checkpoint['path_helper']
logger = create_logger(args.path_helper['log_path'])
logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
else:
# create new log dir
assert args.exp_name
args.path_helper = set_log_dir('logs', args.exp_name)
logger = create_logger(args.path_helper['log_path'])
logger.info(args)
writer_dict = {
'writer': SummaryWriter(args.path_helper['log_path']),
'train_global_steps': start_epoch * len(train_loader),
'valid_global_steps': start_epoch // args.val_freq,
}
# train loop
lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)), desc='total progress'):
train(args, gen_net, dis_net, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch, writer_dict,
lr_schedulers)
if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch)-1:
backup_param = copy_params(gen_net)
load_params(gen_net, gen_avg_param)
inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net, writer_dict)
logger.info(f'Inception score: {inception_score}, FID score: {fid_score} || @ epoch {epoch}.')
load_params(gen_net, backup_param)
if fid_score < best_fid:
best_fid = fid_score
is_best = True
else:
is_best = False
else:
is_best = False
avg_gen_net = deepcopy(gen_net)
load_params(avg_gen_net, gen_avg_param)
save_checkpoint({
'epoch': epoch + 1,
'model': args.model,
'gen_state_dict': gen_net.state_dict(),
'dis_state_dict': dis_net.state_dict(),
'avg_gen_state_dict': avg_gen_net.state_dict(),
'gen_optimizer': gen_optimizer.state_dict(),
'dis_optimizer': dis_optimizer.state_dict(),
'best_fid': best_fid,
'path_helper': args.path_helper
}, is_best, args.path_helper['ckpt_path'])
del avg_gen_net
if __name__ == '__main__':
main()
| 6,393 | 37.287425 | 116 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_64.py | import torch.nn as nn
class GenBlock(nn.Module):
def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), upsample=False, n_classes=0):
super(GenBlock, self).__init__()
self.activation = activation
self.upsample = upsample
self.learnable_sc = in_channels != out_channels or upsample
hidden_channels = out_channels if hidden_channels is None else hidden_channels
self.n_classes = n_classes
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
self.b1 = nn.BatchNorm2d(in_channels)
self.b2 = nn.BatchNorm2d(hidden_channels)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
def upsample_conv(self, x, conv):
return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
def residual(self, x):
h = x
h = self.b1(h)
h = self.activation(h)
h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
h = self.b2(h)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = args.gf_dim
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(self.ch)
self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
"""Discriminator"""
def _downsample(x):
    # Downsample (average pooling with a 2x2 kernel)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU()):
super(Discriminator, self).__init__()
self.ch = args.df_dim
self.activation = activation
self.block1 = OptimizedDisBlock(args, 3, self.ch)
self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.l5 = nn.Linear(self.ch, 1, bias=False)
if args.d_spectral_norm:
self.l5 = nn.utils.spectral_norm(self.l5)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.activation(h)
# Global average pooling
h = h.sum(2).sum(2)
output = self.l5(h)
return output
| 6,296 | 34.778409 | 107 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_stl10.py | import torch.nn as nn
class GenBlock(nn.Module):
def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), upsample=False, n_classes=0):
super(GenBlock, self).__init__()
self.activation = activation
self.upsample = upsample
self.learnable_sc = in_channels != out_channels or upsample
hidden_channels = out_channels if hidden_channels is None else hidden_channels
self.n_classes = n_classes
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
self.b1 = nn.BatchNorm2d(in_channels)
self.b2 = nn.BatchNorm2d(hidden_channels)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
def upsample_conv(self, x, conv):
return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
def residual(self, x):
h = x
h = self.b1(h)
h = self.activation(h)
h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
h = self.b2(h)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = 512
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(512, 256, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(256, 128, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(128, 64, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(64)
self.c5 = nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1)
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
"""Discriminator"""
def _downsample(x):
    # Downsample (average pooling with a 2x2 kernel)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU()):
super(Discriminator, self).__init__()
self.activation = activation
self.block1 = OptimizedDisBlock(args, 3, 64)
self.block2 = DisBlock(args, 64, 128, activation=activation, downsample=True)
self.block3 = DisBlock(args, 128, 256, activation=activation, downsample=True)
self.block4 = DisBlock(args, 256, 512, activation=activation, downsample=True)
self.block5 = DisBlock(args, 512, 1024, activation=activation, downsample=False)
self.l6 = nn.Linear(1024, 1, bias=False)
if args.d_spectral_norm:
self.l6 = nn.utils.spectral_norm(self.l6)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.block5(h)
h = self.activation(h)
# Global average pooling
h = h.sum(2).sum(2)
output = self.l6(h)
return output
| 6,305 | 34.426966 | 99 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_cifar10.py | import torch.nn as nn
from .gen_resblock import GenBlock
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = args.gf_dim
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(self.ch)
self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
"""Discriminator"""
def _downsample(x):
# Downsample (Mean Avg Pooling with 2x2 kernel)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU()):
super(Discriminator, self).__init__()
self.ch = args.df_dim
self.activation = activation
self.block1 = OptimizedDisBlock(args, 3, self.ch)
self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.l5 = nn.Linear(self.ch, 1, bias=False)
if args.d_spectral_norm:
self.l5 = nn.utils.spectral_norm(self.l5)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.activation(h)
        # Global sum pooling over spatial dimensions
h = h.sum(2).sum(2)
output = self.l5(h)
return output
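# Hedged usage sketch (added for illustration; not part of the original repo). The
# hyper-parameter values below (latent_dim=128, gf_dim=256, df_dim=128,
# bottom_width=4) are assumptions, not values taken from this repo's configs.
def _example_sngan_forward():
    import torch
    from argparse import Namespace

    args = Namespace(latent_dim=128, gf_dim=256, df_dim=128,
                     bottom_width=4, d_spectral_norm=True)
    gen, dis = Generator(args), Discriminator(args)
    z = torch.randn(2, args.latent_dim)
    fake = gen(z)       # (2, 3, 32, 32): bottom_width 4 upsampled three times
    score = dis(fake)   # (2, 1): raw critic scores
    return fake.shape, score.shape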
| 4,805 | 34.338235 | 107 | py |
sngan.pytorch | sngan.pytorch-master/models/__init__.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import models.sngan_cifar10
import models.sngan_stl10
| 291 | 21.461538 | 42 | py |
sngan.pytorch | sngan.pytorch-master/models/gen_resblock.py | # -*- coding: utf-8 -*-
# @Date : 3/26/20
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
import torch.nn as nn
class GenBlock(nn.Module):
def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), upsample=False, n_classes=0):
super(GenBlock, self).__init__()
self.activation = activation
self.upsample = upsample
self.learnable_sc = in_channels != out_channels or upsample
hidden_channels = out_channels if hidden_channels is None else hidden_channels
self.n_classes = n_classes
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
self.b1 = nn.BatchNorm2d(in_channels)
self.b2 = nn.BatchNorm2d(hidden_channels)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
def upsample_conv(self, x, conv):
return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
def residual(self, x):
h = x
h = self.b1(h)
h = self.activation(h)
h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
h = self.b2(h)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x) | 1,671 | 33.833333 | 90 | py |
sngan.pytorch | sngan.pytorch-master/utils/cal_fid_stat.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-26
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
import os
import glob
import argparse
import numpy as np
from imageio import imread
import tensorflow as tf
import utils.fid_score as fid
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_path',
type=str,
required=True,
help='set path to training set jpg images dir')
parser.add_argument(
'--output_file',
type=str,
default='fid_stat/fid_stats_cifar10_train.npz',
help='path for where to store the statistics')
opt = parser.parse_args()
print(opt)
return opt
def main():
args = parse_args()
########
# PATHS
########
data_path = args.data_path
output_path = args.output_file
# if you have downloaded and extracted
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
# set this path to the directory where the extracted files are, otherwise
# just set it to None and the script will later download the files for you
inception_path = None
print("check for inception model..", end=" ", flush=True)
inception_path = fid.check_or_download_inception(inception_path) # download inception if necessary
print("ok")
# loads all images into memory (this might require a lot of RAM!)
print("load images..", end=" ", flush=True)
image_list = glob.glob(os.path.join(data_path, '*.jpg'))
images = np.array([imread(str(fn)).astype(np.float32) for fn in image_list])
print("%d images found and loaded" % len(images))
print("create inception graph..", end=" ", flush=True)
fid.create_inception_graph(inception_path) # load the graph into the current TF graph
print("ok")
print("calculte FID stats..", end=" ", flush=True)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
mu, sigma = fid.calculate_activation_statistics(images, sess, batch_size=100)
np.savez_compressed(output_path, mu=mu, sigma=sigma)
print("finished")
if __name__ == '__main__':
main()
| 2,264 | 29.2 | 103 | py |
sngan.pytorch | sngan.pytorch-master/utils/utils.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
import os
import torch
import dateutil.tz
from datetime import datetime
import time
import logging
def create_logger(log_dir, phase='train'):
time_str = time.strftime('%Y-%m-%d-%H-%M')
log_file = '{}_{}.log'.format(time_str, phase)
final_log_file = os.path.join(log_dir, log_file)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=str(final_log_file),
format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
return logger
def set_log_dir(root_dir, exp_name):
path_dict = {}
os.makedirs(root_dir, exist_ok=True)
# set log path
exp_path = os.path.join(root_dir, exp_name)
now = datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
prefix = exp_path + '_' + timestamp
os.makedirs(prefix)
path_dict['prefix'] = prefix
# set checkpoint path
ckpt_path = os.path.join(prefix, 'Model')
os.makedirs(ckpt_path)
path_dict['ckpt_path'] = ckpt_path
log_path = os.path.join(prefix, 'Log')
os.makedirs(log_path)
path_dict['log_path'] = log_path
# set sample image path for fid calculation
sample_path = os.path.join(prefix, 'Samples')
os.makedirs(sample_path)
path_dict['sample_path'] = sample_path
return path_dict
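# Illustrative note (added for clarity; the root_dir/exp_name values are made up):
# set_log_dir('logs', 'sngan_cifar10') creates a timestamped folder such as
#   logs/sngan_cifar10_2019_07_25_12_00_00/
#       Model/    -> path_dict['ckpt_path']
#       Log/      -> path_dict['log_path']
#       Samples/  -> path_dict['sample_path']
# and returns the timestamped folder itself under path_dict['prefix'].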
def save_checkpoint(states, is_best, output_dir,
filename='checkpoint.pth'):
torch.save(states, os.path.join(output_dir, filename))
if is_best:
torch.save(states, os.path.join(output_dir, 'checkpoint_best.pth'))
| 1,772 | 26.703125 | 75 | py |
sngan.pytorch | sngan.pytorch-master/utils/inception_score.py | # Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tqdm import tqdm
import os.path
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import math
import sys
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Call this function with a list of images. Each element should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
assert (type(images) == list)
assert (type(images[0]) == np.ndarray)
assert (len(images[0].shape) == 3)
assert (np.max(images[0]) > 10)
assert (np.min(images[0]) >= 0.0)
inps = []
for img in images:
img = img.astype(np.float32)
inps.append(np.expand_dims(img, 0))
bs = 100
with tf.Session(config=config) as sess:
preds = []
n_batches = int(math.ceil(float(len(inps)) / float(bs)))
for i in tqdm(range(n_batches), desc="Calculate inception score"):
sys.stdout.flush()
inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
inp = np.concatenate(inp, 0)
pred = sess.run(softmax, {'ExpandDims:0': inp})
preds.append(pred)
preds = np.concatenate(preds, 0)
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
sess.close()
return np.mean(scores), np.std(scores)
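# Hedged usage sketch (added for illustration; not part of the original file).
# `_init_inception()` must be called once before `get_inception_score`. The random
# images below are placeholders, so the resulting score is meaningless (close to 1)
# and only demonstrates the expected input format.
def _example_inception_score(num_images=100):
    _init_inception()  # builds the softmax graph, downloading the model if needed
    images = [np.random.randint(0, 256, (299, 299, 3)).astype(np.float32)
              for _ in range(num_images)]
    return get_inception_score(images, splits=2)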
# This function is called automatically.
def _init_inception():
global softmax
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Works with an arbitrary minibatch size.
with tf.Session(config=config) as sess:
pool3 = sess.graph.get_tensor_by_name('pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims != []:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
logits = tf.matmul(tf.squeeze(pool3, [1, 2]), w)
softmax = tf.nn.softmax(logits)
sess.close()
| 3,818 | 36.07767 | 96 | py |
sngan.pytorch | sngan.pytorch-master/utils/__init__.py | # -*- coding: utf-8 -*-
# @Date : 2019-07-25
# @Author : Xinyu Gong (xy_gong@tamu.edu)
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils import utils
| 261 | 20.833333 | 42 | py |
sngan.pytorch | sngan.pytorch-master/utils/fid_score.py | #!/usr/bin/env python3
""" Calculates the Frechet Inception Distance (FID) to evaluate GANs.
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import tensorflow as tf
from imageio import imread
from scipy import linalg
import pathlib
import warnings
class InvalidFIDException(Exception):
pass
def create_inception_graph(pth):
"""Creates a graph from saved GraphDef file."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(pth, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='FID_Inception_Net')
# -------------------------------------------------------------------------------
# code for handling inception net derived from
# https://github.com/openai/improved-gan/blob/master/inception_score/model.py
def _get_inception_layer(sess):
"""Prepares inception net for batched usage and returns pool_3 layer. """
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims != []:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
return pool3
# -------------------------------------------------------------------------------
def get_activations(images, sess, batch_size=50, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 256.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
                     batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
d0 = images.shape[0]
if batch_size > d0:
print("warning: batch size is bigger than the data size. setting batch size to data size")
batch_size = d0
n_batches = d0 // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, 2048))
for i in range(n_batches):
if verbose:
print("\rPropagating batch %d/%d" % (i + 1, n_batches), end="", flush=True)
start = i * batch_size
end = start + batch_size
batch = images[start:end]
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
if verbose:
print(" done")
return pred_arr
# -------------------------------------------------------------------------------
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations of the pool_3 layer, precalculated
               on a representative data set.
    -- sigma1: The covariance matrix over activations of the pool_3 layer for
               generated samples.
    -- sigma2: The covariance matrix over activations of the pool_3 layer,
               precalculated on a representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
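# Hedged sanity-check sketch (added for illustration; not part of the original
# file): identical Gaussians give a distance of ~0, and shifting one mean by a
# vector m adds ||m||^2, matching d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
def _example_frechet_distance(dim=8):
    mu = np.zeros(dim)
    sigma = np.eye(dim)
    same = calculate_frechet_distance(mu, sigma, mu, sigma)            # ~0.0
    shifted = calculate_frechet_distance(mu, sigma, mu + 1.0, sigma)   # ~float(dim)
    return same, shifted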
# -------------------------------------------------------------------------------
def calculate_activation_statistics(images, sess, batch_size=50, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
"""
act = get_activations(images, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
# ------------------
# The following methods are implemented to obtain a batched version of the activations.
# This has the advantage of reducing memory requirements, at the cost of slightly reduced efficiency.
# - Pyrestone
# ------------------
def load_image_batch(files):
"""Convenience method for batch-loading images
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
Returns:
-- A numpy array of dimensions (num_images,hi, wi, 3) representing the image pixel values.
"""
return np.array([imread(str(fn)).astype(np.float32) for fn in files])
def get_activations_from_files(files, sess, batch_size=50, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
                     batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
d0 = len(files)
if batch_size > d0:
print("warning: batch size is bigger than the data size. setting batch size to data size")
batch_size = d0
n_batches = d0 // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, 2048))
for i in range(n_batches):
if verbose:
print("\rPropagating batch %d/%d" % (i + 1, n_batches), end="", flush=True)
start = i * batch_size
end = start + batch_size
batch = load_image_batch(files[start:end])
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
del batch # clean up memory
if verbose:
print(" done")
return pred_arr
def calculate_activation_statistics_from_files(files, sess, batch_size=50, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
    -- mu    : The mean over samples of the activations of the pool_3 layer of
               the inception model.
    -- sigma : The covariance matrix of the activations of the pool_3 layer of
               the inception model.
"""
act = get_activations_from_files(files, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
# The following functions aren't needed for calculating the FID
# they're just here to make this module work as a stand-alone script
# for calculating FID scores
# -------------------------------------------------------------------------------
def check_or_download_inception(inception_path):
""" Checks if the path to the inception file is valid, or downloads
the file if it is not present. """
INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
if inception_path is None:
inception_path = '/tmp'
inception_path = pathlib.Path(inception_path)
model_file = inception_path / 'classify_image_graph_def.pb'
if not model_file.exists():
print("Downloading Inception model")
from urllib import request
import tarfile
fn, _ = request.urlretrieve(INCEPTION_URL)
with tarfile.open(fn, mode='r') as f:
f.extract('classify_image_graph_def.pb', str(model_file.parent))
return str(model_file)
def _handle_path(path, sess, low_profile=False):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
if low_profile:
m, s = calculate_activation_statistics_from_files(files, sess)
else:
x = np.array([imread(str(fn)).astype(np.float32) for fn in files])
m, s = calculate_activation_statistics(x, sess)
del x # clean up memory
return m, s
def calculate_fid_given_paths(paths, inception_path, low_profile=False):
""" Calculates the FID of two paths. """
# inception_path = check_or_download_inception(inception_path)
for p in paths:
if not os.path.exists(p):
raise RuntimeError("Invalid path: %s" % p)
# from utils import memory
# memory()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
m1, s1 = _handle_path(paths[0], sess, low_profile=low_profile)
m2, s2 = _handle_path(paths[1], sess, low_profile=low_profile)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
sess.close()
del m1, s1, m2, s2
return fid_value
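# Illustrative usage (added for clarity; the directory name below is a placeholder,
# not a path shipped with this repo):
#   fid = calculate_fid_given_paths(
#       ['path/to/generated_samples', 'fid_stat/fid_stats_cifar10_train.npz'],
#       inception_path=None, low_profile=True)
# Each path may be either a directory of .jpg/.png images or a precomputed .npz
# file with 'mu'/'sigma'; low_profile=True loads images batch-wise to save memory.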
| 13,063 | 39.196923 | 103 | py |
neu-nbv | neu-nbv-main/scripts/planning/simulator_planning.py | import rospy
import os
import sys
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, root_dir)
import yaml
import argparse
from planner import get_planner
from datetime import datetime
def main():
# planning experiment in simulator using a specific planner
args = parse_args()
rospy.init_node(args.planner_type)
# find planner configuration file
experiment_path = os.path.join(
root_dir,
"experiments",
"simulator",
datetime.now().strftime("%d-%m-%Y-%H-%M"),
)
planner_cfg_path = os.path.join(
"planning/config", f"{args.planner_type}_planner.yaml"
)
assert os.path.exists(planner_cfg_path)
with open(planner_cfg_path, "r") as config_file:
planner_cfg = yaml.safe_load(config_file)
planner_cfg.update(args.__dict__)
planner_cfg["planner_type"] = args.planner_type
planner_cfg["experiment_path"] = experiment_path
planner_cfg["experiment_id"] = "record"
nbv_planner = get_planner(planner_cfg)
nbv_planner.start()
def parse_args():
parser = argparse.ArgumentParser()
# mandatory arguments
parser.add_argument(
"--planner_type", "-P", type=str, required=True, help="planner_type"
)
# arguments with default values
parser.add_argument(
"--planning_budget",
"-BG",
type=int,
default=20,
help="maximal measurments for the mission",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
help="config file path",
)
parser.add_argument(
"--gpu_id",
type=str,
default="0",
help="gpu to use, space delimited",
)
parser.add_argument(
"--initial_view",
type=list,
default=[0, 0],
help="prefixed initial camera view angle",
)
parser.add_argument(
"--random_initial",
action="store_true",
help="use random inital camera pose",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| 2,103 | 22.640449 | 76 | py |
neu-nbv | neu-nbv-main/scripts/planning/dtu_experiment.py | import sys
import os
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, root_dir)
from neural_rendering.evaluation.pretrained_model import PretrainedModel
from neural_rendering.data import get_data
from neural_rendering.utils import parser, util
import yaml
from dotmap import DotMap
import torch
import warnings
import numpy as np
import pandas
import seaborn as sb
import copy
from scipy.spatial import distance
from datetime import datetime
import random
import pickle
from dotmap import DotMap
warnings.filterwarnings("ignore")
# follow pixelnerf setup
candidate_index_list = [
6,
7,
8,
9,
10,
13,
14,
15,
16,
17,
21,
22,
23,
24,
25,
31,
32,
33,
34,
35,
41,
42,
43,
44,
45,
]
def setup_random_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def get_nbv_ref_index(
model, images, poses, focal, c, z_near, z_far, candidate_list, budget, ref_index
):
_, _, H, W = images.shape
for i in range(budget):
remain_candidate_list = list(set(candidate_list) - set(ref_index))
reward_list = []
model.network.encode(
images[ref_index].unsqueeze(0),
poses[ref_index].unsqueeze(0),
focal.unsqueeze(0),
c.unsqueeze(0),
)
for target_view in remain_candidate_list:
novel_pose = poses[target_view]
target_rays = util.gen_rays(
novel_pose.unsqueeze(0), W, H, focal, z_near, z_far, c
)
target_rays = target_rays.reshape(1, H * W, -1)
predict = DotMap(model.renderer_par(target_rays))
uncertainty = predict["uncertainty"][0]
reward = torch.sum(uncertainty**2).cpu().numpy()
reward_list.append(reward)
nbv_index = np.argmax(reward_list)
new_ref_index = remain_candidate_list[nbv_index]
ref_index.append(new_ref_index)
return ref_index
def get_camera_view_direction(poses):
poses = poses.cpu().numpy()
view_direction = -poses[..., :3, 2]
view_direction = view_direction / np.linalg.norm(view_direction)
return view_direction
def get_max_dist_ref_index(poses, ref_index, candidate_list, budget):
view_direction = get_camera_view_direction(poses)
for i in range(budget):
remain_candidate_list = list(set(candidate_list) - set(ref_index))
cos_distance_list = []
for idx in remain_candidate_list:
cos_dist = 0.0
for image_idx in ref_index:
cos_dist += distance.cosine(
view_direction[idx], view_direction[image_idx]
)
cos_distance_list.append(cos_dist)
new_ref_index = remain_candidate_list[np.argmax(cos_distance_list)]
ref_index.append(new_ref_index)
return ref_index
def main():
# planning experiment on DTU using baseline planners and our planner
setup_random_seed(10)
args = parser.parse_args(planning_args)
dtu_nbv_planner = DTUNBVPlanning(args)
experiment_path = args.experiment_path
if args.evaluation_only:
with open(f"{experiment_path}/saved_index_dict.pkl", "rb") as f:
index_record = pickle.load(f)
else:
experiment_path = os.path.join(
root_dir,
"experiments",
"dtu",
datetime.now().strftime("%d-%m-%Y-%H-%M"),
)
os.makedirs(experiment_path)
index_record = dtu_nbv_planner.planning()
with open(f"{experiment_path}/saved_index_dict.pkl", "wb") as f:
pickle.dump(index_record, f)
total_df = dtu_nbv_planner.evaluation(index_record)
total_df.to_csv(f"{experiment_path}/dataframe.csv")
class DTUNBVPlanning:
"""
    planning on DTU using different view selection methods: max_view_distance, random, and our uncertainty-guided method
"""
def __init__(self, args):
log_path = os.path.join(root_dir, "neural_rendering", "logs", args.model_name)
assert os.path.exists(log_path), "experiment does not exist"
with open(f"{log_path}/training_setup.yaml", "r") as config_file:
cfg = yaml.safe_load(config_file)
checkpoint_path = os.path.join(log_path, "checkpoints", "best.ckpt")
assert os.path.exists(checkpoint_path), "checkpoint does not exist"
ckpt_file = torch.load(checkpoint_path)
gpu_id = list(map(int, args.gpu_id.split()))
self.device = util.get_cuda(gpu_id[0])
self.repeat = args.repeat
self.model = PretrainedModel(cfg["model"], ckpt_file, self.device, gpu_id)
cfg["data"]["dataset"]["data_rootdir"] = os.path.join(
root_dir, "neural_rendering/data/dataset/dtu_dataset/rs_dtu_4/DTU"
)
datamodule = get_data(cfg["data"])
self.dataset = datamodule.load_dataset("val")
self.z_near = self.dataset.z_near
self.z_far = self.dataset.z_far
def planning(self):
print(f"---------- planning ---------- \n")
ON = len(self.dataset)
selection_type = ["Max. View Distance", "Random", "Ours"]
nview_list = [2, 3, 4, 5, 6, 7, 8, 9] # maximal budget = 9
scene_index = range(ON)
ref_index_record = {}
with torch.no_grad():
for nviews in nview_list:
ref_index_record[nviews] = {}
print(f"---------- {nviews} views experiment---------- \n")
for i in scene_index:
data_instance = self.dataset.__getitem__(i)
scene_title = data_instance["scan_name"]
ref_index_record[nviews][i] = {}
print(f"test on {scene_title}")
images = data_instance["images"].to(self.device)
focal = data_instance["focal"].to(self.device)
c = data_instance["c"].to(self.device)
poses = data_instance["poses"].to(self.device)
                # randomly initialize the first 2 ref images for all methods
for r in range(self.repeat):
ref_index_record[nviews][i][r] = {}
initial_ref_index = list(
np.random.choice(candidate_index_list, 2, replace=False)
)
candidate_list = list(
set(candidate_index_list) - set(initial_ref_index)
)
budget = nviews - 2
for stype in selection_type:
print(f"---------- repeat: {r}, {stype} ---------- \n")
if stype == "Max. View Distance":
ref_index = get_max_dist_ref_index(
poses,
copy.deepcopy(initial_ref_index),
candidate_list,
budget,
)
print(ref_index)
elif stype == "Random":
random_ref_index = list(
np.random.choice(
candidate_index_list, budget, replace=True
)
)
ref_index = initial_ref_index + random_ref_index
print(ref_index)
ref_index = np.unique(ref_index)
elif stype == "Ours":
ref_index = get_nbv_ref_index(
self.model,
images,
poses,
focal,
c,
self.z_near,
self.z_far,
candidate_list,
budget,
copy.deepcopy(initial_ref_index),
)
print(ref_index)
ref_index_record[nviews][i][r][stype] = ref_index
return ref_index_record
def evaluation(self, index_record):
print(f"---------- evaluation ---------- \n")
total_df = pandas.DataFrame(
{
"Planning Type": [],
"Reference Image Number": [],
"PSNR": [],
"SSIM": [],
"Scene": [],
}
)
with torch.no_grad():
for nviews, nviews_dict in index_record.items():
print(f"---------- {nviews} views experiment---------- \n")
for scene_id, scene_dict in nviews_dict.items():
data_instance = self.dataset.__getitem__(scene_id)
scene_title = data_instance["scan_name"]
print(f"test on {scene_title}")
images = data_instance["images"].to(self.device)
images_0to1 = images * 0.5 + 0.5
_, _, H, W = images.shape
focal = data_instance["focal"].to(self.device)
c = data_instance["c"].to(self.device)
poses = data_instance["poses"].to(self.device)
psnr_per_scene = []
ssim_per_scene = []
                    # randomly initialize the first 2 ref images for all methods
for repeat, repeat_dict in scene_dict.items():
for stype, ref_index in repeat_dict.items():
print(f"---------- repeat: {repeat}, {stype} ---------- \n")
print(ref_index)
self.model.network.encode(
images[ref_index].unsqueeze(0),
poses[ref_index].unsqueeze(0),
focal.unsqueeze(0),
c.unsqueeze(0),
)
test_index = list(
set(candidate_index_list) - set(ref_index)
)
psnr_per_test = []
ssim_per_test = []
for target_view in test_index:
gt = (
images_0to1[target_view]
.permute(1, 2, 0)
.cpu()
.numpy()
)
novel_pose = poses[target_view]
target_rays = util.gen_rays(
novel_pose.unsqueeze(0),
W,
H,
focal,
self.z_near,
self.z_far,
c,
)
target_rays = target_rays.reshape(1, H * W, -1)
predict = DotMap(self.model.renderer_par(target_rays))
metrics_dict = util.calc_metrics(
predict, torch.tensor(gt)
)
psnr_per_test.append(metrics_dict["psnr"])
ssim_per_test.append(metrics_dict["ssim"])
psnr_per_scene = np.mean(psnr_per_test)
ssim_per_scene = np.mean(ssim_per_test)
print(psnr_per_scene, ssim_per_scene)
dataframe = pandas.DataFrame(
{
"Planning Type": stype,
"Reference Image Number": nviews,
"PSNR": psnr_per_scene,
"SSIM": ssim_per_scene,
"Scene": scene_id,
},
index=[repeat],
)
total_df = total_df.append(dataframe)
return total_df
def planning_args(parser):
"""
Parse arguments for evaluation setup.
"""
parser.add_argument(
"--model_name",
"-M",
type=str,
required=True,
help="model name of pretrained model",
)
parser.add_argument(
"--repeat",
"-R",
type=int,
default=5,
help="repeat times for planning experiment",
)
# arguments with default values
parser.add_argument(
"--evaluation_only", action="store_true", help="evaluation mode"
)
parser.add_argument(
"--experiment_path",
type=str,
default="not defined",
help="must be defined in evaluation mode",
)
parser.add_argument(
"--gpu_id", type=str, default="0", help="GPU(s) to use, space delimited"
)
return parser
if __name__ == "__main__":
main()
| 13,632 | 34.046272 | 113 | py |
neu-nbv | neu-nbv-main/scripts/planning/simulator_experiment.py | import rospy
import os
import sys
root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, root_dir)
import yaml
import argparse
from planner import get_planner
from planner.utils import uniform_sampling
import numpy as np
import scipy.spatial as spatial
from datetime import datetime
import imageio
import glob
from dotmap import DotMap
import torch
from neural_rendering.utils import util
from neural_rendering.evaluation.pretrained_model import PretrainedModel
import pandas
import torch.nn.functional as F
planner_title = {
"max_distance": "Max. View Distance",
"random": "Random",
"neural_nbv": "Ours",
}
def setup_random_seed(seed):
np.random.seed(seed)
def main():
# planning experiment in simulator using baseline planners and our planner
setup_random_seed(10)
rospy.init_node("simulator_experiment")
args = parse_args()
planner_type_list = ["max_distance", "random", "neural_nbv"]
repeat = args.repeat
experiment_path = args.experiment_path
if not args.evaluation_only:
experiment_path = os.path.join(
root_dir,
"experiments",
"simulator",
datetime.now().strftime("%d-%m-%Y-%H-%M"),
)
os.makedirs(experiment_path, exist_ok=True)
print("---------- planning ----------")
for i in range(repeat):
# initialize planning with 2 same views
random_initial_view = []
for _ in range(2):
random_initial_view.append(
uniform_sampling(radius=2, phi_min=0.15)
            )  # hard-coded, should match the value in the config file
for planner_type in planner_type_list:
# find planner configuration file
print(
f"---------- {planner_type} planner, experiment ID {i} ----------\n"
)
planner_cfg_path = os.path.join(
"planning/config", f"{planner_type}_planner.yaml"
)
assert os.path.exists(planner_cfg_path)
with open(planner_cfg_path, "r") as config_file:
planner_cfg = yaml.safe_load(config_file)
planner_cfg.update(args.__dict__)
planner_cfg["planner_type"] = planner_type
planner_cfg["experiment_path"] = experiment_path
planner_cfg["experiment_id"] = i
nbv_planner = get_planner(planner_cfg)
nbv_planner.start(initial_view=random_initial_view)
print("---------- evaluation ----------")
gpu_id = list(map(int, args.gpu_id.split()))
device = util.get_cuda(gpu_id[0])
log_path = os.path.join(root_dir, "neural_rendering", "logs", args.model_name)
assert os.path.exists(log_path), "experiment does not exist"
with open(f"{log_path}/training_setup.yaml", "r") as config_file:
cfg = yaml.safe_load(config_file)
checkpoint_path = os.path.join(log_path, "checkpoints", "best.ckpt")
assert os.path.exists(checkpoint_path), "checkpoint does not exist"
ckpt_file = torch.load(checkpoint_path)
model = PretrainedModel(cfg["model"], ckpt_file, device, gpu_id)
# load test view data as ground truth
test_rgbs, test_poses, focal, c = get_image_data(
args.test_data_path, "normal", device
)
# configure rendering information
nview = int(args.nviews)
_, _, H, W = test_rgbs.shape
z_near = cfg["data"]["dataset"]["z_near"]
z_far = cfg["data"]["dataset"]["z_far"]
step_list = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
total_df = pandas.DataFrame(
{
"Planning Type": [],
"Reference Image Num.": [],
"PSNR": [],
"SSIM": [],
}
)
for r in range(repeat):
for planner_type in planner_type_list:
ref_data_path = os.path.join(experiment_path, planner_type, str(r))
ref_rgbs, ref_poses, _, _ = get_image_data(ref_data_path, "normal", device)
for step in step_list:
print(
f"---------- planner:{planner_type}, repeat {r}, step {step} ----------\n"
)
ref_kd_tree = spatial.KDTree(ref_poses[:step, :3, 3].cpu().numpy())
psnr_list = []
ssim_list = []
with torch.no_grad():
for i, rgb in enumerate(test_rgbs):
pose = test_poses[i]
gt = rgb * 0.5 + 0.5
gt = gt.permute(1, 2, 0).cpu().numpy()
_, ref_index = ref_kd_tree.query(
pose[:3, 3].cpu().numpy(), np.minimum(nview, step)
)
model.network.encode(
ref_rgbs[ref_index].unsqueeze(0),
ref_poses[ref_index].unsqueeze(0),
focal.unsqueeze(0),
c.unsqueeze(0),
)
target_rays = util.gen_rays(
pose.unsqueeze(0), W, H, focal, z_near, z_far, c
)
target_rays = target_rays.reshape(1, H * W, -1)
predict = DotMap(model.renderer_par(target_rays))
metrics_dict = util.calc_metrics(predict, torch.tensor(gt))
psnr_list.append(metrics_dict["psnr"])
ssim_list.append(metrics_dict["ssim"])
psnr_mean = np.mean(psnr_list)
ssim_mean = np.mean(ssim_list)
print("psnr:", psnr_mean, "ssim:", ssim_mean)
dataframe = pandas.DataFrame(
{
"Planning Type": planner_title[planner_type],
"Reference Image Num.": step,
"PSNR": psnr_mean,
"SSIM": ssim_mean,
},
index=[r],
)
total_df = total_df.append(dataframe)
total_df.to_csv(f"{experiment_path}/dataframe.csv")
image_to_tensor = util.get_image_to_tensor_balanced()
def get_image_data(data_path, coordinate_format, device, rescale=0.5):
assert os.path.exists(data_path)
rgb_paths = [
x
for x in glob.glob(f"{data_path}/images/*")
if (x.endswith(".jpg") or x.endswith(".png"))
]
rgb_paths = sorted(rgb_paths)
images = []
poses = []
for image_path in rgb_paths:
image = imageio.imread(image_path)[..., :3]
image = image_to_tensor(image)
images.append(image)
pose_list = np.load(f"{data_path}/trajectory.npy")
for pose in pose_list:
pose = util.coordinate_transformation(pose, format=coordinate_format)
poses.append(pose)
with open(f"{data_path}/camera_info.yaml") as file:
intrinsic = yaml.safe_load(file)
images = torch.stack(images).to(device)
poses = torch.stack(poses).to(device)
if rescale != 1:
_, _, H, W = images.shape
H = int(rescale * H)
W = int(rescale * W)
        images = F.interpolate(images, size=[H, W], mode="area")  # interpolate expects (out_H, out_W)
focal = rescale * torch.tensor(intrinsic["focal"], dtype=torch.float32).to(device)
c = rescale * torch.tensor(intrinsic["c"], dtype=torch.float32).to(device)
assert len(images) == len(poses)
return images, poses, focal, c
def test_visualize(results_dict):
import matplotlib.pyplot as plt
H = 400
W = 400
rgb = results_dict.rgb[0].cpu().numpy().reshape(H, W, 3)
depth = results_dict.depth[0].cpu().numpy().reshape(H, W)
uncertainty = results_dict.uncertainty[0].cpu().numpy().reshape(H, W)
fig, axs = plt.subplots(1, 3)
axs[0].imshow(rgb)
axs[1].imshow(uncertainty)
axs[2].imshow(depth)
plt.show()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
"-M",
type=str,
required=True,
help="model name of pretrained model",
)
parser.add_argument(
"--test_data_path",
"-TD",
type=str,
required=True,
help="data path",
)
# mandatory arguments
parser.add_argument(
"--repeat",
"-rp",
type=int,
default=10,
help="repeat experiment",
)
# arguments with default values
parser.add_argument(
"--nviews", "-nv", type=int, default=5, help="number of reference views"
)
parser.add_argument(
"--planning_budget",
"-BG",
type=int,
default=20,
help="maximal measurments for the mission",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
help="config file path",
)
parser.add_argument(
"--gpu_id",
type=str,
default="0",
help="gpu to use, space delimited",
)
parser.add_argument(
"--evaluation_only", action="store_true", help="evaluation mode"
)
parser.add_argument(
"--experiment_path",
type=str,
default="not defined",
help="must be defined in evaluation mode",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
| 9,450 | 29.685065 | 94 | py |
neu-nbv | neu-nbv-main/scripts/planning/planner/simulator_bridge.py | import rospy
from gazebo_msgs.msg import ModelState
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge
from . import utils
import numpy as np
class SimulatorBridge:
def __init__(self, cfg):
self.cv_bridge = CvBridge()
self.current_rgb = None
self.current_depth = None
self.camera_type = cfg["camera_type"]
self.sensor_noise = cfg["sensor_noise"]
self.get_simulator_camera_info()
self.pose_pub = rospy.Publisher(
"/gazebo/set_model_state", ModelState, queue_size=1, latch=True
)
if self.camera_type == "rgb_camera":
self.rgb_sub = rospy.Subscriber(
"/rgb_camera/rgb_image_raw", Image, self.update_rgb
)
elif self.camera_type == "rgbd_camera":
self.rgb_sub = rospy.Subscriber(
"/rgbd_camera/rgb_image_raw", Image, self.update_rgb
)
self.depth_sub = rospy.Subscriber(
"/rgbd_camera/depth_image_raw", Image, self.update_depth
)
def get_simulator_camera_info(self):
camera_info_raw = rospy.wait_for_message(
f"/{self.camera_type}/camera_info", CameraInfo
)
K = camera_info_raw.K # intrinsic matrix
H = int(camera_info_raw.height) # image height
W = int(camera_info_raw.width) # image width
self.camera_info = {
"image_resolution": [H, W],
"c": [K[2], K[5]],
"focal": [K[0], K[4]],
}
def move_camera(self, pose):
quaternion = utils.rotation_2_quaternion(pose[:3, :3])
translation = pose[:3, -1]
camera_pose_msg = ModelState()
camera_pose_msg.model_name = self.camera_type
camera_pose_msg.pose.position.x = translation[0]
camera_pose_msg.pose.position.y = translation[1]
camera_pose_msg.pose.position.z = translation[2]
camera_pose_msg.pose.orientation.x = quaternion[0]
camera_pose_msg.pose.orientation.y = quaternion[1]
camera_pose_msg.pose.orientation.z = quaternion[2]
camera_pose_msg.pose.orientation.w = quaternion[3]
self.pose_pub.publish(camera_pose_msg)
def update_rgb(self, data):
self.current_rgb = data
def update_depth(self, data):
self.current_depth = data
def get_image(self):
rgb = self.cv_bridge.imgmsg_to_cv2(self.current_rgb, "rgb8")
rgb = np.array(rgb, dtype=float)
if self.sensor_noise != 0:
noise = np.random.normal(0.0, self.sensor_noise, rgb.shape)
rgb += noise
if self.camera_type == "rgb_camera":
depth = None
elif self.camera_type == "rgbd_camera":
depth = self.cv_bridge.imgmsg_to_cv2(self.current_depth, "32FC1")
return np.asarray(rgb), np.asarray(depth)
| 2,869 | 33.166667 | 77 | py |
neu-nbv | neu-nbv-main/scripts/planning/planner/utils.py | from scipy.spatial.transform import Rotation as R
import cv2
import numpy as np
import json
# from rembg import remove
import os
import imageio
def get_roi_mask(rgb):
"""binary mask for ROIs using color thresholding"""
hsv = cv2.cvtColor(np.array(rgb, dtype=np.uint8), cv2.COLOR_RGB2HSV)
lower_red = np.array([0, 50, 50])
upper_red = np.array([20, 255, 255])
mask0 = cv2.inRange(hsv, lower_red, upper_red)
lower_red = np.array([160, 50, 50])
upper_red = np.array([180, 255, 255])
mask1 = cv2.inRange(hsv, lower_red, upper_red)
mask = mask0 + mask1
mask = mask + 1e-5
return mask
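# Hedged mini-example (added for illustration; not part of the original code): a
# pure red RGB patch should be selected almost entirely by the HSV thresholds above.
def _example_roi_mask():
    red_patch = np.zeros((4, 4, 3), dtype=np.uint8)
    red_patch[..., 0] = 255  # pure red in RGB
    mask = get_roi_mask(red_patch)
    return mask.shape, float(np.count_nonzero(mask > 1)) / mask.size  # ~1.0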
def get_black_mask(rgb):
"""binary mask for ROIs using color thresholding"""
lower_black = np.array([250, 250, 250])
upper_black = np.array([255, 255, 255])
mask = cv2.inRange(rgb, lower_black, upper_black)
return mask
def visualize_uncertainty(uncertainty):
    # convert predicted log-variance back to variance and return it for plotting
    variance = np.exp(uncertainty)
    return variance
def rotation_2_quaternion(rotation_matrix):
r = R.from_matrix(rotation_matrix)
return r.as_quat()
def xyz_to_view(xyz, radius):
phi = np.arcsin(xyz[2] / radius) # phi from 0 to 0.5*pi
theta = np.arctan2(xyz[1], xyz[0]) % (2 * np.pi) # theta from 0 to 2*pi
return [phi, theta]
def view_to_pose(view, radius):
phi, theta = view
# phi should be within [min_phi, 0.5*np.pi)
if phi >= 0.5 * np.pi:
phi = np.pi - phi
pose = np.eye(4)
x = radius * np.cos(theta) * np.cos(phi)
y = radius * np.sin(theta) * np.cos(phi)
z = radius * np.sin(phi)
translation = np.array([x, y, z])
rotation = R.from_euler("ZYZ", [theta, -phi, np.pi]).as_matrix()
pose[:3, -1] = translation
pose[:3, :3] = rotation
return pose
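# Hedged round-trip sketch (added for illustration; the radius and angles are
# arbitrary): a [phi, theta] view mapped to a pose by view_to_pose should be
# recovered by xyz_to_view from the pose translation, up to floating point error.
def _example_view_pose_roundtrip(radius=2.0):
    view = [0.3, 1.0]  # [phi, theta] in radians
    pose = view_to_pose(view, radius)
    recovered = xyz_to_view(pose[:3, -1], radius)
    return np.allclose(view, recovered, atol=1e-6)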
def view_to_pose_batch(views, radius):
num = len(views)
phi = views[:, 0]
theta = views[:, 1]
# phi should be within [min_phi, 0.5*np.pi)
index = phi >= 0.5 * np.pi
phi[index] = np.pi - phi[index]
poses = np.broadcast_to(np.identity(4), (num, 4, 4)).copy()
x = radius * np.cos(theta) * np.cos(phi)
y = radius * np.sin(theta) * np.cos(phi)
z = radius * np.sin(phi)
translations = np.stack((x, y, z), axis=-1)
angles = np.stack((theta, -phi, np.pi * np.ones(num)), axis=-1)
rotations = R.from_euler("ZYZ", angles).as_matrix()
poses[:, :3, -1] = translations
poses[:, :3, :3] = rotations
return poses
def random_view(current_xyz, radius, phi_min, min_view_change, max_view_change):
"""
    randomly scatter the view direction around the given current position, within the given view-change range.
"""
u = current_xyz / np.linalg.norm(current_xyz)
# pick a random vector:
r = np.random.multivariate_normal(np.zeros_like(u), np.eye(len(u)))
# form a vector perpendicular to u:
uperp = r - r.dot(u) * u
uperp = uperp / np.linalg.norm(uperp)
# random view angle change in radian
random_view_change = np.random.uniform(low=min_view_change, high=max_view_change)
cosine = np.cos(random_view_change)
w = cosine * u + np.sqrt(1 - cosine**2 + 1e-8) * uperp
w = radius * w / np.linalg.norm(w)
view = xyz_to_view(w, radius)
if view[0] < phi_min:
view[0] = phi_min
return view
def uniform_sampling(radius, phi_min):
"""
    uniformly generate a unit vector on the upper hemisphere,
    then calculate the corresponding view direction targeting the coordinate origin.
"""
xyz = np.array([0.0, 0.0, 0.0])
# avoid numerical error
while np.linalg.norm(xyz) < 0.001:
xyz[0] = np.random.uniform(low=-1.0, high=1.0)
xyz[1] = np.random.uniform(low=-1.0, high=1.0)
xyz[2] = np.random.uniform(low=0.0, high=1.0)
xyz = radius * xyz / np.linalg.norm(xyz)
view = xyz_to_view(xyz, radius)
if view[0] < phi_min:
view[0] = phi_min
return view
def focal_len_to_fov(focal, resolution):
"""
    calculate FoV based on given focal length and image resolution
Args:
focal: [fx, fy]
resolution: [W, H]
Returns:
FoV: [HFoV, VFoV]
"""
focal = np.asarray(focal)
resolution = np.asarray(resolution)
return 2 * np.arctan(0.5 * resolution / focal)
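# Hedged worked example (added for illustration; the focal length and resolution
# are assumed values, not taken from any config): focal = [600, 600] px and
# resolution = [640, 480] (W, H) give a horizontal FoV of 2*arctan(320/600)
# ~= 0.980 rad (~56.1 deg) and a vertical FoV of 2*arctan(240/600) ~= 0.761 rad (~43.6 deg).
def _example_fov():
    return np.degrees(focal_len_to_fov([600, 600], [640, 480]))  # ~[56.1, 43.6]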
def mask_out_background(image_path):
"""remove background"""
rgb = imageio.imread(image_path)
masked_rgb = remove(rgb)
# H, W, _ = rgb.shape
# masked_rgb = np.ones((H, W, 4)) * 255
# masked_rgb[..., :3] = rgb
# mask_white = rgb >= np.array([254, 254, 254])
# mask_white = np.all(mask_white, axis=-1)
# mask_black = rgb <= np.array([1, 1, 1])
# mask_black = np.all(mask_black, axis=-1)
# masked_rgb[mask_white] = [0, 0, 0, 0]
# masked_rgb[mask_black] = [0, 0, 0, 0]
return masked_rgb
def record_render_data(path, camera_info, trajectory, use_masked_image=False):
transformation = np.array(
[[0, 0, -1, 0], [-1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]
) # transform gazebo coordinate to opengl format
opencv_trajectory = np.empty(trajectory.shape)
for i, pose in enumerate(trajectory):
opencv_trajectory[i] = pose @ transformation
resolution = camera_info["image_resolution"]
c = camera_info["c"]
focal = camera_info["focal"]
fov = focal_len_to_fov(focal, resolution)
record_dict = {}
record_dict["camera_angle_x"] = fov[0]
record_dict["camera_angle_y"] = fov[1]
record_dict["fl_x"] = focal[0]
record_dict["fl_y"] = focal[1]
record_dict["k1"] = 0.000001
record_dict["k2"] = 0.000001
record_dict["p1"] = 0.000001
record_dict["p2"] = 0.000001
record_dict["cx"] = c[0]
record_dict["cy"] = c[1]
record_dict["w"] = resolution[0]
record_dict["h"] = resolution[1]
record_dict["frames"] = []
record_dict["scale"] = 1.0
record_dict["aabb_scale"] = 2.0
for i, pose in enumerate(opencv_trajectory):
image_file = f"images/{i+1:04d}.png"
image_path = os.path.join(path, image_file)
if use_masked_image:
masked_image = mask_out_background(image_path)
image_file = f"images/masked_{i+1:04d}.png"
image_path = os.path.join(path, image_file)
imageio.imwrite(image_path, masked_image)
data_frame = {
"file_path": image_file,
# "sharpness": 30.0,
"transform_matrix": pose.tolist(),
}
record_dict["frames"].append(data_frame)
with open(f"{path}/transforms.json", "w") as f:
json.dump(record_dict, f, indent=4)
# for test only
# for i, pose in enumerate(opencv_trajectory[50:]):
# data_frame = {
# "file_path": f"images/{i+51:04d}.jpg",
# "sharpness": 30.0,
# "transform_matrix": pose.tolist(),
# }
# record_dict["frames"].append(data_frame)
# with open(f"{path}/test_transforms.json", "w") as f:
# json.dump(record_dict, f, indent=4)
def test():
view = []
# for i in range(5):
# new_view = uniform_sampling(2, 0.15)
# view.append(new_view)
# print("view:", new_view)
current_xyz = [0, 0, 2]
for i in range(500):
local = random_view(current_xyz, 2, 0.15, 0.2, 1.05)
view.append(local)
xyz_list = view_to_pose_batch(np.array(view), 2)[..., :3, 3]
print(xyz_list)
for xyz in xyz_list:
view = xyz_to_view(xyz, 2)
print(view)
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.scatter(xyz_list[..., 0], xyz_list[..., 1], xyz_list[..., 2])
ax.set_xlabel("X Label")
ax.set_ylabel("Y Label")
ax.set_zlabel("Z Label")
ax.set_zlim(0, 2.5)
ax.set_xlim(-2.5, 2.5)
ax.set_ylim(-2.5, 2.5)
plt.show()
if __name__ == "__main__":
import matplotlib.pyplot as plt
test()
| 7,770 | 26.459364 | 90 | py |
neu-nbv | neu-nbv-main/scripts/planning/planner/__init__.py | from .neural_nbv.neural_nbv_planner import NeuralNBVPlanner
from .baselines.random_planner import RandomPlanner
from .baselines.max_distance_planner import MaxDistancePlanner
def get_planner(cfg):
planner_type = cfg["planner_type"]
if planner_type == "neural_nbv":
return NeuralNBVPlanner(cfg)
elif planner_type == "random":
return RandomPlanner(cfg)
elif planner_type == "max_distance":
        return MaxDistancePlanner(cfg)
    else:
        raise ValueError(f"unknown planner type: {planner_type}")
| 462 | 29.866667 | 62 | py |
neu-nbv | neu-nbv-main/scripts/planning/planner/planner.py | from .simulator_bridge import SimulatorBridge
from . import utils
import time
import os
from datetime import datetime
import numpy as np
import imageio.v2 as imageio
import yaml
class Planner:
def __init__(self, cfg):
self.simulator_bridge = SimulatorBridge(cfg["simulation_bridge"])
self.camera_info = self.simulator_bridge.camera_info
self.record_path = os.path.join(
cfg["experiment_path"], cfg["planner_type"], str(cfg["experiment_id"])
)
self.planning_budget = cfg["planning_budget"]
self.initial_type = cfg["initial_type"]
self.H, self.W = self.camera_info[
"image_resolution"
] # original image resolution from simulator
self.trajectory = np.empty((self.planning_budget, 4, 4))
self.view_trajectory = np.empty((self.planning_budget, 2)) # [phi, theta]
self.rgb_measurements = np.empty((self.planning_budget, self.H, self.W, 3))
self.depth_measurements = np.empty((self.planning_budget, self.H, self.W))
self.step = 0
self.config_actionspace(cfg["action_space"])
def config_actionspace(self, cfg):
"""set hemisphere actionspace parameters"""
self.min_height = cfg["min_height"]
self.radius = cfg["radius"]
self.phi_min = np.arcsin(self.min_height / self.radius)
self.phi_max = 0.5 * np.pi
self.theta_min = 0
self.theta_max = 2 * np.pi
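        # Worked example (values assumed for illustration, not taken from a config):
        # with min_height = 0.3 and radius = 2.0, phi_min = arcsin(0.3 / 2.0)
        # ~= 0.151 rad (~8.6 deg), so candidate views stay just above the ground
        # plane, while theta spans the full [0, 2*pi) circle.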
def init_camera_pose(self, initial_view):
print("------ start mission ------ \n")
print("------ initialize camera pose ------ \n")
if initial_view is None:
if self.initial_type == "random":
initial_view = utils.uniform_sampling(self.radius, self.phi_min)
elif self.initial_type == "pre_calculated":
self.get_view_list()
initial_view = next(self.view_list)
self.move_sensor(initial_view)
else:
for view in initial_view:
self.move_sensor(view)
def start(self, initial_view=None):
self.init_camera_pose(initial_view)
while self.step < self.planning_budget:
next_view = self.plan_next_view()
self.move_sensor(next_view)
self.record_experiment()
print("------ complete mission ------\n")
# rospy.signal_shutdown("shut down ros node")
def move_sensor(self, view):
pose = utils.view_to_pose(view, self.radius)
self.simulator_bridge.move_camera(pose)
self.current_view = view
self.current_pose = pose
print(pose)
print(
f"------ reach given pose and take measurement No.{self.step + 1} ------\n"
)
time.sleep(1) # lazy solution to make sure we receive correct images
rgb, depth = self.simulator_bridge.get_image()
self.record_step(view, pose, rgb, depth)
self.step += 1
def plan_next_view(self):
raise NotImplementedError("plan_next_view method is not implemented")
def record_experiment(self):
print("------ record experiment data ------\n")
os.makedirs(self.record_path, exist_ok=True)
images_path = os.path.join(self.record_path, "images")
os.mkdir(images_path)
depths_path = os.path.join(self.record_path, "depths")
os.mkdir(depths_path)
for i, rgb in enumerate(self.rgb_measurements):
imageio.imwrite(
f"{images_path}/{i+1:04d}.png", (rgb * 255).astype(np.uint8)
)
if len(self.depth_measurements) > 0:
for i, depth in enumerate(self.depth_measurements):
with open(f"{depths_path}/depth_{i+1:04d}.npy", "wb") as f:
depth_array = np.array(depth, dtype=np.float32)
np.save(f, depth_array)
with open(f"{self.record_path}/trajectory.npy", "wb") as f:
np.save(f, self.trajectory)
with open(f"{self.record_path}/camera_info.yaml", "w") as f:
yaml.safe_dump(self.camera_info, f)
# record json data required for instant-ngp training
utils.record_render_data(self.record_path, self.camera_info, self.trajectory)
def record_step(self, view, pose, rgb, depth):
self.record_trajectory(view, pose)
self.record_rgb_measurement(rgb)
if depth is not None:
self.record_depth_measurement(depth)
def record_rgb_measurement(self, rgb):
rgb = np.clip(rgb, a_min=0, a_max=255)
rgb = rgb / 255
self.rgb_measurements[self.step] = rgb
def record_depth_measurement(self, depth):
self.depth_measurements[self.step] = depth
def record_trajectory(self, view, pose):
self.view_trajectory[self.step] = view
self.trajectory[self.step] = pose
| 4,844 | 34.364964 | 87 | py |
neu-nbv | neu-nbv-main/scripts/planning/planner/neural_nbv/neural_nbv_planner.py | import numpy as np
from scipy.spatial.transform import Rotation as R
from planner.planner import Planner
from planner.utils import view_to_pose_batch, random_view, uniform_sampling
from neural_rendering.evaluation.pretrained_model import PretrainedModel
import torch
from dotmap import DotMap
from neural_rendering.utils import util
import torch.nn.functional as F
import scipy.spatial as spatial
import matplotlib.pyplot as plt
import time
import yaml
import os
class NeuralNBVPlanner(Planner):
def __init__(self, cfg):
super().__init__(cfg)
self.device = cfg["device"]
self.gpu_id = list(map(int, cfg["gpu_id"].split()))
self.init_sensor_model(cfg)
self.image_to_tensor = util.get_image_to_tensor_balanced()
self.num_candidates = cfg["num_candidates"]
self.sample_type = cfg["sample_type"]
self.view_change = cfg["view_change"]
self.local_view_change = cfg["local_view_change"]
self.selection_range = cfg["selection_range"]
self.hierachical_sampling = cfg["use_hierachical_sampling"]
self.sample_ratio = cfg["sample_ratio"]
self.K = cfg["top_k"]
self.max_ref_num = cfg["maximal_ref"]
self.reward_type = cfg["reward_type"]
self.render_batch_size = cfg["render_batch_size"]
self.uncertainty_th = cfg["uncertainty_threshold"]
self.candidate_views = None
self.candidate_poses = None
self.render_pairs = None
self.trajectory_kdtree = None
# self.depth_for_renderer = torch.empty(
# (self.planning_budget, self.H, self.W)
# ).to(self.device)
def init_sensor_model(self, cfg):
assert os.path.exists(cfg["config_path"])
assert os.path.exists(cfg["checkpoint_path"])
with open(cfg["config_path"], "r") as config_file:
model_cfg = yaml.safe_load(config_file)["model"]
ckpt_file = torch.load(cfg["checkpoint_path"])
self.model = PretrainedModel(model_cfg, ckpt_file, self.device, self.gpu_id)
# original image format
H, W = self.camera_info["image_resolution"] # (H, W)
focal = self.camera_info["focal"] # (f_x, f_y)
c = self.camera_info["c"] # (c_x, c_y)
# desired image format for rendering input
render_info = cfg["render_info"]
H_ref, W_ref = render_info["ref_image_resolution"]
ref_focal = [0, 0]
ref_c = [0, 0]
if np.any([H, W] != [H_ref, W_ref]):
scale_h = H_ref / H
scale_w = W_ref / W
ref_focal[0] = scale_w * focal[0]
ref_focal[1] = scale_h * focal[1]
ref_c[0] = scale_w * c[0]
ref_c[1] = scale_h * c[1]
self.ref_focal = torch.tensor(ref_focal, dtype=torch.float32).to(self.device)
self.ref_c = torch.tensor(ref_c, dtype=torch.float32).to(self.device)
self.ref_image_resolution = (H_ref, W_ref)
self.trajectory_for_renderer = torch.empty((self.planning_budget, 4, 4)).to(
self.device
)
self.rgb_for_renderer = torch.empty((self.planning_budget, 3, H_ref, W_ref)).to(
self.device
)
# desired image format for rendering output
render_scale = render_info["render_scale"]
self.H_render = int(render_scale * H_ref)
self.W_render = int(render_scale * W_ref)
render_scale = torch.tensor(
[
self.W_render / W_ref,
self.H_render / H_ref,
]
).to(self.device)
self.render_focal = render_scale * self.ref_focal
self.render_c = render_scale * self.ref_c
self.z_near, self.z_far = render_info["scene_range"]
def render_novel_views(self, candidate_poses):
candidate_num = len(candidate_poses)
reward_list = np.zeros(candidate_num)
distance_all, ref_index_all = self.trajectory_kdtree.query(
candidate_poses[:, :3, 3], np.minimum(self.max_ref_num, self.step)
)
# distance_all = torch.tensor(distance_all)
# ref_index_all = torch.tensor(ref_index_all)
bool_mask = ~np.isinf(distance_all)
novel_poses = util.coordinate_transformation(
candidate_poses, format="normal"
).to(self.device)
# render novel view in batch
split_novel_view = torch.split(
torch.arange(candidate_num), self.render_batch_size, dim=0
)
for i in split_novel_view:
ref_index = torch.tensor(ref_index_all[i] * bool_mask[i])
ref_images = self.rgb_for_renderer[ref_index]
ref_poses = self.trajectory_for_renderer[ref_index]
render_results = self.rendering(ref_images, ref_poses, novel_poses[i])
reward_list[i] = self.cal_reward(render_results)
return reward_list
def rendering(self, ref_images, ref_poses, novel_poses):
NP = len(novel_poses)
with torch.no_grad():
self.model.network.encode(
ref_images,
ref_poses,
self.ref_focal.unsqueeze(0),
self.ref_c.unsqueeze(0),
)
target_rays = util.gen_rays(
novel_poses,
self.W_render,
self.H_render,
self.render_focal,
self.z_near,
self.z_far,
self.render_c,
) # (IN, H, W, 8)
target_rays = target_rays.reshape(NP, self.H_render * self.W_render, -1)
predict = DotMap(self.model.renderer_par(target_rays))
return predict
def cal_reward(self, render_results):
uncertainty = render_results["uncertainty"]
reward = torch.mean(uncertainty**2, dim=-1).cpu().numpy()
reward = np.log10(reward)
return reward
# one stage planning
def start_planning(self):
candidate_views, candidate_poses = self.local_sampling(
self.num_candidates, self.current_pose[:3, 3], view_change=self.view_change
)
reward_list = self.render_novel_views(candidate_poses)
nbv_index = np.argmax(reward_list)
return candidate_views[nbv_index]
def global_sampling(self, num):
view_list = np.empty((num, 2))
for i in range(num):
view_list[i] = uniform_sampling(self.radius, self.phi_min)
pose_list = view_to_pose_batch(view_list, self.radius)
return view_list, pose_list
def local_sampling(self, num, xyz, view_change, min_view_change=0.2):
view_list = np.empty((num, 2))
for i in range(num):
view_list[i] = random_view(
xyz, self.radius, self.phi_min, min_view_change, view_change
)
pose_list = view_to_pose_batch(view_list, self.radius)
return view_list, pose_list
def plan_next_view(self):
import time
if self.step > 1:
t1 = time.time()
nbv = self.start_planning()
t2 = time.time()
print((t2 - t1))
return nbv
# need at least two views to start the planning
else:
random_next_view = random_view(
self.current_pose[:3, 3],
self.radius,
self.phi_min,
self.view_change - 0.1,
self.view_change,
)
return random_next_view
def record_trajectory(self, view, pose):
self.view_trajectory[self.step] = view
self.trajectory[self.step] = pose
# maintain current measurment positions in kd tree
self.trajectory_kdtree = spatial.KDTree(self.trajectory[: self.step + 1, :3, 3])
self.trajectory_for_renderer[self.step] = util.coordinate_transformation(
pose, format="normal"
).to(self.device)
def record_rgb_measurement(self, rgb):
rgb = np.clip(rgb, a_min=0, a_max=255)
rgb = rgb / 255
self.rgb_measurements[self.step] = rgb
ref_image = self.image_to_tensor(rgb).to(self.device)
ref_image = F.interpolate(
ref_image.unsqueeze(0), size=self.ref_image_resolution, mode="area"
).squeeze(0)
self.rgb_for_renderer[self.step] = ref_image
def test_visualize(self, ref_images, results_dict):
import matplotlib.pyplot as plt
H = 60
W = 60
for i in range(self.render_batch_size):
rgb = results_dict.rgb[i].cpu().numpy().reshape(H, W, 3)
depth = results_dict.depth[i].cpu().numpy().reshape(H, W)
uncertainty = results_dict.uncertainty[i].cpu().numpy().reshape(H, W)
fig, axs = plt.subplots(1, 3)
axs[0].imshow(rgb)
axs[1].imshow(uncertainty)
axs[2].imshow(depth)
plt.show()
| 8,864 | 33.901575 | 88 | py |
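A small hedged illustration (not from the repository) of the reward computed in cal_reward above: the renderer's per-ray uncertainties are squared, averaged over each candidate view's rays, and log10-scaled, so the candidate whose rendering is most uncertain receives the largest reward and is the one start_planning selects.

import numpy as np
import torch

# one row per candidate view, one column per rendered ray (toy numbers)
uncertainty = torch.tensor([[0.10, 0.20, 0.40],
                            [0.05, 0.05, 0.10]])
reward = np.log10(torch.mean(uncertainty ** 2, dim=-1).numpy())
print(reward)                  # approx [-1.15, -2.30]
print(int(np.argmax(reward)))  # 0 -> the more uncertain candidate is chosen next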
neu-nbv | neu-nbv-main/scripts/planning/planner/baselines/random_planner.py | from planner.planner import Planner
from planner.utils import random_view, uniform_sampling
import numpy as np
class RandomPlanner(Planner):
def __init__(self, cfg):
super().__init__(cfg)
print("initial ")
self.num_candidates = cfg["num_candidates"]
self.view_change = cfg["view_change"]
self.planning_type = cfg["planning_type"]
def plan_next_view(self):
view_list = np.empty((self.num_candidates, 2))
if self.planning_type == "local":
for i in range(self.num_candidates):
view_list[i] = random_view(
self.current_pose[:3, 3],
self.radius,
self.phi_min,
min_view_change=0.2,
max_view_change=self.view_change,
)
elif self.planning_type == "global":
for i in range(self.num_candidates):
view_list[i] = uniform_sampling(self.radius, self.phi_min)
nbv_index = np.random.choice(len(view_list))
nbv = view_list[nbv_index]
return nbv
| 1,101 | 32.393939 | 74 | py |
neu-nbv | neu-nbv-main/scripts/planning/planner/baselines/max_distance_planner.py | from planner.planner import Planner
from planner.utils import view_to_pose_batch, random_view, uniform_sampling
import numpy as np
from scipy.spatial import distance
class MaxDistancePlanner(Planner):
def __init__(self, cfg):
super().__init__(cfg)
self.num_candidates = cfg["num_candidates"]
self.view_change = cfg["view_change"]
self.planning_type = cfg["planning_type"]
def get_camera_view_direction(self, poses):
view_direction = poses[..., :3, 0]
view_direction = view_direction / np.linalg.norm(view_direction, axis=-1, keepdims=True)  # normalize each view direction vector
return view_direction
def plan_next_view(self):
view_list = np.empty((self.num_candidates, 2))
if self.planning_type == "local":
for i in range(self.num_candidates):
view_list[i] = random_view(
self.current_pose[:3, 3],
self.radius,
self.phi_min,
min_view_change=0.2,
max_view_change=self.view_change,
)
elif self.planning_type == "global":
for i in range(self.num_candidates):
view_list[i] = uniform_sampling(self.radius, self.phi_min)
pose_list = view_to_pose_batch(view_list, self.radius)
new_view_list = self.get_camera_view_direction(pose_list)
reference_pose_list = self.trajectory[: self.step]
reference_view_list = self.get_camera_view_direction(reference_pose_list)
dist_list = []
for view in new_view_list:
dist = 0
count = 0
for ref_view in reference_view_list:
# print(view, ref_view)
cos_dist = distance.cosine(view, ref_view)
if cos_dist < 0.6:
dist += cos_dist
count += 1
# print(dist)
dist_list.append(dist / count)
nbv = view_list[np.argmax(dist_list)]
return nbv
| 1,982 | 34.410714 | 81 | py |
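For reference, a hedged toy example (not in the file) of the scoring in plan_next_view above: each candidate view direction is compared with the already visited view directions via cosine distance, only references closer than 0.6 are averaged, and the candidate with the largest average is selected, which pushes the camera away from directions that are already well covered.

import numpy as np
from scipy.spatial import distance

candidate = np.array([1.0, 0.0, 0.0])
visited = [np.array([0.9, 0.1, 0.0]), np.array([0.0, 1.0, 0.0])]
dists = [distance.cosine(candidate, v) for v in visited]  # ~[0.006, 1.0]
close = [d for d in dists if d < 0.6]                     # only near-parallel references count
score = sum(close) / len(close) if close else 0.0         # guarded here; the method above assumes at least one close reference
print(score)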
neu-nbv | neu-nbv-main/scripts/utils/plot.py | import seaborn as sb
import matplotlib.pyplot as plt
import pandas
import os
import argparse
from matplotlib.ticker import FormatStrFormatter
from matplotlib.ticker import MaxNLocator
from matplotlib import rc
PLOT_FONT_SIZE = 22
PLOT_LEGEND_SIZE = 18
PLOT_TICKS_SIZE = 18
PLOT_LINE_WIDTH = 4
plt.rcParams["figure.figsize"] = [12, 8]
plt.rcParams["figure.autolayout"] = True
sb.set_palette("bright")
def main():
args = plot_args()
dataframe_path = args.dataframe_path
assert os.path.exists(dataframe_path), "dataframe does not exist!"
plot(dataframe_path)
def plot(dataframe_path):
# DTU experiment plot
save_path = os.path.dirname(dataframe_path)
total_df = pandas.read_csv(dataframe_path)
fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True)
sb.lineplot(
total_df,
x="Reference Image Number",
y="PSNR",
hue="Planning Type",
linewidth=PLOT_LINE_WIDTH,
ax=ax1,
errorbar=("sd", 1),
palette=["C2", "C0", "C3"],
)
ax1.set_ylabel("PSNR", fontsize=PLOT_FONT_SIZE)
ax1.set_xlabel("Number of collected images", fontsize=PLOT_FONT_SIZE)
ax1.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
ax1.yaxis.set_major_locator(MaxNLocator(nbins=4))
ax1.tick_params(axis="both", labelsize=PLOT_TICKS_SIZE)
handles, labels = ax1.get_legend_handles_labels()
order = [2, 0, 1]
ax1.legend(
[handles[idx] for idx in order],
[labels[idx] for idx in order],
loc="lower right",
fontsize=PLOT_LEGEND_SIZE,
frameon=False,
)
sb.lineplot(
total_df,
x="Reference Image Number",
y="SSIM",
hue="Planning Type",
linewidth=PLOT_LINE_WIDTH,
ax=ax2,
errorbar=("sd", 1),
palette=["C2", "C0", "C3"],
)
ax2.set_xlabel("Number of collected images", fontsize=PLOT_FONT_SIZE)
ax2.set_ylabel("SSIM", fontsize=PLOT_FONT_SIZE)
ax2.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
ax2.yaxis.set_major_locator(MaxNLocator(nbins=4))
ax2.get_legend().remove()
ax2.tick_params(axis="both", labelsize=PLOT_TICKS_SIZE)
plt.xticks([2, 3, 4, 5, 6, 7, 8, 9])
plt.savefig(f"{save_path}/plot_results.svg", bbox_inches="tight")
plt.clf()
def plot_args():
# mandatory arguments
args = None
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataframe_path",
type=str,
required=True,
help="path to dataframe",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
main()
# # gazebo car experiment plot
# total_df = pandas.read_csv("dataframe/experiment_gazebo_car_0.csv")
# total_df = total_df.append(pandas.read_csv("dataframe/experiment_gazebo_car_1.csv"))
# fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True)
# sb.lineplot(
# total_df,
# x="Reference Image Num.",
# y="PSNR",
# hue="Planning Type",
# linewidth=PLOT_LINE_WIDTH,
# ax=ax1,
# errorbar=("sd", 1),
# palette=["C2", "C0", "C3"],
# )
# ax1.set_xlabel("Number of collected images", fontsize=PLOT_FONT_SIZE)
# ax1.set_ylabel("PSNR", fontsize=PLOT_FONT_SIZE)
# ax1.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
# ax1.yaxis.set_major_locator(MaxNLocator(nbins=4))
# # ax1.tick_params(labelsize=PLOT_TICKS_SIZE)
# ax1.legend(loc="lower right", fontsize=PLOT_FONT_SIZE)
# ax1.tick_params(axis="both", labelsize=PLOT_TICKS_SIZE)
# handles, labels = ax1.get_legend_handles_labels()
# order = [2, 0, 1]
# ax1.legend(
# [handles[idx] for idx in order],
# [labels[idx] for idx in order],
# loc="lower right",
# fontsize=PLOT_LEGEND_SIZE,
# frameon=False,
# )
# sb.lineplot(
# total_df,
# x="Reference Image Num.",
# y="SSIM",
# hue="Planning Type",
# linewidth=PLOT_LINE_WIDTH,
# ax=ax2,
# errorbar=("sd", 1),
# palette=["C2", "C0", "C3"],
# )
# ax2.set_xlabel("Number of collected images", fontsize=PLOT_FONT_SIZE)
# plt.xticks([2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
# ax2.set_ylabel("SSIM", fontsize=PLOT_FONT_SIZE)
# ax2.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
# ax2.yaxis.set_major_locator(MaxNLocator(nbins=4))
# ax2.get_legend().remove()
# ax2.tick_params(axis="both", labelsize=PLOT_TICKS_SIZE)
# # ax2.tick_params(labelsize=PLOT_TICKS_SIZE)
# # plt.show()
# plt.xticks(fontsize=PLOT_TICKS_SIZE)
# plt.yticks(fontsize=PLOT_TICKS_SIZE)
# plt.savefig("dataframe/gazebo_car.svg", bbox_inches="tight")
# plt.clf()
# # # gazebo indoor experiment plot
# total_df = pandas.read_csv("dataframe/experiment_gazebo_indoor_0.csv")
# total_df = total_df.append(pandas.read_csv("dataframe/experiment_gazebo_indoor_1.csv"))
# fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True)
# sb.lineplot(
# total_df,
# x="Reference Image Num.",
# y="PSNR",
# hue="Planning Type",
# linewidth=PLOT_LINE_WIDTH,
# ax=ax1,
# errorbar=("sd", 1),
# palette=["C2", "C0", "C3"],
# )
# ax1.set_ylabel("PSNR", fontsize=PLOT_FONT_SIZE)
# ax1.set_xlabel("Number of collected images", fontsize=PLOT_FONT_SIZE)
# ax1.yaxis.set_major_formatter(FormatStrFormatter("%.1f"))
# ax1.set_yticks([13.4, 14.6, 15.8, 17.0])
# # ax1.yaxis.set_major_locator(MaxNLocator(nbins=5))
# ax1.tick_params(axis="both", labelsize=PLOT_TICKS_SIZE)
# # ax1.tick_params(labelsize=PLOT_TICKS_SIZE)
# ax1.legend(loc="lower right", fontsize=PLOT_FONT_SIZE)
# handles, labels = ax1.get_legend_handles_labels()
# order = [2, 0, 1]
# ax1.legend(
# [handles[idx] for idx in order],
# [labels[idx] for idx in order],
# loc="lower right",
# fontsize=PLOT_LEGEND_SIZE,
# frameon=False,
# )
# sb.lineplot(
# total_df,
# x="Reference Image Num.",
# y="SSIM",
# hue="Planning Type",
# linewidth=PLOT_LINE_WIDTH,
# ax=ax2,
# errorbar=("sd", 1),
# palette=["C2", "C0", "C3"],
# )
# ax2.set_xlabel("Number of collected images", fontsize=PLOT_FONT_SIZE)
# ax2.set_ylabel("SSIM", fontsize=PLOT_FONT_SIZE)
# ax2.yaxis.set_major_formatter(FormatStrFormatter("%.2f"))
# ax2.yaxis.set_major_locator(MaxNLocator(nbins=4))
# ax2.get_legend().remove()
# ax2.tick_params(axis="both", labelsize=PLOT_TICKS_SIZE)
# # ax2.tick_params(labelsize=PLOT_TICKS_SIZE)
# # plt.show()
# plt.xticks([2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
# plt.xticks(fontsize=PLOT_TICKS_SIZE)
# plt.yticks(fontsize=PLOT_TICKS_SIZE)
# plt.savefig("dataframe/gazebo_indoor.svg", bbox_inches="tight")
# plt.clf()
| 6,469 | 29.518868 | 89 | py |
PolSF | PolSF-master/label2dto3d.py | import numpy as np
from skimage import io
from scipy import misc
### The original PolSAR data is downloaded from www.ietr.fr/polsarpro-bio/sanfrancisco.
### Courtesy of CNSA, CSA, ESA, IECAS, ISRO, JAXA, MDA, NASA-JPL, NSOAS.
### The color code and the class are shown as follows:
# SF-ALOS2
# https://www.ietr.fr/polsarpro-bio/san-francisco/dataset/SAN_FRANCISCO_ALOS2.zip
# color = [[0,0,0],[132,112,255],[0,0,255],[0,255,0],[192,0,0],[0,255,255],[255,255,0]]
## 0, unlabel, 1,Montain,2,Water,3,Vegetation,4,High-Density Urban,5,Low-Density Urban,6,Developd
# SF-GF3
# https://www.ietr.fr/polsarpro-bio/san-francisco/dataset/SAN_FRANCISCO_GF3.zip
# color = [[0,0,0],[132,112,255],[0,0,255],[0,255,0],[192,0,0],[0,255,255],[255,255,0]]
## 0, unlabeled, 1, Mountain, 2, Water, 3, Vegetation, 4, High-Density Urban, 5, Low-Density Urban, 6, Developed
# SF-RISAT
# https://www.ietr.fr/polsarpro-bio/san-francisco/dataset/SAN_FRANCISCO_RISAT.zip
# color = [[0,0,0],[132,112,255],[0,0,255],[0,255,0],[192,0,0],[0,255,255],[255,255,0]]
## 0, unlabeled, 1, Mountain, 2, Water, 3, Vegetation, 4, High-Density Urban, 5, Low-Density Urban, 6, Developed
# SF-RS2
# https://www.ietr.fr/polsarpro-bio/san-francisco/dataset/SAN_FRANCISCO_RS2.zip
# color = [[0,0,0], [0,0,255],[0,255,0],[255,0,0],[255,255,0],[255,0,255]]
## 0, unlabeled, 1, Water, 2, Vegetation, 3, High-Density Urban, 4, Low-Density Urban, 5, Developed
# SF-AIRSAR
# https://www.ietr.fr/polsarpro-bio/san-francisco/dataset/SAN_FRANCISCO_AIRSAR.zip
color = [[0,0,0], [0,255,255],[255,255,0],[0,0,255],[255,0,0],[0,255,0]]
## 0, unlabeled, 1, Mountain, 2, Water, 3, Urban, 4, Vegetation, 5, Bare soil
# label_files = '/home/bidlc/labelposar/PolSF/SF-AIRSAR/SF-AIRSAR-label2d.png'
datanameall = ['SF-AIRSAR','SF-AIRSAR','SF-AIRSAR','SF-AIRSAR','SF-AIRSAR']
dataname = datanameall[4]
label_files = './'+dataname+'/'+dataname+'-label2d.png'
label = io.imread(label_files)
mm = label.shape[0]
nn = label.shape[1]
label3d = np.zeros([mm, nn, 3])
for j in range(mm):
for k in range(nn):
print(j,k)
label3d[j][k]=color[int(label[j][k])]
misc.imsave('/home/bidlc/labelposar/PolSF/'+dataname+'/'+dataname+'-label3d-test.png', label3d) | 2,153 | 45.826087 | 97 | py |
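The per-pixel double loop above is correct but slow for large scenes, and scipy.misc.imsave is no longer available in recent SciPy releases. A hedged equivalent using NumPy fancy indexing and imageio (illustrative relative paths, SF-AIRSAR palette) is:

import numpy as np
import imageio
from skimage import io

color = np.array([[0, 0, 0], [0, 255, 255], [255, 255, 0],
                  [0, 0, 255], [255, 0, 0], [0, 255, 0]], dtype=np.uint8)  # SF-AIRSAR palette
label = io.imread('./SF-AIRSAR/SF-AIRSAR-label2d.png')
label3d = color[label.astype(np.int64)]  # fancy indexing replaces the nested loops
imageio.imwrite('./SF-AIRSAR/SF-AIRSAR-label3d-test.png', label3d)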
MP-FedXGB | MP-FedXGB-main/VerticalXGBoost.py | import numpy as np
import pandas as pd
from mpi4py import MPI
from datetime import *
from SSCalculation import *
from Tree import *
import math
import time
np.random.seed(10)
clientNum = 4
class LeastSquareLoss:
def gradient(self, actual, predicted):
return -(actual - predicted)
def hess(self, actual, predicted):
return np.ones_like(actual)
class LogLoss():
def gradient(self, actual, predicted):
prob = 1.0 / (1.0 + np.exp(-predicted))
return prob - actual
def hess(self, actual, predicted):
prob = 1.0 / (1.0 + np.exp(-predicted))
return prob * (1.0 - prob) # Mind the dimension
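# Hedged sanity-check sketch (added for illustration, not part of the original file):
# LogLoss.gradient/hess above are the first and second derivatives of the logistic
# loss written in terms of the raw score f, i.e. sigmoid(f) - y and sigmoid(f) * (1 - sigmoid(f)).
# A quick finite-difference check of the gradient:
def _check_logloss_gradient(eps=1e-6):
    y, f = np.array([1.0]), np.array([0.3])
    loss = lambda s: y * np.log(1.0 + np.exp(-s)) + (1 - y) * np.log(1.0 + np.exp(s))
    numeric = (loss(f + eps) - loss(f - eps)) / (2 * eps)
    assert np.allclose(numeric, LogLoss().gradient(y, f), atol=1e-4)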
class VerticalXGBoostClassifier:
def __init__(self, rank, lossfunc, splitclass, _lambda=1, _gamma=0.5, _epsilon=0.1, n_estimators=3, max_depth=3):
if lossfunc == 'LogLoss':
self.loss = LogLoss()
else:
self.loss = LeastSquareLoss()
self._lambda = _lambda
self._gamma = _gamma
self._epsilon = _epsilon
self.n_estimators = n_estimators # Number of trees
self.max_depth = max_depth # Maximum depth for tree
self.rank = rank
self.trees = []
self.splitclass = splitclass
for _ in range(n_estimators):
tree = VerticalXGBoostTree(rank=self.rank,
lossfunc=self.loss,
splitclass=self.splitclass,
_lambda=self._lambda,
_gamma=self._gamma,
_epsilon=self._epsilon,
_maxdepth=self.max_depth,
clientNum=clientNum)
self.trees.append(tree)
def getQuantile(self, colidx):
split_list = []
if self.rank != 0: # For client nodes
data = self.data.copy()
idx = np.argsort(data[:, colidx], axis=0)
data = data[idx]
value_list = sorted(list(set(list(data[:, colidx])))) # Record all the distinct values
hess = np.ones_like(data[:, colidx])
data = np.concatenate((data, hess.reshape(-1, 1)), axis=1)
sum_hess = np.sum(hess)
last = value_list[0]
i = 1
if len(value_list) == 1: # For those who has only one value, do such process.
last_cursor = last
else:
last_cursor = value_list[1]
split_list.append((-np.inf, value_list[0]))
# if len(value_list) == 15000:
# print(self.rank, colidx)
# print(value_list)
while i < len(value_list):
cursor = value_list[i]
small_hess = np.sum(data[:, -1][data[:, colidx] <= last]) / sum_hess
big_hess = np.sum(data[:, -1][data[:, colidx] <= cursor]) / sum_hess
# print(colidx, self.rank, np.abs(big_hess - small_hess), last, cursor)
if np.abs(big_hess - small_hess) < self._epsilon:
last_cursor = cursor
else:
judge = value_list.index(cursor) - value_list.index(last)
if judge == 1: # Even though the criterion is not met, there is no further split point between them, so we must add it.
split_list.append((last, cursor))
last = cursor
else: # Move forward and record the last.
split_list.append((last, last_cursor))
last = last_cursor
last_cursor = cursor
i += 1
if split_list[-1][1] != value_list[-1]:
split_list.append((split_list[-1][1], value_list[-1])) # Add the top value into split_list.
split_list = np.array(split_list)
return split_list
def getAllQuantile(self): # Global quantiles must be computed once before tree building, so the recursive tree construction does not recompute them.
self_maxlen = 0
if self.rank != 0:
dict = {i:self.getQuantile(i) for i in range(self.data.shape[1])} # record all the split
self_maxlen = max([len(dict[i]) for i in dict.keys()])
else:
dict = {}
recv_maxlen = comm.gather(self_maxlen, root=1)
maxlen = None
if self.rank == 1:
maxlen = max(recv_maxlen)
self.maxSplitNum = comm.bcast(maxlen, root=1)
# print('MaxSplitNum: ', self.maxSplitNum)
self.quantile = dict
def fit(self, X, y):
data_num = X.shape[0]
y = np.reshape(y, (data_num, 1))
y_pred = np.zeros(np.shape(y))
self.data = X.copy()
self.getAllQuantile()
for i in range(self.n_estimators):
# print('In classifier fit, rank: ', self.rank)
tree = self.trees[i]
tree.data, tree.maxSplitNum, tree.quantile = self.data, self.maxSplitNum, self.quantile
y_and_pred = np.concatenate((y, y_pred), axis=1)
tree.fit(y_and_pred, i)
if i == self.n_estimators - 1: # The last tree, no need for prediction update.
continue
else:
update_pred = tree.predict(X)
if self.rank == 1:
# print('test')
update_pred = np.reshape(update_pred, (data_num, 1))
y_pred += update_pred
def predict(self, X):
y_pred = None
data_num = X.shape[0]
# Make predictions
for tree in self.trees:
# Estimate gradient and update prediction
update_pred = tree.predict(X)
if y_pred is None:
y_pred = np.zeros_like(update_pred).reshape(data_num, -1)
if self.rank == 1:
update_pred = np.reshape(update_pred, (data_num, 1))
y_pred += update_pred
return y_pred
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
def main1():
data = pd.read_csv('./iris.csv').values
zero_index = data[:, -1] == 0
one_index = data[:, -1] == 1
zero_data = data[zero_index]
one_data = data[one_index]
train_size_zero = int(zero_data.shape[0] * 0.8)
train_size_one = int(one_data.shape[0] * 0.8)
X_train, X_test = np.concatenate((zero_data[:train_size_zero, :-1], one_data[:train_size_one, :-1]), 0), \
np.concatenate((zero_data[train_size_zero:, :-1], one_data[train_size_one:, :-1]), 0)
y_train, y_test = np.concatenate((zero_data[:train_size_zero, -1].reshape(-1,1), one_data[:train_size_one, -1].reshape(-1, 1)), 0), \
np.concatenate((zero_data[train_size_zero:, -1].reshape(-1, 1), one_data[train_size_one:, -1].reshape(-1, 1)), 0)
X_train_A = X_train[:, 0].reshape(-1, 1)
X_train_B = X_train[:, 2].reshape(-1, 1)
X_train_C = X_train[:, 1].reshape(-1, 1)
X_train_D = X_train[:, 3].reshape(-1, 1)
X_test_A = X_test[:, 0].reshape(-1, 1)
X_test_B = X_test[:, 2].reshape(-1, 1)
X_test_C = X_test[:, 1].reshape(-1, 1)
X_test_D = X_test[:, 3].reshape(-1, 1)
splitclass = SSCalculate()
model = VerticalXGBoostClassifier(rank=rank, lossfunc='LogLoss', splitclass=splitclass)
if rank == 1:
model.fit(X_train_A, y_train)
print('end 1')
elif rank == 2:
model.fit(X_train_B, np.zeros_like(y_train))
print('end 2')
elif rank == 3:
model.fit(X_train_C, np.zeros_like(y_train))
print('end 3')
elif rank == 4:
model.fit(X_train_D, np.zeros_like(y_train))
print('end 4')
else:
model.fit(np.zeros_like(X_train_B), np.zeros_like(y_train))
print('end 0')
if rank == 1:
y_pred = model.predict(X_test_A)
elif rank == 2:
y_pred = model.predict(X_test_B)
elif rank == 3:
y_pred = model.predict(X_test_C)
elif rank == 4:
y_pred = model.predict(X_test_D)
else:
model.predict(np.zeros_like(X_test_A))
if rank == 1:
y_ori = y_pred.copy()
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
result = y_pred - y_test
print(np.sum(result == 0) / y_pred.shape[0])
# for i in range(y_test.shape[0]):
# print(y_test[i], y_pred[i], y_ori[i])
def main2():
data = pd.read_csv('./GiveMeSomeCredit/cs-training.csv')
data.dropna(inplace=True)
data = data[['SeriousDlqin2yrs',
'RevolvingUtilizationOfUnsecuredLines', 'age',
'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio', 'MonthlyIncome',
'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse',
'NumberOfDependents']].values
ori_data = data.copy()
# Add features
# for i in range(1):
# data = np.concatenate((data, ori_data[:, 1:]), axis=1)
data = data / data.max(axis=0)
ratio = 10000 / data.shape[0]
zero_index = data[:, 0] == 0
one_index = data[:, 0] == 1
zero_data = data[zero_index]
one_data = data[one_index]
zero_ratio = len(zero_data) / data.shape[0]
one_ratio = len(one_data) / data.shape[0]
num = 7500
train_size_zero = int(zero_data.shape[0] * ratio) + 1
train_size_one = int(one_data.shape[0] * ratio)
X_train, X_test = np.concatenate((zero_data[:train_size_zero, 1:], one_data[:train_size_one, 1:]), 0), \
np.concatenate((zero_data[train_size_zero:train_size_zero+int(num * zero_ratio)+1, 1:], one_data[train_size_one:train_size_one+int(num * one_ratio), 1:]), 0)
y_train, y_test = np.concatenate(
(zero_data[:train_size_zero, 0].reshape(-1, 1), one_data[:train_size_one, 0].reshape(-1, 1)), 0), \
np.concatenate((zero_data[train_size_zero:train_size_zero+int(num * zero_ratio)+1, 0].reshape(-1, 1),
one_data[train_size_one:train_size_one+int(num * one_ratio), 0].reshape(-1, 1)), 0)
X_train_A = X_train[:, :2]
X_train_B = X_train[:, 2:4]
X_train_C = X_train[:, 4:7]
X_train_D = X_train[:, 7:]
X_test_A = X_test[:, :2]
X_test_B = X_test[:, 2:4]
X_test_C = X_test[:, 4:7]
X_test_D = X_test[:, 7:]
splitclass = SSCalculate()
model = VerticalXGBoostClassifier(rank=rank, lossfunc='LogLoss', splitclass=splitclass, max_depth=3, n_estimators=3, _epsilon=0.1)
start = datetime.now()
if rank == 1:
model.fit(X_train_A, y_train)
end = datetime.now()
print('In fitting 1: ', end - start)
time = end - start
for i in range(clientNum + 1):
if i == 1:
pass
else:
time += comm.recv(source=i)
print(time / (clientNum + 1))
final_time = time / (clientNum + 1)
print('end 1')
print(final_time)
elif rank == 2:
model.fit(X_train_B, np.zeros_like(y_train))
end = datetime.now()
comm.send(end - start, dest=1)
print('In fitting 2: ', end - start)
print('end 2')
elif rank == 3:
model.fit(X_train_C, np.zeros_like(y_train))
end = datetime.now()
print('In fitting 3: ', end - start)
comm.send(end - start, dest=1)
print('end 3')
elif rank == 4:
model.fit(X_train_D, np.zeros_like(y_train))
end = datetime.now()
print('In fitting 4: ', end - start)
comm.send(end - start, dest=1)
print('end 4')
else:
model.fit(np.zeros_like(X_train_B), np.zeros_like(y_train))
end = datetime.now()
print('In fitting 0: ', end - start)
comm.send(end - start, dest=1)
print('end 0')
if rank == 1:
y_pred = model.predict(X_test_A)
elif rank == 2:
y_pred = model.predict(X_test_B)
elif rank == 3:
y_pred = model.predict(X_test_C)
elif rank == 4:
y_pred = model.predict(X_test_D)
else:
model.predict(np.zeros_like(X_test_A))
if rank == 1:
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
y_pred2 = y_pred.copy()
y_pred2[y_pred2 > 0.5] = 1
y_pred2[y_pred2 <= 0.5] = 0
y_pred2 = y_pred2.reshape(-1,1)
y_test = y_test.reshape(-1,1)
result = y_pred2 - y_test
print(np.sum(result == 0) / y_pred.shape[0])
# for i in range(y_test.shape[0]):
# print(y_test[i], y_pred[i])
def main3():
data = np.load('./adult.npy')
data = data / data.max(axis=0)
ratio = 0.8
zero_index = data[:, 0] == 0
one_index = data[:, 0] == 1
zero_data = data[zero_index]
one_data = data[one_index]
train_size_zero = int(zero_data.shape[0] * ratio) + 1
train_size_one = int(one_data.shape[0] * ratio)
X_train, X_test = np.concatenate((zero_data[:train_size_zero, 1:], one_data[:train_size_one, 1:]), 0), \
np.concatenate((zero_data[train_size_zero:, 1:], one_data[train_size_one:, 1:]), 0)
y_train, y_test = np.concatenate(
(zero_data[:train_size_zero, 0].reshape(-1, 1), one_data[:train_size_one, 0].reshape(-1, 1)), 0), \
np.concatenate((zero_data[train_size_zero:, 0].reshape(-1, 1),
one_data[train_size_one:, 0].reshape(-1, 1)), 0)
segment_A = int(0.2 * (data.shape[1] - 1))
segment_B = segment_A + int(0.2 * (data.shape[1] - 1))
segment_C = segment_B + int(0.3 * (data.shape[1] - 1))
X_train_A = X_train[:, 0:segment_A]
X_train_B = X_train[:, segment_A:segment_B]
X_train_C = X_train[:, segment_B:segment_C]
X_train_D = X_train[:, segment_C:]
X_test_A = X_test[:, :segment_A]
X_test_B = X_test[:, segment_A:segment_B]
X_test_C = X_test[:, segment_B:segment_C]
X_test_D = X_test[:, segment_C:]
splitclass = SSCalculate()
model = VerticalXGBoostClassifier(rank=rank, lossfunc='LogLoss', splitclass=splitclass, max_depth=3, n_estimators=3, _epsilon=0.1)
if rank == 1:
start = datetime.now()
model.fit(X_train_A, y_train)
end = datetime.now()
print('In fitting: ', end - start)
print('end 1')
elif rank == 2:
model.fit(X_train_B, np.zeros_like(y_train))
print('end 2')
elif rank == 3:
model.fit(X_train_C, np.zeros_like(y_train))
print('end 3')
elif rank == 4:
model.fit(X_train_D, np.zeros_like(y_train))
else:
model.fit(np.zeros_like(X_train_B), np.zeros_like(y_train))
print('end 0')
if rank == 1:
y_pred = model.predict(X_test_A)
elif rank == 2:
y_pred = model.predict(X_test_B)
elif rank == 3:
y_pred = model.predict(X_test_C)
elif rank == 4:
y_pred = model.predict(X_test_D)
else:
model.predict(np.zeros_like(X_test_A))
if rank == 1:
y_ori = y_pred.copy()
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
y_pred2 = y_pred.copy()
y_pred2[y_pred2 > 0.5] = 1
y_pred2[y_pred2 <= 0.5] = 0
y_pred2 = y_pred2.reshape(-1,1)
y_test = y_test.reshape(-1,1)
result = y_pred2 - y_test
print(np.sum(result == 0) / y_pred.shape[0])
# for i in range(y_test.shape[0]):
# print(y_test[i], y_pred[i])
if __name__ == '__main__':
main2()
| 15,375 | 37.059406 | 179 | py |
MP-FedXGB | MP-FedXGB-main/XGBoost.py | import numpy as np
import progressbar
import pandas as pd
from datetime import *
np.random.seed(10)
class LeastSquareLoss():
def gradient(self, actual, predicted):
return -(actual - predicted)
def hess(self, actual, predicted):
return np.ones_like(actual)
class LogLoss():
def gradient(self, actual, predicted):
prob = 1.0 / (1.0 + np.exp(-predicted))
return prob - actual
def hess(self, actual, predicted):
prob = 1.0 / (1.0 + np.exp(-predicted))
return prob * (1.0 - prob) # Mind the dimension
class Tree:
def __init__(self, value=None, leftBranch=None, rightBranch=None, col=-1, result=None):
self.value = value
self.leftBranch = leftBranch
self.rightBranch = rightBranch
self.col = col
self.result = result
class XGBoostTree:
def __init__(self, lossfunc, _lambda, _gamma, _max_depth, _epsilon=0.05):
self.loss = lossfunc
self._lambda = _lambda
self._gamma = _gamma
self._epsilon = _epsilon
self._max_depth = _max_depth
def _split(self, y):
y, y_pred = y[:, 0].reshape(-1, 1), y[:, 1].reshape(-1, 1)
return y, y_pred
def _leaf_gain(self, g, h):
nominator = np.power(g, 2)
denominator = h + self._lambda
return nominator / denominator
def _split_criterion(self, left_g, left_h, right_g, right_h):
gain = (self._leaf_gain(left_g, left_h) + self._leaf_gain(right_g, right_h) -
self._leaf_gain(left_g + right_g, left_h + right_h)) * 0.5 - self._gamma
# print('In split criterion')
# print(self._leaf_gain(left_g, left_h))
# print(self._leaf_gain(right_g, right_h))
# print(self._leaf_gain(left_g + right_g, left_h + right_h))
# print('Out split criterion')
return gain
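# Added note (not in the original source): written out, this is the standard XGBoost split gain
#     Gain = 0.5 * [ GL^2 / (HL + lambda) + GR^2 / (HR + lambda)
#                    - (GL + GR)^2 / (HL + HR + lambda) ] - gamma,
# where GL/HL (GR/HR) are the summed gradients/hessians routed to the left (right) child;
# buildTree below only keeps a split when this gain is positive.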
def getQuantile(self, colidx, X, y, y_pred):
split_list = []
data = np.concatenate((X, y, y_pred), axis=1)
data = data.copy()
idx = np.argsort(data[:, colidx], axis=0)
data = data[idx]
value_list = sorted(list(set(list(data[:, colidx])))) # Record all the distinct values
hess = np.ones_like(data[:, colidx])
data = np.concatenate((data, hess.reshape(-1, 1)), axis=1)
sum_hess = np.sum(hess)
last = value_list[0]
i = 1
if len(value_list) == 1: # For those who has only one value, do such process.
last_cursor = last
else:
last_cursor = value_list[1]
split_list.append((-np.inf, value_list[0]))
while i < len(value_list):
cursor = value_list[i]
small_hess = np.sum(data[:, -1][data[:, colidx] <= last]) / sum_hess
big_hess = np.sum(data[:, -1][data[:, colidx] <= cursor]) / sum_hess
# print(colidx, self.rank, np.abs(big_hess - small_hess), last, cursor)
if np.abs(big_hess - small_hess) < self._epsilon:
last_cursor = cursor
else:
judge = value_list.index(cursor) - value_list.index(last)
if judge == 1: # Even though the criterion is not met, there is no further split point between them, so we must add it.
split_list.append((last, cursor))
last = cursor
else: # Move forward and record the last.
split_list.append((last, last_cursor))
last = last_cursor
last_cursor = cursor
i += 1
if split_list[-1][1] != value_list[-1]:
split_list.append((split_list[-1][1], value_list[-1])) # Add the top value into split_list.
split_list = np.array(split_list)
return split_list
def getAllQuantile(self, X, y): # Global quantiles must be computed once before tree building, so the recursive tree construction does not recompute them.
y, y_pred = self._split(y)
column_length = X.shape[1]
dict = {i: self.getQuantile(i, X, y, y_pred) for i in range(column_length)} # record all the split
self.quantile = dict
def buildTree(self, X, y, depth=1):
data = np.concatenate((X, y), axis=1)
y, y_pred = self._split(y)
column_length = X.shape[1]
gradient = self.loss.gradient(y, y_pred) # Compute gradients at the beginning to avoid recomputing them later.
hessian = self.loss.hess(y, y_pred)
# print('*' * 10, 'Gradient')
# print(np.concatenate([gradient, hessian], axis=1)[:20])
G = np.sum(gradient)
H = np.sum(hessian)
if depth > self._max_depth:
return Tree(result=- G / (H + self._lambda))
bestGain = 0
bestSplit = 0
bestSet = ()
for col in range(column_length):
splitList = self.quantile[col]
GL = 0
HL = 0
for k in range(splitList.shape[0]):
left = splitList[k][0]
right = splitList[k][1]
idx = ((data[:, col] <= right) & (data[:, col] > left))
GL += np.sum(gradient[idx])
HL += np.sum(hessian[idx])
GR = G - GL
HR = H - HL
gain = self._split_criterion(GL, HL, GR, HR)
if gain > bestGain:
bestGain = gain
bestSplit = (col, right)
bestSet = (data[data[:, col] <= right], data[data[:, col] > right])
if bestGain > 0:
# print('Split value: ', bestSplit[1])
# print('Into left')
leftBranch = self.buildTree(bestSet[0][:, :-2], bestSet[0][:, -2:], depth + 1)
# print('Out left')
# print('Into right')
rightBranch = self.buildTree(bestSet[1][:, :-2], bestSet[1][:, -2:], depth + 1)
# print('Out right')
return Tree(value=bestSplit[1], leftBranch=leftBranch, rightBranch=rightBranch, col=bestSplit[0])
else:
# print(-G/(H + self._lambda))
return Tree(result=- G / (H + self._lambda))
def fit(self, X, y):
self.getAllQuantile(X, y)
self.Tree = self.buildTree(X, y)
def classify(self, tree, data):
if tree.result != None:
return tree.result
else:
branch = None
v = data[tree.col]
if isinstance(v, int) or isinstance(v, float):
if v > tree.value:
branch = tree.rightBranch
else:
branch = tree.leftBranch
return self.classify(branch, data)
def predict(self, data):
data_num = data.shape[0]
result = []
for i in range(data_num):
result.append(self.classify(self.Tree, data[i]))
result = np.array(result).reshape((-1, 1))
return result
class XGBoostClassifier:
def __init__(self, lossfunc, _lambda=1, _gamma=0.5, _epsilon=0.1, n_estimators=3, learning_rate=1, min_samples_split=2, max_depth=3):
if lossfunc == 'LogLoss':
self.loss = LogLoss()
else:
self.loss = LeastSquareLoss()
self._lambda = _lambda
self._gamma = _gamma
self._epsilon = _epsilon
self.n_estimators = n_estimators # Number of trees
self.learning_rate = learning_rate # Step size for weight update
self.min_samples_split = min_samples_split # Minimum number of samples required to split
self.max_depth = max_depth # Maximum depth for tree
self.bar = progressbar.ProgressBar()
self.trees = []
for _ in range(n_estimators):
tree = XGBoostTree(
lossfunc=self.loss,
_lambda=self._lambda,
_gamma=self._gamma,
_max_depth=self.max_depth,
_epsilon=self._epsilon,)
self.trees.append(tree)
def fit(self, X, y):
data_num = X.shape[0]
y = np.reshape(y, (data_num, 1))
y_pred = np.zeros(np.shape(y))
# y_pred = np.random.rand(y.shape[0]).reshape(-1, 1)
for i in range(self.n_estimators):
tree = self.trees[i]
y_and_pred = np.concatenate((y, y_pred), axis=1)
# print(y_and_pred)
tree.fit(X, y_and_pred)
print('-' * 100)
update_pred = tree.predict(X)
update_pred = np.reshape(update_pred, (data_num, 1))
y_pred += update_pred
def predict(self, X):
y_pred = None
data_num = X.shape[0]
# Make predictions
for tree in self.trees:
# Estimate gradient and update prediction
update_pred = tree.predict(X)
update_pred = np.reshape(update_pred, (data_num, 1))
if y_pred is None:
y_pred = np.zeros_like(update_pred).reshape(data_num, -1)
y_pred += update_pred
return y_pred
def main():
data = pd.read_csv('./filtered_data_median.csv').values
# train_size = int(data.shape[0] * 0.9)
# X_train, X_test = data[:train_size, :-2], data[train_size:, :-2]
# y_train, y_test = data[:train_size, -2].reshape(-1, 1), data[train_size:, -2].reshape(-1, 1)
zero_index = data[:, -2] == 0
one_index = data[:, -2] == 1
zero_data = data[zero_index]
one_data = data[one_index]
train_size_zero = int(zero_data.shape[0] * 0.8)
train_size_one = int(one_data.shape[0] * 0.8)
X_train, X_test = np.concatenate((zero_data[:train_size_zero, :-2], one_data[:train_size_one, :-2]), 0), \
np.concatenate((zero_data[train_size_zero:, :-2], one_data[train_size_one:, :-2]), 0)
y_train, y_test = np.concatenate((zero_data[:train_size_zero, -2].reshape(-1,1), one_data[:train_size_one, -2].reshape(-1, 1)), 0), \
np.concatenate((zero_data[train_size_zero:, -2].reshape(-1, 1), one_data[train_size_one:, -2].reshape(-1, 1)), 0)
model = XGBoostClassifier(lossfunc='LogLoss')
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_ori = y_pred.copy()
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
result = y_pred - y_test
print(np.sum(result == 0) / y_pred.shape[0])
# for i in range(y_test.shape[0]):
# print(y_test[i], y_pred[i], y_ori[i])
def main2():
data = pd.read_csv('./iris.csv').values
zero_index = data[:, -1] == 0
one_index = data[:, -1] == 1
zero_data = data[zero_index]
one_data = data[one_index]
train_size_zero = int(zero_data.shape[0] * 0.8)
train_size_one = int(one_data.shape[0] * 0.8)
X_train, X_test = np.concatenate((zero_data[:train_size_zero, :-1], one_data[:train_size_one, :-1]), 0), \
np.concatenate((zero_data[train_size_zero:, :-1], one_data[train_size_one:, :-1]), 0)
y_train, y_test = np.concatenate((zero_data[:train_size_zero, -1].reshape(-1,1), one_data[:train_size_one, -1].reshape(-1, 1)), 0), \
np.concatenate((zero_data[train_size_zero:, -1].reshape(-1, 1), one_data[train_size_one:, -1].reshape(-1, 1)), 0)
model = XGBoostClassifier(lossfunc='LogLoss', n_estimators=3)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_ori = y_pred.copy()
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
result = y_pred - y_test
print(np.sum(result == 0) / y_pred.shape[0])
for i in range(y_test.shape[0]):
print(y_test[i], y_pred[i], y_ori[i])
def main3():
from sklearn.ensemble import GradientBoostingClassifier
data = pd.read_csv('./iris.csv').values
zero_index = data[:, -1] == 0
one_index = data[:, -1] == 1
zero_data = data[zero_index]
one_data = data[one_index]
train_size_zero = int(zero_data.shape[0] * 0.8)
train_size_one = int(one_data.shape[0] * 0.8)
X_train, X_test = np.concatenate((zero_data[:train_size_zero, :-1], one_data[:train_size_one, :-1]), 0), \
np.concatenate((zero_data[train_size_zero:, :-1], one_data[train_size_one:, :-1]), 0)
y_train, y_test = np.concatenate((zero_data[:train_size_zero, -1].reshape(-1,1), one_data[:train_size_one, -1].reshape(-1, 1)), 0), \
np.concatenate((zero_data[train_size_zero:, -1].reshape(-1, 1), one_data[train_size_one:, -1].reshape(-1, 1)), 0)
model = XGBoostClassifier(lossfunc='LogLoss')
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
print(y_pred)
y_pred[y_pred > 0.5] = 1
y_pred[y_pred <= 0.5] = 0
result = y_pred - y_test
print(np.sum(result == 0) / y_pred.shape[0])
for i in range(y_test.shape[0]):
print(y_test[i], y_pred[i])
def main6():
data_train = pd.read_csv('./GiveMeSomeCredit/cs-training.csv')
data_train = data_train[['SeriousDlqin2yrs',
'RevolvingUtilizationOfUnsecuredLines', 'age',
'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio', 'MonthlyIncome',
'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse',
'NumberOfDependents']].values
data_train = data_train.astype('float16')  # astype converts the values; assigning .dtype would reinterpret the raw buffer
data_test = pd.read_csv('./GiveMeSomeCredit/cs-training.csv')
data_test = data_test[['SeriousDlqin2yrs',
'RevolvingUtilizationOfUnsecuredLines', 'age',
'NumberOfTime30-59DaysPastDueNotWorse', 'DebtRatio', 'MonthlyIncome',
'NumberOfOpenCreditLinesAndLoans', 'NumberOfTimes90DaysLate',
'NumberRealEstateLoansOrLines', 'NumberOfTime60-89DaysPastDueNotWorse',
'NumberOfDependents']].values
data_test = data_test.astype('float16')
y_train = data_train[:, 0]
X_train = data_train[:, 1:]
X_test = data_test[:, 1:]
model = XGBoostClassifier(lossfunc='LogLoss')
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_ori = y_pred.copy()
y_pred = 1.0 / (1.0 + np.exp(-y_pred))
y_pred2 = y_pred.copy()
y_pred2[y_pred2 > 0.5] = 1
y_pred2[y_pred2 <= 0.5] = 0
print(y_pred)
# result = y_pred2 - y_test
# print(np.sum(result == 0) / y_pred.shape[0])
if __name__ == '__main__':
start = datetime.now()
main3()
end = datetime.now()
print(end - start)
| 14,368 | 39.590395 | 137 | py |
MP-FedXGB | MP-FedXGB-main/SSCalculation.py | import numpy as np
import pandas as pd
from mpi4py import MPI
from datetime import *
import math
import time
from VerticalXGBoost import *
from Tree import *
np.random.seed(10)
clientNum = 4
comm = MPI.COMM_WORLD
class SSCalculate:
def SSSplit(self, data, clientNum):
r = np.array([np.random.uniform(0, 4, (data.shape[0], data.shape[1])) for i in range(clientNum - 1)])
data = data.astype('float64')
data -= np.sum(r, axis=0).astype('float64')
data = np.expand_dims(data, axis=0)
dataList = np.concatenate([r, data], axis=0)
return dataList
def SMUL(self, data_A, data_B, rank):
if len(data_A.shape) <= 1:
data_A = data_A.reshape(-1, 1)
data_B = data_B.reshape(-1, 1)
if rank == 0: # Send shared data
a = np.random.rand(data_A.shape[0], data_A.shape[1])
b = np.random.rand(data_A.shape[0], data_A.shape[1])
c = a * b
dataList_a = self.SSSplit(a, clientNum)
dataList_b = self.SSSplit(b, clientNum)
dataList_c = self.SSSplit(c, clientNum)
for i in range(1, clientNum + 1):
comm.send([dataList_a[i - 1], dataList_b[i - 1], dataList_c[i - 1]], dest=i)
return a
elif rank == 1:
ra, rb, rc = comm.recv(source=0)
ei = data_A - ra
fi = data_B - rb
eList = []
fList = []
for i in range(2, clientNum + 1):
temp_e, temp_f = comm.recv(source=i)
eList.append(temp_e)
fList.append(temp_f)
e = np.sum(np.array(eList), axis=0) + ei
f = np.sum(np.array(fList), axis=0) + fi
for i in range(2, clientNum + 1):
comm.send((e, f), dest=i)
zi = e * f + f * ra + e * rb + rc
return zi
else:
ra, rb, rc = comm.recv(source=0)
ei = data_A - ra
fi = data_B - rb
comm.send((ei, fi), dest=1)
e, f = comm.recv(source=1)
zi = f * ra + e * rb + rc
return zi
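# Added derivation (for clarity, not in the original source): SMUL is Beaver-triple
# multiplication. Rank 0 deals additive shares of (a, b, c) with c = a * b; the data
# parties open e = x - a and f = y - b, where x and y are the secrets held as the
# shares data_A and data_B. Summing the values returned by all data parties gives
#     e*f + f*a + e*b + c = (x - a)(y - b) + (y - b)*a + (x - a)*b + a*b = x * y,
# so the parties end up holding additive shares of the product. Only rank 1 adds the
# public term e*f, which is why its return value differs from the other ranks.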
def SDIV(self, data_A, data_B, rank):
# iter = 8
# factor = 1.9
if len(data_A.shape) <= 1:
data_A = data_A.reshape(-1, 1)
data_B = data_B.reshape(-1, 1)
iter = 20
if rank == 0: # Send shared data
divisor_list = []
for i in range(1, clientNum + 1):
divisor_list.append(comm.recv(source=i))
divisor_list = np.array(divisor_list)
divisor = np.min(divisor_list, axis=0) / 10
divisor /= clientNum # Equally share the divisor to parties.
# divisor = divisor / np.ceil(clientNum / 2)
for i in range(1, clientNum + 1):
comm.send(divisor, dest=i)
for i in range(iter):
self.SMUL(data_B, divisor, rank)
self.SMUL(divisor, divisor, rank)
self.SMUL(data_A, data_B, rank)
return divisor
else:
divisor = np.zeros_like(data_B)
divisor.dtype = np.float64
for i in range(data_B.shape[0]):
for j in range(data_B.shape[1]):
k = 0
data = abs(data_B[i, j])
if data > 1:
while data >= 1:
data /= 10
k += 1
divisor[i, j] = 1 / pow(10, k)
else:
while data <= 1:
data *= 10
k += 1
k -= 1
divisor[i, j] = 1 * pow(10, k)
comm.send(divisor, dest=0)
divisor = comm.recv(source=0)
for i in range(iter):
t = 2 / clientNum - self.SMUL(data_B, divisor, rank)
divisor_next = self.SMUL(divisor, t, rank)
divisor = divisor_next
result = self.SMUL(data_A, divisor, rank)
return result
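# Added note (for clarity, not in the original source): SDIV approximates 1 / data_B with
# the Newton-Raphson iteration x_{k+1} = x_k * (2 - b * x_k), which converges to 1/b when
# the initial guess has a suitable sign and magnitude. Here every party contributes an
# order-of-magnitude estimate for the initial guess, and the public constant 2 is split as
# 2 / clientNum so that the shares of t sum to 2 - b * x_k. The quotient is then obtained
# as data_A * (1 / data_B) through a final SMUL.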
# Implement ARGMAX by calculating SS division in build_tree.
def SARGMAX(self, data, rank):
new_col_index_list = None
new_row_index_list = None
row_idx_dict = {}
for k in range(data.shape[0]):
ori_value_list = data[k, :]
value_list = ori_value_list.copy()
col_index_list = [i for i in range(0, len(ori_value_list))]
while ori_value_list.shape[0] > 1:
if rank != 0:
if len(ori_value_list) % 2 == 0: # Even
value_list = [ori_value_list[i] - ori_value_list[i + 1] for i in range(0, len(ori_value_list), 2)]
else:
value_list = [ori_value_list[i] - ori_value_list[i + 1] for i in range(0, len(ori_value_list) - 1, 2)]
value_list.append(value_list[-1])
value_list = np.array(value_list)
total_value_list = comm.gather(value_list, root=0)
if rank == 0:
total_value_list = total_value_list[1:] # Rip out the nonsense list from rank 0.
shared_value_sum = np.sum(np.array(total_value_list), axis=0)
sign_list = np.array(shared_value_sum >= 0) # Record the judgement.
new_col_index_list = []
iter_size = len(sign_list)
if len(ori_value_list) % 2 != 0:
iter_size -= 1
for j in range(iter_size):
if sign_list[j]: # True, or the former value is bigger than the latter.
new_col_index_list.append(col_index_list[j * 2])
else:
new_col_index_list.append(col_index_list[j * 2 + 1])
if len(ori_value_list) % 2 != 0: # Odd
new_col_index_list.append(col_index_list[-1])
new_col_index_list = comm.bcast(new_col_index_list, root=0)
ori_value_list = np.array([data[k, i] for i in new_col_index_list])
col_index_list = new_col_index_list
col_idx = col_index_list[0] # Retrieve out the only col index.
row_idx_dict[k] = col_idx
ori_value_list = np.array([data[i, row_idx_dict[i]] for i in row_idx_dict.keys()])
value_list = ori_value_list.copy()
row_index_list = [i for i in range(0, len(ori_value_list))]
while ori_value_list.shape[0] > 1:
if rank != 0:
if len(ori_value_list) % 2 == 0: # Even
value_list = [ori_value_list[i] - ori_value_list[i + 1] for i in range(0, len(ori_value_list), 2)]
else:
value_list = [ori_value_list[i] - ori_value_list[i + 1] for i in range(0, len(ori_value_list) - 1, 2)]
value_list.append(ori_value_list[-1])
value_list = np.array(value_list)
total_value_list = comm.gather(value_list, root=0)
if rank == 0:
total_value_list = total_value_list[1:] # Rip out the nonsense list from rank 0.
shared_value_sum = np.sum(np.array(total_value_list), axis=0)
sign_list = np.array(shared_value_sum >= 0) # Record the judgement.
new_row_index_list = []
iter_size = len(sign_list)
if len(ori_value_list) % 2 != 0:
iter_size -= 1
for j in range(iter_size):
if sign_list[j]: # True, or the former value is bigger than the latter.
new_row_index_list.append(row_index_list[j * 2])
else:
new_row_index_list.append(row_index_list[j * 2 + 1])
if len(ori_value_list) % 2 != 0: # Odd
new_row_index_list.append(row_index_list[-1])
new_row_index_list = comm.bcast(new_row_index_list, root=0)
ori_value_list = np.array([data[i, row_idx_dict[i]] for i in new_row_index_list])
row_index_list = new_row_index_list
return row_index_list[0], row_idx_dict[row_index_list[0]] # Return feature and split position
# Implement ARGMAX and rip out SS division.
def SARGMAX_ver2(self, gain_left_up, gain_left_down, gain_right_up, gain_right_down, rank):
new_col_index_list = None
new_row_index_list = None
row_idx_dict = {}
row_num = gain_left_up.shape[0]
nominator_sign_list = denominator_sign_list = None
for k in range(row_num):
col_index_list = [i for i in range(0, len(gain_right_down[0, :]))]
while len(col_index_list) > 1:
iter_size = len(col_index_list)
if iter_size % 2 != 0: # Odd
iter_size -= 1
list1 = [gain_left_up[k, col_index_list[i]] for i in range(0, iter_size, 2)]
list2 = [gain_right_down[k, col_index_list[i]] for i in range(0, iter_size, 2)]
list3 = [gain_right_up[k, col_index_list[i]] for i in range(0, iter_size, 2)]
list4 = [gain_left_down[k, col_index_list[i]] for i in range(0, iter_size, 2)]
list5 = [gain_left_up[k, col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list6 = [gain_right_down[k, col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list7 = [gain_right_up[k, col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list8 = [gain_left_down[k, col_index_list[i + 1]] for i in range(0, iter_size, 2)]
nominator1 = self.SMUL(np.array(list1), np.array(list8), rank) - self.SMUL(np.array(list5),
np.array(list4), rank)
nominator2 = self.SMUL(np.array(list3), np.array(list6), rank) - self.SMUL(np.array(list7),
np.array(list2), rank)
denominator1 = self.SMUL(np.array(list4), np.array(list8), rank)
denominator2 = self.SMUL(np.array(list2), np.array(list6), rank)
total_nominator = self.SMUL(nominator1, denominator2, rank) + self.SMUL(nominator2, denominator1, rank)
total_denominator = self.SMUL(denominator1, denominator2, rank)
total_nominator_list = comm.gather(total_nominator, root=2)
total_deominator_list = comm.gather(total_denominator, root=1)
if rank == 2:
total_nominator_list = total_nominator_list[1:]
nominator_sign_list = np.sum(np.array(total_nominator_list), axis=0)
nominator_sign_list[nominator_sign_list >= 0] = 1
nominator_sign_list[nominator_sign_list < 0] = -1
comm.send(nominator_sign_list, dest=1)
elif rank == 1:
total_denominator_list = total_deominator_list[1:]
denominator_sign_list = np.sum(np.array(total_denominator_list), axis=0)
denominator_sign_list[denominator_sign_list >= 0] = 1
denominator_sign_list[denominator_sign_list < 0] = -1
nominator_sign_list = comm.recv(source=2)
sign_list = denominator_sign_list * nominator_sign_list # Record the judgement.
sign_list = sign_list >= 0 + 0
new_col_index_list = []
iter_size = len(sign_list)
for j in range(iter_size):
if sign_list[j]: # True, or the former value is bigger than the latter.
new_col_index_list.append(col_index_list[j * 2])
else:
new_col_index_list.append(col_index_list[j * 2 + 1])
if len(col_index_list) % 2 != 0: # Odd
new_col_index_list.append(col_index_list[-1])
new_col_index_list = comm.bcast(new_col_index_list, root=1)
col_index_list = new_col_index_list
col_idx = col_index_list[0] # Retrieve out the only col index.
row_idx_dict[k] = col_idx
row_index_list = [i for i in row_idx_dict.keys()]
nominator_sign_list = denominator_sign_list = None
while len(row_index_list) > 1:
iter_size = len(row_index_list)
if len(row_index_list) % 2 != 0: # Odd
iter_size -= 1
list1 = [gain_left_up[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list2 = [gain_right_down[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list3 = [gain_right_up[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list4 = [gain_left_down[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list5 = [gain_left_up[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list6 = [gain_right_down[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list7 = [gain_right_up[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list8 = [gain_left_down[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
nominator1 = self.SMUL(np.array(list1), np.array(list8), rank) - self.SMUL(np.array(list5),
np.array(list4),
rank)
nominator2 = self.SMUL(np.array(list3), np.array(list6), rank) - self.SMUL(np.array(list7),
np.array(list2),
rank)
denominator1 = self.SMUL(np.array(list4), np.array(list8), rank)
denominator2 = self.SMUL(np.array(list2), np.array(list6), rank)
total_nominator = self.SMUL(nominator1, denominator2, rank) + self.SMUL(nominator2, denominator1, rank)
total_denominator = self.SMUL(denominator1, denominator2, rank)
total_nominator_list = comm.gather(total_nominator, root=2)
total_deominator_list = comm.gather(total_denominator, root=1)
if rank == 2:
total_nominator_list = total_nominator_list[1:]
nominator_sign_list = np.sum(np.array(total_nominator_list), axis=0)
nominator_sign_list[nominator_sign_list >= 0] = 1
nominator_sign_list[nominator_sign_list < 0] = -1
comm.send(nominator_sign_list, dest=1)
elif rank == 1:
total_denominator_list = total_deominator_list[1:]
denominator_sign_list = np.sum(np.array(total_denominator_list), axis=0)
denominator_sign_list[denominator_sign_list >= 0] = 1
denominator_sign_list[denominator_sign_list < 0] = -1
nominator_sign_list = comm.recv(source=2)
sign_list = denominator_sign_list * nominator_sign_list # Record the judgement.
sign_list = sign_list >= 0 + 0
new_row_index_list = []
iter_size = len(sign_list)
for j in range(iter_size):
if sign_list[j]: # True, or the former value is bigger than the latter.
new_row_index_list.append(row_index_list[j * 2])
else:
new_row_index_list.append(row_index_list[j * 2 + 1])
if len(row_index_list) % 2 != 0: # Odd
new_row_index_list.append(row_index_list[-1])
new_row_index_list = comm.bcast(new_row_index_list, root=1)
row_index_list = new_row_index_list
# if rank == 0:
# print(row_index_list[0], row_idx_dict[row_index_list[0]])
return row_index_list[0], row_idx_dict[row_index_list[0]] # Return feature and split position
# Implement the Fisrt-tree trick from Kewei Cheng's paper.
def SARGMAX_ver3(self, gain_left_up, gain_left_down, gain_right_up, gain_right_down, rank, tree_num, legal_featureList):
new_col_index_list = None
new_row_index_list = None
row_idx_dict = {}
row_num = gain_left_up.shape[0]
permission = True
for k in range(row_num):
if tree_num == 0: # The first tree.
if rank == 1: # The first party who holds labels.
if k not in legal_featureList:
permission = False
else:
permission = True
comm.send(permission, dest=0)
for i in range(2, clientNum + 1):
comm.send(permission, dest=i)
else:
permission = comm.recv(source=1)
if not permission:
continue # Jump to the next feature
gain_left_up_ori = gain_left_up[k, :]
gain_left_down_ori = gain_left_down[k, :]
gain_right_up_ori = gain_right_up[k, :]
gain_right_down_ori = gain_right_down[k, :]
value_list = np.zeros_like(gain_left_up_ori)
col_index_list = [i for i in range(0, len(value_list))]
while len(col_index_list) > 1:
iter_size = len(col_index_list)
if len(col_index_list) % 2 != 0: # Odd
iter_size -= 1
list1 = [gain_left_up_ori[col_index_list[i]] for i in range(0, iter_size, 2)]
list2 = [gain_right_down_ori[col_index_list[i]] for i in range(0, iter_size, 2)]
list3 = [gain_right_up_ori[col_index_list[i]] for i in range(0, iter_size, 2)]
list4 = [gain_left_down_ori[col_index_list[i]] for i in range(0, iter_size, 2)]
list5 = [gain_left_up_ori[col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list6 = [gain_right_down_ori[col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list7 = [gain_right_up_ori[col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list8 = [gain_left_down_ori[col_index_list[i + 1]] for i in range(0, iter_size, 2)]
nominator1 = self.SMUL(np.array(list1), np.array(list8), rank) - self.SMUL(np.array(list5), np.array(list4), rank)
nominator2 = self.SMUL(np.array(list3), np.array(list6), rank) - self.SMUL(np.array(list7), np.array(list2), rank)
denominator1 = self.SMUL(np.array(list4), np.array(list8), rank)
denominator2 = self.SMUL(np.array(list2), np.array(list6), rank)
total_nominator1_list = comm.gather(nominator1, root=2)
total_nominator2_list = comm.gather(nominator2, root=2)
total_denominator1_list = comm.gather(denominator1, root=2)
total_denominator2_list = comm.gather(denominator2, root=2)
if rank == 2:
total_nominator1_list = total_nominator1_list[1:] # Rip out the nonsense list from rank 0.
total_nominator2_list = total_nominator2_list[1:]
total_denominator1_list = total_denominator1_list[1:]
total_denominator2_list = total_denominator2_list[1:]
shared_nominator1_sum = np.sum(np.array(total_nominator1_list), axis=0)
shared_nominator2_sum = np.sum(np.array(total_nominator2_list), axis=0)
shared_denominator1_sum = np.sum(np.array(total_denominator1_list), axis=0)
shared_denominator2_sum = np.sum(np.array(total_denominator2_list), axis=0)
shared_value_final = shared_nominator1_sum / shared_denominator1_sum + shared_nominator2_sum / shared_denominator2_sum
sign_list = np.array(shared_value_final >= 0) # Record the judgement.
new_col_index_list = []
iter_size = len(sign_list)
for j in range(iter_size):
if sign_list[j]: # True, or the former value is bigger than the latter.
new_col_index_list.append(col_index_list[j * 2])
else:
new_col_index_list.append(col_index_list[j * 2 + 1])
if len(col_index_list) % 2 != 0: # Odd
new_col_index_list.append(col_index_list[-1])
new_col_index_list = comm.bcast(new_col_index_list, root=2)
col_index_list = new_col_index_list
col_idx = col_index_list[0] # Retrieve out the only col index.
row_idx_dict[k] = col_idx
row_index_list = [i for i in row_idx_dict.keys()]
while len(row_index_list) > 1:
iter_size = len(row_index_list)
if len(row_index_list) % 2 != 0: # Odd
iter_size -= 1
list1 = [gain_left_up[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in range(0, iter_size, 2)]
list2 = [gain_right_down[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list3 = [gain_right_up[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in range(0, iter_size, 2)]
list4 = [gain_left_down[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in range(0, iter_size, 2)]
list5 = [gain_left_up[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list6 = [gain_right_down[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list7 = [gain_right_up[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list8 = [gain_left_down[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
nominator1 = self.SMUL(np.array(list1), np.array(list8), rank) - self.SMUL(np.array(list5), np.array(list4),
rank)
nominator2 = self.SMUL(np.array(list3), np.array(list6), rank) - self.SMUL(np.array(list7), np.array(list2),
rank)
denominator1 = self.SMUL(np.array(list4), np.array(list8), rank)
denominator2 = self.SMUL(np.array(list2), np.array(list6), rank)
total_nominator1_list = comm.gather(nominator1, root=2)
total_nominator2_list = comm.gather(nominator2, root=2)
total_denominator1_list = comm.gather(denominator1, root=2)
total_denominator2_list = comm.gather(denominator2, root=2)
if rank == 2:
total_nominator1_list = total_nominator1_list[1:] # Rip out the nonsense list from rank 0.
total_nominator2_list = total_nominator2_list[1:]
total_denominator1_list = total_denominator1_list[1:]
total_denominator2_list = total_denominator2_list[1:]
shared_nominator1_sum = np.sum(np.array(total_nominator1_list), axis=0)
shared_nominator2_sum = np.sum(np.array(total_nominator2_list), axis=0)
shared_denominator1_sum = np.sum(np.array(total_denominator1_list), axis=0)
shared_denominator2_sum = np.sum(np.array(total_denominator2_list), axis=0)
shared_value_final = shared_nominator1_sum / shared_denominator1_sum + shared_nominator2_sum / shared_denominator2_sum
sign_list = np.array(shared_value_final >= 0) # Record the judgement.
new_row_index_list = []
iter_size = len(sign_list)
for j in range(iter_size):
if sign_list[j]: # True, or the former value is bigger than the latter.
new_row_index_list.append(row_index_list[j * 2])
else:
new_row_index_list.append(row_index_list[j * 2 + 1])
if len(row_index_list) % 2 != 0: # Odd
new_row_index_list.append(row_index_list[-1])
new_row_index_list = comm.bcast(new_row_index_list, root=2)
row_index_list = new_row_index_list
return row_index_list[0], row_idx_dict[row_index_list[0]] # Return feature and split position
# Implement the First-layer mask and optimize the judgment.
def SARGMAX_ver4(self, gain_left_up, gain_left_down, gain_right_up, gain_right_down, rank, depth, legal_featureList):
new_col_index_list = None
new_row_index_list = None
row_idx_dict = {}
row_num = gain_left_up.shape[0]
nominator_sign_list = denominator_sign_list = None
for k in range(row_num):
if depth == 1: # The first layer.
if rank == 1: # The first party who holds labels.
if k not in legal_featureList:
permission = False
else:
permission = True
comm.send(permission, dest=0)
for i in range(2, clientNum + 1):
comm.send(permission, dest=i)
else:
permission = comm.recv(source=1)
if not permission:
continue # Jump to the next feature
col_index_list = [i for i in range(0, len(gain_right_down[0, :]))]
while len(col_index_list) > 1:
iter_size = len(col_index_list)
if iter_size % 2 != 0: # Odd
iter_size -= 1
list1 = [gain_left_up[k, col_index_list[i]] for i in range(0, iter_size, 2)]
list2 = [gain_right_down[k, col_index_list[i]] for i in range(0, iter_size, 2)]
list3 = [gain_right_up[k, col_index_list[i]] for i in range(0, iter_size, 2)]
list4 = [gain_left_down[k, col_index_list[i]] for i in range(0, iter_size, 2)]
list5 = [gain_left_up[k, col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list6 = [gain_right_down[k, col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list7 = [gain_right_up[k, col_index_list[i + 1]] for i in range(0, iter_size, 2)]
list8 = [gain_left_down[k, col_index_list[i + 1]] for i in range(0, iter_size, 2)]
nominator1 = self.SMUL(np.array(list1), np.array(list8), rank) - self.SMUL(np.array(list5),
np.array(list4), rank)
nominator2 = self.SMUL(np.array(list3), np.array(list6), rank) - self.SMUL(np.array(list7),
np.array(list2), rank)
denominator1 = self.SMUL(np.array(list4), np.array(list8), rank)
denominator2 = self.SMUL(np.array(list2), np.array(list6), rank)
total_nominator = self.SMUL(nominator1, denominator2, rank) + self.SMUL(nominator2, denominator1, rank)
total_denominator = self.SMUL(denominator1, denominator2, rank)
total_nominator_list = comm.gather(total_nominator, root=2)
                total_denominator_list = comm.gather(total_denominator, root=1)
if rank == 2:
total_nominator_list = total_nominator_list[1:]
nominator_sign_list = np.sum(np.array(total_nominator_list), axis=0)
nominator_sign_list[nominator_sign_list >= 0] = 1
nominator_sign_list[nominator_sign_list < 0] = -1
comm.send(nominator_sign_list, dest=1)
elif rank == 1:
                    total_denominator_list = total_denominator_list[1:]
denominator_sign_list = np.sum(np.array(total_denominator_list), axis=0)
denominator_sign_list[denominator_sign_list >= 0] = 1
denominator_sign_list[denominator_sign_list < 0] = -1
nominator_sign_list = comm.recv(source=2)
sign_list = denominator_sign_list * nominator_sign_list # Record the judgement.
                    sign_list = sign_list >= 0
new_col_index_list = []
iter_size = len(sign_list)
for j in range(iter_size):
if sign_list[j]: # True, or the former value is bigger than the latter.
new_col_index_list.append(col_index_list[j * 2])
else:
new_col_index_list.append(col_index_list[j * 2 + 1])
if len(col_index_list) % 2 != 0: # Odd
new_col_index_list.append(col_index_list[-1])
new_col_index_list = comm.bcast(new_col_index_list, root=1)
col_index_list = new_col_index_list
col_idx = col_index_list[0] # Retrieve out the only col index.
row_idx_dict[k] = col_idx
row_index_list = [i for i in row_idx_dict.keys()]
nominator_sign_list = denominator_sign_list = None
while len(row_index_list) > 1:
iter_size = len(row_index_list)
if len(row_index_list) % 2 != 0: # Odd
iter_size -= 1
list1 = [gain_left_up[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list2 = [gain_right_down[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list3 = [gain_right_up[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list4 = [gain_left_down[row_index_list[i], row_idx_dict[row_index_list[i]]] for i in
range(0, iter_size, 2)]
list5 = [gain_left_up[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list6 = [gain_right_down[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list7 = [gain_right_up[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
list8 = [gain_left_down[row_index_list[i + 1], row_idx_dict[row_index_list[i + 1]]] for i in
range(0, iter_size, 2)]
nominator1 = self.SMUL(np.array(list1), np.array(list8), rank) - self.SMUL(np.array(list5),
np.array(list4),
rank)
nominator2 = self.SMUL(np.array(list3), np.array(list6), rank) - self.SMUL(np.array(list7),
np.array(list2),
rank)
denominator1 = self.SMUL(np.array(list4), np.array(list8), rank)
denominator2 = self.SMUL(np.array(list2), np.array(list6), rank)
total_nominator = self.SMUL(nominator1, denominator2, rank) + self.SMUL(nominator2, denominator1, rank)
total_denominator = self.SMUL(denominator1, denominator2, rank)
total_nominator_list = comm.gather(total_nominator, root=2)
            total_denominator_list = comm.gather(total_denominator, root=1)
if rank == 2:
total_nominator_list = total_nominator_list[1:]
nominator_sign_list = np.sum(np.array(total_nominator_list), axis=0)
nominator_sign_list[nominator_sign_list >= 0] = 1
nominator_sign_list[nominator_sign_list < 0] = -1
comm.send(nominator_sign_list, dest=1)
elif rank == 1:
                total_denominator_list = total_denominator_list[1:]
denominator_sign_list = np.sum(np.array(total_denominator_list), axis=0)
denominator_sign_list[denominator_sign_list >= 0] = 1
denominator_sign_list[denominator_sign_list < 0] = -1
nominator_sign_list = comm.recv(source=2)
sign_list = denominator_sign_list * nominator_sign_list # Record the judgement.
                sign_list = sign_list >= 0
new_row_index_list = []
iter_size = len(sign_list)
for j in range(iter_size):
if sign_list[j]: # True, or the former value is bigger than the latter.
new_row_index_list.append(row_index_list[j * 2])
else:
new_row_index_list.append(row_index_list[j * 2 + 1])
if len(row_index_list) % 2 != 0: # Odd
new_row_index_list.append(row_index_list[-1])
new_row_index_list = comm.bcast(new_row_index_list, root=1)
row_index_list = new_row_index_list
# if rank == 0:
# print(row_index_list[0], row_idx_dict[row_index_list[0]])
return row_index_list[0], row_idx_dict[row_index_list[0]] # Return feature and split position
# The initial version of judging the best loss reduction's sign, but will recover the value with random factor.
def SSIGN(self, data, rank):
random_num = 0
result_list = None
sign = None
if rank == 1:
            nowTime = datetime.now().strftime("%Y%m%d%H%M%S")  # Use the current timestamp to derive a random masking factor.
uniqueFactor = int(str(nowTime)[-3:])
random_num = np.random.rand(1) * uniqueFactor
random_num = comm.bcast(random_num, root=1)
result_list = comm.gather(random_num * data, root=0)
if rank == 0:
result_sum = np.sum(np.array(result_list[1:]))
if result_sum > 0:
sign = '+'
elif result_sum == 0:
sign = '='
else:
sign = '-'
sign = comm.bcast(sign, root=0)
return sign
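    # Judges the sign of the best split's loss reduction without reconstructing it. Writing the gain as
    # (GL^2/HL + GR^2/HR - G^2/H) / 2 - gamma and clearing the (positive) denominators yields the
    # `nominator`/`denominator` pair below; party 1 recovers the sign of the former, party 2 the sign of the
    # latter, and the product of the two signs is the sign of the gain. Callers only act on a '+' result.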
def SSIGN_ver2(self, gain_left_up, gain_left_down, gain_right_up, gain_right_down, cgain_up, cgain_down, gamma, rank):
sign = None
nominator = self.SMUL(self.SMUL(gain_left_up, gain_right_down, rank) + self.SMUL(gain_left_down, gain_right_up, rank) - self.SMUL(gain_left_down, gain_right_down, rank) * gamma * 2, cgain_down, rank)\
- self.SMUL(self.SMUL(gain_left_down, gain_right_down, rank), cgain_up, rank)
denominator = self.SMUL(self.SMUL(gain_left_down, gain_right_down, rank), cgain_down, rank) * 2
        if rank * rank > 1:  # Every rank except 0 and 1.
comm.send(nominator, dest=1)
elif rank == 1:
nominator_list = []
nominator_list.append(nominator)
for i in range(2, clientNum + 1):
nominator_list.append(comm.recv(source=i))
# nominator += comm.recv(source=i)
nominator = np.sum(nominator_list)
if nominator > 0:
sign = 1
elif nominator == 0:
sign = 0
else:
sign = -1
if rank != 0 and rank != 2:
comm.send(denominator, dest=2)
elif rank == 2:
denominator_list = []
denominator_list.append(denominator)
for i in range(1, clientNum + 1):
if i == 2:
pass
else:
denominator_list.append(comm.recv(source=i))
# denominator += comm.recv(source=i)
denominator = np.sum(denominator_list)
if denominator > 0:
sign = 1
elif denominator == 0:
sign = 0
else:
sign = -1
if rank == 2:
comm.send(sign, dest=1)
elif rank == 1:
sign *= comm.recv(source=2) # Judge the final sign.
if sign == 1:
sign = '+'
sign = comm.bcast(sign, root=1)
return sign
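    # Approximates w* = -b / (2a), the minimiser of f(w) = a*w^2 + b*w, by plain gradient descent so that no
    # secure division is needed; the tree builder calls it with a = (H + lambda) / 2 and b = G, which makes w*
    # the usual leaf weight -G / (H + lambda). The step size 1/(2a) and the iteration count are derived at
    # party 1 from a noise-masked copy of `a` and then broadcast to everyone.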
def S_GD(self, a, b, rank, lamb):
temp_a = 0
shared_step = 0
coef = 2
m = coef * lamb
iter = 0
if rank != 0:
temp_a = a.copy() + np.random.uniform(0.1*m, m) * 0.5
        if rank != 0 and rank != 1:  # Rank 1 only receives from ranks 2..clientNum below, so rank 0 must not send.
comm.send(temp_a, dest=1)
# if rank == 1:
# for i in range(2, clientNum + 1):
# temp_a += comm.recv(source=i)
# shared_step = np.array(1 / (2 * temp_a)).reshape(-1, 1)
# if temp_a >= 2 * clientNum * m:
# max_step = math.log(1e-14, math.e)
# worst_case = math.log(0.5, math.e)
# iter = max_step / worst_case
# z = temp_a / (clientNum * m)
# iter *= worst_case / math.log(1 / z, math.e)
# iter = int(np.ceil(iter))
# else:
# max_step = math.log(1e-14, math.e)
# z = temp_a / (clientNum * m)
# worst_case = math.log(1 - (z * coef - 1) / (z * coef))
# worst_case_iter = int(np.ceil(max_step / worst_case))
# if z <= 1:
# iter = worst_case_iter
# else:
# step1 = worst_case_iter
# step2 = int(np.ceil(max_step / math.log(1 / z, math.e)))
# iter = min(step1, step2)
if rank == 1:
temp_a_list = []
temp_a_list.append(temp_a)
for i in range(2, clientNum + 1):
temp_a_list.append(comm.recv(source=i))
temp_a = np.sum(temp_a_list)
            temp_a *= 2  # Each client transmits a/2, so restore the original value first.
shared_step = np.array(1 / temp_a).reshape(-1, 1)
if temp_a <= clientNum * m:
max_step = math.log(1e-14, math.e)
z = temp_a / (clientNum * m)
iter = max_step / math.log((z * coef - 1) / (z * coef), math.e)
iter = int(np.ceil(iter))
else:
max_step = math.log(1e-14, math.e)
z = temp_a / (clientNum * m)
iter1 = max_step / math.log((z * coef - 1) / (z * coef), math.e)
iter2 = max_step / math.log(1 / z, math.e)
iter1 = int(np.ceil(iter1))
iter2 = int(np.ceil(iter2))
iter = min(iter1, iter2)
# print('*' * 20, iter)
eta = comm.bcast(shared_step, root=1)
iter = comm.bcast(iter, root=1)
w = np.array([[0]])
for j in range(iter):
wi = w - eta * (2 * self.SMUL(a, w, rank) + b)
w = wi
return w
| 39,914 | 54.36061 | 208 | py |
MP-FedXGB | MP-FedXGB-main/data_adult_process.py | import pandas as pd
import numpy as np
training_data = './adult.data'
columns = ['Age','Workclass','fnlwgt','Education','EdNum','MaritalStatus',
'Occupation','Relationship','Race','Sex','CapitalGain',
'CapitalLoss','HoursPerWeek','Country','Income']
income = pd.read_csv(training_data, names=columns)
income.dropna(inplace=True)
income['Income'].replace(' <=50K', 0,inplace=True)
income['Income'].replace(' >50K', 1,inplace=True)
y = income['Income']
temp = income.iloc[:, :-1]
# Convert the categorical text columns to numeric dummy variables for fitting the model
income_=pd.get_dummies(temp,columns=['Relationship','Sex','MaritalStatus','Workclass',
'Education','Country','Occupation','Race'])
income = np.concatenate([y.values.reshape(-1, 1), income_.values,], axis=1)
print(income.shape)
np.save('adult.npy', income) | 799 | 39 | 86 | py |
MP-FedXGB | MP-FedXGB-main/Tree.py | import numpy as np
import pandas as pd
from mpi4py import MPI
from datetime import *
import math
import time
from SSCalculation import *
from VerticalXGBoost import *
np.random.seed(10)
clientNum = 4
comm = MPI.COMM_WORLD
class Tree:
def __init__(self, value=None, leftBranch=None, rightBranch=None, col=-1, result=None, isDummy=False):
self.value = value
self.leftBranch = leftBranch
self.rightBranch = rightBranch
self.col = col
self.result = result
self.isDummy = isDummy
class VerticalXGBoostTree:
def __init__(self, rank, lossfunc, splitclass, _lambda, _gamma, _epsilon, _maxdepth, clientNum):
self.featureList = []
self.featureIdxMapping = {}
self._maxdepth = _maxdepth
self.rank = rank
self.loss = lossfunc
self.split = splitclass
self._lambda = _lambda / clientNum
self._gamma = _gamma
self._epsilon = _epsilon
def setMapping(self):
rand = np.random.permutation(self.data.shape[1]) # Get column number
if self.rank == 0:
return
        if len(self.featureList) == 0:
            self.featureIdxMapping = {}  # This party holds no features, so there is nothing to map.
else:
self.featureIdxMapping = {self.featureList[i]:rand[i] for i in range(len(self.featureList))}
def _split(self, y_and_pred):
y, y_pred = y_and_pred[:, 0].reshape(-1, 1), y_and_pred[:, 1].reshape(-1, 1)
return y, y_pred
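    # Aggregates the secret-shared gradients and hessians into per-feature, per-bucket sums: the party that owns
    # feature j builds a 0/1 bucket-membership matrix from its quantile boundaries and secret-shares it, and every
    # party then multiplies its share with its shares of G and H, so the bucket sums themselves stay shared.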
def AggBucket(self, shared_G, shared_H):
bg_Matrix = np.zeros((self.featureNum, self.maxSplitNum))
bh_Matrix = np.zeros((self.featureNum, self.maxSplitNum))
for j in range(self.featureNum):
indexMatrix = np.zeros((self.maxSplitNum, self.data.shape[0]))
indexMatrixArray = None
currentRank = None
if self.rank != 0:
if j in self.featureList:
# print('Rank ' + str(self.rank) + ' I have: ', j)
mapped_idx = self.featureIdxMapping[j]
splitNum = len(self.quantile[mapped_idx])
splitList = self.quantile[mapped_idx]
for k in range(splitNum):
left = splitList[k][0]
right = splitList[k][1]
indexMatrix[k, :] = ((self.data[:, self.featureIdxMapping[j]] <= right) & (
self.data[:, self.featureIdxMapping[j]] > left)) + 0 # Type conversion
indexMatrixArray = self.split.SSSplit(indexMatrix, clientNum)
temp = np.zeros_like(indexMatrixArray[0])
temp = np.expand_dims(temp, axis=0)
indexMatrixArray = np.concatenate([temp, indexMatrixArray], axis=0)
comm.send(self.rank, dest=1)
if self.rank == 1:
currentRank = comm.recv()
currentRank = comm.bcast(currentRank, root=1)
indexMatrix = comm.scatter(indexMatrixArray, root=currentRank)
bg_Matrix[j, :] = np.sum(self.split.SMUL(indexMatrix, np.tile(shared_G.copy(), (1, self.maxSplitNum)).T, self.rank), axis=1).T
bh_Matrix[j, :] = np.sum(self.split.SMUL(indexMatrix, np.tile(shared_H.copy(), (1, self.maxSplitNum)).T, self.rank), axis=1).T
return bg_Matrix, bh_Matrix
# Implemented SARGMAX, but didn't rip out division.
def buildTree(self, shared_G, shared_H, shared_S, depth=1):
shared_gsum = np.sum(shared_G).reshape(-1, 1)
shared_hsum = np.sum(shared_H).reshape(-1, 1)
if depth > self._maxdepth:
a = -shared_gsum
a = a.reshape(-1, 1)
b = shared_hsum
b = b.reshape(-1, 1) + self._lambda
value = self.split.SDIV(a, b, self.rank)
return Tree(result=value)
currentRank = None
cgain = self.split.SDIV(self.split.SMUL(shared_gsum, shared_gsum, self.rank), shared_hsum + self._lambda, self.rank)
BG, BH = self.AggBucket(shared_G, shared_H)
shared_gain = np.zeros((self.featureNum, self.maxSplitNum))
shared_sl = np.ones((self.data.shape[0], 1))
shared_sr = np.ones((self.data.shape[0], 1))
shared_gsum_L = np.array([0.0]).reshape(-1, 1)
shared_hsum_L = np.array([0.0]).reshape(-1, 1)
start = None
if self.rank == 1:
start = datetime.now()
for j in range(self.featureNum):
if self.rank != 0:
if j in self.featureList:
gsum_L, hsum_L = 0, 0
gsum_L_array = self.split.SSSplit(np.array([gsum_L]).reshape(-1, 1), clientNum)
hsum_L_array = self.split.SSSplit(np.array([hsum_L]).reshape(-1, 1), clientNum)
temp = np.zeros_like(gsum_L_array[0])
temp = np.expand_dims(temp, axis=0)
shared_gsum_L = np.concatenate([temp.copy(), gsum_L_array], axis=0) # Add zero matrix to rank 0.
shared_hsum_L = np.concatenate([temp.copy(), hsum_L_array], axis=0) # Add zero matrix to rank 0.
comm.send(self.rank, dest=1)
if self.rank == 1:
currentRank = comm.recv()
currentRank = comm.bcast(currentRank, root=1)
shared_gsum_L = comm.scatter(shared_gsum_L, root=currentRank)
shared_hsum_L = comm.scatter(shared_hsum_L, root=currentRank)
for k in range(self.maxSplitNum):
shared_gsum_L += BG[j, k]
shared_hsum_L += BH[j, k]
shared_gsum_R = shared_gsum - shared_gsum_L
shared_hsum_R = shared_hsum - shared_hsum_L
gain_left = self.split.SDIV(self.split.SMUL(shared_gsum_L, shared_gsum_L, self.rank),
shared_hsum_L + self._lambda, self.rank)
gain_right = self.split.SDIV(self.split.SMUL(shared_gsum_R, shared_gsum_R, self.rank),
shared_hsum_R + self._lambda, self.rank)
shared_gain[j, k] = gain_left + gain_right - cgain
# Combine all the gain from clients, and find the max gain at client 1 (the party who holds label).
shared_gain /= 2
shared_gain -= self._gamma / clientNum
j_best, k_best = self.split.SARGMAX(shared_gain, self.rank)
if self.rank == 1:
print(datetime.now() - start)
gain_sign = self.split.SSIGN(shared_gain[j_best, k_best], self.rank)
if gain_sign == '+':
if self.rank != 0: # Avoid entering calculator node.
if j_best in self.featureList:
sl = np.ones((self.data.shape[0], 1))
idx = self.data[:, self.featureIdxMapping[j_best]] > self.quantile[self.featureIdxMapping[j_best]][k_best][1]
sl[idx] = 0
sr = 1 - sl
sl_array = self.split.SSSplit(sl, clientNum)
sr_array = self.split.SSSplit(sr, clientNum)
temp = np.zeros_like(sl_array[0])
temp = np.expand_dims(temp, axis=0)
shared_sl = np.concatenate([temp, sl_array], axis=0) # Add zero matrix to rank 0.
shared_sr = np.concatenate([temp, sr_array], axis=0) # Add zero matrix to rank 0.
comm.send(self.rank, dest=1)
if self.rank == 1:
currentRank = comm.recv()
currentRank = comm.bcast(currentRank, root=1)
shared_sl = comm.scatter(shared_sl, root=currentRank)
shared_sr = comm.scatter(shared_sr, root=currentRank)
shared_sl = self.split.SMUL(shared_S, shared_sl, self.rank)
shared_sr = self.split.SMUL(shared_S, shared_sr, self.rank)
shared_gl = self.split.SMUL(shared_sl, shared_G, self.rank)
shared_gr = self.split.SMUL(shared_sr, shared_G, self.rank)
shared_hl = self.split.SMUL(shared_sl, shared_H, self.rank)
shared_hr = self.split.SMUL(shared_sr, shared_H, self.rank)
# print('In build tree, into left', self.rank)
leftBranch = self.buildTree(shared_gl, shared_hl, shared_sl, depth + 1)
# print('In build tree, out of left', self.rank)
rightBranch = self.buildTree(shared_gr, shared_hr, shared_sr, depth + 1)
# print('In build tree, out of right', self.rank)
if self.rank != 0:
if j_best in self.featureList:
return Tree(value=self.quantile[self.featureIdxMapping[j_best]][k_best][1], leftBranch=leftBranch,
rightBranch=rightBranch, col=j_best, isDummy=False)
else:
return Tree(leftBranch=leftBranch, rightBranch=rightBranch, isDummy=True) # Return a dummy node
else:
return
else:
a = -shared_gsum
a = a.reshape(-1, 1)
b = shared_hsum
b = b.reshape(-1, 1) + self._lambda
value = self.split.SDIV(a, b, self.rank)
return Tree(result=value)
    # Implemented both SARGMAX and ripped out the division in LSplit, but calculates the leaf weight with SS division.
def buildTree_ver2(self, shared_G, shared_H, shared_S, depth=1):
shared_gsum = np.sum(shared_G).reshape(-1, 1)
shared_hsum = np.sum(shared_H).reshape(-1, 1)
if depth > self._maxdepth:
a = -shared_gsum
a = a.reshape(-1, 1)
b = shared_hsum
b = b.reshape(-1, 1) + self._lambda
value = self.split.SDIV(a, b, self.rank)
return Tree(result=value)
currentRank = None
cgain_up = self.split.SMUL(shared_gsum, shared_gsum, self.rank)
cgain_down = shared_hsum + self._lambda
gain_left_up, gain_left_down, gain_right_up, gain_right_down = np.zeros((self.featureNum, self.maxSplitNum)), np.zeros((self.featureNum, self.maxSplitNum)), np.zeros((self.featureNum, self.maxSplitNum)), np.zeros((self.featureNum, self.maxSplitNum))
BG, BH = self.AggBucket(shared_G, shared_H)
shared_sl = np.ones((self.data.shape[0], 1))
shared_sr = np.ones((self.data.shape[0], 1))
for j in range(self.featureNum):
shared_gsum_L = np.array([0.0]).reshape(-1, 1)
shared_hsum_L = np.array([0.0]).reshape(-1, 1)
for k in range(self.maxSplitNum):
shared_gsum_L += BG[j, k]
shared_hsum_L += BH[j, k]
shared_gsum_R = shared_gsum - shared_gsum_L
shared_hsum_R = shared_hsum - shared_hsum_L
gain_left_up[j, k] = self.split.SMUL(shared_gsum_L, shared_gsum_L, self.rank)
gain_left_down[j, k] = shared_hsum_L + self._lambda
gain_right_up[j, k] = self.split.SMUL(shared_gsum_R, shared_gsum_R, self.rank)
gain_right_down[j, k] = shared_hsum_R + self._lambda
# Combine all the gain from clients, and find the max gain at client 1 (the party who holds label).
j_best, k_best = self.split.SARGMAX_ver2(gain_left_up, gain_left_down, gain_right_up, gain_right_down, self.rank)
gain_sign = self.split.SSIGN_ver2(gain_left_up[j_best, k_best], gain_left_down[j_best, k_best], gain_right_up[j_best, k_best], gain_right_down[j_best, k_best], cgain_up, cgain_down, self._gamma, self.rank)
if gain_sign == '+':
if self.rank != 0: # Avoid entering calculator node.
if j_best in self.featureList:
sl = np.ones((self.data.shape[0], 1))
idx = self.data[:, self.featureIdxMapping[j_best]] > \
self.quantile[self.featureIdxMapping[j_best]][k_best][1]
sl[idx] = 0
sr = 1 - sl
sl_array = self.split.SSSplit(sl, clientNum)
sr_array = self.split.SSSplit(sr, clientNum)
temp = np.zeros_like(sl_array[0])
temp = np.expand_dims(temp, axis=0)
shared_sl = np.concatenate([temp, sl_array], axis=0) # Add zero matrix to rank 0.
shared_sr = np.concatenate([temp, sr_array], axis=0) # Add zero matrix to rank 0.
comm.send(self.rank, dest=1)
if self.rank == 1:
currentRank = comm.recv()
currentRank = comm.bcast(currentRank, root=1)
shared_sl = comm.scatter(shared_sl, root=currentRank)
shared_sr = comm.scatter(shared_sr, root=currentRank)
shared_sl = self.split.SMUL(shared_S, shared_sl, self.rank)
shared_sr = self.split.SMUL(shared_S, shared_sr, self.rank)
shared_gl = self.split.SMUL(shared_sl, shared_G, self.rank)
shared_gr = self.split.SMUL(shared_sr, shared_G, self.rank)
shared_hl = self.split.SMUL(shared_sl, shared_H, self.rank)
shared_hr = self.split.SMUL(shared_sr, shared_H, self.rank)
leftBranch = self.buildTree_ver2(shared_gl, shared_hl, shared_sl, depth + 1)
rightBranch = self.buildTree_ver2(shared_gr, shared_hr, shared_sr, depth + 1)
if self.rank != 0:
if j_best in self.featureList:
# print(depth, self.rank)
return Tree(value=self.quantile[self.featureIdxMapping[j_best]][k_best][1],
leftBranch=leftBranch,
rightBranch=rightBranch, col=j_best, isDummy=False)
else:
# print(depth, 'None', self.rank)
return Tree(leftBranch=leftBranch, rightBranch=rightBranch, isDummy=True) # Return a dummy node
else:
return
else:
a = -shared_gsum
a = a.reshape(-1, 1)
b = shared_hsum
b = b.reshape(-1, 1) + self._lambda
value = self.split.SDIV(a, b, self.rank)
return Tree(result=value)
    # Implement the first-tree trick from Kewei Cheng's paper, but calculate the leaf weight with SS division.
def buildTree_ver3(self, shared_G, shared_H, shared_S, depth=1, tree_num=0):
shared_gsum = np.sum(shared_G).reshape(-1, 1)
shared_hsum = np.sum(shared_H).reshape(-1, 1)
if depth > self._maxdepth:
a = -shared_gsum
a = a.reshape(-1, 1)
b = shared_hsum
b = b.reshape(-1, 1) + self._lambda
value = self.split.SDIV(a, b, self.rank)
return Tree(result=value)
currentRank = None
cgain_up = self.split.SMUL(shared_gsum, shared_gsum, self.rank)
cgain_down = shared_hsum + self._lambda
gain_left_up, gain_left_down, gain_right_up, gain_right_down = np.zeros(
(self.featureNum, self.maxSplitNum)), np.zeros((self.featureNum, self.maxSplitNum)), np.zeros(
(self.featureNum, self.maxSplitNum)), np.zeros((self.featureNum, self.maxSplitNum))
BG, BH = self.AggBucket(shared_G, shared_H)
shared_sl = np.ones((self.data.shape[0], 1))
shared_sr = np.ones((self.data.shape[0], 1))
for j in range(self.featureNum):
if tree_num == 0: # The first tree.
if self.rank == 1: # The first party who holds labels.
if j not in self.featureList:
permission = False
else:
permission = True
comm.send(permission, dest=0)
for i in range(2, clientNum + 1):
comm.send(permission, dest=i)
else:
permission = comm.recv(source=1)
if not permission:
continue # Jump to the next feature
shared_gsum_L = np.array([0.0]).reshape(-1, 1)
shared_hsum_L = np.array([0.0]).reshape(-1, 1)
for k in range(self.maxSplitNum):
shared_gsum_L += BG[j, k]
shared_hsum_L += BH[j, k]
shared_gsum_R = shared_gsum - shared_gsum_L
shared_hsum_R = shared_hsum - shared_hsum_L
gain_left_up[j, k] = self.split.SMUL(shared_gsum_L, shared_gsum_L, self.rank)
gain_left_down[j, k] = shared_hsum_L + self._lambda
gain_right_up[j, k] = self.split.SMUL(shared_gsum_R, shared_gsum_R, self.rank)
gain_right_down[j, k] = shared_hsum_R + self._lambda
# Combine all the gain from clients, and find the max gain at client 1 (the party who holds label).
j_best, k_best = self.split.SARGMAX_ver3(gain_left_up, gain_left_down, gain_right_up, gain_right_down,
self.rank, tree_num, self.featureList)
gain_sign = self.split.SSIGN_ver2(gain_left_up[j_best, k_best], gain_left_down[j_best, k_best],
gain_right_up[j_best, k_best], gain_right_down[j_best, k_best],
cgain_up, cgain_down, self._gamma, self.rank)
if gain_sign == '+':
if self.rank != 0: # Avoid entering calculator node.
if j_best in self.featureList:
sl = np.ones((self.data.shape[0], 1))
idx = self.data[:, self.featureIdxMapping[j_best]] > \
self.quantile[self.featureIdxMapping[j_best]][k_best][1]
sl[idx] = 0
sr = 1 - sl
sl_array = self.split.SSSplit(sl, clientNum)
sr_array = self.split.SSSplit(sr, clientNum)
temp = np.zeros_like(sl_array[0])
temp = np.expand_dims(temp, axis=0)
shared_sl = np.concatenate([temp, sl_array], axis=0) # Add zero matrix to rank 0.
shared_sr = np.concatenate([temp, sr_array], axis=0) # Add zero matrix to rank 0.
comm.send(self.rank, dest=0)
if self.rank == 0:
currentRank = comm.recv()
currentRank = comm.bcast(currentRank, root=0)
shared_sl = comm.scatter(shared_sl, root=currentRank)
shared_sr = comm.scatter(shared_sr, root=currentRank)
shared_sl = self.split.SMUL(shared_S, shared_sl, self.rank)
shared_sr = self.split.SMUL(shared_S, shared_sr, self.rank)
shared_gl = self.split.SMUL(shared_sl, shared_G, self.rank)
shared_gr = self.split.SMUL(shared_sr, shared_G, self.rank)
shared_hl = self.split.SMUL(shared_sl, shared_H, self.rank)
shared_hr = self.split.SMUL(shared_sr, shared_H, self.rank)
leftBranch = self.buildTree_ver3(shared_gl, shared_hl, shared_sl, depth + 1, tree_num)
rightBranch = self.buildTree_ver3(shared_gr, shared_hr, shared_sr, depth + 1, tree_num)
if self.rank != 0:
if j_best in self.featureList:
return Tree(value=self.quantile[self.featureIdxMapping[j_best]][k_best][1],
leftBranch=leftBranch,
rightBranch=rightBranch, col=j_best, isDummy=False)
else:
return Tree(leftBranch=leftBranch, rightBranch=rightBranch,
isDummy=True) # Return a dummy node
else:
return
else:
a = -shared_gsum
a = a.reshape(-1, 1)
b = shared_hsum
b = b.reshape(-1, 1) + self._lambda
value = self.split.SDIV(a, b, self.rank)
return Tree(result=value)
# Implement the first-layer mask and gradient descent.
def buildTree_ver4(self, shared_G, shared_H, shared_S, depth=1):
shared_gsum = np.sum(shared_G).reshape(-1, 1)
shared_hsum = np.sum(shared_H).reshape(-1, 1)
iter = 10
if depth > self._maxdepth:
a = shared_hsum
a = a.reshape(-1, 1) + self._lambda
a *= 0.5
b = shared_gsum
b = b.reshape(-1, 1)
value = self.split.S_GD(a, b, self.rank, lamb=self._lambda)
return Tree(result=value)
currentRank = None
cgain_up = self.split.SMUL(shared_gsum, shared_gsum, self.rank)
cgain_down = shared_hsum + self._lambda
gain_left_up, gain_left_down, gain_right_up, gain_right_down = np.zeros(
(self.featureNum, self.maxSplitNum)), np.zeros((self.featureNum, self.maxSplitNum)), np.zeros(
(self.featureNum, self.maxSplitNum)), np.zeros((self.featureNum, self.maxSplitNum))
BG, BH = self.AggBucket(shared_G, shared_H)
shared_sl = np.ones((self.data.shape[0], 1))
shared_sr = np.ones((self.data.shape[0], 1))
start = None
if self.rank == 1:
start = datetime.now()
for j in range(self.featureNum):
shared_gsum_L = np.array([np.sum(BG[j, :k+1]) for k in range(self.maxSplitNum)])
shared_hsum_L = np.array([np.sum(BH[j, :k+1]) for k in range(self.maxSplitNum)])
shared_gsum_R = (shared_gsum - shared_gsum_L).reshape(-1,)
shared_hsum_R = (shared_hsum - shared_hsum_L).reshape(-1,)
gain_left_up[j, :] = self.split.SMUL(shared_gsum_L, shared_gsum_L, self.rank).T
gain_left_down[j, :] = shared_hsum_L + self._lambda
gain_right_up[j, :] = self.split.SMUL(shared_gsum_R, shared_gsum_R, self.rank).T
gain_right_down[j, :] = shared_hsum_R + self._lambda
# Original
j_best, k_best = self.split.SARGMAX_ver2(gain_left_up, gain_left_down, gain_right_up, gain_right_down,
self.rank)
if self.rank == 1:
print(datetime.now() - start)
gain_sign = self.split.SSIGN_ver2(gain_left_up[j_best, k_best], gain_left_down[j_best, k_best],
gain_right_up[j_best, k_best], gain_right_down[j_best, k_best],
cgain_up, cgain_down, self._gamma, self.rank)
        if gain_sign == '+' or depth == 1:  # Layer 1 is split by the first party, so we always accept that split.
if self.rank != 0: # Avoid entering calculator node.
if j_best in self.featureList:
sl = np.ones((self.data.shape[0], 1))
idx = self.data[:, self.featureIdxMapping[j_best]] > \
self.quantile[self.featureIdxMapping[j_best]][k_best][1]
sl[idx] = 0
sr = 1 - sl
sl_array = self.split.SSSplit(sl, clientNum)
sr_array = self.split.SSSplit(sr, clientNum)
temp = np.zeros_like(sl_array[0])
temp = np.expand_dims(temp, axis=0)
shared_sl = np.concatenate([temp, sl_array], axis=0) # Add zero matrix to rank 0.
shared_sr = np.concatenate([temp, sr_array], axis=0) # Add zero matrix to rank 0.
comm.send(self.rank, dest=1)
if self.rank == 1:
currentRank = comm.recv()
currentRank = comm.bcast(currentRank, root=1)
shared_sl = comm.scatter(shared_sl, root=currentRank)
shared_sr = comm.scatter(shared_sr, root=currentRank)
shared_sl = self.split.SMUL(shared_S, shared_sl, self.rank)
shared_sr = self.split.SMUL(shared_S, shared_sr, self.rank)
shared_gl = self.split.SMUL(shared_sl, shared_G, self.rank)
shared_gr = self.split.SMUL(shared_sr, shared_G, self.rank)
shared_hl = self.split.SMUL(shared_sl, shared_H, self.rank)
shared_hr = self.split.SMUL(shared_sr, shared_H, self.rank)
leftBranch = self.buildTree_ver4(shared_gl, shared_hl, shared_sl, depth + 1)
rightBranch = self.buildTree_ver4(shared_gr, shared_hr, shared_sr, depth + 1)
if self.rank != 0:
if j_best in self.featureList:
return Tree(value=self.quantile[self.featureIdxMapping[j_best]][k_best][1],
leftBranch=leftBranch,
rightBranch=rightBranch, col=j_best, isDummy=False)
else:
return Tree(leftBranch=leftBranch, rightBranch=rightBranch,
isDummy=True) # Return a dummy node
else:
return
else:
a = shared_hsum
a = a.reshape(-1, 1) + self._lambda
a *= 0.5
b = shared_gsum
b = b.reshape(-1, 1)
value = self.split.S_GD(a, b, self.rank, lamb=self._lambda)
return Tree(result=value)
# This function contains no communication operation.
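    # For a single sample it walks the tree and returns, per leaf, this party's private 0/1 indicator of whether
    # the sample could reach that leaf (both children inherit `belongs` at a node whose split this party does not
    # own), together with the leaf values; `classify` then multiplies the indicators of all parties with SMUL so
    # that exactly one leaf keeps an indicator of 1.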
def getInfo(self, tree, data, belongs=1):
if self.rank == 0:
return
        if tree.result is not None:
return np.array([belongs]).reshape(-1, 1), np.array([tree.result]).reshape(-1, 1)
else:
left_belongs = 0
right_belongs = 0
if tree.isDummy:
if belongs == 1:
left_belongs = 1
                    right_belongs = 1
left_idx, left_result = self.getInfo(tree.leftBranch, data, left_belongs)
right_idx, right_result = self.getInfo(tree.rightBranch, data, right_belongs)
idx = np.concatenate((left_idx, right_idx), axis=0)
result = np.concatenate((left_result, right_result), axis=0)
return idx, result
v = data[0, self.featureIdxMapping[tree.col]]
if belongs == 1: # In selected branch
if v > tree.value:
right_belongs = 1
else:
left_belongs = 1
left_idx, left_result = self.getInfo(tree.leftBranch, data, left_belongs)
right_idx, right_result = self.getInfo(tree.rightBranch, data, right_belongs)
idx = np.concatenate((left_idx, right_idx), axis=0)
result = np.concatenate((left_result, right_result), axis=0)
return idx, result
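    # Fits one tree: party 2 hands every party a disjoint slice of a random permutation of the global feature
    # indices (hiding which party owns which feature), party 1 computes the gradients and hessians from its labels
    # and secret-shares them, and all parties then run the selected buildTree variant on those shares.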
def fit(self, y_and_pred, tree_num):
size = None
        size_list = comm.gather(self.data.shape[1], root=2)  # Gather every party's feature count.
if self.rank == 2:
size = sum(size_list[1:])
self.featureNum = comm.bcast(size, root=2) # Broadcast how many feature there are in total.
if self.rank == 2:
random_list = np.random.permutation(self.featureNum)
start = 0
for i in range(1, clientNum + 1):
rand = random_list[start:start + size_list[i]]
if i == 2:
self.featureList = rand
else:
comm.send(rand, dest=i) # Send random_list to all the client, mask their feature index.
start += size_list[i]
elif self.rank != 0:
self.featureList = comm.recv(source=2)
self.setMapping()
shared_G, shared_H, shared_S = None, None, None
if self.rank == 1: # Calculate gradients on the node who have labels.
y, y_pred = self._split(y_and_pred)
G = self.loss.gradient(y, y_pred)
H = self.loss.hess(y, y_pred)
S = np.ones_like(y)
shared_G = self.split.SSSplit(G, clientNum) # Split G/H/indicator.
shared_H = self.split.SSSplit(H, clientNum)
shared_S = self.split.SSSplit(S, clientNum)
temp = np.zeros_like(shared_G[0])
temp = np.expand_dims(temp, axis=0)
shared_G = np.concatenate([temp.copy(), shared_G], axis=0)
shared_H = np.concatenate([temp.copy(), shared_H], axis=0)
shared_S = np.concatenate([temp.copy(), shared_S], axis=0)
shared_G = comm.scatter(shared_G, root=1)
shared_H = comm.scatter(shared_H, root=1)
shared_S = comm.scatter(shared_S, root=1)
self.Tree = self.buildTree_ver4(shared_G, shared_H, shared_S)
# self.Tree = self.buildTree_ver3(shared_G, shared_H, shared_S, depth=1, tree_num=tree_num)
# self.Tree = self.buildTree_ver2(shared_G, shared_H, shared_S)
# self.Tree = self.buildTree(shared_G, shared_H, shared_S)
def classify(self, tree, data):
idx_list = []
shared_idx = None
final_result = 0
if self.rank != 0:
idx, result = self.getInfo(tree, data)
for i in range(1, clientNum + 1):
if self.rank == i:
shared_idx = self.split.SSSplit(idx, clientNum)
temp = np.zeros_like(shared_idx[0])
temp = np.expand_dims(temp, axis=0)
shared_idx = np.concatenate([temp, shared_idx], axis=0)
shared_idx = comm.scatter(shared_idx, root=i)
idx_list.append(shared_idx)
final_idx = idx_list[0]
for i in range(1, clientNum):
final_idx = self.split.SMUL(final_idx, idx_list[i], self.rank)
if self.rank == 0:
result = np.zeros_like(final_idx)
temp_result = np.sum(self.split.SMUL(final_idx, result, self.rank))
temp_result = comm.gather(temp_result, root=1)
if self.rank == 1:
final_result = np.sum(temp_result[1:])
return final_result
def predict(self, data): # Encapsulated for many data
data_num = data.shape[0]
result = []
for i in range(data_num):
temp_result = self.classify(self.Tree, data[i].reshape(1, -1))
if self.rank == 1:
result.append(temp_result)
else:
pass
result = np.array(result).reshape((-1, 1))
return result
| 29,996 | 50.541237 | 257 | py |
fitclip | fitclip-main/gunicorn.conf.py | bind = "0.0.0.0:5000"
reload = True
timeout = 3600
wsgi_app = "demo.app"
| 73 | 13.8 | 21 | py |
fitclip | fitclip-main/util/structured_group_utils.py | """Useful utils when using `DataModuleStructuredGroup`."""
from typing import Any, Mapping, Sequence, Tuple
import torch
from aligner.video_text_module import TYPE_INPUT
from util.tensor_utils import pad
TYPE_MULTI_INPUT = Mapping[str, TYPE_INPUT]
# It's like `default_collate` but instead of a sequence we have a mapping, and we do `cat` instead of `stack`.
# It makes sense to be similar because we're merging multiple batches together.
# Note that using collate from the dataloader side. It's simpler, and more GPU-memory efficient.
def _cat_collate(batch: Sequence[Any]) -> Any:
elem = batch[0]
    elem_type = type(elem)
if isinstance(elem, torch.Tensor):
return torch.cat(batch) # noqa
elif isinstance(elem, Mapping):
return {k: _cat_collate([d[k] for d in batch]) for k in elem}
elif isinstance(elem, (float, int, bytes, str)):
return batch
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(_cat_collate(samples) for samples in zip(*batch))) # noqa
elif isinstance(elem, Sequence):
return [x for d in batch for x in d]
else:
raise TypeError(f"Not sure how to collate type {elem_type}")
def _merge_datasets_batch(batches_by_dataset: TYPE_MULTI_INPUT) -> Tuple[TYPE_INPUT, Sequence[int]]:
lengths = [len(batch["video"]) for batch in batches_by_dataset.values()]
max_text_len = max(batch["text"]["input_ids"].shape[-1] for batch in batches_by_dataset.values())
for batch in batches_by_dataset.values():
batch["text"] = {k: pad(v, min_size=max_text_len, dim=-1) for k, v in batch["text"].items()}
batch = _cat_collate(list(batches_by_dataset.values()))
return batch, lengths
| 1,737 | 40.380952 | 110 | py |
fitclip | fitclip-main/util/viz_utils.py | import numpy as np
import torch
import torchvision
from matplotlib import pyplot as plt
from matplotlib.pyplot import subplots_adjust
from torchvision.transforms.functional import to_pil_image
from aligner.encoder.video_text_encoder import VideoTextEncoder
def visualize_images_tensor(images: torch.Tensor) -> plt.Axes:
"""`images` has shape (N, C, H, W)."""
grid = torchvision.utils.make_grid(images)
fig, ax = plt.subplots()
fig.tight_layout()
subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
ax.autoscale_view("tight")
ax.imshow(np.asarray(to_pil_image(grid)))
ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
return ax
def debug_batch(video: torch.Tensor, text: torch.Tensor, encoder: VideoTextEncoder) -> None:
video, text = video.detach().cpu(), text.detach().cpu()
video = encoder.to_bchw(video)
denormalized_images = encoder.denormalize_video_tensor(video).reshape(-1, *video.shape[2:])
visualize_images_tensor(denormalized_images)
plt.show()
for decoded in encoder.decode_text(text):
print(decoded)
| 1,139 | 29 | 95 | py |
fitclip | fitclip-main/util/tensor_utils.py | from typing import Any, Mapping, Optional, Sequence, TypeVar, Union
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from pytorch_lightning.utilities.apply_func import apply_to_collection
T = TypeVar("T")
def pad(t: torch.Tensor, min_size: int, dim: int = 1, value: Any = 0) -> torch.Tensor:
"""Pads the dim `dim` in `t` with the value `value` so the size is at least `min_size`."""
if dim < 0:
dim += len(t.shape)
if (count := t.shape[dim]) < min_size:
# `pad` keyword arg goes from the last dim to the first one in pairs, where the first value of the pair is
# for left padding and the other one for right padding.
return F.pad(t, pad=(0, 0) * (len(t.shape) - 1 - dim) + (0, min_size - count), value=value)
else:
return t
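# A minimal usage sketch of `pad` (illustrative only, not part of the original module):
#
#   t = torch.ones(2, 3)
#   pad(t, min_size=5, dim=1).shape # torch.Size([2, 5]); the 2 new positions are filled with 0.
#   pad(t, min_size=2, dim=1).shape # torch.Size([2, 3]); unchanged, the dim is already large enough.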
def split_in_collection(data: T, split_size_or_sections: Union[int, Sequence[int]]) -> Sequence[T]:
"""Applies `split` to the inside tensors of the collections and also generates one collection for each of the
returned elements from `split`."""
type_ = type(data)
if isinstance(data, torch.Tensor):
return data.split(split_size_or_sections)
elif isinstance(data, Mapping):
zipped = zip(*(split_in_collection(v, split_size_or_sections) for v in data.values()))
return [type_((k, v) for k, v in zip(data.keys(), z)) for z in zipped]
elif isinstance(data, Sequence):
return [type_(z) for z in zip(*(split_in_collection(e, split_size_or_sections) for e in data))]
else:
raise ValueError(f"Unsupported type for split: {type_}")
def _first_tensor_in_collection(data: Any) -> torch.Tensor:
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, Mapping):
return _first_tensor_in_collection(data.values())
else:
return _first_tensor_in_collection(next(iter(data)))
def all_gather(lightning_module: pl.LightningModule, data: Any, group: Optional[Any] = None,
sync_grads: bool = False, return_world_size_dim: bool = False) -> Any:
"""Gathers a tensor, or multiple tensors inside a collection, so that the output number of dimensions is the same
regardless of the accelerator.
Note this is different from `pl.LightningModule.all_gather`, that for a single GPU it doesn't return a new
dimension but for the parallel settings it does.
"""
first_tensor_old_shape = _first_tensor_in_collection(data).shape
output = lightning_module.all_gather(data, group=group, sync_grads=sync_grads)
if len(first_tensor_new_shape := _first_tensor_in_collection(output).shape) == len(first_tensor_old_shape) + 1:
return output if return_world_size_dim else apply_to_collection(output, torch.Tensor,
lambda t: t.view(-1, *t.shape[2:]))
elif len(first_tensor_new_shape) == len(first_tensor_old_shape):
return apply_to_collection(output, torch.Tensor, torch.Tensor.unsqueeze, 0) if return_world_size_dim else output
else:
raise ValueError(f"Unexpected new shape for the first tensor in the collection: {first_tensor_new_shape} (old "
f"was {first_tensor_old_shape}). "
f"The new shape was expected to have the same number of dimensions or one more.")
| 3,355 | 49.089552 | 120 | py |
fitclip | fitclip-main/util/typing_utils.py | from os import PathLike
from typing import Union
# See https://stackoverflow.com/a/53418245/1165181
TYPE_PATH = Union[PathLike, str, bytes]
| 141 | 22.666667 | 50 | py |
fitclip | fitclip-main/util/checkpoint_utils.py | from typing import MutableMapping
import torch
from cached_path import cached_path
from util.typing_utils import TYPE_PATH
def state_dict_from_checkpoint_path(checkpoint_path: TYPE_PATH, prefix: str = "") -> MutableMapping[str, torch.Tensor]:
prefix += ("" if prefix.endswith(".") or not prefix else ".")
checkpoint = torch.load(cached_path(checkpoint_path))
return {k[len(prefix):]: v for k, v in checkpoint["state_dict"].items() if k.startswith(prefix)}
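# A minimal usage sketch (illustrative only; the checkpoint path, prefix and `model` are made up):
#
#   state_dict = state_dict_from_checkpoint_path("path/to/checkpoint.ckpt", prefix="encoder.model")
#   model.load_state_dict(state_dict)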
| 472 | 35.384615 | 119 | py |
fitclip | fitclip-main/util/video_utils.py | import os
from typing import Any, Callable, Iterable, Iterator, Optional, Sequence
from torchvision.datasets.video_utils import VideoClips
from util.typing_utils import TYPE_PATH
# From https://en.wikipedia.org/wiki/Video_file_format
VIDEO_FILE_EXTENSIONS = (".3g2", ".3gp", ".amv", ".asf", ".avi", ".drc", ".f4a", ".f4b", ".f4p", ".f4v", ".flv",
".gif", ".gifv", ".m2ts", ".m2v", ".m4p", ".m4v", ".mkv", ".mng", ".mov", ".mp2", ".mp4",
".mpe", ".mpeg", ".mpg", ".mpv", ".mts", ".mxf", ".nsv", ".ogg", ".ogv", ".qt", ".rm",
".rmvb", ".roq", ".svi", ".ts", ".viv", ".vob", ".webm", ".wmv", ".yuv")
def get_videos_in_folder(path: TYPE_PATH,
extensions: Optional[Iterable[str]] = VIDEO_FILE_EXTENSIONS) -> Iterator[str]:
extensions = None if extensions is None else tuple(extensions)
for folder, _, filenames in os.walk(path, followlinks=True):
for filename in filenames:
if os.path.isfile(full_path := os.path.join(folder, filename)) \
and (not extensions or filename.lower().endswith(extensions)):
yield full_path
def get_sorted_videos_in_folder(path: TYPE_PATH,
extensions: Optional[Iterable[str]] = VIDEO_FILE_EXTENSIONS,
key: Optional[Callable[[str], Any]] = None, reverse: bool = False) -> Iterator[str]:
"""Returns a sorted version of `get_videos_in_folder`.
Even though this can be simply applied by the caller, the fact that the main use case of `get_videos_in_folder`
is from a video dataset and that its order should be deterministic (but that `get_videos_in_folder` doesn't
guarantee it) makes this function handy and a wake-up call for this issue.
The videos in a PyTorch `Dataset` need to be deterministic e.g. for a distributed setting, when e.g. using
`DistributedSampler` for it to guarantee each data sample is used once and only once between all processes.
"""
return sorted(get_videos_in_folder(path, extensions), key=key, reverse=reverse)
def resample(num_frames: int, original_fps: float, new_fps: float) -> Sequence[int]:
"""Returns essentially the same as `VideoClips._resample_video_idx`. Unlike it, it always checks for the max frames
(the mentioned function doesn't do it when it returns a `slice`)."""
indices = VideoClips._resample_video_idx(num_frames, original_fps, new_fps)
if isinstance(indices, slice) and indices.stop is None:
indices = range(*indices.indices((indices.start or 0) + num_frames * indices.step))
return indices
| 2,659 | 53.285714 | 119 | py |
fitclip | fitclip-main/util/__init__.py | 0 | 0 | 0 | py |
|
fitclip | fitclip-main/util/argparse_with_defaults.py | import argparse
from typing import Any
# Copied from https://github.com/allenai/allennlp/blob/3aafb92/allennlp/commands/__init__.py
class ArgumentParserWithDefaults(argparse.ArgumentParser):
"""Custom argument parser that will display the default value for an argument in the help message. """
_action_defaults_to_ignore = {"help", "store_true", "store_false", "store_const"}
@staticmethod
def _is_empty_default(default: Any) -> bool:
return default is None or (isinstance(default, (str, list, tuple, set)) and not default)
def add_argument(self, *args, **kwargs) -> argparse.Action:
# Add default value to the help message when the default is meaningful.
default = kwargs.get("default")
if kwargs.get("action") not in self._action_defaults_to_ignore and not self._is_empty_default(default):
kwargs["help"] = f"{kwargs.get('help', '')} (default = {default})"
return super().add_argument(*args, **kwargs)
| 981 | 45.761905 | 111 | py |
fitclip | fitclip-main/util/iter_utils.py | import collections
import itertools
from typing import Any, Iterable, Iterator, Literal, Optional, Sequence, Tuple, TypeVar
T = TypeVar("T")
# See https://stackoverflow.com/a/50938015/1165181
def consume(it: Iterator[Any]) -> None:
collections.deque(it, maxlen=0)
# See https://docs.python.org/3/library/itertools.html#itertools-recipes
def pairwise(it: Iterable[T]) -> Iterable[Tuple[T, T]]:
a, b = itertools.tee(it)
next(b, None)
return zip(a, b)
# See https://stackoverflow.com/a/9001089/1165181
def can_be_iterated_more_than_once(it: Iterable[Any]) -> bool:
try:
object.__getattribute__(it, "__iter__")
except AttributeError:
return False
try:
object.__getattribute__(it, "__next__")
except AttributeError:
return True
return False
# See `grouper` in https://docs.python.org/3/library/itertools.html#itertools-recipes.
def batch(iterable: Iterable[T], n: int, *, incomplete: Literal["fill", "ignore"] = "ignore",
fill_value: Optional[Any] = None) -> Iterator[Iterable[T]]:
"""Batches the data into non-overlapping fixed-length batches.
Examples:
grouper("ABCDEFGH", 3) --> ABC DEF
grouper("ABCDEFGH", 3, incomplete="fill", fill_value="x") --> ABC DEF GHx
"""
args = [iter(iterable)] * n
if incomplete == "fill":
return itertools.zip_longest(*args, fillvalue=fill_value)
elif incomplete == "ignore":
return zip(*args)
else:
raise ValueError(f"Expected 'fill' or 'ignore'; got '{incomplete}'")
# See https://stackoverflow.com/a/312464/1165181
def batch_sequence(seq: Sequence[T], n: int) -> Iterator[Sequence[T]]:
"""Yield successive n-sized chunks from `seq`."""
for i in range(0, len(seq), n):
yield seq[i:i + n]
| 1,784 | 30.315789 | 93 | py |
fitclip | fitclip-main/scripts/apply_wise_ft.py | #!/usr/bin/env python
import argparse
import torch
from aligner.encoder.clip_video_text_encoder import load_clip_model
from aligner.wise import wise_state_dict
from util.argparse_with_defaults import ArgumentParserWithDefaults
def parse_args() -> argparse.Namespace:
parser = ArgumentParserWithDefaults("Applies weight-space ensembles for fine-tuning (WiSE-FT) on 2 CLIP "
"checkpoints.",
description="See https://arxiv.org/abs/2109.01903 for more info.")
parser.add_argument("input_path_or_name1", metavar="INPUT_FILE_OR_NAME_1")
parser.add_argument("input_path_or_name2", metavar="INPUT_FILE_OR_NAME_2")
parser.add_argument("output_path", metavar="OUTPUT_FILE")
parser.add_argument("--weight-for-2", type=float, default=0.5)
return parser.parse_args()
def main() -> None:
args = parse_args()
model1 = load_clip_model(args.input_path_or_name1)
model2 = load_clip_model(args.input_path_or_name2)
    # We don't use the logit scale from CLIP but ours, so we deleted it. Here we need to re-create the variable,
# so it doesn't fail when using the checkpoints.
model1.logit_scale = getattr(model1, "logit_scale", torch.tensor(float("nan")))
model2.logit_scale = getattr(model2, "logit_scale", torch.tensor(float("nan")))
state_dict = wise_state_dict(model1, model2, weight_for_2=args.weight_for_2)
torch.save(state_dict, args.output_path)
if __name__ == "__main__":
main()
| 1,526 | 37.175 | 116 | py |
fitclip | fitclip-main/scripts/csv_diff.py | #!/usr/bin/env python
import argparse
import pandas as pd
from util.argparse_with_defaults import ArgumentParserWithDefaults
def parse_args() -> argparse.Namespace:
parser = ArgumentParserWithDefaults()
parser.add_argument("path1", metavar="FILE1")
parser.add_argument("path2", metavar="FILE2")
return parser.parse_args()
def main() -> None:
args = parse_args()
df1 = pd.read_csv(args.path1)
df2 = pd.read_csv(args.path2)
# From https://stackoverflow.com/a/48647840/1165181
print(pd.concat([df1, df2]).drop_duplicates(keep=False).to_csv(index=False))
if __name__ == "__main__":
main()
| 635 | 21.714286 | 80 | py |
fitclip | fitclip-main/scripts/subcorr.py | #!/usr/bin/env python
import argparse
import sys
from typing import Any, Callable, Iterable, MutableMapping, Optional, Sequence, Union
import PIL.Image
import clip
import decord
import numpy as np
import seaborn as sns
import torch
from clip.model import CLIP
from matplotlib import pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from spacy.tokens import Doc, Span
def get_video_info(path: str) -> MutableMapping[str, Any]:
video_reader = decord.VideoReader(path)
frame_indices = list(range(0, len(video_reader), 10))
frames = [PIL.Image.fromarray(f) for f in video_reader.get_batch(frame_indices).asnumpy()]
thumbnails_frame_indices = video_reader.get_key_indices()
thumbnails = [PIL.Image.fromarray(f) for f in video_reader.get_batch(thumbnails_frame_indices).asnumpy()]
thumbnails = [f.copy() for f in thumbnails]
for thumbnail in thumbnails:
thumbnail.thumbnail((64, 64))
return {
"frames": frames,
"frame_times": video_reader.get_frame_timestamp(frame_indices).mean(axis=-1), # noqa
"thumbnails": thumbnails,
"thumbnail_times": video_reader.get_frame_timestamp(thumbnails_frame_indices).mean(axis=-1), # noqa
}
def encode_visual(images: Iterable[PIL.Image.Image], clip_model: CLIP,
image_preprocessor: Callable[[PIL.Image.Image], torch.Tensor],
device: Optional[Any] = None) -> torch.Tensor:
images = torch.stack([image_preprocessor(image) for image in images])
if device is not None:
images = images.to(device)
with torch.inference_mode():
encoded_images = clip_model.encode_image(images)
return encoded_images / encoded_images.norm(dim=-1, keepdim=True)
def encode_text(text: str, clip_model: CLIP, device: Optional[Any] = None) -> torch.Tensor:
tokenized_texts = clip.tokenize([text])
if device is not None:
tokenized_texts = tokenized_texts.to(device)
with torch.inference_mode():
encoded_texts = clip_model.encode_text(tokenized_texts)
return encoded_texts / encoded_texts.norm(dim=-1, keepdim=True)
def text_probs(encoded_images: torch.Tensor, encoded_texts: torch.Tensor) -> np.ndarray:
with torch.inference_mode():
# clip_model.logit_scale.exp() == 100
return (100 * encoded_images @ encoded_texts.T).softmax(dim=0).squeeze(-1).cpu().numpy() # noqa
def create_figure(times: Sequence[float], probs: Sequence[float], thumbnail_times: Sequence[float],
thumbnails: Iterable[PIL.Image.Image], title: Union[Doc, Span, str]) -> plt.Axes:
# noinspection SpellCheckingInspection
sns.set(rc={"figure.figsize": (1.0 * len(thumbnail_times), 1.5)})
ax = sns.lineplot(x=times, y=probs)
plt.xticks(thumbnail_times)
ax.set_title(title.text if isinstance(title, (Doc, Span)) else title, fontsize=35, y=0.6)
ax.set(xlabel="time", ylabel="probability")
plt.fill_between(times, probs)
if isinstance(title, (Doc, Span)):
start_time = title[0]._.start_time
end_time = title[-1]._.end_time
plt.axvspan(start_time, end_time, alpha=0.5, color="red")
for i, (time, thumbnail) in enumerate(zip(thumbnail_times, thumbnails)):
im = OffsetImage(thumbnail, axes=ax)
ab = AnnotationBbox(im, (time, 0), xybox=(0, -60), frameon=False, boxcoords="offset points", pad=0)
ax.add_artist(ab)
plt.margins(x=0, tight=True)
plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
return ax
def create_figure_for_text(encoded_frames: torch.Tensor, text: Union[Doc, Span, str], clip_model: CLIP,
times: Sequence[float], thumbnail_times: Sequence[float],
thumbnails: Iterable[PIL.Image.Image]) -> plt.Axes:
encoded_texts = encode_text(text.text if isinstance(text, (Doc, Span)) else text, clip_model,
device=encoded_frames.device)
probs = text_probs(encoded_frames, encoded_texts)
return create_figure(times, probs, thumbnail_times, thumbnails, text)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("path", metavar="PATH")
return parser.parse_args()
def main() -> None:
sns.set_theme()
args = parse_args()
device = "cuda" if torch.cuda.is_available() else "cpu"
clip_model, image_preprocessor = clip.load("ViT-B/16", device=device)
# noinspection SpellCheckingInspection
video_info = get_video_info(args.path)
encoded_frames = encode_visual(video_info["frames"], clip_model, image_preprocessor, device=device)
for text in sys.stdin:
if text := text.strip():
create_figure_for_text(encoded_frames, text, clip_model, video_info["frame_times"],
video_info["thumbnail_times"], video_info["thumbnails"])
plt.show()
if __name__ == "__main__":
main()
| 4,981 | 35.101449 | 109 | py |
fitclip | fitclip-main/scripts/sample_csv.py | #!/usr/bin/env python
import argparse
import sys
import pandas as pd
from util.argparse_with_defaults import ArgumentParserWithDefaults
def parse_args() -> argparse.Namespace:
parser = ArgumentParserWithDefaults()
parser.add_argument("path", metavar="FILE", nargs="?", default="-")
parser.add_argument("-s", "--size", type=int, default=10)
args = parser.parse_args()
args.path = sys.stdin if args.path == "-" else args.path
return args
def main() -> None:
args = parse_args()
print(pd.read_csv(args.path).sample(args.size).to_csv(index=False))
if __name__ == "__main__":
main()
| 624 | 21.321429 | 71 | py |
fitclip | fitclip-main/scripts/prepare_trained_clip_checkpoint_for_evaluation.py | #!/usr/bin/env python
import argparse
import torch
from util.checkpoint_utils import state_dict_from_checkpoint_path
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("input_path", metavar="INPUT_FILE")
parser.add_argument("output_path", metavar="OUTPUT_FILE")
parser.add_argument("--prefix", default="encoder.model.")
return parser.parse_args()
def main() -> None:
args = parse_args()
state_dict = state_dict_from_checkpoint_path(args.input_path, prefix=args.prefix)
    # We don't use the logit scale from CLIP but ours, so we deleted it. Here we need to re-create the variable,
# so it doesn't fail when loading this `state_dict`.
state_dict["logit_scale"] = torch.tensor(float("nan"))
torch.save(state_dict, args.output_path)
if __name__ == "__main__":
main()
| 868 | 27.032258 | 116 | py |
fitclip | fitclip-main/scripts/checkpoint_to_state_dict.py | #!/usr/bin/env python
import argparse
import sys
import torch
from util.checkpoint_utils import state_dict_from_checkpoint_path
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("input_path", metavar="INPUT_FILE")
parser.add_argument("--prefix", default="encoder.model.")
return parser.parse_args()
def main() -> None:
args = parse_args()
state_dict = state_dict_from_checkpoint_path(args.input_path, prefix=args.prefix)
torch.save(state_dict, sys.stdout.buffer)
if __name__ == "__main__":
main()
| 582 | 22.32 | 85 | py |
fitclip | fitclip-main/scripts/list_possible_pos.py | #!/usr/bin/env python
import fileinput
from nltk.corpus import wordnet as wn
from nltk.corpus.reader import POS_LIST
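# For each non-empty input line (a word or lemma), print the WordNet POS tags for which it has at least one synset.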
if __name__ == "__main__":
for line in fileinput.input():
if line := line.strip():
print("".join(pos for pos in POS_LIST if wn.synsets(line, pos=pos)))
| 295 | 25.909091 | 80 | py |
fitclip | fitclip-main/scripts/prepare_trained_checkpoint_for_evaluation.py | #!/usr/bin/env python
import argparse
import torch
from cached_path import cached_path
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("input_path", metavar="INPUT_FILE", type=cached_path)
parser.add_argument("output_path", metavar="OUTPUT_FILE")
parser.add_argument("--prefix", default="encoder.model.")
return parser.parse_args()
def main() -> None:
args = parse_args()
checkpoint = torch.load(args.input_path)
prefix = args.prefix + ("" if args.prefix.endswith(".") else ".")
checkpoint["state_dict"] = {k[len(prefix):]: v for k, v in checkpoint["state_dict"].items() if k.startswith(prefix)}
torch.save(checkpoint, args.output_path)
if __name__ == "__main__":
main()
| 771 | 26.571429 | 120 | py |
fitclip | fitclip-main/scripts/speech_to_text.py | #!/usr/bin/env python
import sys
from google.cloud.speech_v1p1beta1 import RecognitionAudio, RecognitionConfig, RecognitionMetadata, \
SpeakerDiarizationConfig, SpeechClient
# We use a Python script as the `gcloud` equivalent command doesn't support the enhanced models
# (`gcloud ml speech recognize-long-running`, not even the alpha and beta ones).
# See https://cloud.google.com/sdk/gcloud/reference/alpha/ml/speech/recognize-long-running for more info.
# noinspection PyTypeChecker
def main() -> None:
assert len(sys.argv) == 2, f"Valid syntax: {sys.argv[0]} GS_PATH"
path = sys.argv[1] # -Cv9h3ic2JI.opus, czQwCto9O80.opus, mono: --BLA_8Qixs
if path.startswith("gs://"):
audio = RecognitionAudio(uri=path)
else:
with open(path, "rb") as file:
audio = RecognitionAudio(content=file.read())
kwargs = {
"audio_channel_count": 2, # It fails otherwise for many audios. FIXME: fails with mono
}
if path.endswith(".opus"):
kwargs["encoding"] = RecognitionConfig.AudioEncoding.OGG_OPUS # All our Opus videos are in an Ogg container.
# When using Ogg-Opus, the endpoint needs the following fields.
# See https://cloud.google.com/speech-to-text/docs/encoding
kwargs["sample_rate"] = 48000 # All our Opus audios I've seen use this rate (at least 100).
else:
kwargs["encoding"] = RecognitionConfig.AudioEncoding.ENCODING_UNSPECIFIED
metadata = RecognitionMetadata(original_media_type=RecognitionMetadata.OriginalMediaType.VIDEO)
config = RecognitionConfig(language_code="en-US", enable_word_time_offsets=True, enable_word_confidence=True,
# Option not supported in the enhanced video model:
# alternative_language_codes=["en-GB", "en-IN", "en-AU"],
enable_automatic_punctuation=True, use_enhanced=True, model="video", metadata=metadata,
diarization_config=SpeakerDiarizationConfig(enable_speaker_diarization=True,
min_speaker_count=1, max_speaker_count=10),
**kwargs)
response = SpeechClient().long_running_recognize(config=config, audio=audio)
result = response.result(timeout=10000)
print(type(result).to_json(result))
if __name__ == "__main__":
main()
| 2,428 | 47.58 | 118 | py |
fitclip | fitclip-main/scripts/open_clip_checkpoint_to_model.py | #!/usr/bin/env python
import argparse
import torch
from cached_path import cached_path
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("input_path", metavar="INPUT_FILE", type=cached_path)
parser.add_argument("output_path", metavar="OUTPUT_FILE")
return parser.parse_args()
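# Strips the "model." or "module." prefix from the checkpoint's `state_dict` keys, leaving a bare model state dict.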
def main() -> None:
args = parse_args()
checkpoint = torch.load(args.input_path)
state_dict = checkpoint["state_dict"]
first_key = next(iter(state_dict))
prefix = next(prefix for prefix in ["model", "module"] if first_key.startswith(prefix + "."))
torch.save({k[len(prefix + "."):]: v for k, v in state_dict.items()}, args.output_path)
if __name__ == "__main__":
main()
| 744 | 25.607143 | 97 | py |
fitclip | fitclip-main/demo/app.py | import json
import random
from typing import Tuple
from flask import Flask, Response, jsonify, request
from flask_cors import CORS, cross_origin
from demo.search import search_in_subtitles
app = application = Flask(__name__, static_url_path="/") # `application` is gunicorn's default, `app` is flask's.
cors = CORS(app)
app.config["CORS_HEADERS"] = "Content-Type"
@app.route("/search")
@cross_origin()
def search() -> Tuple[Response, int]:
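    # Run a spaCy token-pattern search over the cached video transcripts and return up to `top_k` random matches,
    # each with its video ID, time span, and text.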
try:
query = request.args.get("q", "[]")
pattern = json.loads(query) # [{"LOWER": "this", "DEP": {"IN": ["nsubj", "dobj", "iobj"]}}]
top_k = int(request.args.get("top_k", "10"))
results = list(search_in_subtitles(pattern))
return jsonify([
{
"video_id": span.doc._.video_id,
"start_time": span[0]._.start_time,
"end_time": span[-1]._.end_time,
"text": span.text,
}
for span in random.sample(results, min(top_k, len(results)))
]), 200
except Exception as e: # noqa
return jsonify(status=500, message=repr(e)), 500
@app.route("/")
def root() -> Response:
return app.send_static_file("index.html")
| 1,213 | 30.128205 | 114 | py |
fitclip | fitclip-main/demo/__init__.py | 0 | 0 | 0 | py |
|
fitclip | fitclip-main/demo/search.py | import json
import os
import re
from typing import Any, Iterable, Iterator, Mapping, Optional, Sequence
import spacy
import spacy_alignments
from cached_path import cached_path
from spacy.matcher import Matcher
from spacy.tokens import Doc, DocBin, Span, Token
from tqdm.auto import tqdm
RE_MULTIPLE_SPACES = re.compile(r" {2,}")
CAPTIONS_DIR = os.path.join(os.environ["SCRATCH_DIR"], "captions")
spacy.prefer_gpu()
NLP = spacy.load("en_core_web_trf")
Doc.set_extension("video_id", default=None)
Token.set_extension("start_time", default=None)
Token.set_extension("end_time", default=None)
def _list_caption_paths(dir_path: str) -> Iterator[str]:
with os.scandir(dir_path) as it:
for entry in it:
if entry.is_file() and entry.name.endswith(".json"): # noqa
yield entry.path # noqa
def _captions_to_text(caption_full_dict: Mapping[str, Any]) -> str:
return RE_MULTIPLE_SPACES.sub(" ", " ".join(d["alternatives"][0]["transcript"].strip()
for d in caption_full_dict["results"][:-1])).strip()
def _parse_caption_time(s: str) -> float:
return float(s[:-1])
def _load_caption(path: str) -> Optional[Mapping[str, Any]]:
with open(path) as file:
caption_full_dict = json.load(file)
if results := caption_full_dict["results"]:
tokens_info = results[-1]["alternatives"][0]["words"]
else:
tokens_info = None
if tokens_info:
return { # Save some memory by just keeping what we actually use.
"text": _captions_to_text(caption_full_dict),
"video_id": os.path.basename(path).rsplit(".", maxsplit=1)[0],
"tokens_info": [{
"word": wi["word"],
"start_time": _parse_caption_time(wi["startTime"]),
"end_time": _parse_caption_time(wi["endTime"]),
} for wi in tokens_info],
}
else:
return None # There are around 750/150k that fall here for different reasons.
def _add_caption_info_to_doc(doc: Doc, tokens_info: Sequence[Mapping[str, Any]]) -> Doc:
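    # Align the spaCy tokens with the caption words and copy the word-level start/end timestamps onto each token.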
spacy2caption = spacy_alignments.get_alignments([t.text for t in doc], [w["word"] for w in tokens_info])[0]
for token, caption_token_indices in zip(doc, spacy2caption):
token._.start_time = tokens_info[caption_token_indices[0]]["start_time"]
token._.end_time = tokens_info[caption_token_indices[-1]]["end_time"]
return doc
def _create_docs() -> Iterator[Doc]:
caption_paths = list(_list_caption_paths(CAPTIONS_DIR))
# caption_paths = random.sample(caption_paths, min(len(caption_paths), 100))
    # We don't keep the captions in memory, as they can take up a lot of it.
caption_it = (caption for path in caption_paths if (caption := _load_caption(path)))
doc_and_context_it = NLP.pipe(((c["text"], (c["video_id"], c["tokens_info"])) # noqa
for c in caption_it), as_tuples=True)
for doc, (video_id, tokens_info) in tqdm(doc_and_context_it, total=len(caption_paths), desc="Parsing"):
doc._.trf_data = None # It takes up a lot of memory.
doc._.video_id = video_id
yield _add_caption_info_to_doc(doc, tokens_info)
def _load_cached_docs() -> Iterator[Doc]:
print("Loading cached docs…")
with open(cached_path("parsed_docs"), "rb") as file:
return DocBin().from_bytes(file.read()).get_docs(NLP.vocab)
def _save_docs(docs: Iterable[Doc]) -> None:
print("Saving cached docs…")
with open("/tmp/.docs", "wb") as file:
file.write(DocBin(store_user_data=True, docs=docs).to_bytes())
if os.environ.get("LOAD_CACHED_DOCS", "0").lower() in {"1", "true", "y"}:
DOCS = list(_load_cached_docs())
else:
DOCS = list(_create_docs())
if os.environ.get("SAVE_CACHED_DOCS", "0").lower() in {"1", "true", "y"}:
_save_docs(DOCS)
print("Docs ready")
def search_in_subtitles(pattern: Iterable[Mapping[str, Any]]) -> Iterator[Span]:
matcher = Matcher(NLP.vocab)
matcher.add("search", [pattern])
for doc in DOCS:
for m in matcher(doc):
yield doc[m[1]:m[2]]
| 4,180 | 34.432203 | 111 | py |
fitclip | fitclip-main/aligner/video_text_module.py | from typing import Any, Literal, Mapping, MutableMapping, Optional, Sequence, Tuple, Union
import math
import pytorch_lightning as pl
import torch.distributed.nn
from overrides import overrides
from torch import nn
from torch.nn.modules.loss import _Loss
from aligner.encoder.video_text_encoder import TYPE_OUTPUT, VideoTextEncoder
from aligner.loss import NCELoss
from util.tensor_utils import all_gather
TYPE_INPUT = MutableMapping[str, Any]
TYPE_SPLIT = Literal["train", "val"]
def log_lr(pl_module: pl.LightningModule, **kwargs) -> None:
for i, optimizer in enumerate(pl_module.trainer.optimizers):
for j, param_group in enumerate(optimizer.param_groups):
if (lr := param_group.get("lr")) is not None: # noqa
pl_module.log(f"lr_{i}_group_{j}", lr, **kwargs)
class VideoTextLightningModule(pl.LightningModule): # noqa
def __init__(self, encoder: VideoTextEncoder, init_temperature: float = 0.05, min_temperature: float = 0.001,
fit_temperature: bool = True, loss: Optional[_Loss] = None) -> None:
super().__init__()
self.encoder = encoder
# Use the temperature as in CLIP: save it in log-space and fit it along with the model.
self.logit_scale = nn.Parameter(torch.tensor([- math.log(init_temperature)]), requires_grad=fit_temperature)
# The following constant is set also as a parameter, so it's moved to the correct device automatically.
self.max_logit_scale = nn.Parameter(torch.tensor([- math.log(min_temperature)]), requires_grad=False)
self.loss = loss or NCELoss()
@overrides(check_signature=False)
def forward(self, batch: TYPE_INPUT,
_batch_idx: int = 0) -> Union[TYPE_OUTPUT, Tuple[torch.Tensor, torch.Tensor, Sequence[str]]]:
batch.pop("video_id", None)
return self.encoder(**batch)
def _step(self, batch: TYPE_INPUT, batch_idx: int = 0) -> TYPE_OUTPUT:
return self(batch, batch_idx)
@overrides(check_signature=False)
def training_step(self, batch: TYPE_INPUT, _batch_idx: int = 0) -> TYPE_OUTPUT:
output = self._step(batch, _batch_idx)
# Need to log the step because PL doesn't log it in Neptune.
# See https://github.com/PyTorchLightning/pytorch-lightning/pull/5510
first_video_value = next(v for k, v in batch.items() if k.startswith("video"))
self.log("step", float(self.global_step), batch_size=len(first_video_value))
return output
def _step_end(self, output: TYPE_OUTPUT, split: TYPE_SPLIT,
log_kwargs: Optional[Mapping[str, Any]] = None) -> Union[torch.Tensor, TYPE_OUTPUT]:
log_kwargs = log_kwargs or {}
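        # Gather the embeddings from all processes so the NCE loss contrasts against negatives from the full global
        # batch (gradients are synced only during training).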
encoded_video, encoded_text = all_gather(self, output, sync_grads=split == "train")
batch_size = len(encoded_video)
logit_scale = self.logit_scale.exp()
scores = logit_scale * encoded_video @ encoded_text.T
loss = self.loss(scores)
        # Note the train loss is already shown in the progress bar by PL by default.
#
# Note that we need to pass the batch size in the first step log
# as it can't be easily inferred by PL in our case.
self.log(f"loss/{split}", loss, prog_bar=split != "train", batch_size=batch_size, **log_kwargs)
if split == "train":
self.log("batch_size", float(batch_size), batch_size=batch_size)
self.log("temperature", 1 / logit_scale, batch_size=batch_size)
return loss if split == "train" else (encoded_video, encoded_text)
@overrides(check_signature=False)
def training_step_end(self, output: TYPE_OUTPUT) -> torch.Tensor:
loss = self._step_end(output, split="train")
log_lr(self)
return loss
@overrides(check_signature=False)
def predict_step(self, batch: TYPE_INPUT, batch_idx: int = 0) -> Mapping[str, torch.Tensor]:
encoded_video, encoded_text = self._step(batch, batch_idx)
return {
"encoded_videos": encoded_video,
"encoded_texts": encoded_text,
"video_ids": batch["video_id"]
}
@overrides(check_signature=False)
def optimizer_step(self, *args, **kwargs) -> None:
super().optimizer_step(*args, **kwargs)
if self.logit_scale >= self.max_logit_scale:
self.logit_scale.copy_(self.max_logit_scale)
| 4,376 | 43.663265 | 116 | py |
fitclip | fitclip-main/aligner/__main__.py | #!/usr/bin/env python
import logging
import os
from time import strftime
from typing import Mapping, Optional
import hydra
import torch
from omegaconf import DictConfig
from pytorch_lightning.loggers import NeptuneLogger, TensorBoardLogger
from aligner.cli import create_model_data_module_trainer_and_ckpt_path, init_cli
from aligner.logger_utils import get_logger_by_type
# Note it's better to have this as a module, so it's importable and DDP works fine in debug mode.
# Maybe this issue is caused by Hydra moving the CWD to somewhere else.
LOGGER = logging.getLogger(__name__)
# Set an env var, if not already set, to the desired working directory in sweep mode. Then we read it from the config.
# This way we make sure all processes use the same folder.
# See https://github.com/PyTorchLightning/pytorch-lightning/issues/2727
os.environ.setdefault("SWEEP_DIR", f"multirun/{strftime('%Y-%m-%d')}/{strftime('%H-%M-%S')}")
@hydra.main(config_path="../config", config_name="trainer")
def main(cfg: DictConfig) -> Optional[float]:
init_cli(cfg)
if cfg.get("trainer", {}).get("strategy") == "dp":
LOGGER.warning("DP strategy not supported by the current metric logging scheme."
" See https://torchmetrics.readthedocs.io/en/stable/pages/lightning.html#logging-torchmetrics")
model, data_module, trainer, ckpt_path = create_model_data_module_trainer_and_ckpt_path(cfg)
output = None
if cfg.command == "train":
if cfg.get("validate_before_training"):
LOGGER.info("Validation before training started.")
with torch.inference_mode():
metrics_list = trainer.validate(model, datamodule=data_module, ckpt_path=ckpt_path)
LOGGER.info("Validation before training finished.")
if (tb_logger := get_logger_by_type(trainer, TensorBoardLogger)) and not tb_logger._default_hp_metric:
tb_logger.log_hyperparams(model.hparams_initial, metrics={k: v for metrics in metrics_list
for k, v in metrics.items()})
LOGGER.info("Training started.")
trainer.fit(model, datamodule=data_module, ckpt_path=ckpt_path)
if optimized_metric_name := cfg.get("optimized_metric_name"):
output = trainer.callback_metrics.get(optimized_metric_name)
elif cfg.command == "tune":
assert ckpt_path is None, "Checkpoint path not supported when tuning."
if trainer._accelerator_connector.is_distributed:
LOGGER.warning("Tuning with the PL Trainer is known to have some issues in distributed settings."
" See e.g. https://github.com/PyTorchLightning/pytorch-lightning/issues/4280")
LOGGER.info("Tuning started.")
trainer.tune(model, datamodule=data_module)
elif cfg.command in {"evaluate", "validate"}:
with torch.inference_mode():
trainer.validate(model, datamodule=data_module, ckpt_path=ckpt_path)
elif cfg.command == "test":
with torch.inference_mode():
trainer.test(model, datamodule=data_module, ckpt_path=ckpt_path)
elif cfg.command == "predict":
if trainer._accelerator_connector.is_distributed:
LOGGER.warning("Predicting with the PL Trainer is known to have some issues in distributed settings."
" See e.g. https://github.com/PyTorchLightning/pytorch-lightning/issues/10618")
output_path = cfg.get("output_path", "predictions.pt")
with torch.inference_mode():
predictions = trainer.predict(model, datamodule=data_module, ckpt_path=ckpt_path)
assert predictions
first_prediction = predictions[0]
assert isinstance(first_prediction, Mapping)
keys = first_prediction
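        # Merge the per-batch prediction mappings: concatenate tensor values and flatten list values (e.g., video
        # IDs) into a single collection per key.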
predictions_map = {k: torch.cat([prediction[k] for prediction in predictions])
if isinstance(first_prediction[k], torch.Tensor)
else [p for prediction in predictions for p in prediction[k]]
for k in keys}
torch.save(predictions_map, output_path)
else:
raise ValueError(f"Unrecognized command: {cfg.command}")
if (neptune_logger := get_logger_by_type(trainer, NeptuneLogger)) and trainer.is_global_zero:
# In a Hydra multirun (sweep) scenario, Neptune experiments from finished runs are marked as still running
# unless we stop them manually. See https://github.com/PyTorchLightning/pytorch-lightning/issues/11368
neptune_logger.run.stop()
# Return the optimized metric value for hparam search.
return output
if __name__ == "__main__":
main()
| 4,715 | 43.490566 | 118 | py |
fitclip | fitclip-main/aligner/logger_utils.py | from typing import Optional, Type, TypeVar
import pytorch_lightning as pl
from pytorch_lightning.loggers import LightningLoggerBase, LoggerCollection
T = TypeVar("T", bound=LightningLoggerBase)
def get_logger_by_type(trainer: pl.Trainer, logger_class: Type[T]) -> Optional[T]:
if isinstance(trainer.logger, LoggerCollection):
return next((logger for logger in trainer.logger._logger_iterable if isinstance(logger, logger_class)), None)
elif isinstance(trainer.logger, logger_class):
return trainer.logger
else:
return None
| 564 | 32.235294 | 117 | py |
fitclip | fitclip-main/aligner/teacher_student.py | import itertools
from typing import Iterable, Mapping, MutableMapping, Optional, Tuple, Union
import torch.distributed.nn
from overrides import overrides
from torch import nn
from aligner.encoder import video_text_encoder
from aligner.encoder.video_text_encoder import TYPE_TOKENIZER, VideoTextEncoder
from aligner.loss import TeacherStudentNCELoss
from aligner.text_video_retrieval import TextVideoRetrievalLightningModule
from aligner.video_text_module import TYPE_INPUT, TYPE_SPLIT, log_lr
from util.tensor_utils import all_gather, pad, split_in_collection
TYPE_OUTPUT = Tuple[video_text_encoder.TYPE_OUTPUT, video_text_encoder.TYPE_OUTPUT]
TYPE_MULTI_OUTPUT = Mapping[str, TYPE_OUTPUT]
def _replace_in_tokenized_text(tokenized_text: MutableMapping[str, torch.Tensor],
new_tokenized_text: Mapping[str, torch.Tensor], start_idx: int, end_idx: int,
tokenizer: TYPE_TOKENIZER) -> None:
"""Replaces the content in the tensor `tokenized_text` from the index `start_idx` to `end_idx` (exclusive) for
`new_tokenized_text`.
When it needs to know details about the tokenization, it uses `tokenizer`.
"""
for k in tokenized_text:
padding_value = 0 if "mask" in k else getattr(tokenizer, "pad_token_id", 0)
# We suppose right padding.
if tokenized_text[k].shape[1] > new_tokenized_text[k].shape[1]:
padded = pad(new_tokenized_text[k], min_size=tokenized_text[k].shape[1], value=padding_value)
tokenized_text[k] = torch.cat((tokenized_text[k][:start_idx], padded, tokenized_text[k][end_idx:]))
elif tokenized_text[k].shape[1] < new_tokenized_text[k].shape[1]:
padded = pad(tokenized_text[k], min_size=new_tokenized_text[k].shape[1], value=padding_value)
tokenized_text[k] = torch.cat((padded[:start_idx], new_tokenized_text[k], padded[end_idx:]))
else:
tokenized_text[k] = torch.cat((tokenized_text[k][:start_idx], new_tokenized_text[k],
tokenized_text[k][end_idx:]))
class TeacherStudentLightningModule(TextVideoRetrievalLightningModule): # noqa
"""
Distillation training module.
    If specified, `prompts` are used with the unlabeled dataset videos instead of any labels that dataset provides.
"""
def __init__(self, encoder: VideoTextEncoder, teacher: VideoTextEncoder, labeled_dataset_name: str = "labeled",
labeled_dataset_loss_share: Optional[float] = None,
dataset_names: Iterable[str] = ("labeled", "unlabeled"), prompts: Optional[Iterable[str]] = None,
**kwargs) -> None:
super().__init__(encoder=encoder, dataset_names=dataset_names, **kwargs)
self.teacher = teacher
assert self.dataset_names, "This module uses dataset names."
assert len(self.dataset_names) == 2, "The current implementation needs exactly 2 datasets."
# FIXME: it doesn't work with different datasets for training and evaluation, because it needs certain names
# for training; and this logic assumes the same dataset names for both.
if labeled_dataset_loss_share is None:
self.dataset_loss_share = {name: 1 / len(self.dataset_names) for name in self.dataset_names}
else:
self.dataset_loss_share = {labeled_dataset_name: labeled_dataset_loss_share}
self.dataset_loss_share.update((name, (1 - labeled_dataset_loss_share) / (len(self.dataset_names) - 1))
for name in self.dataset_names
if name != labeled_dataset_name)
self.teacher_student_logit_scale = nn.Parameter(self.logit_scale.clone(),
requires_grad=self.logit_scale.requires_grad)
# noinspection SpellCheckingInspection
self.teacher_student_loss = TeacherStudentNCELoss(reduction="batchmean")
for p in self.teacher.parameters():
p.requires_grad = False
self.labeled_dataset_name = labeled_dataset_name
self.unlabeled_dataset_name = next(k for k in self.dataset_names if k != labeled_dataset_name)
if prompts is None:
self.tokenized_prompts = None
self.teacher_tokenized_prompts = None
else:
prompts = list(prompts)
# We use parameters so the device and dtype are moved correctly along with this module.
self.tokenized_prompts = nn.ParameterDict((k, nn.Parameter(v, requires_grad=False)) # noqa
for k, v in encoder.get_tokenizer()(prompts).items())
self.teacher_tokenized_prompts = nn.ParameterDict((k, nn.Parameter(v, requires_grad=False)) # noqa
for k, v in teacher.get_tokenizer()(prompts).items())
@overrides(check_signature=False)
def _step(self, batch: TYPE_INPUT, _batch_idx: int = 0) -> TYPE_OUTPUT:
# Note we pass the labeled dataset portion to the teacher, but then we don't use it.
return self({"video": batch["video_student"], "text": batch["text_student"]}), \
self.teacher(video=batch["video_teacher"], text=batch["text_teacher"])
@overrides(check_signature=False)
def training_step(self, batch: TYPE_INPUT, _batch_idx: int = 0) -> TYPE_MULTI_OUTPUT:
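        # The batch concatenates the datasets back to back, so recover each dataset's name and size from the
        # `dataset` field to be able to split the output per dataset afterwards.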
keys, lengths = zip(*((key, sum(1 for _ in group))
for key, group in itertools.groupby(dataset for dataset in batch.pop("dataset"))))
assert len(keys) == len(self.dataset_names), "All datasets should be present in each batch."
if self.tokenized_prompts is None:
unlabeled_dataset_idx = None
else:
unlabeled_dataset_idx = keys.index(self.unlabeled_dataset_name)
start_idx_in_batch = sum(lengths[i] for i in range(unlabeled_dataset_idx))
end_idx_in_batch = start_idx_in_batch + lengths[unlabeled_dataset_idx]
_replace_in_tokenized_text(tokenized_text=batch["text_student"],
new_tokenized_text=self.tokenized_prompts, start_idx=start_idx_in_batch,
end_idx=end_idx_in_batch, tokenizer=self.encoder.get_tokenizer())
_replace_in_tokenized_text(tokenized_text=batch["text_teacher"],
new_tokenized_text=self.teacher_tokenized_prompts,
start_idx=start_idx_in_batch, end_idx=end_idx_in_batch,
tokenizer=self.teacher.get_tokenizer())
output = self._step(batch, _batch_idx)
# Need to log the step because PL doesn't log it in Neptune.
# See https://github.com/PyTorchLightning/pytorch-lightning/pull/5510
first_video_value = next(v for k, v in batch.items() if k.startswith("video"))
self.log(f"step", self.global_step, batch_size=len(first_video_value))
if self.tokenized_prompts is None:
split_output = split_in_collection(output, lengths)
else:
text_split_sections = list(lengths)
text_split_sections[unlabeled_dataset_idx] = len(next(iter(self.tokenized_prompts.values())))
student_video_sections = split_in_collection(output[0][0], lengths)
student_text_sections = split_in_collection(output[0][1], text_split_sections)
teacher_video_sections = split_in_collection(output[1][0], lengths)
teacher_text_sections = split_in_collection(output[1][1], text_split_sections)
split_output = (((student_video_sections[i], student_text_sections[i]),
(teacher_video_sections[i], teacher_text_sections[i]))
for i in range(len(student_video_sections)))
return dict(zip(keys, split_output))
def _dataset_step_end(self, output: TYPE_OUTPUT, split: TYPE_SPLIT,
dataset_name: Optional[str] = None) -> Union[torch.Tensor, video_text_encoder.TYPE_OUTPUT]:
gathered_output = all_gather(self, output, sync_grads=split == "train")
(encoded_video, encoded_text), (teacher_encoded_video, teacher_encoded_text) = gathered_output
batch_size = len(encoded_video)
logit_scale = self.logit_scale.exp()
scores = logit_scale * encoded_video @ encoded_text.T
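        # Labeled batches use the standard NCE loss; unlabeled batches are distilled by matching the teacher's
        # score distribution.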
if dataset_name == self.labeled_dataset_name:
loss = self.loss(scores)
else:
teacher_student_logit_scale = self.teacher_student_logit_scale.exp()
teacher_scores = teacher_student_logit_scale * teacher_encoded_video @ teacher_encoded_text.T
loss = self.teacher_student_loss(scores, teacher_scores) * teacher_student_logit_scale ** 2
if split == "train":
# Note that we need to pass the batch size in the first step log
# as it can't be easily inferred by PL in our case.
self.log("batch_size", float(batch_size), batch_size=batch_size)
self.log("temperature/labeled", 1 / logit_scale)
self.log("temperature/unlabeled", 1 / teacher_student_logit_scale)
prefix = f"loss/{split}_{dataset_name}" if dataset_name else f"loss/{split}"
# Note that we need to pass the batch size in the first step log
# as it can't be easily inferred by PL in our case.
self.log(prefix, loss, prog_bar=split != "train", batch_size=batch_size, add_dataloader_idx=False)
return loss if split == "train" else (encoded_video, encoded_text)
@overrides(check_signature=False)
def training_step_end(self, output: TYPE_MULTI_OUTPUT) -> torch.Tensor:
loss = sum(self._dataset_step_end(batch, split="train", dataset_name=name) * self.dataset_loss_share[name]
for name, batch in output.items())
self.log("loss/train", loss) # Note train loss it's already shown in the progress bar by PL by default.
log_lr(self)
return loss
@overrides(check_signature=False)
def _validation_dataset_step_end(self, output: TYPE_OUTPUT,
dataset_name: Optional[str] = None) -> video_text_encoder.TYPE_OUTPUT:
return self._dataset_step_end(output, split="val", dataset_name=dataset_name)
@overrides(check_signature=False)
def optimizer_step(self, *args, **kwargs) -> None:
super().optimizer_step(*args, **kwargs)
if self.teacher_student_logit_scale >= self.max_logit_scale:
self.teacher_student_logit_scale.copy_(self.max_logit_scale)
| 10,735 | 54.05641 | 117 | py |
fitclip | fitclip-main/aligner/loss.py | from typing import Literal
import torch
from overrides import overrides
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss
TYPE_REDUCTION = Literal["none", "mean", "sum"]
# noinspection SpellCheckingInspection
TYPE_REDUCTION_KL_DIV = Literal["none", "batchmean", "mean", "sum"]
def _rows_to_columns_nce_loss(scores: torch.Tensor, reduction: TYPE_REDUCTION = "mean") -> torch.Tensor:
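    # One direction of the InfoNCE loss: the diagonal of `scores` holds the positive pairs, so the per-sample loss
    # is -log softmax(scores[i])[i].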
loss = - F.log_softmax(scores, dim=-1).diag()
if reduction == "mean":
return loss.mean()
elif reduction == "sum":
return loss.sum()
else:
return loss
def nce_loss(scores: torch.Tensor, reduction: TYPE_REDUCTION = "mean") -> torch.Tensor:
return (_rows_to_columns_nce_loss(scores, reduction=reduction)
+ _rows_to_columns_nce_loss(scores.T, reduction=reduction))
def _rows_to_columns_teacher_student_nce_loss(scores: torch.Tensor, teacher_scores: torch.Tensor,
reduction: TYPE_REDUCTION_KL_DIV = "mean") -> torch.Tensor:
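    # One direction of the distillation loss: KL(teacher ‖ student) between the row-wise score distributions.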
logits = F.log_softmax(scores, dim=-1)
teacher_probs = F.softmax(teacher_scores, dim=-1)
return F.kl_div(logits, teacher_probs, reduction=reduction)
def teacher_student_nce_loss(scores: torch.Tensor, teacher_scores: torch.Tensor,
reduction: TYPE_REDUCTION_KL_DIV = "mean") -> torch.Tensor:
return (_rows_to_columns_teacher_student_nce_loss(scores, teacher_scores, reduction=reduction)
+ _rows_to_columns_teacher_student_nce_loss(scores.T, teacher_scores.T, reduction=reduction))
class NCELoss(_Loss):
@overrides(check_signature=False)
def forward(self, scores: torch.Tensor) -> torch.Tensor:
return nce_loss(scores, reduction=self.reduction) # noqa
class TeacherStudentNCELoss(_Loss):
@overrides(check_signature=False)
def forward(self, scores: torch.Tensor, teacher_scores: torch.Tensor) -> torch.Tensor:
return teacher_student_nce_loss(scores, teacher_scores, reduction=self.reduction) # noqa
class SimilarityLoss(_Loss):
@overrides(check_signature=False)
def forward(self, scores: torch.Tensor) -> torch.Tensor:
# Note we actually don't need all the scores.
loss = - torch.log(torch.sigmoid(scores.diag()))
if self.reduction == "mean":
return loss.mean()
elif self.reduction == "sum":
return loss.sum()
else:
return loss
| 2,447 | 36.090909 | 105 | py |
fitclip | fitclip-main/aligner/text_video_retrieval.py | from collections import OrderedDict
from typing import Iterable, Mapping, Optional, Sequence, Tuple, Union
import torch
import torch.distributed.nn
from overrides import overrides
from torch import nn
from torchmetrics import Metric, Recall
from aligner.encoder.video_text_encoder import TYPE_OUTPUT
from aligner.metrics import MedianRank, Rank
from aligner.video_text_module import TYPE_INPUT, VideoTextLightningModule
from util.tensor_utils import all_gather
class TextVideoRetrievalLightningModule(VideoTextLightningModule): # noqa
def __init__(self, *args, dataset_names: Optional[Iterable[str]] = None, compute_rank: bool = False,
**kwargs) -> None:
super().__init__(*args, **kwargs)
metrics_dict = {"r1": Recall(), "r5": Recall(top_k=5), "r10": Recall(top_k=10), "mr": MedianRank()}
if compute_rank:
metrics_dict["rank"] = Rank()
self.dataset_names = list(dataset_names) if dataset_names else None
self.multiple_datasets = self.dataset_names is not None and len(self.dataset_names) > 1
if self.multiple_datasets:
assert all("_" not in name for name in self.dataset_names), \
"Underscores in dataset names are problematic because of how we get their corresponding metrics."
self.metrics: Mapping[str, Metric] = nn.ModuleDict((f"{name}_{dataset_name}", metric.clone()) # noqa
for dataset_name in self.dataset_names
for name, metric in metrics_dict.items())
else:
self.metrics: Mapping[str, Metric] = nn.ModuleDict(metrics_dict)
@overrides(check_signature=False)
def validation_step(self, batch: TYPE_INPUT, batch_idx: int = 0,
dataloader_idx: Optional[int] = None) -> Tuple[TYPE_OUTPUT, Optional[int]]:
return self._step(batch, batch_idx), dataloader_idx
def _validation_dataset_step_end(self, output: TYPE_OUTPUT, dataset_name: Optional[str] = None) -> TYPE_OUTPUT:
encoded_video, encoded_text = all_gather(self, output)
batch_size = len(encoded_video)
logit_scale = self.logit_scale.exp()
scores = logit_scale * encoded_video @ encoded_text.T
loss = self.loss(scores)
# Note that we need to pass the batch size in the first step log
# as it can't be easily inferred by PL in our case.
key = "loss/val" + ("" if dataset_name is None else f"_{dataset_name}")
self.log(key, loss, prog_bar=True, batch_size=batch_size, add_dataloader_idx=False)
return encoded_video, encoded_text
@overrides(check_signature=False)
def validation_step_end(self, output: Tuple[TYPE_OUTPUT, int]) -> TYPE_OUTPUT:
step_output, data_loader_idx = output
assert self.multiple_datasets == (data_loader_idx is not None)
dataset_name = self.dataset_names[data_loader_idx] if self.multiple_datasets else None
return self._validation_dataset_step_end(step_output, dataset_name=dataset_name)
def _validate_dataset(self, outputs: Sequence[TYPE_OUTPUT], dataset_name: Optional[str] = None) -> None:
assert self.multiple_datasets == (dataset_name is not None)
encoded_videos, encoded_texts = (torch.cat(x) for x in zip(*outputs))
batch_size = len(encoded_videos)
scores = encoded_texts @ encoded_videos.T
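        # Text-to-video retrieval: row i of `scores` ranks every video for text i, and the ground-truth video for
        # each text is the one at the same index.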
target = torch.arange(scores.shape[-1], device=scores.device)
for name, metric in self.metrics.items():
if not dataset_name or name.endswith(f"_{dataset_name}"):
metric(scores, target)
# Note that we need to pass the batch size in the first step log
# as it can't be easily inferred by PL in our case.
self.log(name, metric, prog_bar=True, batch_size=batch_size, add_dataloader_idx=False)
@overrides(check_signature=False)
def validation_epoch_end(self, outputs: Union[Sequence[TYPE_OUTPUT], Sequence[Sequence[TYPE_OUTPUT]]]) -> None:
if self.multiple_datasets:
for i, (name, dataset_output) in enumerate(zip(self.dataset_names, outputs)):
# Necessary to set the current data loader ID so PL knows to which one the logged metrics belong
# (because it returns the metrics by data loader).
self._current_dataloader_idx = i
self._validate_dataset(dataset_output, dataset_name=name) # noqa
self._current_dataloader_idx = None
else:
self._validate_dataset(outputs)
if "rank" in self.metrics:
self.print(self.metrics["rank"].compute().tolist())
@overrides
def load_state_dict(self, state_dict: "OrderedDict[str, torch.Tensor]", strict: bool = True):
# If it's exactly this class, then ignore any teacher-related thing.
        # We do it here so we have more control and avoid the bugs a more general solution could introduce.
if type(self) is TextVideoRetrievalLightningModule:
incompatible_keys = super().load_state_dict(state_dict, strict=False)
unexpected_keys = set(incompatible_keys.unexpected_keys)
for key in incompatible_keys.unexpected_keys:
if key.startswith("teacher"):
unexpected_keys.remove(key)
# We then do as in super:
if strict:
error_msgs = []
if unexpected_keys:
unexpected_key_str = ", ".join(f'"{k}"' for k in unexpected_keys)
error_msgs.append(f"Unexpected key(s) in state_dict: {unexpected_key_str}. ")
if incompatible_keys.missing_keys:
missing_keys_str = ', '.join(f'"{k}"' for k in incompatible_keys.missing_keys)
error_msgs.append(f"Missing key(s) in state_dict: {missing_keys_str}. ")
if error_msgs:
error_msgs_str = "\n\t".join(error_msgs)
raise RuntimeError(f"Error(s) in loading state_dict for {self.__class__.__name__}:\n\t"
f"{error_msgs_str}")
return incompatible_keys
else:
return super().load_state_dict(state_dict, strict)
| 6,325 | 46.924242 | 115 | py |
fitclip | fitclip-main/aligner/video_text_classification.py | import logging
import math
from typing import Any, Iterable, Mapping, Optional, Sequence, TypeVar
import torch
from overrides import overrides
from pytorch_lightning.callbacks import RichProgressBar
from pytorch_lightning.utilities.apply_func import apply_to_collection
from torch import nn
from torchmetrics import Accuracy, Metric
from aligner.encoder.video_text_encoder import VideoTextEncoder
from aligner.metrics import MedianRank
from aligner.video_text_module import VideoTextLightningModule
from util import iter_utils
LOGGER = logging.getLogger(__name__)
T = TypeVar("T")
def batch_tokenized_text(tokenized: Mapping[str, Sequence[T]], n: int) -> Iterable[Mapping[str, T]]:
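    # Yield the tokenized texts in batches of `n`, keeping every key of the tokenizer output (e.g., the input IDs
    # and the attention mask) aligned.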
tokenized_dicts = {k: iter(iter_utils.batch_sequence(v, n)) for k, v in tokenized.items()}
length = math.ceil(len(next(iter(tokenized.values()))) / n)
for _ in range(length):
yield {k: next(tokenized_dicts[k]) for k in tokenized}
class VideoTextClassificationLightningModule(VideoTextLightningModule): # noqa
def __init__(self, encoder: VideoTextEncoder, labels: Iterable[str], templates: Optional[Iterable[str]],
return_metrics_by_class: bool = False, **kwargs) -> None:
super().__init__(encoder, **kwargs)
labels = list(labels)
label_count = len(labels)
        # If templates are provided, we use each of them with every label
        # and reset the labels to be {labels} x {templates}.
if templates:
templates = list(templates)
self.template_count = len(templates)
labels = [template.format(label) for label in labels for template in templates]
else:
self.template_count = 1
# We tokenize all the labels but defer the encoding until the model is in the device.
tokenized_labels = encoder.get_tokenizer()(labels)
device = next(encoder.parameters()).device
tokenized_labels = apply_to_collection(tokenized_labels, torch.Tensor, torch.Tensor.to, device)
self.tokenized_labels = nn.ParameterDict(apply_to_collection(tokenized_labels, torch.Tensor, nn.Parameter,
requires_grad=False))
# We encode just one label to allocate the size correctly.
encoded_text = self.encoder.encode_text({k: v[:1] for k, v in tokenized_labels.items()})
self.encoded_labels = nn.Parameter(torch.empty(label_count, encoded_text.shape[-1]), requires_grad=False)
self.metrics: Mapping[str, Metric] = nn.ModuleDict({"a1": Accuracy(), "a5": Accuracy(top_k=5),
"mr": MedianRank()})
if return_metrics_by_class:
self.metrics_by_class = nn.ModuleDict({f"a1_{k}": Accuracy() for k in range(label_count)})
else:
self.metrics_by_class = None
def _on_start(self) -> None:
        # Note that for training they should be encoded at run time, not here.
        # But we aren't training any text classification model here, only evaluating them.
#
        # We compute them here and not during init because here the model is already on the right device.
        # This is much faster than on CPU (at init time), especially when using templates.
batch_size = 32
callback = next(callback for callback in self.trainer.callbacks if isinstance(callback, RichProgressBar))
progress = callback.progress
if self.trainer.is_global_zero:
progress_task = progress.add_task(
description="Encoding the labels",
total=math.ceil(len(next(iter(self.tokenized_labels.values()))) / batch_size))
else:
progress_task = None
encoded_label_list = []
for tokenized_labels_batch in batch_tokenized_text(self.tokenized_labels, batch_size):
encoded_label_list.append(self.encoder.encode_text(tokenized_labels_batch))
if progress_task is not None:
progress.update(progress_task, advance=1)
encoded_labels = torch.cat(encoded_label_list)
encoded_labels = encoded_labels.reshape(-1, self.template_count, encoded_labels.shape[1]).mean(dim=1)
self.encoded_labels.copy_(encoded_labels)
if progress_task is not None:
# If we remove it, it later fails, not sure why. So we just hide it.
progress.update(progress_task, visible=False)
@overrides
def on_validation_start(self) -> None:
self._on_start()
@overrides
def on_test_start(self) -> None:
self._on_start()
@overrides
def on_predict_start(self) -> None:
self._on_start()
@overrides(check_signature=False)
def forward(self, video: torch.Tensor) -> torch.Tensor:
return self.encoder.encode_video(video) @ self.encoded_labels.T
@overrides(check_signature=False)
def validation_step(self, batch: Mapping[str, Any], _batch_idx: int = 0) -> None:
scores = self(batch["video"])
label_id = batch["target"][1]
for name, metric in self.metrics.items():
metric(scores, label_id)
# Note that we need to pass the batch size in the first step log
# as it can't be easily inferred by PL in our case.
self.log(name, metric, prog_bar=True, batch_size=len(batch["video"]))
if self.metrics_by_class is not None:
for scores_instance, label_id_instance in zip(scores, label_id):
metric = self.metrics_by_class[f"a1_{label_id_instance}"]
metric(scores_instance.unsqueeze(0), label_id_instance.unsqueeze(0))
self.log(f"a1_{label_id_instance}", metric, batch_size=1)
@overrides(check_signature=False)
def predict_step(self, batch: Mapping[str, Any], _batch_idx: int = 0) -> Mapping[str, torch.Tensor]:
return {
"predictions": self(batch["video"]).argmax(dim=-1),
"labels": batch["target"][1],
"video_ids": batch["video_id"],
}
| 6,045 | 41.879433 | 114 | py |
fitclip | fitclip-main/aligner/cli.py | #!/usr/bin/env python
import copy
import logging
import warnings
from types import MethodType
from typing import Any, Mapping, Optional, Tuple, Type
import hydra
import pytorch_lightning as pl
from cached_path import cached_path
from omegaconf import DictConfig
from pytorch_lightning import seed_everything
from torch.optim import Optimizer
from aligner.data.data_module_group import DataModuleStructuredGroup, EvalDataModuleGroup, MixedBatchDataModule, \
TrainAndEvalDataModules
from aligner.data.video_data_module import ENCODER_OR_ENCODER_MAP, VideoClassificationDataModule
from aligner.encoder.video_text_encoder import VideoTextEncoder
from aligner.video_text_classification import VideoTextClassificationLightningModule
from aligner.video_text_module import VideoTextLightningModule
LOGGER = logging.getLogger(__name__)
# This is because PL can't automatically infer the batch size, which is needed for logging. But we set it manually
# within the modules.
warnings.filterwarnings("ignore", message=r"^Trying to infer the `batch_size` from an ambiguous collection\. .+")
# From https://stackoverflow.com/a/2020083/1165181
def fullname(klass: Type[Any]) -> str:
return f"{klass.__module__}.{klass.__qualname__}"
def set_logging_level(level: int) -> None:
logging.basicConfig(level=level)
# `basicConfig` will only work for new loggers, so we also need to set up the existing ones:
for logger in logging.root.manager.loggerDict.values():
if isinstance(logger, logging.Logger): # Otherwise, it could be a `logging.PlaceHolder`.
logger.setLevel(level)
logging.getLogger().setLevel(level) # The root logger is not present in the previous iterable.
def init_cli(cfg: DictConfig) -> None:
if cfg.get("silent"):
set_logging_level(logging.WARNING)
else:
set_logging_level(logging.INFO)
if "seed" in cfg:
seed_everything(cfg.seed, workers=True)
def instantiate_data_module(cfg: DictConfig, encoder: ENCODER_OR_ENCODER_MAP) -> pl.LightningDataModule:
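    # Grouping data modules get their children instantiated recursively; leaf data modules get the encoder
    # (or encoder mapping) passed in directly.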
kwargs = {}
if cfg._target_ in {fullname(klass) for klass in [DataModuleStructuredGroup, EvalDataModuleGroup,
MixedBatchDataModule]}:
if isinstance(cfg.data_modules, Mapping):
kwargs["data_modules"] = {k: instantiate_data_module(v, encoder=encoder) # noqa
for k, v in cfg.data_modules.items()}
else:
kwargs["data_modules"] = {instantiate_data_module(cfg_dm, encoder=encoder)
for cfg_dm in cfg.data_modules}
# Convert because otherwise the passed `data_modules` is a `DictConfig` instead of a `dict` and
        # `train_dataloader` can't respect the same collection type, because a `DictConfig` can't hold normal class
        # instances.
kwargs["_convert_"] = "all"
elif cfg._target_ == fullname(TrainAndEvalDataModules):
kwargs["train_data_module"] = instantiate_data_module(cfg.train_data_module, encoder=encoder)
kwargs["eval_data_module"] = instantiate_data_module(cfg.eval_data_module, encoder=encoder)
else:
kwargs["encoder"] = encoder
# Necessary as well when the encoder is a dict.
kwargs["_convert_"] = "all"
return hydra.utils.instantiate(cfg, **kwargs)
def create_model_data_module_trainer_and_ckpt_path(
cfg: DictConfig, model_kwargs: Optional[Mapping[str, Any]] = None) -> Tuple[VideoTextLightningModule,
pl.LightningDataModule, pl.Trainer,
str]:
model_kwargs = model_kwargs or {}
LOGGER.info(f"Instantiating encoder <{getattr(cfg.encoder, '_target_', type(cfg.encoder).__name__)}>…")
encoder: ENCODER_OR_ENCODER_MAP = hydra.utils.instantiate(cfg.encoder)
if isinstance(encoder, Mapping) and cfg.get("use_student_encoder_for_data_preprocessing"):
encoder_for_data_preprocessing = encoder["student"]
else:
encoder_for_data_preprocessing = encoder
LOGGER.info("Encoder instantiated.")
LOGGER.info(f"Instantiating data module <{cfg.data._target_}>…")
data_module = instantiate_data_module(cfg.data, encoder=encoder_for_data_preprocessing)
LOGGER.info("Data module instantiated.")
LOGGER.info(f"Instantiating model <{cfg.model._target_}>…")
if isinstance(encoder, Mapping):
model_kwargs.setdefault("encoder", encoder["student"])
model_kwargs.setdefault("teacher", encoder["teacher"])
else:
model_kwargs.setdefault("encoder", encoder)
if isinstance(data_module, VideoClassificationDataModule):
assert isinstance(encoder_for_data_preprocessing, VideoTextEncoder), \
"Encoder can't be a mapping and has to support text when doing classification."
cfg.model._target_ = fullname(VideoTextClassificationLightningModule)
model_kwargs.setdefault("labels", data_module.categories)
model_kwargs.setdefault("templates", data_module.templates)
if prompts_path := cfg.get("prompts"): # noqa
with open(cached_path(prompts_path)) as file:
model_kwargs.setdefault("prompts", [stripped_line
for line in file
if (stripped_line := line.strip())]) # noqa
model: VideoTextLightningModule = hydra.utils.instantiate(cfg.model, **model_kwargs)
LOGGER.info("Model instantiated.")
if "optimizer" in cfg:
LOGGER.info(f"Instantiating Optimizer <{cfg.optimizer._target_}>…")
def configure_optimizers(self: pl.LightningModule) -> Optimizer:
if (lr_ := self.hparams.get("lr")) is not None: # To be used by auto LR find.
cfg.optimizer["lr"] = lr_
return hydra.utils.instantiate(cfg.optimizer, self.parameters())
model.configure_optimizers = MethodType(configure_optimizers, model)
LOGGER.info("Optimizer instantiated.")
LOGGER.info(f"Instantiating trainer <{cfg.trainer._target_}>…")
trainer: pl.Trainer = hydra.utils.instantiate(cfg.trainer)
LOGGER.info("Trainer instantiated.")
# We do what `model.save_hyperparameters(cfg)` would do but without needing a current frame to get the args from.
    # It turns out that, even if you provide args, it still checks the current frame for args, and sets those
    # conditioned on the provided args.
model._log_hyperparams = trainer.logger
model._set_hparams(cfg) # noqa
model._hparams_initial = copy.deepcopy(model._hparams)
    ckpt_path = cached_path(cfg.checkpoint_path) if cfg.get("checkpoint_path") else None
return model, data_module, trainer, ckpt_path
| 6,809 | 44.099338 | 119 | py |
fitclip | fitclip-main/aligner/wise.py | import copy
from typing import Mapping, TypeVar
import torch
from torch import nn
T = TypeVar("T", bound=nn.Module)
def wise_state_dict(model1: T, model2: T, weight_for_2: float = 0.5) -> Mapping[str, torch.Tensor]:
state_dict1 = dict(model1.named_parameters())
state_dict2 = dict(model2.named_parameters())
assert set(state_dict1) == set(state_dict2)
return {k: (1 - weight_for_2) * state_dict1[k] + weight_for_2 * state_dict2[k] for k in state_dict1}
def wise(model1: T, model2: T, weight_for_2: float = 0.5, copy_model1: bool = True) -> T:
assert type(model1) is type(model2)
model = copy.deepcopy(model1 if copy_model1 else model2)
model.load_state_dict(wise_state_dict(model1, model2, weight_for_2=weight_for_2)) # noqa
return model
| 779 | 31.5 | 104 | py |
fitclip | fitclip-main/aligner/param_freezer.py | # Inspired from https://github.com/allenai/allennlp/blob/0d8c0fc/allennlp/training/optimizers.py
import logging
import re
from typing import Iterable, Optional, Union
import pytorch_lightning as pl
from overrides import overrides
LOGGER = logging.getLogger(__name__)
class ParamFreezer(pl.Callback):
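    """Callback that freezes (sets `requires_grad = False` on) every parameter whose name matches any given regex."""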
def __init__(self, regexes: Iterable[Union[str, re.Pattern]]) -> None:
super().__init__()
self.regexes = [re.compile(regex) for regex in regexes]
@overrides
def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: Optional[str] = None) -> None:
unused_regexes = {p.pattern for p in self.regexes}
params_to_tune = []
frozen_params = []
for name, param in pl_module.named_parameters():
for regex in self.regexes:
if regex.search(name):
param.requires_grad = False
if regex.pattern in unused_regexes:
unused_regexes.remove(regex.pattern)
frozen_params.append(name)
break
else:
params_to_tune.append(name)
LOGGER.debug(f"Params to tune: {params_to_tune}")
LOGGER.debug(f"Frozen params: {frozen_params}")
if unused_regexes:
LOGGER.warning(f"The following param regexes used for freezing didn't match any param name: "
f"{unused_regexes}")
| 1,450 | 32.744186 | 113 | py |
fitclip | fitclip-main/aligner/metrics.py | import torch
from overrides import overrides
from torchmetrics import Metric
class Rank(Metric):
is_differentiable: bool = False
higher_is_better: bool = False
full_state_update: bool = False
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.add_state("ranks", default=[], dist_reduce_fx="cat")
@overrides(check_signature=False)
def update(self, predictions: torch.Tensor, target: torch.Tensor) -> None:
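        # The rank of each target is its 0-based position once the predictions in its row are sorted by descending
        # score.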
sorted_predicted_positions = predictions.argsort(dim=1, descending=True)
ranks = torch.where(sorted_predicted_positions == target.unsqueeze(-1))[1] # noqa
self.ranks.append(ranks)
@overrides
def compute(self) -> torch.Tensor:
# It could be already reduced depending on when we call it (e.g., at the epoch end).
return self.ranks if isinstance(self.ranks, torch.Tensor) else torch.cat(self.ranks)
class MeanRank(Rank):
@overrides
def compute(self) -> torch.Tensor:
return super().compute().mean() + 1
class MedianRank(Rank):
@overrides
def compute(self) -> torch.Tensor:
return super().compute().median() + 1
| 1,162 | 30.432432 | 92 | py |