# Repository: monodle (monocular 3D object detection)

# ===== monodle-main/tools/loc_error_by_shifting.py =====
import numpy as np
from PIL import Image
from lib.datasets.kitti.kitti_utils import Calibration
image_file = '../data/KITTI/object/training/image_2/000000.png'
image = Image.open(image_file)
calib_file = '../data/KITTI/object/training/calib/000000.txt'
calib = Calibration(calib_file)
img_w, img_h = image.size[0], image.size[1]
src_x, src_y = np.array([img_w/2]), np.array([img_h/2])
delta_x, delta_y = np.array([8]), np.array([6])
new_x, new_y = src_x + delta_x, src_y + delta_y
depth = np.array([60])
src_location = calib.img_to_rect(src_x, src_y, depth).reshape(-1)
new_location = calib.img_to_rect(new_x, new_y, depth).reshape(-1)
delta_location = np.abs(src_location - new_location)
loc_error = np.linalg.norm(delta_location)
print(src_location)
print(new_location)
print(delta_location)
print(loc_error)
# image.show()
if __name__ == '__main__':
    pass

# ===== monodle-main/lib/helpers/decode_helper.py =====
import numpy as np
import torch
import torch.nn as nn
from lib.datasets.utils import class2angle
def decode_detections(dets, info, calibs, cls_mean_size, threshold):
'''
NOTE: THIS IS A NUMPY FUNCTION
input: dets, numpy array, shape in [batch x max_dets x dim]
input: img_info, dict, necessary information of input images
input: calibs, corresponding calibs for the input batch
    output: results, dict mapping each img_id to its list of decoded detections
    '''
results = {}
for i in range(dets.shape[0]): # batch
preds = []
for j in range(dets.shape[1]): # max_dets
cls_id = int(dets[i, j, 0])
score = dets[i, j, 1]
if score < threshold:
continue
# 2d bboxs decoding
x = dets[i, j, 2] * info['bbox_downsample_ratio'][i][0]
y = dets[i, j, 3] * info['bbox_downsample_ratio'][i][1]
w = dets[i, j, 4] * info['bbox_downsample_ratio'][i][0]
h = dets[i, j, 5] * info['bbox_downsample_ratio'][i][1]
bbox = [x-w/2, y-h/2, x+w/2, y+h/2]
# 3d bboxs decoding
# depth decoding
depth = dets[i, j, 6]
# dimensions decoding
dimensions = dets[i, j, 31:34]
dimensions += cls_mean_size[int(cls_id)]
# positions decoding
x3d = dets[i, j, 34] * info['bbox_downsample_ratio'][i][0]
y3d = dets[i, j, 35] * info['bbox_downsample_ratio'][i][1]
locations = calibs[i].img_to_rect(x3d, y3d, depth).reshape(-1)
locations[1] += dimensions[0] / 2
# heading angle decoding
alpha = get_heading_angle(dets[i, j, 7:31])
ry = calibs[i].alpha2ry(alpha, x3d)
            score = score * dets[i, j, -1]  # rescale score by the predicted depth confidence (last channel, sigma)
##### generate 2d bbox using 3d bbox
# h, w, l = dimensions
# x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
# y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
# z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
# R = np.array([[np.cos(ry), 0, np.sin(ry)],
# [0, 1, 0],
# [-np.sin(ry), 0, np.cos(ry)]])
# corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
# corners3d = np.dot(R, corners3d).T
# corners3d = corners3d + locations
# bbox, _ = calibs[i].corners3d_to_img_boxes(corners3d.reshape(1, 8, 3))
# bbox = bbox.reshape(-1).tolist()
preds.append([cls_id, alpha] + bbox + dimensions.tolist() + locations.tolist() + [ry, score])
results[info['img_id'][i]] = preds
return results
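
# Each entry of results[img_id] above is a KITTI-style detection row:
# [cls_id, alpha, x1, y1, x2, y2, h, w, l, x, y, z, ry, score]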
def extract_dets_from_outputs(outputs, K=50):
# get src outputs
heatmap = outputs['heatmap']
heading = outputs['heading']
depth = outputs['depth'][:, 0:1, :, :]
sigma = outputs['depth'][:, 1:2, :, :]
size_3d = outputs['size_3d']
offset_3d = outputs['offset_3d']
size_2d = outputs['size_2d']
offset_2d = outputs['offset_2d']
    heatmap = torch.clamp(heatmap.sigmoid_(), min=1e-4, max=1 - 1e-4)
    depth = 1. / (depth.sigmoid() + 1e-6) - 1.  # map raw depth output to (0, +inf)
    sigma = torch.exp(-sigma)                   # convert predicted depth (log-)uncertainty to a confidence weight
batch, channel, height, width = heatmap.size() # get shape
# perform nms on heatmaps
heatmap = _nms(heatmap)
scores, inds, cls_ids, xs, ys = _topk(heatmap, K=K)
offset_2d = _transpose_and_gather_feat(offset_2d, inds)
offset_2d = offset_2d.view(batch, K, 2)
xs2d = xs.view(batch, K, 1) + offset_2d[:, :, 0:1]
ys2d = ys.view(batch, K, 1) + offset_2d[:, :, 1:2]
offset_3d = _transpose_and_gather_feat(offset_3d, inds)
offset_3d = offset_3d.view(batch, K, 2)
xs3d = xs.view(batch, K, 1) + offset_3d[:, :, 0:1]
ys3d = ys.view(batch, K, 1) + offset_3d[:, :, 1:2]
heading = _transpose_and_gather_feat(heading, inds)
heading = heading.view(batch, K, 24)
depth = _transpose_and_gather_feat(depth, inds)
depth = depth.view(batch, K, 1)
sigma = _transpose_and_gather_feat(sigma, inds)
sigma = sigma.view(batch, K, 1)
size_3d = _transpose_and_gather_feat(size_3d, inds)
size_3d = size_3d.view(batch, K, 3)
cls_ids = cls_ids.view(batch, K, 1).float()
scores = scores.view(batch, K, 1)
# check shape
xs2d = xs2d.view(batch, K, 1)
ys2d = ys2d.view(batch, K, 1)
xs3d = xs3d.view(batch, K, 1)
ys3d = ys3d.view(batch, K, 1)
size_2d = _transpose_and_gather_feat(size_2d, inds)
size_2d = size_2d.view(batch, K, 2)
detections = torch.cat([cls_ids, scores, xs2d, ys2d, size_2d, depth, heading, size_3d, xs3d, ys3d, sigma], dim=2)
return detections
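
# Layout of each detection vector (dim = 37), following the torch.cat order
# above: [cls_id, score, x2d, y2d, w2d, h2d, depth, 24 heading logits
# (12 bin + 12 residual), size_3d residual (3), x3d, y3d, sigma].
# decode_detections() indexes dets with exactly this layout.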
############### auxiliary function ############
def _nms(heatmap, kernel=3):
padding = (kernel - 1) // 2
heatmapmax = nn.functional.max_pool2d(heatmap, (kernel, kernel), stride=1, padding=padding)
keep = (heatmapmax == heatmap).float()
return heatmap * keep
def _topk(heatmap, K=50):
batch, cat, height, width = heatmap.size()
# batch * cls_ids * 50
topk_scores, topk_inds = torch.topk(heatmap.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
    topk_ys = torch.div(topk_inds, width, rounding_mode='floor').float()
topk_xs = (topk_inds % width).int().float()
# batch * cls_ids * 50
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
    topk_cls_ids = torch.div(topk_ind, K, rounding_mode='floor').int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_cls_ids, topk_xs, topk_ys
def _gather_feat(feat, ind, mask=None):
'''
Args:
feat: tensor shaped in B * (H*W) * C
ind: tensor shaped in B * K (default: 50)
mask: tensor shaped in B * K (default: 50)
Returns: tensor shaped in B * K or B * sum(mask)
'''
dim = feat.size(2) # get channel dim
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) # B*len(ind) --> B*len(ind)*1 --> B*len(ind)*C
feat = feat.gather(1, ind) # B*(HW)*C ---> B*K*C
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat) # B*50 ---> B*K*1 --> B*K*C
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
'''
Args:
feat: feature maps shaped in B * C * H * W
ind: indices tensor shaped in B * K
Returns:
'''
feat = feat.permute(0, 2, 3, 1).contiguous() # B * C * H * W ---> B * H * W * C
feat = feat.view(feat.size(0), -1, feat.size(3)) # B * H * W * C ---> B * (H*W) * C
feat = _gather_feat(feat, ind) # B * len(ind) * C
return feat
def get_heading_angle(heading):
heading_bin, heading_res = heading[0:12], heading[12:24]
cls = np.argmax(heading_bin)
res = heading_res[cls]
return class2angle(cls, res, to_label_format=True)
if __name__ == '__main__':
## testing
from lib.datasets.kitti.kitti_dataset import KITTI_Dataset
from torch.utils.data import DataLoader
    dataset = KITTI_Dataset(split='train', cfg={'root_dir': '../../data/KITTI'})
dataloader = DataLoader(dataset=dataset, batch_size=2)
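    # Minimal smoke test (no data needed): build fake network outputs with the
    # head channel sizes used by CenterNet3D and check the decoded shape.
    fake_outputs = {'heatmap': torch.randn(2, 3, 96, 320),
                    'offset_2d': torch.randn(2, 2, 96, 320),
                    'size_2d': torch.randn(2, 2, 96, 320),
                    'depth': torch.randn(2, 2, 96, 320),
                    'offset_3d': torch.randn(2, 2, 96, 320),
                    'size_3d': torch.randn(2, 3, 96, 320),
                    'heading': torch.randn(2, 24, 96, 320)}
    dets = extract_dets_from_outputs(fake_outputs, K=50)
    print(dets.shape)  # expected: torch.Size([2, 50, 37])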

# ===== monodle-main/lib/helpers/scheduler_helper.py =====
import torch.nn as nn
import torch.optim.lr_scheduler as lr_sched
import math
def build_lr_scheduler(cfg, optimizer, last_epoch):
def lr_lbmd(cur_epoch):
cur_decay = 1
for decay_step in cfg['decay_list']:
if cur_epoch >= decay_step:
cur_decay = cur_decay * cfg['decay_rate']
return cur_decay
lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lbmd, last_epoch=last_epoch)
warmup_lr_scheduler = None
if cfg['warmup']:
        warmup_lr_scheduler = CosineWarmupLR(optimizer, num_epoch=5, init_lr=0.00001)  # keep in sync with the 'epoch < 5' warmup check in Trainer.train()
return lr_scheduler, warmup_lr_scheduler
def build_bnm_scheduler(cfg, model, last_epoch):
if not cfg['enabled']:
return None
def bnm_lmbd(cur_epoch):
cur_decay = 1
for decay_step in cfg['decay_list']:
if cur_epoch >= decay_step:
cur_decay = cur_decay * cfg['decay_rate']
return max(cfg['momentum']*cur_decay, cfg['clip'])
bnm_scheduler = BNMomentumScheduler(model, bnm_lmbd, last_epoch=last_epoch)
return bnm_scheduler
def set_bn_momentum_default(bn_momentum):
def fn(m):
if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
m.momentum = bn_momentum
return fn
class BNMomentumScheduler(object):
def __init__(
self, model, bn_lambda, last_epoch=-1,
setter=set_bn_momentum_default
):
if not isinstance(model, nn.Module):
raise RuntimeError("Class '{}' is not a PyTorch nn Module".format(type(model).__name__))
self.model = model
self.setter = setter
self.lmbd = bn_lambda
self.step(last_epoch + 1)
self.last_epoch = last_epoch
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
self.model.apply(self.setter(self.lmbd(epoch)))
class CosineWarmupLR(lr_sched._LRScheduler):
def __init__(self, optimizer, num_epoch, init_lr=0.0, last_epoch=-1):
self.num_epoch = num_epoch
self.init_lr = init_lr
super(CosineWarmupLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [self.init_lr + (base_lr - self.init_lr) *
(1 - math.cos(math.pi * self.last_epoch / self.num_epoch)) / 2
for base_lr in self.base_lrs]
class LinearWarmupLR(lr_sched._LRScheduler):
def __init__(self, optimizer, num_epoch, init_lr=0.0, last_epoch=-1):
self.num_epoch = num_epoch
self.init_lr = init_lr
super(LinearWarmupLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
return [self.init_lr + (base_lr - self.init_lr) * self.last_epoch / self.num_epoch
for base_lr in self.base_lrs]
if __name__ == '__main__':
# testing
import torch.optim as optim
from lib.models.centernet3d import CenterNet3D
import matplotlib.pyplot as plt
net = CenterNet3D()
optimizer = optim.Adam(net.parameters(), 0.01)
lr_warmup_scheduler_cosine = CosineWarmupLR(optimizer, 1000, init_lr=0.00001, last_epoch=-1)
lr_warmup_scheduler_linear = LinearWarmupLR(optimizer, 1000, init_lr=0.00001, last_epoch=-1)
batch_cosine, lr_cosine = [], []
batch_linear, lr_linear = [], []
for i in range(1000):
batch_cosine.append(i)
lr_cosine.append(lr_warmup_scheduler_cosine.get_lr())
batch_linear.append(i)
lr_linear.append(lr_warmup_scheduler_linear.get_lr())
lr_warmup_scheduler_cosine.step()
lr_warmup_scheduler_linear.step()
# vis
fig = plt.figure()
ax1 = fig.add_subplot(121)
    ax1.scatter(batch_cosine, lr_cosine, c='r', marker='o')
    ax2 = fig.add_subplot(122)
    ax2.scatter(batch_linear, lr_linear, c='r', marker='o')
plt.show()

# ===== monodle-main/lib/helpers/trainer_helper.py =====
import os
import tqdm
import torch
import numpy as np
import torch.nn as nn
from lib.helpers.save_helper import get_checkpoint_state
from lib.helpers.save_helper import load_checkpoint
from lib.helpers.save_helper import save_checkpoint
from lib.losses.centernet_loss import compute_centernet3d_loss
class Trainer(object):
def __init__(self,
cfg,
model,
optimizer,
train_loader,
test_loader,
lr_scheduler,
warmup_lr_scheduler,
logger):
self.cfg = cfg
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.test_loader = test_loader
self.lr_scheduler = lr_scheduler
self.warmup_lr_scheduler = warmup_lr_scheduler
self.logger = logger
self.epoch = 0
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# loading pretrain/resume model
if cfg.get('pretrain_model'):
assert os.path.exists(cfg['pretrain_model'])
load_checkpoint(model=self.model,
optimizer=None,
filename=cfg['pretrain_model'],
map_location=self.device,
logger=self.logger)
if cfg.get('resume_model', None):
assert os.path.exists(cfg['resume_model'])
self.epoch = load_checkpoint(model=self.model.to(self.device),
optimizer=self.optimizer,
filename=cfg['resume_model'],
map_location=self.device,
logger=self.logger)
self.lr_scheduler.last_epoch = self.epoch - 1
self.gpu_ids = list(map(int, cfg['gpu_ids'].split(',')))
self.model = torch.nn.DataParallel(model, device_ids=self.gpu_ids).to(self.device)
def train(self):
start_epoch = self.epoch
progress_bar = tqdm.tqdm(range(start_epoch, self.cfg['max_epoch']), dynamic_ncols=True, leave=True, desc='epochs')
for epoch in range(start_epoch, self.cfg['max_epoch']):
# reset random seed
# ref: https://github.com/pytorch/pytorch/issues/5059
np.random.seed(np.random.get_state()[1][0] + epoch)
# train one epoch
self.train_one_epoch()
self.epoch += 1
# update learning rate
if self.warmup_lr_scheduler is not None and epoch < 5:
self.warmup_lr_scheduler.step()
else:
self.lr_scheduler.step()
# save trained model
if (self.epoch % self.cfg['save_frequency']) == 0:
os.makedirs('checkpoints', exist_ok=True)
ckpt_name = os.path.join('checkpoints', 'checkpoint_epoch_%d' % self.epoch)
save_checkpoint(get_checkpoint_state(self.model, self.optimizer, self.epoch), ckpt_name)
progress_bar.update()
return None
def train_one_epoch(self):
self.model.train()
progress_bar = tqdm.tqdm(total=len(self.train_loader), leave=(self.epoch+1 == self.cfg['max_epoch']), desc='iters')
for batch_idx, (inputs, targets, _) in enumerate(self.train_loader):
inputs = inputs.to(self.device)
for key in targets.keys():
targets[key] = targets[key].to(self.device)
# train one batch
self.optimizer.zero_grad()
outputs = self.model(inputs)
total_loss, stats_batch = compute_centernet3d_loss(outputs, targets)
total_loss.backward()
self.optimizer.step()
progress_bar.update()
progress_bar.close()

# ===== monodle-main/lib/helpers/save_helper.py =====
import os
import torch
import torch.nn as nn
def model_state_to_cpu(model_state):
model_state_cpu = type(model_state)() # ordered dict
for key, val in model_state.items():
model_state_cpu[key] = val.cpu()
return model_state_cpu
def get_checkpoint_state(model=None, optimizer=None, epoch=None):
optim_state = optimizer.state_dict() if optimizer is not None else None
if model is not None:
if isinstance(model, torch.nn.DataParallel):
model_state = model_state_to_cpu(model.module.state_dict())
else:
model_state = model.state_dict()
else:
model_state = None
return {'epoch': epoch, 'model_state': model_state, 'optimizer_state': optim_state}
def save_checkpoint(state, filename):
filename = '{}.pth'.format(filename)
torch.save(state, filename)
def load_checkpoint(model, optimizer, filename, map_location, logger=None):
if os.path.isfile(filename):
logger.info("==> Loading from checkpoint '{}'".format(filename))
checkpoint = torch.load(filename, map_location)
epoch = checkpoint.get('epoch', -1)
if model is not None and checkpoint['model_state'] is not None:
model.load_state_dict(checkpoint['model_state'])
if optimizer is not None and checkpoint['optimizer_state'] is not None:
optimizer.load_state_dict(checkpoint['optimizer_state'])
logger.info("==> Done")
    else:
        raise FileNotFoundError("checkpoint '{}' not found".format(filename))
    return epoch
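
if __name__ == '__main__':
    # Minimal round-trip sketch (hypothetical /tmp path, not part of the
    # training pipeline): save a toy model + optimizer state and load it back.
    import logging
    logging.basicConfig(level=logging.INFO)
    net = nn.Linear(4, 2)
    opt = torch.optim.SGD(net.parameters(), lr=0.1)
    save_checkpoint(get_checkpoint_state(net, opt, epoch=3), '/tmp/toy_ckpt')
    epoch = load_checkpoint(net, opt, '/tmp/toy_ckpt.pth', map_location='cpu',
                            logger=logging.getLogger(__name__))
    print('resumed from epoch', epoch)  # expected: 3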

# ===== monodle-main/lib/helpers/optimizer_helper.py =====
import math
import torch
import torch.optim as optim
from torch.optim.optimizer import Optimizer
def build_optimizer(cfg_optimizer, model):
weights, biases = [], []
for name, param in model.named_parameters():
if 'bias' in name:
biases += [param]
else:
weights += [param]
parameters = [{'params': biases, 'weight_decay': 0},
{'params': weights, 'weight_decay': cfg_optimizer['weight_decay']}]
if cfg_optimizer['type'] == 'sgd':
optimizer = optim.SGD(parameters, lr=cfg_optimizer['lr'], momentum=0.9)
elif cfg_optimizer['type'] == 'adam':
optimizer = optim.Adam(parameters, lr=cfg_optimizer['lr'])
elif cfg_optimizer['type'] == 'adamw':
optimizer = AdamW(parameters, lr=cfg_optimizer['lr'])
else:
raise NotImplementedError("%s optimizer is not supported" % cfg_optimizer['type'])
return optimizer
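
# Hypothetical usage sketch; the cfg keys mirror those read above:
#   optimizer = build_optimizer({'type': 'adam', 'lr': 1e-3,
#                                'weight_decay': 1e-5}, model)
# Note that biases are placed in a group with weight_decay=0, a common trick
# to avoid regularizing bias terms.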
class AdamW(Optimizer):
"""Implements Adam algorithm.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
.. _Adam\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# if group['weight_decay'] != 0:
# grad = grad.add(group['weight_decay'], p.data)
# Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
# p.data.addcdiv_(-step_size, exp_avg, denom)
                p.data.add_(torch.mul(p.data, group['weight_decay']).addcdiv_(exp_avg, denom, value=1), alpha=-step_size)
        return loss

# ===== monodle-main/lib/helpers/model_helper.py =====
from lib.models.centernet3d import CenterNet3D
def build_model(cfg):
if cfg['type'] == 'centernet3d':
return CenterNet3D(backbone=cfg['backbone'], neck=cfg['neck'], num_class=cfg['num_class'])
else:
raise NotImplementedError("%s model is not supported" % cfg['type'])
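
# Hypothetical usage sketch; the cfg keys mirror those read above:
#   model = build_model({'type': 'centernet3d', 'backbone': 'dla34',
#                        'neck': 'DLAUp', 'num_class': 3})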

# ===== monodle-main/lib/helpers/dataloader_helper.py =====
import torch
import numpy as np
from torch.utils.data import DataLoader
from lib.datasets.kitti.kitti_dataset import KITTI_Dataset
# init datasets and dataloaders
def my_worker_init_fn(worker_id):
np.random.seed(np.random.get_state()[1][0] + worker_id)
def build_dataloader(cfg, workers=4):
    # prepare dataset
if cfg['type'] == 'KITTI':
train_set = KITTI_Dataset(split='train', cfg=cfg)
test_set = KITTI_Dataset(split='val', cfg=cfg)
else:
raise NotImplementedError("%s dataset is not supported" % cfg['type'])
# prepare dataloader
train_loader = DataLoader(dataset=train_set,
batch_size=cfg['batch_size'],
num_workers=workers,
worker_init_fn=my_worker_init_fn,
shuffle=True,
pin_memory=False,
drop_last=True)
test_loader = DataLoader(dataset=test_set,
batch_size=cfg['batch_size'],
num_workers=workers,
worker_init_fn=my_worker_init_fn,
shuffle=False,
pin_memory=False,
drop_last=False)
return train_loader, test_loader
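
# Hypothetical usage sketch, assuming the KITTI layout expected by
# KITTI_Dataset (the same cfg dict is forwarded to the dataset itself):
#   train_loader, test_loader = build_dataloader(
#       {'type': 'KITTI', 'root_dir': '../../data/KITTI', 'batch_size': 16},
#       workers=4)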

# ===== monodle-main/lib/helpers/tester_helper.py =====
import os
import tqdm
import torch
from lib.helpers.save_helper import load_checkpoint
from lib.helpers.decode_helper import extract_dets_from_outputs
from lib.helpers.decode_helper import decode_detections
class Tester(object):
def __init__(self, cfg, model, dataloader, logger, eval=False):
self.cfg = cfg
self.model = model
self.dataloader = dataloader
        self.max_objs = dataloader.dataset.max_objs  # max objects per image, defined in dataset
self.class_name = dataloader.dataset.class_name
self.output_dir = './outputs'
self.dataset_type = cfg.get('type', 'KITTI')
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.logger = logger
self.eval = eval
def test(self):
assert self.cfg['mode'] in ['single', 'all']
# test a single checkpoint
if self.cfg['mode'] == 'single':
assert os.path.exists(self.cfg['checkpoint'])
load_checkpoint(model=self.model,
optimizer=None,
filename=self.cfg['checkpoint'],
map_location=self.device,
logger=self.logger)
self.model.to(self.device)
self.inference()
self.evaluate()
# test all checkpoints in the given dir
if self.cfg['mode'] == 'all':
checkpoints_list = []
for _, _, files in os.walk(self.cfg['checkpoints_dir']):
checkpoints_list = [os.path.join(self.cfg['checkpoints_dir'], f) for f in files if f.endswith(".pth")]
checkpoints_list.sort(key=os.path.getmtime)
for checkpoint in checkpoints_list:
load_checkpoint(model=self.model,
optimizer=None,
filename=checkpoint,
map_location=self.device,
logger=self.logger)
self.model.to(self.device)
self.inference()
self.evaluate()
def inference(self):
torch.set_grad_enabled(False)
self.model.eval()
results = {}
progress_bar = tqdm.tqdm(total=len(self.dataloader), leave=True, desc='Evaluation Progress')
for batch_idx, (inputs, _, info) in enumerate(self.dataloader):
# load evaluation data and move data to GPU.
inputs = inputs.to(self.device)
outputs = self.model(inputs)
dets = extract_dets_from_outputs(outputs=outputs, K=self.max_objs)
dets = dets.detach().cpu().numpy()
# get corresponding calibs & transform tensor to numpy
calibs = [self.dataloader.dataset.get_calib(index) for index in info['img_id']]
info = {key: val.detach().cpu().numpy() for key, val in info.items()}
cls_mean_size = self.dataloader.dataset.cls_mean_size
dets = decode_detections(dets=dets,
info=info,
calibs=calibs,
cls_mean_size=cls_mean_size,
threshold=self.cfg.get('threshold', 0.2))
results.update(dets)
progress_bar.update()
progress_bar.close()
# save the result for evaluation.
self.logger.info('==> Saving ...')
self.save_results(results)
def save_results(self, results, output_dir='./outputs'):
output_dir = os.path.join(output_dir, 'data')
os.makedirs(output_dir, exist_ok=True)
for img_id in results.keys():
if self.dataset_type == 'KITTI':
output_path = os.path.join(output_dir, '{:06d}.txt'.format(img_id))
else:
os.makedirs(os.path.join(output_dir, self.dataloader.dataset.get_sensor_modality(img_id)), exist_ok=True)
output_path = os.path.join(output_dir,
self.dataloader.dataset.get_sensor_modality(img_id),
self.dataloader.dataset.get_sample_token(img_id) + '.txt')
f = open(output_path, 'w')
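            # each output line follows the KITTI label format:
            # type, truncation (0.0), occlusion (0), alpha, bbox2d (4), h w l, x y z, ry, score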
for i in range(len(results[img_id])):
class_name = self.class_name[int(results[img_id][i][0])]
f.write('{} 0.0 0'.format(class_name))
for j in range(1, len(results[img_id][i])):
f.write(' {:.2f}'.format(results[img_id][i][j]))
f.write('\n')
f.close()
def evaluate(self):
self.dataloader.dataset.eval(results_dir='./outputs/data', logger=self.logger)

# ===== monodle-main/lib/helpers/utils_helper.py =====
import torch
import numpy as np
import logging
import random
def create_logger(log_file, rank=0):
log_format = '%(asctime)s %(levelname)5s %(message)s'
logging.basicConfig(level=logging.INFO if rank == 0 else 'ERROR',
format=log_format,
filename=log_file)
console = logging.StreamHandler()
console.setLevel(logging.INFO if rank == 0 else 'ERROR')
console.setFormatter(logging.Formatter(log_format))
logging.getLogger(__name__).addHandler(console)
return logging.getLogger(__name__)
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed ** 2)
torch.manual_seed(seed ** 3)
torch.cuda.manual_seed(seed ** 4)
torch.cuda.manual_seed_all(seed ** 4)
torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

# ===== monodle-main/lib/models/centernet3d.py =====
import os
import cv2
import torch
import torch.nn as nn
import numpy as np
from lib.backbones import dla
from lib.backbones.dlaup import DLAUp
from lib.backbones.hourglass import get_large_hourglass_net
from lib.backbones.hourglass import load_pretrian_model
class CenterNet3D(nn.Module):
def __init__(self, backbone='dla34', neck='DLAUp', num_class=3, downsample=4):
"""
CenterNet for monocular 3D object detection.
:param backbone: the backbone of pipeline, such as dla34.
:param neck: the necks of detection, such as dla_up.
:param downsample: the ratio of down sample. [4, 8, 16, 32]
:param head_conv: the channels of convolution in head. default: 256
"""
assert downsample in [4, 8, 16, 32]
super().__init__()
self.heads = {'heatmap': num_class, 'offset_2d': 2, 'size_2d' :2, 'depth': 2, 'offset_3d': 2, 'size_3d':3, 'heading': 24}
self.backbone = getattr(dla, backbone)(pretrained=True, return_levels=True)
channels = self.backbone.channels # channels list for feature maps generated by backbone
self.first_level = int(np.log2(downsample))
scales = [2 ** i for i in range(len(channels[self.first_level:]))]
self.neck = DLAUp(channels[self.first_level:], scales_list=scales) # feature fusion [such as DLAup, FPN]
# initialize the head of pipeline, according to heads setting.
for head in self.heads.keys():
output_channels = self.heads[head]
fc = nn.Sequential(
nn.Conv2d(channels[self.first_level], 256, kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(256, output_channels, kernel_size=1, stride=1, padding=0, bias=True))
# initialization
            if 'heatmap' in head:
                fc[-1].bias.data.fill_(-2.19)  # prior: sigmoid(-2.19) ~ 0.1, a focal-loss-friendly init
else:
self.fill_fc_weights(fc)
self.__setattr__(head, fc)
def forward(self, input):
feat = self.backbone(input)
feat = self.neck(feat[self.first_level:])
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(feat)
return ret
def fill_fc_weights(self, layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if __name__ == '__main__':
import torch
net = CenterNet3D(backbone='dla34')
print(net)
input = torch.randn(4, 3, 384, 1280)
print(input.shape, input.dtype)
output = net(input)
print(output.keys())

# ===== monodle-main/lib/datasets/utils.py =====
''' some auxiliary functions for all datasets '''
import numpy as np
import cv2
num_heading_bin = 12 # hyper param
def angle2class(angle):
''' Convert continuous angle to discrete class and residual. '''
angle = angle % (2 * np.pi)
assert (angle >= 0 and angle <= 2 * np.pi)
angle_per_class = 2 * np.pi / float(num_heading_bin)
shifted_angle = (angle + angle_per_class / 2) % (2 * np.pi)
class_id = int(shifted_angle / angle_per_class)
residual_angle = shifted_angle - (class_id * angle_per_class + angle_per_class / 2)
return class_id, residual_angle
def class2angle(cls, residual, to_label_format=False):
''' Inverse function to angle2class. '''
angle_per_class = 2 * np.pi / float(num_heading_bin)
angle_center = cls * angle_per_class
angle = angle_center + residual
if to_label_format and angle > np.pi:
angle = angle - 2 * np.pi
return angle
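
# Round-trip sketch: class2angle inverts angle2class up to the [-pi, pi]
# wrapping applied by to_label_format, e.g.
#   cls, res = angle2class(0.7)                  # -> (1, ~0.176) with 12 bins
#   class2angle(cls, res, to_label_format=True)  # -> ~0.7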
def gaussian_radius(bbox_size, min_overlap=0.7):
    # CornerNet-style heuristic: the largest Gaussian radius such that a box
    # whose corners shift by the radius still has IoU >= min_overlap with GT
height, width = bbox_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
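
# Usage sketch: stamp a Gaussian for a box of height 20 / width 40 pixels
# centered at (x=50, y=30) on a single-channel 96 x 320 heatmap:
#   hm = np.zeros((96, 320), dtype=np.float32)
#   r = max(0, int(gaussian_radius((20, 40))))
#   draw_umich_gaussian(hm, (50, 30), r)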
def draw_msra_gaussian(heatmap, center, sigma):
tmp_size = sigma * 3
mu_x = int(center[0] + 0.5)
mu_y = int(center[1] + 0.5)
    h, w = heatmap.shape[0], heatmap.shape[1]  # heatmap is H x W
    ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
    br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
    if ul[0] >= w or ul[1] >= h or br[0] < 0 or br[1] < 0:
        return heatmap
    size = 2 * tmp_size + 1
    x = np.arange(0, size, 1, np.float32)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    g_x = max(0, -ul[0]), min(br[0], w) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], h) - ul[1]
    img_x = max(0, ul[0]), min(br[0], w)
    img_y = max(0, ul[1]), min(br[1], h)
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
g[g_y[0]:g_y[1], g_x[0]:g_x[1]])
return heatmap
def draw_projected_box3d(image, corners3d, color=(255, 255, 255), thickness=1):
''' Draw 3d bounding box in image
input:
image: RGB image
corners3d: (8,3) array of vertices (in image plane) for the 3d box in following order:
1 -------- 0
/| /|
2 -------- 3 .
| | | |
. 5 -------- 4
|/ |/
6 -------- 7
'''
corners3d = corners3d.astype(np.int32)
for k in range(0, 4):
i, j = k, (k + 1) % 4
cv2.line(image, (corners3d[i, 0], corners3d[i, 1]), (corners3d[j, 0], corners3d[j, 1]), color, thickness, lineType=cv2.LINE_AA)
i, j = k + 4, (k + 1) % 4 + 4
cv2.line(image, (corners3d[i, 0], corners3d[i, 1]), (corners3d[j, 0], corners3d[j, 1]), color, thickness, lineType=cv2.LINE_AA)
i, j = k, k + 4
cv2.line(image, (corners3d[i, 0], corners3d[i, 1]), (corners3d[j, 0], corners3d[j, 1]), color, thickness, lineType=cv2.LINE_AA)
    return image

# ===== monodle-main/lib/datasets/kitti/kitti_dataset.py =====
import os
import numpy as np
import torch.utils.data as data
from PIL import Image
from lib.datasets.utils import angle2class
from lib.datasets.utils import gaussian_radius
from lib.datasets.utils import draw_umich_gaussian
from lib.datasets.kitti.kitti_utils import get_objects_from_label
from lib.datasets.kitti.kitti_utils import Calibration
from lib.datasets.kitti.kitti_utils import get_affine_transform
from lib.datasets.kitti.kitti_utils import affine_transform
from lib.datasets.kitti.kitti_eval_python.eval import get_official_eval_result
from lib.datasets.kitti.kitti_eval_python.eval import get_distance_eval_result
import lib.datasets.kitti.kitti_eval_python.kitti_common as kitti
class KITTI_Dataset(data.Dataset):
def __init__(self, split, cfg):
# basic configuration
self.root_dir = cfg.get('root_dir', '../../data/KITTI')
self.split = split
self.num_classes = 3
self.max_objs = 50
self.class_name = ['Pedestrian', 'Car', 'Cyclist']
self.cls2id = {'Pedestrian': 0, 'Car': 1, 'Cyclist': 2}
self.resolution = np.array([1280, 384]) # W * H
self.use_3d_center = cfg.get('use_3d_center', True)
self.writelist = cfg.get('writelist', ['Car'])
# anno: use src annotations as GT, proj: use projected 2d bboxes as GT
self.bbox2d_type = cfg.get('bbox2d_type', 'anno')
assert self.bbox2d_type in ['anno', 'proj']
self.meanshape = cfg.get('meanshape', False)
self.class_merging = cfg.get('class_merging', False)
self.use_dontcare = cfg.get('use_dontcare', False)
if self.class_merging:
self.writelist.extend(['Van', 'Truck'])
if self.use_dontcare:
self.writelist.extend(['DontCare'])
# data split loading
assert self.split in ['train', 'val', 'trainval', 'test']
self.split_file = os.path.join(self.root_dir, 'ImageSets', self.split + '.txt')
self.idx_list = [x.strip() for x in open(self.split_file).readlines()]
# path configuration
self.data_dir = os.path.join(self.root_dir, 'object', 'testing' if split == 'test' else 'training')
self.image_dir = os.path.join(self.data_dir, 'image_2')
self.depth_dir = os.path.join(self.data_dir, 'depth')
self.calib_dir = os.path.join(self.data_dir, 'calib')
self.label_dir = os.path.join(self.data_dir, 'label_2')
# data augmentation configuration
self.data_augmentation = True if split in ['train', 'trainval'] else False
self.random_flip = cfg.get('random_flip', 0.5)
self.random_crop = cfg.get('random_crop', 0.5)
self.scale = cfg.get('scale', 0.4)
self.shift = cfg.get('shift', 0.1)
# statistics
self.mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
self.std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
self.cls_mean_size = np.array([[1.76255119, 0.66068622, 0.84422524],
[1.52563191, 1.62856739, 3.52588311],
[1.73698127, 0.59706367, 1.76282397]], dtype=np.float32) # H*W*L
if not self.meanshape:
self.cls_mean_size = np.zeros_like(self.cls_mean_size, dtype=np.float32)
# others
self.downsample = 4
def get_image(self, idx):
img_file = os.path.join(self.image_dir, '%06d.png' % idx)
assert os.path.exists(img_file)
return Image.open(img_file)
def get_label(self, idx):
label_file = os.path.join(self.label_dir, '%06d.txt' % idx)
assert os.path.exists(label_file)
return get_objects_from_label(label_file)
def get_calib(self, idx):
calib_file = os.path.join(self.calib_dir, '%06d.txt' % idx)
assert os.path.exists(calib_file)
return Calibration(calib_file)
def eval(self, results_dir, logger):
logger.info("==> Loading detections and GTs...")
img_ids = [int(id) for id in self.idx_list]
dt_annos = kitti.get_label_annos(results_dir)
gt_annos = kitti.get_label_annos(self.label_dir, img_ids)
test_id = {'Car': 0, 'Pedestrian':1, 'Cyclist': 2}
logger.info('==> Evaluating (official) ...')
for category in self.writelist:
results_str, results_dict = get_official_eval_result(gt_annos, dt_annos, test_id[category])
logger.info(results_str)
def __len__(self):
        return len(self.idx_list)
def __getitem__(self, item):
# ============================ get inputs ===========================
index = int(self.idx_list[item]) # index mapping, get real data id
# image loading
img = self.get_image(index)
img_size = np.array(img.size)
features_size = self.resolution // self.downsample # W * H
# data augmentation for image
center = np.array(img_size) / 2
aug_scale, crop_size = 1.0, img_size
random_crop_flag, random_flip_flag = False, False
if self.data_augmentation:
if np.random.random() < self.random_flip:
random_flip_flag = True
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if np.random.random() < self.random_crop:
random_crop_flag = True
aug_scale = np.clip(np.random.randn() * self.scale + 1, 1 - self.scale, 1 + self.scale)
crop_size = img_size * aug_scale
center[0] += img_size[0] * np.clip(np.random.randn() * self.shift, -2 * self.shift, 2 * self.shift)
center[1] += img_size[1] * np.clip(np.random.randn() * self.shift, -2 * self.shift, 2 * self.shift)
# add affine transformation for 2d images.
trans, trans_inv = get_affine_transform(center, crop_size, 0, self.resolution, inv=1)
img = img.transform(tuple(self.resolution.tolist()),
method=Image.AFFINE,
data=tuple(trans_inv.reshape(-1).tolist()),
resample=Image.BILINEAR)
# image encoding
img = np.array(img).astype(np.float32) / 255.0
img = (img - self.mean) / self.std
img = img.transpose(2, 0, 1) # C * H * W
info = {'img_id': index,
'img_size': img_size,
'bbox_downsample_ratio': img_size/features_size}
if self.split == 'test':
return img, img, info # img / placeholder(fake label) / info
# ============================ get labels ==============================
objects = self.get_label(index)
calib = self.get_calib(index)
# computed 3d projected box
if self.bbox2d_type == 'proj':
for object in objects:
object.box2d_proj = np.array(calib.corners3d_to_img_boxes(object.generate_corners3d()[None, :])[0][0], dtype=np.float32)
object.box2d = object.box2d_proj.copy()
# data augmentation for labels
if random_flip_flag:
for object in objects:
[x1, _, x2, _] = object.box2d
object.box2d[0], object.box2d[2] = img_size[0] - x2, img_size[0] - x1
object.alpha = np.pi - object.alpha
object.ry = np.pi - object.ry
if object.alpha > np.pi: object.alpha -= 2 * np.pi # check range
if object.alpha < -np.pi: object.alpha += 2 * np.pi
if object.ry > np.pi: object.ry -= 2 * np.pi
if object.ry < -np.pi: object.ry += 2 * np.pi
# labels encoding
heatmap = np.zeros((self.num_classes, features_size[1], features_size[0]), dtype=np.float32) # C * H * W
size_2d = np.zeros((self.max_objs, 2), dtype=np.float32)
offset_2d = np.zeros((self.max_objs, 2), dtype=np.float32)
depth = np.zeros((self.max_objs, 1), dtype=np.float32)
heading_bin = np.zeros((self.max_objs, 1), dtype=np.int64)
heading_res = np.zeros((self.max_objs, 1), dtype=np.float32)
src_size_3d = np.zeros((self.max_objs, 3), dtype=np.float32)
size_3d = np.zeros((self.max_objs, 3), dtype=np.float32)
offset_3d = np.zeros((self.max_objs, 2), dtype=np.float32)
indices = np.zeros((self.max_objs), dtype=np.int64)
mask_2d = np.zeros((self.max_objs), dtype=np.uint8)
mask_3d = np.zeros((self.max_objs), dtype=np.uint8)
object_num = len(objects) if len(objects) < self.max_objs else self.max_objs
for i in range(object_num):
# filter objects by writelist
if objects[i].cls_type not in self.writelist:
continue
# filter inappropriate samples
if objects[i].level_str == 'UnKnown' or objects[i].pos[-1] < 2:
continue
# ignore the samples beyond the threshold [hard encoding]
threshold = 65
if objects[i].pos[-1] > threshold:
continue
# process 2d bbox & get 2d center
bbox_2d = objects[i].box2d.copy()
# add affine transformation for 2d boxes.
bbox_2d[:2] = affine_transform(bbox_2d[:2], trans)
bbox_2d[2:] = affine_transform(bbox_2d[2:], trans)
# modify the 2d bbox according to pre-compute downsample ratio
bbox_2d[:] /= self.downsample
# process 3d bbox & get 3d center
center_2d = np.array([(bbox_2d[0] + bbox_2d[2]) / 2, (bbox_2d[1] + bbox_2d[3]) / 2], dtype=np.float32) # W * H
center_3d = objects[i].pos + [0, -objects[i].h / 2, 0] # real 3D center in 3D space
center_3d = center_3d.reshape(-1, 3) # shape adjustment (N, 3)
center_3d, _ = calib.rect_to_img(center_3d) # project 3D center to image plane
center_3d = center_3d[0] # shape adjustment
if random_flip_flag: # random flip for center3d
center_3d[0] = img_size[0] - center_3d[0]
center_3d = affine_transform(center_3d.reshape(-1), trans)
center_3d /= self.downsample
# generate the center of gaussian heatmap [optional: 3d center or 2d center]
center_heatmap = center_3d.astype(np.int32) if self.use_3d_center else center_2d.astype(np.int32)
if center_heatmap[0] < 0 or center_heatmap[0] >= features_size[0]: continue
if center_heatmap[1] < 0 or center_heatmap[1] >= features_size[1]: continue
# generate the radius of gaussian heatmap
w, h = bbox_2d[2] - bbox_2d[0], bbox_2d[3] - bbox_2d[1]
radius = gaussian_radius((w, h))
radius = max(0, int(radius))
if objects[i].cls_type in ['Van', 'Truck', 'DontCare']:
draw_umich_gaussian(heatmap[1], center_heatmap, radius)
continue
cls_id = self.cls2id[objects[i].cls_type]
draw_umich_gaussian(heatmap[cls_id], center_heatmap, radius)
# encoding 2d/3d offset & 2d size
indices[i] = center_heatmap[1] * features_size[0] + center_heatmap[0]
offset_2d[i] = center_2d - center_heatmap
size_2d[i] = 1. * w, 1. * h
# encoding depth
depth[i] = objects[i].pos[-1] * aug_scale
# encoding heading angle
heading_angle = objects[i].alpha
heading_bin[i], heading_res[i] = angle2class(heading_angle)
# encoding 3d offset & size_3d
offset_3d[i] = center_3d - center_heatmap
src_size_3d[i] = np.array([objects[i].h, objects[i].w, objects[i].l], dtype=np.float32)
mean_size = self.cls_mean_size[self.cls2id[objects[i].cls_type]]
size_3d[i] = src_size_3d[i] - mean_size
mask_2d[i] = 1
mask_3d[i] = 0 if random_crop_flag else 1
# collect return data
inputs = img
targets = {'depth': depth,
'size_2d': size_2d,
'heatmap': heatmap,
'offset_2d': offset_2d,
'indices': indices,
'size_3d': size_3d,
'src_size_3d': src_size_3d,
'offset_3d': offset_3d,
'heading_bin': heading_bin,
'heading_res': heading_res,
'mask_2d': mask_2d,
'mask_3d': mask_3d}
info = {'img_id': index,
'img_size': img_size,
'bbox_downsample_ratio': img_size/features_size}
return inputs, targets, info
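
# Shapes of the encoded training targets returned by __getitem__
# (max_objs = 50, feature map is the 1280 x 384 input downsampled by 4):
#   heatmap (num_classes, 96, 320)    indices / mask_2d / mask_3d (50,)
#   offset_2d / offset_3d (50, 2)     size_2d (50, 2)
#   depth / heading_bin / heading_res (50, 1)
#   src_size_3d / size_3d (50, 3)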
if __name__ == '__main__':
from torch.utils.data import DataLoader
cfg = {'root_dir': '../../../data/KITTI',
'random_flip':0.0, 'random_crop':1.0, 'scale':0.8, 'shift':0.1, 'use_dontcare': False,
'class_merging': False, 'writelist':['Pedestrian', 'Car', 'Cyclist'], 'use_3d_center':False}
dataset = KITTI_Dataset('train', cfg)
dataloader = DataLoader(dataset=dataset, batch_size=1)
print(dataset.writelist)
for batch_idx, (inputs, targets, info) in enumerate(dataloader):
# test image
img = inputs[0].numpy().transpose(1, 2, 0)
img = (img * dataset.std + dataset.mean) * 255
img = Image.fromarray(img.astype(np.uint8))
img.show()
# print(targets['size_3d'][0][0])
# test heatmap
heatmap = targets['heatmap'][0] # image id
heatmap = Image.fromarray(heatmap[0].numpy() * 255) # cats id
heatmap.show()
break
    # print ground truth first
objects = dataset.get_label(0)
for object in objects:
print(object.to_kitti_format())

# ===== monodle-main/lib/datasets/kitti/kitti_utils.py =====
''' some auxiliary functions for KITTI dataset '''
import numpy as np
import cv2
################ Object3D ##################
def get_objects_from_label(label_file):
with open(label_file, 'r') as f:
lines = f.readlines()
objects = [Object3d(line) for line in lines]
return objects
class Object3d(object):
def __init__(self, line):
label = line.strip().split(' ')
self.src = line
self.cls_type = label[0]
self.trucation = float(label[1])
self.occlusion = float(label[2]) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
self.h = float(label[8])
self.w = float(label[9])
self.l = float(label[10])
self.pos = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.pos)
self.ry = float(label[14])
        self.score = float(label[15]) if len(label) == 16 else -1.0
self.level_str = None
self.level = self.get_obj_level()
def get_obj_level(self):
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if self.trucation == -1:
self.level_str = 'DontCare'
return 0
if height >= 40 and self.trucation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 1 # Easy
elif height >= 25 and self.trucation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 2 # Moderate
elif height >= 25 and self.trucation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 3 # Hard
else:
self.level_str = 'UnKnown'
return 4
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.l, self.h, self.w
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
[0, 1, 0],
[-np.sin(self.ry), 0, np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.pos
return corners3d
def to_bev_box2d(self, oblique=True, voxel_size=0.1):
"""
:param bev_shape: (2) for bev shape (h, w), => (y_max, x_max) in image
:param voxel_size: float, 0.1m
:param oblique:
:return: box2d (4, 2)/ (4) in image coordinate
"""
if oblique:
corners3d = self.generate_corners3d()
xz_corners = corners3d[0:4, [0, 2]]
box2d = np.zeros((4, 2), dtype=np.int32)
box2d[:, 0] = ((xz_corners[:, 0] - Object3d.MIN_XZ[0]) / voxel_size).astype(np.int32)
box2d[:, 1] = Object3d.BEV_SHAPE[0] - 1 - ((xz_corners[:, 1] - Object3d.MIN_XZ[1]) / voxel_size).astype(np.int32)
box2d[:, 0] = np.clip(box2d[:, 0], 0, Object3d.BEV_SHAPE[1])
box2d[:, 1] = np.clip(box2d[:, 1], 0, Object3d.BEV_SHAPE[0])
else:
box2d = np.zeros(4, dtype=np.int32)
# discrete_center = np.floor((self.pos / voxel_size)).astype(np.int32)
cu = np.floor((self.pos[0] - Object3d.MIN_XZ[0]) / voxel_size).astype(np.int32)
cv = Object3d.BEV_SHAPE[0] - 1 - ((self.pos[2] - Object3d.MIN_XZ[1]) / voxel_size).astype(np.int32)
half_l, half_w = int(self.l / voxel_size / 2), int(self.w / voxel_size / 2)
box2d[0], box2d[1] = cu - half_l, cv - half_w
box2d[2], box2d[3] = cu + half_l, cv + half_w
return box2d
def to_str(self):
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.cls_type, self.trucation, self.occlusion, self.alpha, self.box2d, self.h, self.w, self.l,
self.pos, self.ry)
return print_str
def to_kitti_format(self):
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.cls_type, self.trucation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.h, self.w, self.l, self.pos[0], self.pos[1], self.pos[2],
self.ry)
return kitti_str
################### calibration ###################
def get_calib_from_file(calib_file):
with open(calib_file) as f:
lines = f.readlines()
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32)
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32)
return {'P2': P2.reshape(3, 4),
'P3': P3.reshape(3, 4),
'R0': R0.reshape(3, 3),
'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}
class Calibration(object):
def __init__(self, calib_file):
if isinstance(calib_file, str):
calib = get_calib_from_file(calib_file)
else:
calib = calib_file
self.P2 = calib['P2'] # 3 x 4
self.R0 = calib['R0'] # 3 x 3
self.V2C = calib['Tr_velo2cam'] # 3 x 4
self.C2V = self.inverse_rigid_trans(self.V2C)
# Camera intrinsics and extrinsics
self.cu = self.P2[0, 2]
self.cv = self.P2[1, 2]
self.fu = self.P2[0, 0]
self.fv = self.P2[1, 1]
self.tx = self.P2[0, 3] / (-self.fu)
self.ty = self.P2[1, 3] / (-self.fv)
def cart_to_hom(self, pts):
"""
:param pts: (N, 3 or 2)
:return pts_hom: (N, 4 or 3)
"""
pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))
return pts_hom
def lidar_to_rect(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_rect: (N, 3)
"""
pts_lidar_hom = self.cart_to_hom(pts_lidar)
pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))
# pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))
return pts_rect
def rect_to_lidar(self, pts_rect):
pts_ref = np.transpose(np.dot(np.linalg.inv(self.R0), np.transpose(pts_rect)))
pts_ref = self.cart_to_hom(pts_ref) # nx4
return np.dot(pts_ref, np.transpose(self.C2V))
def rect_to_img(self, pts_rect):
"""
:param pts_rect: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect_hom = self.cart_to_hom(pts_rect)
pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)
pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)
pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord
return pts_img, pts_rect_depth
def lidar_to_img(self, pts_lidar):
"""
:param pts_lidar: (N, 3)
:return pts_img: (N, 2)
"""
pts_rect = self.lidar_to_rect(pts_lidar)
pts_img, pts_depth = self.rect_to_img(pts_rect)
return pts_img, pts_depth
def img_to_rect(self, u, v, depth_rect):
"""
:param u: (N)
:param v: (N)
:param depth_rect: (N)
:return:
"""
x = ((u - self.cu) * depth_rect) / self.fu + self.tx
y = ((v - self.cv) * depth_rect) / self.fv + self.ty
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)
return pts_rect
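    # sanity-check sketch: at the principal point (u = cu, v = cv) the
    # (u - cu) and (v - cv) terms vanish, so img_to_rect returns (tx, ty, depth)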
def depthmap_to_rect(self, depth_map):
"""
:param depth_map: (H, W), depth_map
:return:
"""
x_range = np.arange(0, depth_map.shape[1])
y_range = np.arange(0, depth_map.shape[0])
x_idxs, y_idxs = np.meshgrid(x_range, y_range)
x_idxs, y_idxs = x_idxs.reshape(-1), y_idxs.reshape(-1)
depth = depth_map[y_idxs, x_idxs]
pts_rect = self.img_to_rect(x_idxs, y_idxs, depth)
return pts_rect, x_idxs, y_idxs
def corners3d_to_img_boxes(self, corners3d):
"""
:param corners3d: (N, 8, 3) corners in rect coordinate
:return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate
:return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate
"""
sample_num = corners3d.shape[0]
corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 4)
img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)
x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]
x1, y1 = np.min(x, axis=1), np.min(y, axis=1)
x2, y2 = np.max(x, axis=1), np.max(y, axis=1)
boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)
boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)
return boxes, boxes_corner
def camera_dis_to_rect(self, u, v, d):
"""
Can only process valid u, v, d, which means u, v can not beyond the image shape, reprojection error 0.02
:param u: (N)
:param v: (N)
:param d: (N), the distance between camera and 3d points, d^2 = x^2 + y^2 + z^2
:return:
"""
assert self.fu == self.fv, '%.8f != %.8f' % (self.fu, self.fv)
fd = np.sqrt((u - self.cu) ** 2 + (v - self.cv) ** 2 + self.fu ** 2)
x = ((u - self.cu) * d) / fd + self.tx
y = ((v - self.cv) * d) / fd + self.ty
z = np.sqrt(d ** 2 - x ** 2 - y ** 2)
pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)), axis=1)
return pts_rect
def inverse_rigid_trans(self, Tr):
''' Inverse a rigid body transform matrix (3x4 as [R|t])
[R'|-R't; 0|1]
'''
inv_Tr = np.zeros_like(Tr) # 3x4
inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])
inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
return inv_Tr
def alpha2ry(self, alpha, u):
"""
Get rotation_y by alpha + theta - 180
alpha : Observation angle of object, ranging [-pi..pi]
x : Object center x to the camera center (x-W/2), in pixels
rotation_y : Rotation ry around Y-axis in camera coordinates [-pi..pi]
"""
ry = alpha + np.arctan2(u - self.cu, self.fu)
if ry > np.pi:
ry -= 2 * np.pi
if ry < -np.pi:
ry += 2 * np.pi
return ry
def ry2alpha(self, ry, u):
alpha = ry - np.arctan2(u - self.cu, self.fu)
if alpha > np.pi:
alpha -= 2 * np.pi
if alpha < -np.pi:
alpha += 2 * np.pi
return alpha
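    # ry2alpha is the inverse of alpha2ry: for any pixel column u,
    # ry2alpha(alpha2ry(a, u), u) recovers a up to the [-pi, pi] wrapping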
################### affine trainsform ###################
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
trans_inv = cv2.getAffineTransform(np.float32(dst), np.float32(src))
return trans, trans_inv
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
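
# Usage sketch mirroring KITTI_Dataset.__getitem__ (image size assumed to be
# the common KITTI 1242 x 375): build the crop-and-resize transform, then map
# a 2D point from the source image into network input coordinates.
#   trans = get_affine_transform(np.array([621., 187.5]), np.array([1242., 375.]),
#                                0, [1280, 384])
#   pt = affine_transform(np.array([100., 200.]), trans)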
if __name__ == '__main__':
from lib.datasets.kitti.kitti_dataset import KITTI_Dataset
cfg = {'root_dir': '../../../data'}
dataset = KITTI_Dataset('train', cfg)
# calib testing
    # we project centers of 3D objects to the image plane
index = 1
calib = dataset.get_calib(index)
objects = dataset.get_label(index)
for object in objects:
print(object.to_kitti_format())
object.pos[0] *= 1
center_3d = object.pos + [0, -object.h/2, 0] # real 3D center
center_3d = center_3d.reshape(-1, 3) #(N, 3)
center_3d_projected, depth = calib.rect_to_img(center_3d)
box2d = object.box2d
center_2d = [(box2d[0]+box2d[2])/2, (box2d[1]+box2d[3])/2]
        print('3D center/2D center/projected 3D center:', center_3d, center_2d, center_3d_projected)
print('alpha ---> ry ', object.alpha, calib.alpha2ry(object.alpha, center_2d[0]))
        break

# ===== monodle-main/lib/datasets/kitti/kitti_eval_python/rotate_iou.py =====
#####################
# Based on https://github.com/hongzhenwang/RRPN-revise
# Licensed under The MIT License
# Author: yanyan, scrin@foxmail.com
#####################
import math
import numba
import numpy as np
from numba import cuda
@numba.jit(nopython=True)
def div_up(m, n):
return m // n + (m % n > 0)
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def trangle_area(a, b, c):
return ((a[0] - c[0]) * (b[1] - c[1]) - (a[1] - c[1]) *
(b[0] - c[0])) / 2.0
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
area_val = 0.0
for i in range(num_of_inter - 2):
area_val += abs(
trangle_area(int_pts[:2], int_pts[2 * i + 2:2 * i + 4],
int_pts[2 * i + 4:2 * i + 6]))
return area_val
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def sort_vertex_in_convex_polygon(int_pts, num_of_inter):
if num_of_inter > 0:
center = cuda.local.array((2, ), dtype=numba.float32)
center[:] = 0.0
for i in range(num_of_inter):
center[0] += int_pts[2 * i]
center[1] += int_pts[2 * i + 1]
center[0] /= num_of_inter
center[1] /= num_of_inter
v = cuda.local.array((2, ), dtype=numba.float32)
vs = cuda.local.array((16, ), dtype=numba.float32)
for i in range(num_of_inter):
v[0] = int_pts[2 * i] - center[0]
v[1] = int_pts[2 * i + 1] - center[1]
d = math.sqrt(v[0] * v[0] + v[1] * v[1])
v[0] = v[0] / d
v[1] = v[1] / d
if v[1] < 0:
v[0] = -2 - v[0]
vs[i] = v[0]
j = 0
temp = 0
for i in range(1, num_of_inter):
if vs[i - 1] > vs[i]:
temp = vs[i]
tx = int_pts[2 * i]
ty = int_pts[2 * i + 1]
j = i
while j > 0 and vs[j - 1] > temp:
vs[j] = vs[j - 1]
int_pts[j * 2] = int_pts[j * 2 - 2]
int_pts[j * 2 + 1] = int_pts[j * 2 - 1]
j -= 1
vs[j] = temp
int_pts[j * 2] = tx
int_pts[j * 2 + 1] = ty
@cuda.jit(
'(float32[:], float32[:], int32, int32, float32[:])',
device=True,
inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
A = cuda.local.array((2, ), dtype=numba.float32)
B = cuda.local.array((2, ), dtype=numba.float32)
C = cuda.local.array((2, ), dtype=numba.float32)
D = cuda.local.array((2, ), dtype=numba.float32)
A[0] = pts1[2 * i]
A[1] = pts1[2 * i + 1]
B[0] = pts1[2 * ((i + 1) % 4)]
B[1] = pts1[2 * ((i + 1) % 4) + 1]
C[0] = pts2[2 * j]
C[1] = pts2[2 * j + 1]
D[0] = pts2[2 * ((j + 1) % 4)]
D[1] = pts2[2 * ((j + 1) % 4) + 1]
BA0 = B[0] - A[0]
BA1 = B[1] - A[1]
DA0 = D[0] - A[0]
CA0 = C[0] - A[0]
DA1 = D[1] - A[1]
CA1 = C[1] - A[1]
acd = DA1 * CA0 > CA1 * DA0
bcd = (D[1] - B[1]) * (C[0] - B[0]) > (C[1] - B[1]) * (D[0] - B[0])
if acd != bcd:
abc = CA1 * BA0 > BA1 * CA0
abd = DA1 * BA0 > BA1 * DA0
if abc != abd:
DC0 = D[0] - C[0]
DC1 = D[1] - C[1]
ABBA = A[0] * B[1] - B[0] * A[1]
CDDC = C[0] * D[1] - D[0] * C[1]
DH = BA1 * DC0 - BA0 * DC1
Dx = ABBA * DC0 - BA0 * CDDC
Dy = ABBA * DC1 - BA1 * CDDC
temp_pts[0] = Dx / DH
temp_pts[1] = Dy / DH
return True
return False
@cuda.jit(
'(float32[:], float32[:], int32, int32, float32[:])',
device=True,
inline=True)
def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts):
a = cuda.local.array((2, ), dtype=numba.float32)
b = cuda.local.array((2, ), dtype=numba.float32)
c = cuda.local.array((2, ), dtype=numba.float32)
d = cuda.local.array((2, ), dtype=numba.float32)
a[0] = pts1[2 * i]
a[1] = pts1[2 * i + 1]
b[0] = pts1[2 * ((i + 1) % 4)]
b[1] = pts1[2 * ((i + 1) % 4) + 1]
c[0] = pts2[2 * j]
c[1] = pts2[2 * j + 1]
d[0] = pts2[2 * ((j + 1) % 4)]
d[1] = pts2[2 * ((j + 1) % 4) + 1]
area_abc = trangle_area(a, b, c)
area_abd = trangle_area(a, b, d)
if area_abc * area_abd >= 0:
return False
area_cda = trangle_area(c, d, a)
area_cdb = area_cda + area_abc - area_abd
if area_cda * area_cdb >= 0:
return False
t = area_cda / (area_abd - area_abc)
dx = t * (b[0] - a[0])
dy = t * (b[1] - a[1])
temp_pts[0] = a[0] + dx
temp_pts[1] = a[1] + dy
return True
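# Illustrative note (not part of the original file): the variant above uses
# signed triangle areas to test whether segments AB and CD straddle each
# other, then recovers the intersection point from the area ratio
# t = area(cda) / (area(abd) - area(abc)), i.e. P = A + t * (B - A).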
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
ab0 = corners[2] - corners[0]
ab1 = corners[3] - corners[1]
ad0 = corners[6] - corners[0]
ad1 = corners[7] - corners[1]
ap0 = pt_x - corners[0]
ap1 = pt_y - corners[1]
abab = ab0 * ab0 + ab1 * ab1
abap = ab0 * ap0 + ab1 * ap1
adad = ad0 * ad0 + ad1 * ad1
adap = ad0 * ap0 + ad1 * ap1
return abab >= abap and abap >= 0 and adad >= adap and adap >= 0
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def quadrilateral_intersection(pts1, pts2, int_pts):
num_of_inter = 0
for i in range(4):
if point_in_quadrilateral(pts1[2 * i], pts1[2 * i + 1], pts2):
int_pts[num_of_inter * 2] = pts1[2 * i]
int_pts[num_of_inter * 2 + 1] = pts1[2 * i + 1]
num_of_inter += 1
if point_in_quadrilateral(pts2[2 * i], pts2[2 * i + 1], pts1):
int_pts[num_of_inter * 2] = pts2[2 * i]
int_pts[num_of_inter * 2 + 1] = pts2[2 * i + 1]
num_of_inter += 1
temp_pts = cuda.local.array((2, ), dtype=numba.float32)
for i in range(4):
for j in range(4):
has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)
if has_pts:
int_pts[num_of_inter * 2] = temp_pts[0]
int_pts[num_of_inter * 2 + 1] = temp_pts[1]
num_of_inter += 1
return num_of_inter
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
# generate clockwise corners and rotate it clockwise
angle = rbbox[4]
a_cos = math.cos(angle)
a_sin = math.sin(angle)
center_x = rbbox[0]
center_y = rbbox[1]
x_d = rbbox[2]
y_d = rbbox[3]
corners_x = cuda.local.array((4, ), dtype=numba.float32)
corners_y = cuda.local.array((4, ), dtype=numba.float32)
corners_x[0] = -x_d / 2
corners_x[1] = -x_d / 2
corners_x[2] = x_d / 2
corners_x[3] = x_d / 2
corners_y[0] = -y_d / 2
corners_y[1] = y_d / 2
corners_y[2] = y_d / 2
corners_y[3] = -y_d / 2
for i in range(4):
corners[2 *
i] = a_cos * corners_x[i] + a_sin * corners_y[i] + center_x
corners[2 * i
+ 1] = -a_sin * corners_x[i] + a_cos * corners_y[i] + center_y
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def inter(rbbox1, rbbox2):
corners1 = cuda.local.array((8, ), dtype=numba.float32)
corners2 = cuda.local.array((8, ), dtype=numba.float32)
intersection_corners = cuda.local.array((16, ), dtype=numba.float32)
rbbox_to_corners(corners1, rbbox1)
rbbox_to_corners(corners2, rbbox2)
num_intersection = quadrilateral_intersection(corners1, corners2,
intersection_corners)
sort_vertex_in_convex_polygon(intersection_corners, num_intersection)
# print(intersection_corners.reshape([-1, 2])[:num_intersection])
return area(intersection_corners, num_intersection)
@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True)
def devRotateIoUEval(rbox1, rbox2, criterion=-1):
area1 = rbox1[2] * rbox1[3]
area2 = rbox2[2] * rbox2[3]
area_inter = inter(rbox1, rbox2)
if criterion == -1:
return area_inter / (area1 + area2 - area_inter)
elif criterion == 0:
return area_inter / area1
elif criterion == 1:
return area_inter / area2
else:
return area_inter
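# Illustrative note (not part of the original file): `criterion` picks the
# denominator: -1 -> IoU, 0 -> intersection / area(box1), 1 -> intersection /
# area(box2), anything else -> raw intersection area. A host-side cross-check
# of the IoU path, assuming shapely is available (kept commented out to avoid
# adding a dependency):
#   from shapely.geometry import Polygon
#   def _riou_reference(b1, b2):  # each box: (cx, cy, w, h, angle)
#       def poly(b):
#           cx, cy, w, h, ang = b
#           ca, sa = math.cos(ang), math.sin(ang)
#           pts = [(-w / 2, -h / 2), (-w / 2, h / 2),
#                  (w / 2, h / 2), (w / 2, -h / 2)]
#           return Polygon([(ca * x + sa * y + cx, -sa * x + ca * y + cy)
#                           for x, y in pts])
#       p1, p2 = poly(b1), poly(b2)
#       inter = p1.intersection(p2).area
#       return inter / (p1.area + p2.area - inter)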
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=-1):
threadsPerBlock = 8 * 8
row_start = cuda.blockIdx.x
col_start = cuda.blockIdx.y
tx = cuda.threadIdx.x
row_size = min(N - row_start * threadsPerBlock, threadsPerBlock)
col_size = min(K - col_start * threadsPerBlock, threadsPerBlock)
block_boxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
block_qboxes = cuda.shared.array(shape=(64 * 5, ), dtype=numba.float32)
dev_query_box_idx = threadsPerBlock * col_start + tx
dev_box_idx = threadsPerBlock * row_start + tx
if (tx < col_size):
block_qboxes[tx * 5 + 0] = dev_query_boxes[dev_query_box_idx * 5 + 0]
block_qboxes[tx * 5 + 1] = dev_query_boxes[dev_query_box_idx * 5 + 1]
block_qboxes[tx * 5 + 2] = dev_query_boxes[dev_query_box_idx * 5 + 2]
block_qboxes[tx * 5 + 3] = dev_query_boxes[dev_query_box_idx * 5 + 3]
block_qboxes[tx * 5 + 4] = dev_query_boxes[dev_query_box_idx * 5 + 4]
if (tx < row_size):
block_boxes[tx * 5 + 0] = dev_boxes[dev_box_idx * 5 + 0]
block_boxes[tx * 5 + 1] = dev_boxes[dev_box_idx * 5 + 1]
block_boxes[tx * 5 + 2] = dev_boxes[dev_box_idx * 5 + 2]
block_boxes[tx * 5 + 3] = dev_boxes[dev_box_idx * 5 + 3]
block_boxes[tx * 5 + 4] = dev_boxes[dev_box_idx * 5 + 4]
cuda.syncthreads()
if tx < row_size:
for i in range(col_size):
offset = row_start * threadsPerBlock * K + col_start * threadsPerBlock + tx * K + i
dev_iou[offset] = devRotateIoUEval(block_qboxes[i * 5:i * 5 + 5],
block_boxes[tx * 5:tx * 5 + 5], criterion)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=-1, device_id=0):
"""rotated box iou running in gpu. 500x faster than cpu version
(take 5ms in one example with numba.cuda code).
convert from [this project](
https://github.com/hongzhenwang/RRPN-revise/tree/master/pcdet/rotation).
Args:
boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
angles(clockwise when positive)
query_boxes (float tensor: [K, 5]): [description]
device_id (int, optional): Defaults to 0. [description]
Returns:
[type]: [description]
"""
box_dtype = boxes.dtype
boxes = boxes.astype(np.float32)
query_boxes = query_boxes.astype(np.float32)
N = boxes.shape[0]
K = query_boxes.shape[0]
iou = np.zeros((N, K), dtype=np.float32)
if N == 0 or K == 0:
return iou
threadsPerBlock = 8 * 8
cuda.select_device(device_id)
blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
stream = cuda.stream()
with stream.auto_synchronize():
boxes_dev = cuda.to_device(boxes.reshape([-1]), stream)
query_boxes_dev = cuda.to_device(query_boxes.reshape([-1]), stream)
iou_dev = cuda.to_device(iou.reshape([-1]), stream)
rotate_iou_kernel_eval[blockspergrid, threadsPerBlock, stream](
N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
iou_dev.copy_to_host(iou.reshape([-1]), stream=stream)
return iou.astype(boxes.dtype)
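# Usage sketch (not part of the original file): pairwise rotated IoU between
# two sets of BEV boxes in (center_x, center_y, w, h, angle) format; requires
# a CUDA-capable device, hence kept commented out:
#   boxes = np.array([[0., 0., 4., 2., 0.]], dtype=np.float32)
#   queries = np.array([[0., 0., 4., 2., 0.],
#                       [1., 0., 4., 2., 0.5]], dtype=np.float32)
#   print(rotate_iou_gpu_eval(boxes, queries))  # shape (1, 2); first entry 1.0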
| 11,552 | 33.903323 | 95 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_eval_python/evaluate.py | import time
import fire
from . import kitti_common as kitti
from .eval import get_coco_eval_result, get_official_eval_result
def _read_imageset_file(path):
with open(path, 'r') as f:
lines = f.readlines()
return [int(line) for line in lines]
def evaluate(label_path,
result_path,
label_split_file,
current_class=0,
coco=False,
score_thresh=-1):
dt_annos = kitti.get_label_annos(result_path)
if score_thresh > 0:
dt_annos = kitti.filter_annos_low_score(dt_annos, score_thresh)
val_image_ids = _read_imageset_file(label_split_file)
gt_annos = kitti.get_label_annos(label_path, val_image_ids)
if coco:
return get_coco_eval_result(gt_annos, dt_annos, current_class)
else:
return get_official_eval_result(gt_annos, dt_annos, current_class)
if __name__ == '__main__':
fire.Fire()
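    # Usage sketch (not part of the original file; all paths are
    # placeholders). fire exposes this module's functions as subcommands:
    #   python evaluate.py evaluate \
    #       --label_path=<kitti>/training/label_2 \
    #       --result_path=<outputs>/data \
    #       --label_split_file=<kitti>/ImageSets/val.txt \
    #       --current_class=0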
| 908 | 26.545455 | 74 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_eval_python/kitti_common.py | import concurrent.futures as futures
import os
import pathlib
import re
from collections import OrderedDict
import numpy as np
from skimage import io
def get_image_index_str(img_idx):
return "{:06d}".format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type='image_2',
file_tail='.png',
training=True,
relative_path=True):
img_idx_str = get_image_index_str(idx)
img_idx_str += file_tail
prefix = pathlib.Path(prefix)
if training:
file_path = pathlib.Path('training') / info_type / img_idx_str
else:
file_path = pathlib.Path('testing') / info_type / img_idx_str
if not (prefix / file_path).exists():
raise ValueError("file not exist: {}".format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
def get_image_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'image_2', '.png', training,
relative_path)
def get_label_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'label_2', '.txt', training,
relative_path)
def get_velodyne_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
relative_path)
def get_calib_path(idx, prefix, training=True, relative_path=True):
return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
relative_path)
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
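# Illustrative note (not part of the original file): padding a 3x4 projection
# matrix with the homogeneous row [0, 0, 0, 1] lets the calibration matrices
# be chained by plain 4x4 multiplication, e.g. P2 @ R0_rect @ Tr_velo_to_cam
# maps homogeneous velodyne points into image 2.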
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
# image_infos = []
root_path = pathlib.Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
image_info = {'image_idx': idx}
annotations = None
if velodyne:
image_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['img_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['img_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['img_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array(
[float(info) for info in lines[0].split(' ')[1:13]]).reshape(
[3, 4])
P1 = np.array(
[float(info) for info in lines[1].split(' ')[1:13]]).reshape(
[3, 4])
P2 = np.array(
[float(info) for info in lines[2].split(' ')[1:13]]).reshape(
[3, 4])
P3 = np.array(
[float(info) for info in lines[3].split(' ')[1:13]]).reshape(
[3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
image_info['calib/P0'] = P0
image_info['calib/P1'] = P1
image_info['calib/P2'] = P2
image_info['calib/P3'] = P3
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
image_info['calib/R0_rect'] = rect_4x4
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
image_info['calib/Tr_velo_to_cam'] = Tr_velo_to_cam
image_info['calib/Tr_imu_to_velo'] = Tr_imu_to_velo
if annotations is not None:
image_info['annos'] = annotations
add_difficulty_to_annos(image_info)
return image_info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
def filter_kitti_anno(image_anno,
used_classes,
used_difficulty=None,
dontcare_iou=None):
if not isinstance(used_classes, (list, tuple)):
used_classes = [used_classes]
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, x in enumerate(image_anno['name']) if x in used_classes
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
image_anno[key][relevant_annotation_indices])
if used_difficulty is not None:
relevant_annotation_indices = [
i for i, x in enumerate(img_filtered_annotations['difficulty'])
if x in used_difficulty
]
for key in image_anno.keys():
img_filtered_annotations[key] = (
img_filtered_annotations[key][relevant_annotation_indices])
if 'DontCare' in used_classes and dontcare_iou is not None:
dont_care_indices = [
i for i, x in enumerate(img_filtered_annotations['name'])
if x == 'DontCare'
]
# bounding box format [y_min, x_min, y_max, x_max]
all_boxes = img_filtered_annotations['bbox']
ious = iou(all_boxes, all_boxes[dont_care_indices])
# Remove all bounding boxes that overlap with a dontcare region.
if ious.size > 0:
boxes_to_remove = np.amax(ious, axis=1) > dontcare_iou
for key in image_anno.keys():
img_filtered_annotations[key] = (img_filtered_annotations[key][
np.logical_not(boxes_to_remove)])
return img_filtered_annotations
def filter_annos_low_score(image_annos, thresh):
new_image_annos = []
for anno in image_annos:
img_filtered_annotations = {}
relevant_annotation_indices = [
i for i, s in enumerate(anno['score']) if s >= thresh
]
for key in anno.keys():
img_filtered_annotations[key] = (
anno[key][relevant_annotation_indices])
new_image_annos.append(img_filtered_annotations)
return new_image_annos
def kitti_result_line(result_dict, precision=4):
prec_float = "{" + ":.{}f".format(precision) + "}"
res_line = []
all_field_default = OrderedDict([
('name', None),
('truncated', -1),
('occluded', -1),
('alpha', -10),
('bbox', None),
('dimensions', [-1, -1, -1]),
('location', [-1000, -1000, -1000]),
('rotation_y', -10),
('score', None),
])
res_dict = [(key, None) for key, val in all_field_default.items()]
res_dict = OrderedDict(res_dict)
for key, val in result_dict.items():
if all_field_default[key] is None and val is None:
raise ValueError("you must specify a value for {}".format(key))
res_dict[key] = val
for key, val in res_dict.items():
if key == 'name':
res_line.append(val)
elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append(prec_float.format(val))
elif key == 'occluded':
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append('{}'.format(val))
elif key in ['bbox', 'dimensions', 'location']:
if val is None:
res_line += [str(v) for v in all_field_default[key]]
else:
res_line += [prec_float.format(v) for v in val]
else:
raise ValueError("unknown key. supported key:{}".format(
res_dict.keys()))
return ' '.join(res_line)
def add_difficulty_to_annos(info):
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for eval_utils
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for eval_utils
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    # np.bool was removed in NumPy >= 1.24; use the builtin bool instead
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
i = 0
for h, o, t in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos["difficulty"] = np.array(diff, np.int32)
return diff
def get_label_anno(label_path):
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
annotations['name'] = np.array([x[0] for x in content])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array(
[[float(info) for info in x[4:8]] for x in content]).reshape(-1, 4)
# dimensions will convert hwl format to standard lhw(camera) format.
annotations['dimensions'] = np.array(
[[float(info) for info in x[8:11]] for x in content]).reshape(
-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array(
[[float(info) for info in x[11:14]] for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array(
[float(x[14]) for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros([len(annotations['bbox'])])
return annotations
def get_label_annos(label_folder, image_ids=None):
if image_ids is None:
filepaths = pathlib.Path(label_folder).glob('*.txt')
        prog = re.compile(r'^\d{6}\.txt$')
filepaths = filter(lambda f: prog.match(f.name), filepaths)
image_ids = [int(p.stem) for p in filepaths]
image_ids = sorted(image_ids)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
annos = []
label_folder = pathlib.Path(label_folder)
for idx in image_ids:
image_idx = get_image_index_str(idx)
label_filename = label_folder / (image_idx + '.txt')
annos.append(get_label_anno(label_filename))
return annos
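# Usage sketch (not part of the original file; the path is a placeholder):
#   annos = get_label_annos('data/KITTI/object/training/label_2',
#                           image_ids=[0, 1, 2])
#   print(annos[0]['name'], annos[0]['bbox'].shape)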
def area(boxes, add1=False):
"""Computes area of boxes.
Args:
boxes: Numpy array with shape [N, 4] holding N boxes
Returns:
        a numpy array with shape [N] representing box areas
"""
if add1:
return (boxes[:, 2] - boxes[:, 0] + 1.0) * (
boxes[:, 3] - boxes[:, 1] + 1.0)
else:
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection(boxes1, boxes2, add1=False):
"""Compute pairwise intersection areas between boxes.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes
boxes2: a numpy array with shape [M, 4] holding M boxes
Returns:
        a numpy array with shape [N, M] representing pairwise intersection area
"""
[y_min1, x_min1, y_max1, x_max1] = np.split(boxes1, 4, axis=1)
[y_min2, x_min2, y_max2, x_max2] = np.split(boxes2, 4, axis=1)
all_pairs_min_ymax = np.minimum(y_max1, np.transpose(y_max2))
all_pairs_max_ymin = np.maximum(y_min1, np.transpose(y_min2))
if add1:
all_pairs_min_ymax += 1.0
intersect_heights = np.maximum(
np.zeros(all_pairs_max_ymin.shape),
all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = np.minimum(x_max1, np.transpose(x_max2))
all_pairs_max_xmin = np.maximum(x_min1, np.transpose(x_min2))
if add1:
all_pairs_min_xmax += 1.0
intersect_widths = np.maximum(
np.zeros(all_pairs_max_xmin.shape),
all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxes1, boxes2, add1=False):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxes1: a numpy array with shape [N, 4] holding N boxes.
        boxes2: a numpy array with shape [M, 4] holding M boxes.
Returns:
a numpy array with shape [N, M] representing pairwise iou scores.
"""
intersect = intersection(boxes1, boxes2, add1)
area1 = area(boxes1, add1)
area2 = area(boxes2, add1)
union = np.expand_dims(
area1, axis=1) + np.expand_dims(
area2, axis=0) - intersect
return intersect / union
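# Usage sketch (not part of the original file): pairwise axis-aligned IoU.
#   b1 = np.array([[0., 0., 10., 10.]])
#   b2 = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
#   print(iou(b1, b2))  # -> [[1.0, 25/175 ~= 0.143]]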
| 15,309 | 36.070218 | 79 | py |
monodle | monodle-main/lib/datasets/kitti/kitti_eval_python/eval.py | import numpy as np
import numba
import io as sysio
from .rotate_iou import rotate_iou_gpu_eval
DISTANCE_COVER = False
@numba.jit
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
scores.sort()
scores = scores[::-1]
current_recall = 0
thresholds = []
for i, score in enumerate(scores):
l_recall = (i + 1) / num_gt
if i < (len(scores) - 1):
r_recall = (i + 2) / num_gt
else:
r_recall = l_recall
if (((r_recall - current_recall) < (current_recall - l_recall))
and (i < (len(scores) - 1))):
continue
# recall = l_recall
thresholds.append(score)
current_recall += 1 / (num_sample_pts - 1.0)
return thresholds
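# Illustrative note (not part of the original file): the official KITTI
# protocol samples the PR curve at num_sample_pts = 41 evenly spaced recall
# values (0.0, 0.025, ..., 1.0). `get_thresholds` walks the score-sorted
# detections and keeps the score at which recall first crosses each sample
# point, picking whichever of the left/right recall values lands closer.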
def clean_data(gt_anno, dt_anno, current_class, difficulty):
CLASS_NAMES = ['car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'truck']
MIN_HEIGHT = [40, 25, 25]
MAX_OCCLUSION = [0, 1, 2]
MAX_TRUNCATION = [0.15, 0.3, 0.5]
dc_bboxes, ignored_gt, ignored_dt = [], [], []
current_cls_name = CLASS_NAMES[current_class].lower()
num_gt = len(gt_anno["name"])
num_dt = len(dt_anno["name"])
num_valid_gt = 0
for i in range(num_gt):
bbox = gt_anno["bbox"][i]
gt_name = gt_anno["name"][i].lower()
height = bbox[3] - bbox[1]
valid_class = -1
if (gt_name == current_cls_name):
valid_class = 1
elif (current_cls_name == "Pedestrian".lower() and "Person_sitting".lower() == gt_name):
valid_class = 0
elif (current_cls_name == "Car".lower() and "Van".lower() == gt_name):
valid_class = 0
else:
valid_class = -1
ignore = False
if ((gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty])
or (gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty])
or (height <= MIN_HEIGHT[difficulty])):
# if gt_anno["difficulty"][i] > difficulty or gt_anno["difficulty"][i] == -1:
ignore = True
if valid_class == 1 and not ignore:
ignored_gt.append(0)
num_valid_gt += 1
elif (valid_class == 0 or (ignore and (valid_class == 1))):
ignored_gt.append(1)
else:
ignored_gt.append(-1)
# for i in range(num_gt):
if gt_anno["name"][i] == "DontCare":
dc_bboxes.append(gt_anno["bbox"][i])
for i in range(num_dt):
if (dt_anno["name"][i].lower() == current_cls_name):
valid_class = 1
else:
valid_class = -1
height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
if height < MIN_HEIGHT[difficulty]:
ignored_dt.append(1)
elif valid_class == 1:
ignored_dt.append(0)
else:
ignored_dt.append(-1)
return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
def clean_data_by_distance(gt_anno, dt_anno, current_class, difficulty):
CLASS_NAMES = ['car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'truck']
MAX_TRUNCATION = [0.15, 0.3, 0.5]
MAX_OCCLUSION = [0, 1, 2]
MIN_HEIGHT = [40, 25, 25]
MAX_DISTANCE = [30, 50, 70]
dc_bboxes, ignored_gt, ignored_dt = [], [], []
current_cls_name = CLASS_NAMES[current_class].lower()
num_gt = len(gt_anno["name"])
num_dt = len(dt_anno["name"])
num_valid_gt = 0
for i in range(num_gt):
bbox = gt_anno["bbox"][i]
gt_name = gt_anno["name"][i].lower()
height = bbox[3] - bbox[1]
valid_class = -1
if (gt_name == current_cls_name):
valid_class = 1
elif (current_cls_name == "Pedestrian".lower()
and "Person_sitting".lower() == gt_name):
valid_class = 0
elif (current_cls_name == "Car".lower() and "Van".lower() == gt_name):
valid_class = 0
else:
valid_class = -1
ignore = False
dis = np.linalg.norm(gt_anno["location"][i])
# print(gt_anno['location'][i],dis)
if DISTANCE_COVER:
if ((gt_anno["occluded"][i] > MAX_OCCLUSION[2]) or
(gt_anno["truncated"][i] > MAX_TRUNCATION[2]) or
(height <= MIN_HEIGHT[2]) or
(dis > MAX_DISTANCE[difficulty])):
ignore = True
else:
if difficulty == 0:
if ((gt_anno["occluded"][i] > MAX_OCCLUSION[2]) or
(gt_anno["truncated"][i] > MAX_TRUNCATION[2]) or
(height <= MIN_HEIGHT[2]) or
(dis > MAX_DISTANCE[difficulty])):
ignore = True
else:
if ((gt_anno["occluded"][i] > MAX_OCCLUSION[2]) or
(gt_anno["truncated"][i] > MAX_TRUNCATION[2]) or
(height <= MIN_HEIGHT[2]) or
(dis > MAX_DISTANCE[difficulty]) or
(dis <= MAX_DISTANCE[difficulty - 1])):
ignore = True
if valid_class == 1 and not ignore:
ignored_gt.append(0)
num_valid_gt += 1
elif (valid_class == 0 or (ignore and (valid_class == 1))):
ignored_gt.append(1)
else:
ignored_gt.append(-1)
if gt_anno["name"][i] == "DontCare":
dc_bboxes.append(gt_anno["bbox"][i])
for i in range(num_dt):
if (dt_anno["name"][i].lower() == current_cls_name):
valid_class = 1
else:
valid_class = -1
height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
if height < MIN_HEIGHT[2]:
ignored_dt.append(1)
elif valid_class == 1:
ignored_dt.append(0)
else:
ignored_dt.append(-1)
return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
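# Illustrative note (not part of the original file): this variant replaces
# KITTI's easy/moderate/hard buckets with range buckets over the object's
# distance to the camera: disjoint 0-30m, 30-50m, 50-70m by default, or
# cumulative 0-30m, 0-50m, 0-70m when DISTANCE_COVER is True.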
@numba.jit(nopython=True)
def image_box_overlap(boxes, query_boxes, criterion=-1):
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
(query_boxes[k, 3] - query_boxes[k, 1]))
for n in range(N):
iw = (min(boxes[n, 2], query_boxes[k, 2]) -
max(boxes[n, 0], query_boxes[k, 0]))
if iw > 0:
ih = (min(boxes[n, 3], query_boxes[k, 3]) -
max(boxes[n, 1], query_boxes[k, 1]))
if ih > 0:
if criterion == -1:
ua = (
(boxes[n, 2] - boxes[n, 0]) *
(boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
elif criterion == 0:
ua = ((boxes[n, 2] - boxes[n, 0]) *
(boxes[n, 3] - boxes[n, 1]))
elif criterion == 1:
ua = qbox_area
else:
ua = 1.0
overlaps[n, k] = iw * ih / ua
return overlaps
def bev_box_overlap(boxes, qboxes, criterion=-1):
riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
return riou
@numba.jit(nopython=True, parallel=True)
def d3_box_overlap_kernel(boxes, qboxes, rinc, criterion=-1):
    # ONLY support overlap in CAMERA, not lidar.
N, K = boxes.shape[0], qboxes.shape[0]
for i in range(N):
for j in range(K):
if rinc[i, j] > 0:
# iw = (min(boxes[i, 1] + boxes[i, 4], qboxes[j, 1] +
# qboxes[j, 4]) - max(boxes[i, 1], qboxes[j, 1]))
iw = (min(boxes[i, 1], qboxes[j, 1]) - max(
boxes[i, 1] - boxes[i, 4], qboxes[j, 1] - qboxes[j, 4]))
if iw > 0:
area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
inc = iw * rinc[i, j]
if criterion == -1:
ua = (area1 + area2 - inc)
elif criterion == 0:
ua = area1
elif criterion == 1:
ua = area2
else:
ua = inc
rinc[i, j] = inc / ua
else:
rinc[i, j] = 0.0
def d3_box_overlap(boxes, qboxes, criterion=-1):
rinc = rotate_iou_gpu_eval(boxes[:, [0, 2, 3, 5, 6]],
qboxes[:, [0, 2, 3, 5, 6]], 2)
d3_box_overlap_kernel(boxes, qboxes, rinc, criterion)
return rinc
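# Illustrative note (not part of the original file): 3D IoU in camera
# coordinates is factored into a rotated BEV overlap over the columns
# (x, z, l, w, ry) times the vertical overlap along y; boxes are
# bottom-anchored, so the y extent intersected above is [y - h, y].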
@numba.jit(nopython=True)
def compute_statistics_jit(overlaps,
gt_datas,
dt_datas,
ignored_gt,
ignored_det,
dc_bboxes,
metric,
min_overlap,
thresh=0,
compute_fp=False,
compute_aos=False):
det_size = dt_datas.shape[0]
gt_size = gt_datas.shape[0]
dt_scores = dt_datas[:, -1]
dt_alphas = dt_datas[:, 4]
gt_alphas = gt_datas[:, 4]
dt_bboxes = dt_datas[:, :4]
gt_bboxes = gt_datas[:, :4]
assigned_detection = [False] * det_size
ignored_threshold = [False] * det_size
if compute_fp:
for i in range(det_size):
if (dt_scores[i] < thresh):
ignored_threshold[i] = True
NO_DETECTION = -10000000
tp, fp, fn, similarity = 0, 0, 0, 0
# thresholds = [0.0]
# delta = [0.0]
thresholds = np.zeros((gt_size,))
thresh_idx = 0
delta = np.zeros((gt_size,))
delta_idx = 0
for i in range(gt_size):
if ignored_gt[i] == -1:
continue
det_idx = -1
valid_detection = NO_DETECTION
max_overlap = 0
assigned_ignored_det = False
for j in range(det_size):
if (ignored_det[j] == -1):
continue
if (assigned_detection[j]):
continue
if (ignored_threshold[j]):
continue
overlap = overlaps[j, i]
dt_score = dt_scores[j]
if (not compute_fp and (overlap > min_overlap)
and dt_score > valid_detection):
det_idx = j
valid_detection = dt_score
elif (compute_fp and (overlap > min_overlap)
and (overlap > max_overlap or assigned_ignored_det)
and ignored_det[j] == 0):
max_overlap = overlap
det_idx = j
valid_detection = 1
assigned_ignored_det = False
elif (compute_fp and (overlap > min_overlap)
and (valid_detection == NO_DETECTION)
and ignored_det[j] == 1):
det_idx = j
valid_detection = 1
assigned_ignored_det = True
if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
fn += 1
elif ((valid_detection != NO_DETECTION)
and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
assigned_detection[det_idx] = True
elif valid_detection != NO_DETECTION:
tp += 1
# thresholds.append(dt_scores[det_idx])
thresholds[thresh_idx] = dt_scores[det_idx]
thresh_idx += 1
if compute_aos:
# delta.append(gt_alphas[i] - dt_alphas[det_idx])
delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
delta_idx += 1
assigned_detection[det_idx] = True
if compute_fp:
for i in range(det_size):
if (not (assigned_detection[i] or ignored_det[i] == -1
or ignored_det[i] == 1 or ignored_threshold[i])):
fp += 1
nstuff = 0
if metric == 0:
overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
for i in range(dc_bboxes.shape[0]):
for j in range(det_size):
if (assigned_detection[j]):
continue
if (ignored_det[j] == -1 or ignored_det[j] == 1):
continue
if (ignored_threshold[j]):
continue
if overlaps_dt_dc[j, i] > min_overlap:
assigned_detection[j] = True
nstuff += 1
fp -= nstuff
if compute_aos:
tmp = np.zeros((fp + delta_idx,))
# tmp = [0] * fp
for i in range(delta_idx):
tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
# tmp.append((1.0 + np.cos(delta[i])) / 2.0)
# assert len(tmp) == fp + tp
# assert len(delta) == tp
if tp > 0 or fp > 0:
similarity = np.sum(tmp)
else:
similarity = -1
return tp, fp, fn, similarity, thresholds[:thresh_idx]
def get_split_parts(num, num_part):
same_part = num // num_part
remain_num = num % num_part
if same_part == 0:
return [num]
if remain_num == 0:
return [same_part] * num_part
else:
return [same_part] * num_part + [remain_num]
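# Illustrative note (not part of the original file):
#   get_split_parts(10, 3) -> [3, 3, 3, 1]  (three full parts plus remainder)
#   get_split_parts(9, 3)  -> [3, 3, 3]
#   get_split_parts(2, 3)  -> [2]           (fewer examples than parts)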
@numba.jit(nopython=True)
def fused_compute_statistics(overlaps,
pr,
gt_nums,
dt_nums,
dc_nums,
gt_datas,
dt_datas,
dontcares,
ignored_gts,
ignored_dets,
metric,
min_overlap,
thresholds,
compute_aos=False):
gt_num = 0
dt_num = 0
dc_num = 0
for i in range(gt_nums.shape[0]):
for t, thresh in enumerate(thresholds):
overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:
gt_num + gt_nums[i]]
gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
tp, fp, fn, similarity, _ = compute_statistics_jit(
overlap,
gt_data,
dt_data,
ignored_gt,
ignored_det,
dontcare,
metric,
min_overlap=min_overlap,
thresh=thresh,
compute_fp=True,
compute_aos=compute_aos)
pr[t, 0] += tp
pr[t, 1] += fp
pr[t, 2] += fn
if similarity != -1:
pr[t, 3] += similarity
gt_num += gt_nums[i]
dt_num += dt_nums[i]
dc_num += dc_nums[i]
def calculate_iou_partly(gt_annos, dt_annos, metric, num_parts=50):
"""fast iou algorithm. this function can be used independently to
do result analysis. Must be used in CAMERA coordinate system.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
metric: eval type. 0: bbox, 1: bev, 2: 3d
num_parts: int. a parameter for fast calculate algorithm
"""
assert len(gt_annos) == len(dt_annos)
total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
parted_overlaps = []
example_idx = 0
for num_part in split_parts:
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
if metric == 0:
gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
overlap_part = image_box_overlap(gt_boxes, dt_boxes)
elif metric == 1:
loc = np.concatenate(
[a["location"][:, [0, 2]] for a in gt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, [0, 2]] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
loc = np.concatenate(
[a["location"][:, [0, 2]] for a in dt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, [0, 2]] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
overlap_part = bev_box_overlap(gt_boxes, dt_boxes).astype(
np.float64)
elif metric == 2:
loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate(
[loc, dims, rots[..., np.newaxis]], axis=1)
overlap_part = d3_box_overlap(gt_boxes, dt_boxes).astype(
np.float64)
else:
raise ValueError("unknown metric")
parted_overlaps.append(overlap_part)
example_idx += num_part
overlaps = []
example_idx = 0
for j, num_part in enumerate(split_parts):
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
gt_num_idx, dt_num_idx = 0, 0
for i in range(num_part):
gt_box_num = total_gt_num[example_idx + i]
dt_box_num = total_dt_num[example_idx + i]
overlaps.append(
parted_overlaps[j][gt_num_idx:gt_num_idx + gt_box_num,
dt_num_idx:dt_num_idx + dt_box_num])
gt_num_idx += gt_box_num
dt_num_idx += dt_box_num
example_idx += num_part
return overlaps, parted_overlaps, total_gt_num, total_dt_num
def _prepare_data(gt_annos, dt_annos, current_class, difficulty, DIForDIS=True):
gt_datas_list = []
dt_datas_list = []
total_dc_num = []
ignored_gts, ignored_dets, dontcares = [], [], []
total_num_valid_gt = 0
for i in range(len(gt_annos)):
rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty) if DIForDIS \
else clean_data_by_distance(gt_annos[i], dt_annos[i], current_class, difficulty)
num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets
ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
ignored_dets.append(np.array(ignored_det, dtype=np.int64))
if len(dc_bboxes) == 0:
dc_bboxes = np.zeros((0, 4)).astype(np.float64)
else:
dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
total_dc_num.append(dc_bboxes.shape[0])
dontcares.append(dc_bboxes)
total_num_valid_gt += num_valid_gt
gt_datas = np.concatenate(
[gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1)
dt_datas = np.concatenate([
dt_annos[i]["bbox"], dt_annos[i]["alpha"][..., np.newaxis],
dt_annos[i]["score"][..., np.newaxis]
], 1)
gt_datas_list.append(gt_datas)
dt_datas_list.append(dt_datas)
total_dc_num = np.stack(total_dc_num, axis=0)
return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
total_dc_num, total_num_valid_gt)
def eval_class(gt_annos,
dt_annos,
current_classes,
difficultys,
metric,
min_overlaps,
compute_aos=False,
num_parts=50,
DIForDIS=True):
"""Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
current_classes: list of int, 0: car, 1: pedestrian, 2: cyclist
        difficultys: list of int. eval difficulty, 0: easy, 1: moderate, 2: hard
metric: eval type. 0: bbox, 1: bev, 2: 3d
min_overlaps: float, min overlap. format: [num_overlap, metric, class].
num_parts: int. a parameter for fast calculate algorithm
        DIForDIS: bool. use the standard difficulty-based metric if True,
            the distance-based metric if False
Returns:
dict of recall, precision and aos
"""
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(dt_annos, gt_annos, metric, num_parts)
overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for m, current_class in enumerate(current_classes):
for l, difficulty in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty, DIForDIS=DIForDIS)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
dontcares, total_dc_num, total_num_valid_gt) = rets
for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(
overlaps[i],
gt_datas_list[i],
dt_datas_list[i],
ignored_gts[i],
ignored_dets[i],
dontcares[i],
metric,
min_overlap=min_overlap,
thresh=0.0,
compute_fp=False)
tp, fp, fn, similarity, thresholds = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
pr = np.zeros([len(thresholds), 4])
idx = 0
for j, num_part in enumerate(split_parts):
gt_datas_part = np.concatenate(
gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(
dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(
dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(
ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(
ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(
parted_overlaps[j],
pr,
total_gt_num[idx:idx + num_part],
total_dt_num[idx:idx + num_part],
total_dc_num[idx:idx + num_part],
gt_datas_part,
dt_datas_part,
dc_datas_part,
ignored_gts_part,
ignored_dets_part,
metric,
min_overlap=min_overlap,
thresholds=thresholds,
compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(
precision[m, l, k, i:], axis=-1)
recall[m, l, k, i] = np.max(recall[m, l, k, i:], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
ret_dict = {
"recall": recall,
"precision": precision,
"orientation": aos,
}
return ret_dict
def get_mAP(prec):
sums = 0
for i in range(0, prec.shape[-1], 4):
sums = sums + prec[..., i]
return sums / 11 * 100
def get_mAP_R40(prec):
sums = 0
for i in range(1, prec.shape[-1]):
sums = sums + prec[..., i]
return sums / 40 * 100
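# Illustrative note (not part of the original file): with 41 recall samples,
# get_mAP is the legacy 11-point interpolated AP (indices 0, 4, ..., 40, i.e.
# recalls 0.0, 0.1, ..., 1.0), while get_mAP_R40 averages the 40 points that
# skip recall 0, as proposed in "Disentangling Monocular 3D Object Detection"
# (ICCV'19) and used by the current KITTI benchmark.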
def print_str(value, *arg, sstream=None):
if sstream is None:
sstream = sysio.StringIO()
sstream.truncate(0)
sstream.seek(0)
print(value, *arg, file=sstream)
return sstream.getvalue()
def do_eval(gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos=False,
PR_detail_dict=None,
DIForDIS=True):
# min_overlaps: [num_minoverlap, metric, num_class]
difficultys = [0, 1, 2]
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 0,
min_overlaps, compute_aos, DIForDIS=DIForDIS)
# ret: [num_class, num_diff, num_minoverlap, num_sample_points]
mAP_bbox = get_mAP(ret["precision"])
mAP_bbox_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['bbox'] = ret['precision']
mAP_aos = mAP_aos_R40 = None
if compute_aos:
mAP_aos = get_mAP(ret["orientation"])
mAP_aos_R40 = get_mAP_R40(ret["orientation"])
if PR_detail_dict is not None:
PR_detail_dict['aos'] = ret['orientation']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 1,
min_overlaps, DIForDIS=DIForDIS)
mAP_bev = get_mAP(ret["precision"])
mAP_bev_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['bev'] = ret['precision']
ret = eval_class(gt_annos, dt_annos, current_classes, difficultys, 2,
min_overlaps, DIForDIS=DIForDIS)
mAP_3d = get_mAP(ret["precision"])
mAP_3d_R40 = get_mAP_R40(ret["precision"])
if PR_detail_dict is not None:
PR_detail_dict['3d'] = ret['precision']
return mAP_bbox, mAP_bev, mAP_3d, mAP_aos, mAP_bbox_R40, mAP_bev_R40, mAP_3d_R40, mAP_aos_R40
def do_coco_style_eval(gt_annos, dt_annos, current_classes, overlap_ranges,
compute_aos):
# overlap_ranges: [range, metric, num_class]
min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
for i in range(overlap_ranges.shape[1]):
for j in range(overlap_ranges.shape[2]):
min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
    # do_eval also returns the R40 numbers; the coco-style metric only uses
    # the 11-point AP values
    mAP_bbox, mAP_bev, mAP_3d, mAP_aos, _, _, _, _ = do_eval(
        gt_annos, dt_annos, current_classes, min_overlaps, compute_aos)
# ret: [num_class, num_diff, num_minoverlap]
mAP_bbox = mAP_bbox.mean(-1)
mAP_bev = mAP_bev.mean(-1)
mAP_3d = mAP_3d.mean(-1)
if mAP_aos is not None:
mAP_aos = mAP_aos.mean(-1)
return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def get_official_eval_result(gt_annos, dt_annos, current_classes, PR_detail_dict=None):
overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,
0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7],
[0.7, 0.5, 0.5, 0.7, 0.5, 0.7]])
overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7,
0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5],
[0.5, 0.25, 0.25, 0.5, 0.25, 0.5]])
    min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0)  # [2, 3, 6]
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
5: 'Truck'
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
min_overlaps = min_overlaps[:, :, current_classes]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
mAPbbox, mAPbev, mAP3d, mAPaos, mAPbbox_R40, mAPbev_R40, mAP3d_R40, mAPaos_R40 = do_eval(
gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, PR_detail_dict=PR_detail_dict, DIForDIS=True)
ret_dict = {}
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
for i in range(min_overlaps.shape[0]):
result += print_str(
(f"{class_to_name[curcls]} "
"AP@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
result += print_str((f"bbox AP:{mAPbbox[j, 0, i]:.4f}, "
f"{mAPbbox[j, 1, i]:.4f}, "
f"{mAPbbox[j, 2, i]:.4f}"))
result += print_str((f"bev AP:{mAPbev[j, 0, i]:.4f}, "
f"{mAPbev[j, 1, i]:.4f}, "
f"{mAPbev[j, 2, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d[j, 0, i]:.4f}, "
f"{mAP3d[j, 1, i]:.4f}, "
f"{mAP3d[j, 2, i]:.4f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos[j, 0, i]:.2f}, "
f"{mAPaos[j, 1, i]:.2f}, "
f"{mAPaos[j, 2, i]:.2f}"))
if i == 0:
ret_dict['%s_aos_easy' % class_to_name[curcls]] = mAPaos[j, 0, 0]
ret_dict['%s_aos_moderate' % class_to_name[curcls]] = mAPaos[j, 1, 0]
ret_dict['%s_aos_hard' % class_to_name[curcls]] = mAPaos[j, 2, 0]
result += print_str(
(f"{class_to_name[curcls]} "
"AP_R40@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
result += print_str((f"bbox AP:{mAPbbox_R40[j, 0, i]:.4f}, "
f"{mAPbbox_R40[j, 1, i]:.4f}, "
f"{mAPbbox_R40[j, 2, i]:.4f}"))
result += print_str((f"bev AP:{mAPbev_R40[j, 0, i]:.4f}, "
f"{mAPbev_R40[j, 1, i]:.4f}, "
f"{mAPbev_R40[j, 2, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d_R40[j, 0, i]:.4f}, "
f"{mAP3d_R40[j, 1, i]:.4f}, "
f"{mAP3d_R40[j, 2, i]:.4f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos_R40[j, 0, i]:.2f}, "
f"{mAPaos_R40[j, 1, i]:.2f}, "
f"{mAPaos_R40[j, 2, i]:.2f}"))
if i == 0:
ret_dict['%s_aos_easy_R40' % class_to_name[curcls]] = mAPaos_R40[j, 0, 0]
ret_dict['%s_aos_moderate_R40' % class_to_name[curcls]] = mAPaos_R40[j, 1, 0]
ret_dict['%s_aos_hard_R40' % class_to_name[curcls]] = mAPaos_R40[j, 2, 0]
if i == 0:
ret_dict['%s_3d_easy' % class_to_name[curcls]] = mAP3d[j, 0, 0]
ret_dict['%s_3d_moderate' % class_to_name[curcls]] = mAP3d[j, 1, 0]
ret_dict['%s_3d_hard' % class_to_name[curcls]] = mAP3d[j, 2, 0]
ret_dict['%s_bev_easy' % class_to_name[curcls]] = mAPbev[j, 0, 0]
ret_dict['%s_bev_moderate' % class_to_name[curcls]] = mAPbev[j, 1, 0]
ret_dict['%s_bev_hard' % class_to_name[curcls]] = mAPbev[j, 2, 0]
ret_dict['%s_image_easy' % class_to_name[curcls]] = mAPbbox[j, 0, 0]
ret_dict['%s_image_moderate' % class_to_name[curcls]] = mAPbbox[j, 1, 0]
ret_dict['%s_image_hard' % class_to_name[curcls]] = mAPbbox[j, 2, 0]
ret_dict['%s_3d_easy_R40' % class_to_name[curcls]] = mAP3d_R40[j, 0, 0]
ret_dict['%s_3d_moderate_R40' % class_to_name[curcls]] = mAP3d_R40[j, 1, 0]
ret_dict['%s_3d_hard_R40' % class_to_name[curcls]] = mAP3d_R40[j, 2, 0]
ret_dict['%s_bev_easy_R40' % class_to_name[curcls]] = mAPbev_R40[j, 0, 0]
ret_dict['%s_bev_moderate_R40' % class_to_name[curcls]] = mAPbev_R40[j, 1, 0]
ret_dict['%s_bev_hard_R40' % class_to_name[curcls]] = mAPbev_R40[j, 2, 0]
ret_dict['%s_image_easy_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 0, 0]
ret_dict['%s_image_moderate_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 1, 0]
ret_dict['%s_image_hard_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 2, 0]
return result, ret_dict
def get_distance_eval_result(gt_annos, dt_annos, current_classes, PR_detail_dict=None):
overlap_0_7 = np.array([[0.7, 0.5, 0.5, 0.7,
0.5, 0.7], [0.7, 0.5, 0.5, 0.7, 0.5, 0.7],
[0.7, 0.5, 0.5, 0.7, 0.5, 0.7]])
overlap_0_5 = np.array([[0.7, 0.5, 0.5, 0.7,
0.5, 0.5], [0.5, 0.25, 0.25, 0.5, 0.25, 0.5],
[0.5, 0.25, 0.25, 0.5, 0.25, 0.5]])
    min_overlaps = np.stack([overlap_0_7, overlap_0_5], axis=0)  # [2, 3, 6]
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
5: 'Truck'
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
min_overlaps = min_overlaps[:, :, current_classes]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
mAPbbox, mAPbev, mAP3d, mAPaos, mAPbbox_R40, mAPbev_R40, mAP3d_R40, mAPaos_R40 = do_eval(
gt_annos, dt_annos, current_classes, min_overlaps, compute_aos, PR_detail_dict=PR_detail_dict, DIForDIS=False)
ret_dict = {}
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
for i in range(min_overlaps.shape[0]):
result += print_str(
(f"{class_to_name[curcls]} "
"AP@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
result += print_str((f"bbox AP:{mAPbbox[j, 0, i]:.4f}, "
f"{mAPbbox[j, 1, i]:.4f}, "
f"{mAPbbox[j, 2, i]:.4f}"))
result += print_str((f"bev AP:{mAPbev[j, 0, i]:.4f}, "
f"{mAPbev[j, 1, i]:.4f}, "
f"{mAPbev[j, 2, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d[j, 0, i]:.4f}, "
f"{mAP3d[j, 1, i]:.4f}, "
f"{mAP3d[j, 2, i]:.4f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos[j, 0, i]:.2f}, "
f"{mAPaos[j, 1, i]:.2f}, "
f"{mAPaos[j, 2, i]:.2f}"))
if i == 0:
ret_dict['%s_aos_30m' % class_to_name[curcls]] = mAPaos[j, 0, 0]
ret_dict['%s_aos_50m' % class_to_name[curcls]] = mAPaos[j, 1, 0]
ret_dict['%s_aos_70m' % class_to_name[curcls]] = mAPaos[j, 2, 0]
result += print_str(
(f"{class_to_name[curcls]} "
"AP_R40@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
result += print_str((f"bbox AP:{mAPbbox_R40[j, 0, i]:.4f}, "
f"{mAPbbox_R40[j, 1, i]:.4f}, "
f"{mAPbbox_R40[j, 2, i]:.4f}"))
result += print_str((f"bev AP:{mAPbev_R40[j, 0, i]:.4f}, "
f"{mAPbev_R40[j, 1, i]:.4f}, "
f"{mAPbev_R40[j, 2, i]:.4f}"))
result += print_str((f"3d AP:{mAP3d_R40[j, 0, i]:.4f}, "
f"{mAP3d_R40[j, 1, i]:.4f}, "
f"{mAP3d_R40[j, 2, i]:.4f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos_R40[j, 0, i]:.2f}, "
f"{mAPaos_R40[j, 1, i]:.2f}, "
f"{mAPaos_R40[j, 2, i]:.2f}"))
if i == 0:
ret_dict['%s_aos_30m_R40' % class_to_name[curcls]] = mAPaos_R40[j, 0, 0]
ret_dict['%s_aos_50m_R40' % class_to_name[curcls]] = mAPaos_R40[j, 1, 0]
ret_dict['%s_aos_70m_R40' % class_to_name[curcls]] = mAPaos_R40[j, 2, 0]
if i == 0:
ret_dict['%s_3d_30m' % class_to_name[curcls]] = mAP3d[j, 0, 0]
ret_dict['%s_3d_50m' % class_to_name[curcls]] = mAP3d[j, 1, 0]
ret_dict['%s_3d_70m' % class_to_name[curcls]] = mAP3d[j, 2, 0]
ret_dict['%s_bev_30m' % class_to_name[curcls]] = mAPbev[j, 0, 0]
ret_dict['%s_bev_50m' % class_to_name[curcls]] = mAPbev[j, 1, 0]
ret_dict['%s_bev_70m' % class_to_name[curcls]] = mAPbev[j, 2, 0]
ret_dict['%s_image_30m' % class_to_name[curcls]] = mAPbbox[j, 0, 0]
ret_dict['%s_image_50m' % class_to_name[curcls]] = mAPbbox[j, 1, 0]
ret_dict['%s_image_70m' % class_to_name[curcls]] = mAPbbox[j, 2, 0]
ret_dict['%s_3d_30m_R40' % class_to_name[curcls]] = mAP3d_R40[j, 0, 0]
ret_dict['%s_3d_50m_R40' % class_to_name[curcls]] = mAP3d_R40[j, 1, 0]
ret_dict['%s_3d_70m_R40' % class_to_name[curcls]] = mAP3d_R40[j, 2, 0]
ret_dict['%s_bev_30m_R40' % class_to_name[curcls]] = mAPbev_R40[j, 0, 0]
ret_dict['%s_bev_50m_R40' % class_to_name[curcls]] = mAPbev_R40[j, 1, 0]
ret_dict['%s_bev_70m_R40' % class_to_name[curcls]] = mAPbev_R40[j, 2, 0]
ret_dict['%s_image_30m_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 0, 0]
ret_dict['%s_image_50m_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 1, 0]
ret_dict['%s_image_70m_R40' % class_to_name[curcls]] = mAPbbox_R40[j, 2, 0]
return result, ret_dict
def get_coco_eval_result(gt_annos, dt_annos, current_classes):
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
}
class_to_range = {
0: [0.5, 0.95, 10],
1: [0.25, 0.7, 10],
2: [0.25, 0.7, 10],
3: [0.5, 0.95, 10],
4: [0.25, 0.7, 10],
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
overlap_ranges = np.zeros([3, 3, len(current_classes)])
for i, curcls in enumerate(current_classes):
overlap_ranges[:, :, i] = np.array(
class_to_range[curcls])[:, np.newaxis]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
gt_annos, dt_annos, current_classes, overlap_ranges, compute_aos)
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
result += print_str((f"{class_to_name[curcls]} "
"coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range)))
result += print_str((f"bbox AP:{mAPbbox[j, 0]:.2f}, "
f"{mAPbbox[j, 1]:.2f}, "
f"{mAPbbox[j, 2]:.2f}"))
result += print_str((f"bev AP:{mAPbev[j, 0]:.2f}, "
f"{mAPbev[j, 1]:.2f}, "
f"{mAPbev[j, 2]:.2f}"))
result += print_str((f"3d AP:{mAP3d[j, 0]:.2f}, "
f"{mAP3d[j, 1]:.2f}, "
f"{mAP3d[j, 2]:.2f}"))
if compute_aos:
result += print_str((f"aos AP:{mAPaos[j, 0]:.2f}, "
f"{mAPaos[j, 1]:.2f}, "
f"{mAPaos[j, 2]:.2f}"))
return result
| 42,956 | 42 | 118 | py |
monodle | monodle-main/lib/losses/uncertainty_loss.py | import numpy as np
import torch
def laplacian_aleatoric_uncertainty_loss(input, target, log_variance, reduction='mean'):
'''
References:
MonoPair: Monocular 3D Object Detection Using Pairwise Spatial Relationships, CVPR'20
Geometry and Uncertainty in Deep Learning for Computer Vision, University of Cambridge
'''
assert reduction in ['mean', 'sum']
loss = 1.4142 * torch.exp(-log_variance) * torch.abs(input - target) + log_variance
return loss.mean() if reduction == 'mean' else loss.sum()
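# Illustrative note (not part of the original file): the loss above is the
# negative log-likelihood of a Laplace distribution (up to a constant) with
# the network predicting log_variance = log(sigma):
#   -log p(t | x, sigma) = sqrt(2) * |x - t| / sigma + log(sigma) + const
# hence the 1.4142 ~= sqrt(2) factor and exp(-log_variance) = 1 / sigma.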
def gaussian_aleatoric_uncertainty_loss(input, target, log_variance, reduction='mean'):
'''
References:
What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?, Neuips'17
Geometry and Uncertainty in Deep Learning for Computer Vision, University of Cambridge
'''
assert reduction in ['mean', 'sum']
loss = 0.5 * torch.exp(-log_variance) * torch.abs(input - target)**2 + 0.5 * log_variance
return loss.mean() if reduction == 'mean' else loss.sum()
if __name__ == '__main__':
pass
| 1,082 | 35.1 | 95 | py |
monodle | monodle-main/lib/losses/dim_aware_loss.py | import torch
import torch.nn.functional as F
def dim_aware_l1_loss(input, target, dimension):
dimension = dimension.clone().detach()
loss = torch.abs(input - target)
loss /= dimension
with torch.no_grad():
compensation_weight = F.l1_loss(input, target) / loss.mean()
loss *= compensation_weight
return loss.mean()
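# Illustrative note (not part of the original file): dividing by the
# ground-truth dimension makes the error relative, so large objects are not
# over-weighted; the detached compensation weight then rescales the mean back
# to the magnitude of a plain L1 loss, keeping gradients comparable.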
if __name__ == '__main__':
input = torch.zeros(3, 3, 3)
target = torch.Tensor(range(27)).reshape(3, 3, 3)
print(dim_aware_l1_loss(input, target, target+1)) | 521 | 22.727273 | 68 | py |
monodle | monodle-main/lib/losses/focal_loss.py | import torch
import torch.nn as nn
def focal_loss(input, target, alpha=0.25, gamma=2.):
'''
Args:
input: prediction, 'batch x c x h x w'
target: ground truth, 'batch x c x h x w'
alpha: hyper param, default in 0.25
gamma: hyper param, default in 2.0
Reference: Focal Loss for Dense Object Detection, ICCV'17
'''
pos_inds = target.eq(1).float()
neg_inds = target.lt(1).float()
loss = 0
pos_loss = torch.log(input) * torch.pow(1 - input, gamma) * pos_inds * alpha
neg_loss = torch.log(1 - input) * torch.pow(input, gamma) * neg_inds * (1 - alpha)
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss.mean()
def focal_loss_cornernet(input, target, gamma=2.):
'''
Args:
input: prediction, 'batch x c x h x w'
target: ground truth, 'batch x c x h x w'
gamma: hyper param, default in 2.0
Reference: Cornernet: Detecting Objects as Paired Keypoints, ECCV'18
'''
pos_inds = target.eq(1).float()
neg_inds = target.lt(1).float()
neg_weights = torch.pow(1 - target, 4)
loss = 0
pos_loss = torch.log(input) * torch.pow(1 - input, gamma) * pos_inds
neg_loss = torch.log(1 - input) * torch.pow(input, gamma) * neg_inds * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss.mean()
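# Usage sketch (not part of the original file): the (1 - target)^4 factor
# down-weights negatives near ground-truth centers, where the Gaussian-splat
# heatmap target is close to 1. Inputs must lie strictly in (0, 1):
#   pred = torch.full((2, 3, 96, 320), 1e-4)
#   gt = torch.zeros(2, 3, 96, 320)
#   gt[0, 0, 48, 160] = 1.0
#   print(focal_loss_cornernet(pred, gt))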
| 1,687 | 24.19403 | 86 | py |
monodle | monodle-main/lib/losses/centernet_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.helpers.decode_helper import _transpose_and_gather_feat
from lib.losses.focal_loss import focal_loss_cornernet
from lib.losses.uncertainty_loss import laplacian_aleatoric_uncertainty_loss
from lib.losses.dim_aware_loss import dim_aware_l1_loss
def compute_centernet3d_loss(input, target):
stats_dict = {}
seg_loss = compute_segmentation_loss(input, target)
offset2d_loss = compute_offset2d_loss(input, target)
size2d_loss = compute_size2d_loss(input, target)
offset3d_loss = compute_offset3d_loss(input, target)
depth_loss = compute_depth_loss(input, target)
size3d_loss = compute_size3d_loss(input, target)
heading_loss = compute_heading_loss(input, target)
# statistics
stats_dict['seg'] = seg_loss.item()
stats_dict['offset2d'] = offset2d_loss.item()
stats_dict['size2d'] = size2d_loss.item()
stats_dict['offset3d'] = offset3d_loss.item()
stats_dict['depth'] = depth_loss.item()
stats_dict['size3d'] = size3d_loss.item()
stats_dict['heading'] = heading_loss.item()
total_loss = seg_loss + offset2d_loss + size2d_loss + offset3d_loss + \
depth_loss + size3d_loss + heading_loss
return total_loss, stats_dict
def compute_segmentation_loss(input, target):
input['heatmap'] = torch.clamp(input['heatmap'].sigmoid_(), min=1e-4, max=1 - 1e-4)
loss = focal_loss_cornernet(input['heatmap'], target['heatmap'])
return loss
def compute_size2d_loss(input, target):
# compute size2d loss
size2d_input = extract_input_from_tensor(input['size_2d'], target['indices'], target['mask_2d'])
size2d_target = extract_target_from_tensor(target['size_2d'], target['mask_2d'])
size2d_loss = F.l1_loss(size2d_input, size2d_target, reduction='mean')
return size2d_loss
def compute_offset2d_loss(input, target):
# compute offset2d loss
offset2d_input = extract_input_from_tensor(input['offset_2d'], target['indices'], target['mask_2d'])
offset2d_target = extract_target_from_tensor(target['offset_2d'], target['mask_2d'])
offset2d_loss = F.l1_loss(offset2d_input, offset2d_target, reduction='mean')
return offset2d_loss
def compute_depth_loss(input, target):
depth_input = extract_input_from_tensor(input['depth'], target['indices'], target['mask_3d'])
depth_input, depth_log_variance = depth_input[:, 0:1], depth_input[:, 1:2]
depth_input = 1. / (depth_input.sigmoid() + 1e-6) - 1.
depth_target = extract_target_from_tensor(target['depth'], target['mask_3d'])
depth_loss = laplacian_aleatoric_uncertainty_loss(depth_input, depth_target, depth_log_variance)
return depth_loss
def compute_offset3d_loss(input, target):
offset3d_input = extract_input_from_tensor(input['offset_3d'], target['indices'], target['mask_3d'])
offset3d_target = extract_target_from_tensor(target['offset_3d'], target['mask_3d'])
offset3d_loss = F.l1_loss(offset3d_input, offset3d_target, reduction='mean')
return offset3d_loss
def compute_size3d_loss(input, target):
size3d_input = extract_input_from_tensor(input['size_3d'], target['indices'], target['mask_3d'])
size3d_target = extract_target_from_tensor(target['size_3d'], target['mask_3d'])
size3d_loss = dim_aware_l1_loss(size3d_input, size3d_target, size3d_target)
return size3d_loss
def compute_heading_loss(input, target):
heading_input = _transpose_and_gather_feat(input['heading'], target['indices']) # B * C * H * W ---> B * K * C
heading_input = heading_input.view(-1, 24)
heading_target_cls = target['heading_bin'].view(-1)
heading_target_res = target['heading_res'].view(-1)
mask = target['mask_2d'].view(-1)
# classification loss
heading_input_cls = heading_input[:, 0:12]
heading_input_cls, heading_target_cls = heading_input_cls[mask], heading_target_cls[mask]
if mask.sum() > 0:
cls_loss = F.cross_entropy(heading_input_cls, heading_target_cls, reduction='mean')
else:
cls_loss = 0.0
# regression loss
heading_input_res = heading_input[:, 12:24]
heading_input_res, heading_target_res = heading_input_res[mask], heading_target_res[mask]
cls_onehot = torch.zeros(heading_target_cls.shape[0], 12).cuda().scatter_(dim=1, index=heading_target_cls.view(-1, 1), value=1)
heading_input_res = torch.sum(heading_input_res * cls_onehot, 1)
reg_loss = F.l1_loss(heading_input_res, heading_target_res, reduction='mean')
return cls_loss + reg_loss
###################### auxiliary functions #########################
def extract_input_from_tensor(input, ind, mask):
input = _transpose_and_gather_feat(input, ind) # B*C*H*W --> B*K*C
return input[mask] # B*K*C --> M * C
def extract_target_from_tensor(target, mask):
return target[mask]
if __name__ == '__main__':
    input_cls = torch.zeros(2, 50, 12)  # B * 50 * 12
    input_reg = torch.zeros(2, 50, 12)  # B * 50 * 12
target_cls = torch.zeros(2, 50, 1, dtype=torch.int64)
target_reg = torch.zeros(2, 50, 1)
input_cls, target_cls = input_cls.view(-1, 12), target_cls.view(-1)
cls_loss = F.cross_entropy(input_cls, target_cls, reduction='mean')
    # minimal smoke test for compute_heading_loss; an assumption here is that a
    # CUDA device is available, since the loss builds its one-hot mask with .cuda()
    if torch.cuda.is_available():
        input_dict = {'heading': torch.zeros(2, 24, 10, 10).cuda()}
        target_dict = {'indices': torch.zeros(2, 10, dtype=torch.int64).cuda(),
                       'heading_bin': torch.zeros(2, 10, 1, dtype=torch.int64).cuda(),
                       'heading_res': torch.zeros(2, 10, 1).cuda(),
                       'mask_2d': torch.ones(2, 10, dtype=torch.bool).cuda()}
        print(compute_heading_loss(input_dict, target_dict))
| 5,440 | 40.853846 | 131 | py |
monodle | monodle-main/lib/backbones/dla.py | import os
import math
import numpy as np
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
BatchNorm = nn.BatchNorm2d
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
return os.path.join('http://dl.yf.io/dla/models', data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn1 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(planes)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(Bottleneck, self).__init__()
expansion = Bottleneck.expansion
bottle_planes = planes // expansion
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation,
bias=False, dilation=dilation)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class BottleneckX(nn.Module):
expansion = 2
cardinality = 32
def __init__(self, inplanes, planes, stride=1, dilation=1):
super(BottleneckX, self).__init__()
cardinality = BottleneckX.cardinality
# dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
# bottle_planes = dim * cardinality
bottle_planes = planes * cardinality // 32
self.conv1 = nn.Conv2d(inplanes, bottle_planes,
kernel_size=1, bias=False)
self.bn1 = BatchNorm(bottle_planes)
self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
stride=stride, padding=dilation, bias=False,
dilation=dilation, groups=cardinality)
self.bn2 = BatchNorm(bottle_planes)
self.conv3 = nn.Conv2d(bottle_planes, planes,
kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
def forward(self, x, residual=None):
if residual is None:
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += residual
out = self.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, residual):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1,
stride=1, bias=False, padding=(kernel_size - 1) // 2)
self.bn = BatchNorm(out_channels)
self.relu = nn.ReLU(inplace=True)
self.residual = residual
def forward(self, *x):
children = x
x = self.conv(torch.cat(x, 1))
x = self.bn(x)
if self.residual:
x += children[0]
x = self.relu(x)
return x
class Tree(nn.Module):
def __init__(self, levels, block, in_channels, out_channels, stride=1,
level_root=False, root_dim=0, root_kernel_size=1,
dilation=1, root_residual=False):
super(Tree, self).__init__()
if root_dim == 0:
root_dim = 2 * out_channels
if level_root:
root_dim += in_channels
if levels == 1:
self.tree1 = block(in_channels, out_channels, stride,
dilation=dilation)
self.tree2 = block(out_channels, out_channels, 1,
dilation=dilation)
else:
self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
stride, root_dim=0,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
root_dim=root_dim + out_channels,
root_kernel_size=root_kernel_size,
dilation=dilation, root_residual=root_residual)
if levels == 1:
self.root = Root(root_dim, out_channels, root_kernel_size,
root_residual)
self.level_root = level_root
self.root_dim = root_dim
self.downsample = None
self.project = None
self.levels = levels
if stride > 1:
self.downsample = nn.MaxPool2d(stride, stride=stride)
if in_channels != out_channels:
self.project = nn.Sequential(
nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=1, bias=False),
BatchNorm(out_channels)
)
def forward(self, x, residual=None, children=None):
children = [] if children is None else children
bottom = self.downsample(x) if self.downsample else x
residual = self.project(bottom) if self.project else bottom
if self.level_root:
children.append(bottom)
x1 = self.tree1(x, residual)
if self.levels == 1:
x2 = self.tree2(x1)
x = self.root(x2, x1, *children)
else:
children.append(x1)
x = self.tree2(x1, children=children)
return x
class DLA(nn.Module):
def __init__(self, levels, channels, num_classes=1000,
block=BasicBlock, residual_root=False, return_levels=False,
pool_size=7, linear_root=False):
super(DLA, self).__init__()
self.channels = channels
self.return_levels = return_levels
self.num_classes = num_classes
self.base_layer = nn.Sequential(
nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
padding=3, bias=False),
BatchNorm(channels[0]),
nn.ReLU(inplace=True))
self.level0 = self._make_conv_level(
channels[0], channels[0], levels[0])
self.level1 = self._make_conv_level(
channels[0], channels[1], levels[1], stride=2)
self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
level_root=False,
root_residual=residual_root)
self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
level_root=True, root_residual=residual_root)
self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
level_root=True, root_residual=residual_root)
self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
level_root=True, root_residual=residual_root)
self.avgpool = nn.AvgPool2d(pool_size)
self.fc = nn.Conv2d(channels[-1], num_classes, kernel_size=1,
stride=1, padding=0, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, BatchNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_level(self, block, inplanes, planes, blocks, stride=1):
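        # NOTE: this helper is unused by the DLA variants below, and the block
        # classes in this file do not accept the `downsample` kwarg it passes,
        # so calling it as-is would raise a TypeError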
downsample = None
if stride != 1 or inplanes != planes:
downsample = nn.Sequential(
nn.MaxPool2d(stride, stride=stride),
nn.Conv2d(inplanes, planes,
kernel_size=1, stride=1, bias=False),
BatchNorm(planes),
)
layers = []
layers.append(block(inplanes, planes, stride, downsample=downsample))
for i in range(1, blocks):
layers.append(block(inplanes, planes))
return nn.Sequential(*layers)
def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
modules = []
for i in range(convs):
modules.extend([
nn.Conv2d(inplanes, planes, kernel_size=3,
stride=stride if i == 0 else 1,
padding=dilation, bias=False, dilation=dilation),
BatchNorm(planes),
nn.ReLU(inplace=True)])
inplanes = planes
return nn.Sequential(*modules)
def forward(self, x):
y = []
x = self.base_layer(x)
for i in range(6):
x = getattr(self, 'level{}'.format(i))(x)
y.append(x)
if self.return_levels:
return y
else:
x = self.avgpool(x)
x = self.fc(x)
x = x.view(x.size(0), -1)
return x
def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
fc = self.fc
if name.endswith('.pth'):
model_weights = torch.load(data + name)
else:
model_url = get_model_url(data, name, hash)
model_weights = model_zoo.load_url(model_url)
num_classes = len(model_weights[list(model_weights.keys())[-1]])
self.fc = nn.Conv2d(
self.channels[-1], num_classes,
kernel_size=1, stride=1, padding=0, bias=True)
self.load_state_dict(model_weights)
self.fc = fc
def dla34(pretrained=False, **kwargs): # DLA-34
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 128, 256, 512],
block=BasicBlock, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
return model
def dla46_c(pretrained=False, **kwargs): # DLA-46-C
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=Bottleneck, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla46_c', hash='2bfd52c3')
return model
def dla46x_c(pretrained=False, **kwargs): # DLA-X-46-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla46x_c', hash='d761bae7')
return model
def dla60x_c(pretrained=False, **kwargs): # DLA-X-60-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
return model
def dla60(pretrained=False, **kwargs): # DLA-60
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=Bottleneck, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60', hash='24839fc4')
return model
def dla60x(pretrained=False, **kwargs): # DLA-X-60
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=BottleneckX, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla60x', hash='d15cacda')
return model
def dla102(pretrained=False, **kwargs): # DLA-102
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla102', hash='d94d9790')
return model
def dla102x(pretrained=False, **kwargs): # DLA-X-102
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla102x', hash='ad62be81')
return model
def dla102x2(pretrained=False, **kwargs): # DLA-X-102 64
BottleneckX.cardinality = 64
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla102x2', hash='262837b6')
return model
def dla169(pretrained=False, **kwargs): # DLA-169
Bottleneck.expansion = 2
model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained:
model.load_pretrained_model(data='imagenet', name='dla169', hash='0914e092')
return model
if __name__ == '__main__':
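    # NOTE: pretrained=True downloads the ImageNet checkpoint from dl.yf.io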
net = dla169(pretrained=True)
print(net)
| 14,554 | 34.413625 | 91 | py |
monodle | monodle-main/lib/backbones/dlaup.py | import os, sys
import math
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
sys.path.append(ROOT_DIR)
import numpy as np
import torch
import torch.nn as nn
class Conv2d(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, bias=True):
        super(Conv2d, self).__init__()
        self.conv = nn.Conv2d(in_planes,
                              out_planes,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=kernel_size//2,
                              bias=bias)
self.bn = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class IDAUp(nn.Module):
'''
    input: feature maps from different levels
output: up-sampled features
'''
def __init__(self, in_channels_list, up_factors_list, out_channels):
super(IDAUp, self).__init__()
self.in_channels_list = in_channels_list
self.out_channels = out_channels
for i in range(1, len(in_channels_list)):
in_channels = in_channels_list[i]
up_factors = int(up_factors_list[i])
            proj = Conv2d(in_channels, out_channels, kernel_size=3, stride=1, bias=False)
            node = Conv2d(out_channels*2, out_channels, kernel_size=3, stride=1, bias=False)
up = nn.ConvTranspose2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=up_factors * 2,
stride=up_factors,
padding=up_factors // 2,
output_padding=0,
groups=out_channels,
bias=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
setattr(self, 'node_' + str(i), node)
# weight init
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, layers):
assert len(self.in_channels_list) == len(layers), \
'{} vs {} layers'.format(len(self.in_channels_list), len(layers))
for i in range(1, len(layers)):
upsample = getattr(self, 'up_' + str(i))
project = getattr(self, 'proj_' + str(i))
node = getattr(self, 'node_' + str(i))
layers[i] = upsample(project(layers[i]))
layers[i] = node(torch.cat([layers[i-1], layers[i]], 1))
return layers
class IDAUpv2(nn.Module):
'''
    input: feature maps from different levels
output: up-sampled features
'''
def __init__(self, in_channels_list, up_factors_list, out_channels):
super(IDAUpv2, self).__init__()
self.in_channels_list = in_channels_list
self.out_channels = out_channels
for i in range(1, len(in_channels_list)):
in_channels = in_channels_list[i]
up_factors = int(up_factors_list[i])
            proj = Conv2d(in_channels, out_channels, kernel_size=3, stride=1, bias=False)
            node = Conv2d(out_channels, out_channels, kernel_size=3, stride=1, bias=False)
up = nn.ConvTranspose2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=up_factors * 2,
stride=up_factors,
padding=up_factors // 2,
output_padding=0,
groups=out_channels,
bias=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
setattr(self, 'node_' + str(i), node)
# weight init
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, layers):
assert len(self.in_channels_list) == len(layers), \
'{} vs {} layers'.format(len(self.in_channels_list), len(layers))
for i in range(1, len(layers)):
upsample = getattr(self, 'up_' + str(i))
project = getattr(self, 'proj_' + str(i))
node = getattr(self, 'node_' + str(i))
layers[i] = upsample(project(layers[i]))
layers[i] = node(layers[i-1] + layers[i])
#layers[i] = node(torch.cat([layers[i-1], layers[i]], 1))
return layers
class DLAUp(nn.Module):
def __init__(self, in_channels_list, scales_list=(1, 2, 4, 8, 16)):
super(DLAUp, self).__init__()
scales_list = np.array(scales_list, dtype=int)
for i in range(len(in_channels_list) - 1):
j = -i - 2
setattr(self, 'ida_{}'.format(i), IDAUp(in_channels_list=in_channels_list[j:],
up_factors_list=scales_list[j:] // scales_list[j],
out_channels=in_channels_list[j]))
scales_list[j + 1:] = scales_list[j]
in_channels_list[j + 1:] = [in_channels_list[j] for _ in in_channels_list[j + 1:]]
def forward(self, layers):
layers = list(layers)
assert len(layers) > 1
for i in range(len(layers) - 1):
ida = getattr(self, 'ida_{}'.format(i))
layers[-i - 2:] = ida(layers[-i - 2:])
return layers[-1]
class DLAUpv2(nn.Module):
def __init__(self, in_channels_list, scales_list=(1, 2, 4, 8, 16)):
super(DLAUpv2, self).__init__()
scales_list = np.array(scales_list, dtype=int)
in_channels_list_backup = in_channels_list.copy()
for i in range(len(in_channels_list) - 1):
j = -i - 2
setattr(self, 'ida_{}'.format(i), IDAUpv2(in_channels_list=in_channels_list[j:],
up_factors_list=scales_list[j:] // scales_list[j],
out_channels=in_channels_list[j]))
scales_list[j + 1:] = scales_list[j]
in_channels_list[j + 1:] = [in_channels_list[j] for _ in in_channels_list[j + 1:]]
self.final_fusion = IDAUpv2(in_channels_list=in_channels_list_backup,
up_factors_list=[2**i for i in range(len(in_channels_list_backup))],
out_channels=in_channels_list_backup[0])
def forward(self, layers):
layers = list(layers)
outputs = [layers[-1]]
assert len(layers) > 1
for i in range(len(layers) - 1):
ida = getattr(self, 'ida_{}'.format(i))
layers[-i - 2:] = ida(layers[-i - 2:])
outputs.insert(0, layers[-1])
outputs = self.final_fusion(outputs)
return outputs[-1]
# weight init for up-sample layers: fixed bilinear kernel for transposed conv2d
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
if __name__ == '__main__':
from lib.backbones.dla import dla34
backbone = dla34(return_levels=True)
input = torch.randn(2, 3, 64, 64)
features = backbone(input)
print('input data shape:', input.shape)
print('numbers of feature maps generated by DLA backbone:', len(features))
print('feature maps generated by DLA backbone:')
for i in range(len(features)):
print(features[i].shape)
channels = backbone.channels
start_level = int(np.log2(4))
scales = [2 ** i for i in range(len(channels[start_level:]))]
print('channels list of DLA features:', channels)
    print('start level of feature aggregation:', start_level)
    print('upsampling factors of features:', scales)
dlaup = DLAUp(in_channels_list=channels[start_level:], scales_list=scales)
features_up = dlaup(features[start_level:])
print('shape of upsampled feature maps', features_up.shape)
| 8,927 | 36.991489 | 104 | py |
monodle | monodle-main/lib/backbones/hourglass.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
class convolution(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(convolution, self).__init__()
pad = (k - 1) // 2
self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn)
self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv = self.conv(x)
bn = self.bn(conv)
relu = self.relu(bn)
return relu
class fully_connected(nn.Module):
def __init__(self, inp_dim, out_dim, with_bn=True):
super(fully_connected, self).__init__()
self.with_bn = with_bn
self.linear = nn.Linear(inp_dim, out_dim)
if self.with_bn:
self.bn = nn.BatchNorm1d(out_dim)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
linear = self.linear(x)
bn = self.bn(linear) if self.with_bn else linear
relu = self.relu(bn)
return relu
class residual(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(residual, self).__init__()
self.conv1 = nn.Conv2d(inp_dim, out_dim, (3, 3), padding=(1, 1), stride=(stride, stride), bias=False)
self.bn1 = nn.BatchNorm2d(out_dim)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_dim, out_dim, (3, 3), padding=(1, 1), bias=False)
self.bn2 = nn.BatchNorm2d(out_dim)
self.skip = nn.Sequential(
nn.Conv2d(inp_dim, out_dim, (1, 1), stride=(stride, stride), bias=False),
nn.BatchNorm2d(out_dim)
) if stride != 1 or inp_dim != out_dim else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv1 = self.conv1(x)
bn1 = self.bn1(conv1)
relu1 = self.relu1(bn1)
conv2 = self.conv2(relu1)
bn2 = self.bn2(conv2)
skip = self.skip(x)
return self.relu(bn2 + skip)
def make_layer(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = [layer(k, inp_dim, out_dim, **kwargs)]
for _ in range(1, modules):
layers.append(layer(k, out_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
def make_layer_revr(k, inp_dim, out_dim, modules, layer=convolution, **kwargs):
layers = []
for _ in range(modules - 1):
layers.append(layer(k, inp_dim, inp_dim, **kwargs))
layers.append(layer(k, inp_dim, out_dim, **kwargs))
return nn.Sequential(*layers)
class MergeUp(nn.Module):
def forward(self, up1, up2):
return up1 + up2
def make_merge_layer(dim):
return MergeUp()
def make_pool_layer(dim):
return nn.Sequential()
def make_unpool_layer(dim):
return nn.Upsample(scale_factor=2)
def make_kp_layer(cnv_dim, curr_dim, out_dim):
return nn.Sequential(
convolution(3, cnv_dim, curr_dim, with_bn=False),
nn.Conv2d(curr_dim, out_dim, (1, 1))
)
def make_inter_layer(dim):
return residual(3, dim, dim)
def make_cnv_layer(inp_dim, out_dim):
return convolution(3, inp_dim, out_dim)
class kp_module(nn.Module):
def __init__(
self, n, dims, modules, layer=residual,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, **kwargs
):
super(kp_module, self).__init__()
self.n = n
curr_mod = modules[0]
next_mod = modules[1]
curr_dim = dims[0]
next_dim = dims[1]
self.up1 = make_up_layer(
3, curr_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.max1 = make_pool_layer(curr_dim)
self.low1 = make_hg_layer(
3, curr_dim, next_dim, curr_mod,
layer=layer, **kwargs
)
self.low2 = kp_module(
n - 1, dims[1:], modules[1:], layer=layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer,
**kwargs
) if self.n > 1 else \
make_low_layer(
3, next_dim, next_dim, next_mod,
layer=layer, **kwargs
)
self.low3 = make_hg_layer_revr(
3, next_dim, curr_dim, curr_mod,
layer=layer, **kwargs
)
self.up2 = make_unpool_layer(curr_dim)
self.merge = make_merge_layer(curr_dim)
def forward(self, x):
up1 = self.up1(x)
max1 = self.max1(x)
low1 = self.low1(max1)
low2 = self.low2(low1)
low3 = self.low3(low2)
up2 = self.up2(low3)
return self.merge(up1, up2)
class exkp(nn.Module):
def __init__(
self, n, nstack, dims, modules, heads, pre=None, cnv_dim=256,
make_tl_layer=None, make_br_layer=None,
make_cnv_layer=make_cnv_layer, make_heat_layer=make_kp_layer,
make_tag_layer=make_kp_layer, make_regr_layer=make_kp_layer,
make_up_layer=make_layer, make_low_layer=make_layer,
make_hg_layer=make_layer, make_hg_layer_revr=make_layer_revr,
make_pool_layer=make_pool_layer, make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer, make_inter_layer=make_inter_layer,
kp_layer=residual
):
super(exkp, self).__init__()
self.nstack = nstack
self.heads = heads
curr_dim = dims[0]
self.pre = nn.Sequential(
convolution(7, 3, 128, stride=2),
residual(3, 128, 256, stride=2)
) if pre is None else pre
self.kps = nn.ModuleList([
kp_module(
n, dims, modules, layer=kp_layer,
make_up_layer=make_up_layer,
make_low_layer=make_low_layer,
make_hg_layer=make_hg_layer,
make_hg_layer_revr=make_hg_layer_revr,
make_pool_layer=make_pool_layer,
make_unpool_layer=make_unpool_layer,
make_merge_layer=make_merge_layer
) for _ in range(nstack)
])
self.cnvs = nn.ModuleList([
make_cnv_layer(curr_dim, cnv_dim) for _ in range(nstack)
])
self.inters = nn.ModuleList([
make_inter_layer(curr_dim) for _ in range(nstack - 1)
])
self.inters_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(curr_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
self.cnvs_ = nn.ModuleList([
nn.Sequential(
nn.Conv2d(cnv_dim, curr_dim, (1, 1), bias=False),
nn.BatchNorm2d(curr_dim)
) for _ in range(nstack - 1)
])
## keypoint heatmaps
for head in heads.keys():
if 'heatmap' in head:
module = nn.ModuleList([
make_heat_layer(
cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
])
self.__setattr__(head, module)
for heat in self.__getattr__(head):
heat[-1].bias.data.fill_(-2.19)
else:
module = nn.ModuleList([
make_regr_layer(
cnv_dim, curr_dim, heads[head]) for _ in range(nstack)
])
self.__setattr__(head, module)
self.relu = nn.ReLU(inplace=True)
def forward(self, image):
# print('image shape', image.shape)
inter = self.pre(image)
outs = []
for ind in range(self.nstack):
kp_, cnv_ = self.kps[ind], self.cnvs[ind]
kp = kp_(inter)
cnv = cnv_(kp)
out = {}
for head in self.heads:
layer = self.__getattr__(head)[ind]
y = layer(cnv)
out[head] = y
outs.append(out)
if ind < self.nstack - 1:
inter = self.inters_[ind](inter) + self.cnvs_[ind](cnv)
inter = self.relu(inter)
inter = self.inters[ind](inter)
return outs
def make_hg_layer(kernel, dim0, dim1, mod, layer=convolution, **kwargs):
layers = [layer(kernel, dim0, dim1, stride=2)]
layers += [layer(kernel, dim1, dim1) for _ in range(mod - 1)]
return nn.Sequential(*layers)
class HourglassNet(exkp):
def __init__(self, heads, num_stacks=2):
n = 5
dims = [256, 256, 384, 384, 384, 512]
modules = [2, 2, 2, 2, 2, 4]
super(HourglassNet, self).__init__(
n, num_stacks, dims, modules, heads,
make_tl_layer=None,
make_br_layer=None,
make_pool_layer=make_pool_layer,
make_hg_layer=make_hg_layer,
kp_layer=residual, cnv_dim=256
)
def get_large_hourglass_net(num_layers, heads, head_conv):
model = HourglassNet(heads, 2)
return model
def load_pretrian_model(model, model_path):
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
state_dict_ = checkpoint
state_dict = {}
# convert data_parallal to model
for k in state_dict_:
if k.startswith('module') and not k.startswith('module_list'):
state_dict[k[7:]] = state_dict_[k]
else:
state_dict[k] = state_dict_[k]
model_state_dict = model.state_dict()
# check loaded parameters and created model parameters
msg = 'If you see this, your model does not fully load the ' + \
'pre-trained weight. Please make sure ' + \
'you have correctly specified --arch xxx ' + \
'or set the correct --num_classes for your own dataset.'
for k in state_dict:
if k in model_state_dict:
if state_dict[k].shape != model_state_dict[k].shape:
print('Skip loading parameter {}, required shape{}, ' \
'loaded shape{}. {}'.format(
k, model_state_dict[k].shape, state_dict[k].shape, msg))
state_dict[k] = model_state_dict[k]
else:
print('Drop parameter {}.'.format(k) + msg)
for k in model_state_dict:
if not (k in state_dict):
print('No param {}.'.format(k) + msg)
state_dict[k] = model_state_dict[k]
model.load_state_dict(state_dict, strict=False)
return model
if __name__ == '__main__':
    # heads below is a hypothetical example mapping head names to channel
    # counts; num_layers and head_conv are accepted but unused here
    net = get_large_hourglass_net(num_layers=0, heads={'heatmap': 3, 'offset_2d': 2}, head_conv=256)
    print(net) | 11,068 | 31.365497 | 118 | py |
PalmTree | PalmTree-master/src/config.py | """
Configuration file.
"""
VOCAB_SIZE = 10000
USE_CUDA = True
DEVICES = [0]
CUDA_DEVICE = DEVICES[0]
VERSION = 1
MAXLEN = 10
| 127 | 10.636364 | 24 | py |
PalmTree | PalmTree-master/src/train_palmtree.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
from config import *
import numpy as np
import palmtree
from palmtree import dataset
from palmtree import trainer
import pickle as pkl
print(palmtree.__file__)
vocab_path = "cdfg_bert_1/vocab"
train_cfg_dataset = "data/training/cdfg_bert_1/cfg_train.txt"
train_dfg_dataset = "data/training/cdfg_bert_1/dfg_train.txt"
test_dataset = "data/training/cdfg_bert_1/test.txt"
sent_dataset = "data/sentence.pkl"
output_path = "cdfg_bert_1/transformer"
with open(train_cfg_dataset, "r", encoding="utf-8") as f1:
with open(train_dfg_dataset, "r", encoding="utf-8") as f2:
vocab = dataset.WordVocab([f1, f2], max_size=13000, min_freq=1)
print("VOCAB SIZE:", len(vocab))
vocab.save_vocab(vocab_path)
print("Loading Vocab", vocab_path)
vocab = dataset.WordVocab.load_vocab(vocab_path)
print("Vocab Size: ", len(vocab))
# print(vocab.itos)
print("Loading Train Dataset")
# NOTE: BERTDataset takes the DFG corpus first, then the CFG corpus
train_dataset = dataset.BERTDataset(train_dfg_dataset, train_cfg_dataset, vocab, seq_len=20,
                                    corpus_lines=None, on_memory=True)
print("Loading Test Dataset", test_dataset)
test_dataset = dataset.BERTDataset(test_dataset, test_dataset, vocab, seq_len=20, on_memory=True) \
    if test_dataset is not None else None
print("Creating Dataloader")
train_data_loader = DataLoader(train_dataset, batch_size=256, num_workers=10)
test_data_loader = DataLoader(test_dataset, batch_size=256, num_workers=10) \
if test_dataset is not None else None
print("Building BERT model")
bert = palmtree.BERT(len(vocab), hidden=128, n_layers=12, attn_heads=8, dropout=0.0)
print("Creating BERT Trainer")
trainer = trainer.BERTTrainer(bert, len(vocab), train_dataloader=train_data_loader, test_dataloader=test_data_loader,
lr=1e-5, betas=(0.9, 0.999), weight_decay=0.0,
with_cuda=True, cuda_devices=[0], log_freq=100)
print("Training Start")
for epoch in range(20):
trainer.train(epoch)
trainer.save(epoch, output_path)
# if test_data_loader is not None:
# trainer.test(epoch)
| 2,202 | 32.378788 | 117 | py |
PalmTree | PalmTree-master/src/palmtree/__init__.py | from .model import BERT
| 24 | 11.5 | 23 | py |
PalmTree | PalmTree-master/src/palmtree/trainer/pretrain.py | import torch
import torch.nn as nn
from torch.optim import Adam, AdamW
from torch.utils.data import DataLoader
from ..model import BERTLM, BERT
from .optim_schedule import ScheduledOptim
import tqdm
class BERTTrainer:
"""
BERTTrainer make the pretrained BERT model with two LM training method.
1. Masked Language Model : 3.3.1 Task #1: Masked LM
2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction
please check the details on README.md with simple example.
"""
def __init__(self, bert: BERT, vocab_size: int,
train_dataloader: DataLoader, test_dataloader: DataLoader = None,
lr: float = 1e-4, betas=(0.9, 0.999), weight_decay: float = 0.01, warmup_steps=10000,
with_cuda: bool = True, cuda_devices=None, log_freq: int = 10):
"""
:param bert: BERT model which you want to train
:param vocab_size: total word vocab size
:param train_dataloader: train dataset data loader
:param test_dataloader: test dataset data loader [can be None]
:param lr: learning rate of optimizer
:param betas: Adam optimizer betas
:param weight_decay: Adam optimizer weight decay param
        :param with_cuda: whether to train with CUDA
:param log_freq: logging frequency of the batch iteration
"""
# Setup cuda device for BERT training, argument -c, --cuda should be true
cuda_condition = torch.cuda.is_available() and with_cuda
self.device = torch.device("cuda:0" if cuda_condition else "cpu")
# This BERT model will be saved every epoch
self.bert = bert
# Initialize the BERT Language Model, with BERT model
self.model = BERTLM(bert, vocab_size).to(self.device)
# Distributed GPU training if CUDA can detect more than 1 GPU
if with_cuda and torch.cuda.device_count() > 1:
print("Using %d GPUS for BERT" % torch.cuda.device_count())
self.model = nn.DataParallel(self.model, device_ids=cuda_devices)
# Setting the train and test data loader
self.train_data = train_dataloader
self.test_data = test_dataloader
# Setting the Adam optimizer with hyper-param
# self.optim = Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
self.optim = AdamW(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
self.optim_schedule = ScheduledOptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)
# Using Negative Log Likelihood Loss function for predicting the masked_token
self.masked_criterion = nn.NLLLoss(ignore_index=0)
self.dfg_next_criterion = nn.NLLLoss()
self.cfg_next_criterion = nn.NLLLoss()
self.comp_criterion = nn.NLLLoss(ignore_index=0)
self.sentence_bert = nn.NLLLoss()
self.log_freq = log_freq
print("Total Parameters:", sum([p.nelement() for p in self.model.parameters()]))
def train(self, epoch):
self.iteration(epoch, self.train_data)
def test(self, epoch):
self.iteration(epoch, self.test_data, train=False)
def iteration(self, epoch, data_loader, train=True):
"""
loop over the data_loader for training or testing
        if train is True, the backward pass and optimizer step are performed;
        the caller is expected to save the model once per epoch
:param epoch: current epoch index
:param data_loader: torch.utils.data.DataLoader for iteration
:param train: boolean value of is train or test
:return: None
"""
str_code = "train" if train else "test"
# Setting the tqdm progress bar
data_iter = tqdm.tqdm(enumerate(data_loader),
desc="EP_%s:%d" % (str_code, epoch),
total=len(data_loader),
bar_format="{l_bar}{r_bar}")
avg_loss = 0.0
total_correct = 0
total_element = 0
for i, data in data_iter:
# 0. batch_data will be sent into the device(GPU or cpu)
data = {key: value.to(self.device) for key, value in data.items()}
# 1. forward the next_sentence_prediction and masked_lm model
dfg_next_sent_output, cfg_next_sent_output, mask_lm_output= self.model.forward(data["dfg_bert_input"], data["dfg_segment_label"], data["cfg_bert_input"], data["cfg_segment_label"])
# 2-1. NLL(negative log likelihood) loss of is_next classification result
dfg_next_loss = self.dfg_next_criterion(dfg_next_sent_output, data["dfg_is_next"])
cfg_next_loss = self.cfg_next_criterion(cfg_next_sent_output, data["cfg_is_next"])
# 2-2. NLLLoss of predicting masked token word
mask_loss = self.masked_criterion(mask_lm_output.transpose(1, 2), data["dfg_bert_label"])
# 2-3 NLLloss of instruction component prediction
#comp_loss = self.comp_criterion(inst_comp_output.transpose(1, 2), data["component"])
# 2-5. Adding next_loss and mask_loss : 3.4 Pre-training Procedure
loss = dfg_next_loss + cfg_next_loss + mask_loss
# 3. backward and optimization only in train
if train:
self.optim_schedule.zero_grad()
loss.backward()
self.optim_schedule.step_and_update_lr()
post_fix = {
"epoch": epoch,
"iter": i,
"CWP:": cfg_next_loss.item(),
"DUP:": dfg_next_loss.item(),
"MLM:": mask_loss.item(),
}
if i % self.log_freq == 0:
data_iter.write(str(post_fix))
def save(self, epoch, file_path="output/bert_trained.model"):
"""
Saving the current BERT model on file_path
:param epoch: current epoch number
        :param file_path: model output path, saved as file_path + ".ep%d" % epoch
:return: final_output_path
"""
output_path = file_path + ".ep%d" % epoch
torch.save(self.bert.cpu(), output_path)
self.bert.to(self.device)
print("EP:%d Model Saved on:" % epoch, output_path)
return output_path
| 6,299 | 39.645161 | 192 | py |
PalmTree | PalmTree-master/src/palmtree/trainer/__init__.py | from .pretrain import BERTTrainer
| 34 | 16.5 | 33 | py |
PalmTree | PalmTree-master/src/palmtree/trainer/optim_schedule.py | '''A wrapper class for optimizer '''
import numpy as np
class ScheduledOptim():
'''A simple wrapper class for learning rate scheduling'''
def __init__(self, optimizer, d_model, n_warmup_steps):
self._optimizer = optimizer
self.n_warmup_steps = n_warmup_steps
self.n_current_steps = 0
self.init_lr = np.power(d_model, -0.5)
def step_and_update_lr(self):
"Step with the inner optimizer"
self._update_learning_rate()
self._optimizer.step()
def zero_grad(self):
"Zero out the gradients by the inner optimizer"
self._optimizer.zero_grad()
def _get_lr_scale(self):
return np.min([
np.power(self.n_current_steps, -0.5),
np.power(self.n_warmup_steps, -1.5) * self.n_current_steps])
def _update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_current_steps += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
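if __name__ == '__main__':
    # a minimal sketch (added, not in the original file) of the Noam-style
    # schedule: lr grows linearly during warmup, then decays as step^-0.5
    import torch
    opt = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=0.0)
    sched = ScheduledOptim(opt, d_model=128, n_warmup_steps=10)
    for step in range(1, 16):
        sched.step_and_update_lr()
        print(step, round(opt.param_groups[0]['lr'], 6))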
| 1,069 | 28.722222 | 72 | py |
PalmTree | PalmTree-master/src/palmtree/dataset/dataset.py | from torch.utils.data import Dataset
import tqdm
import torch
import random
import pickle as pkl
class BERTDataset(Dataset):
def __init__(self, dfg_corpus_path, cfg_corpus_path, vocab, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
self.vocab = vocab
self.seq_len = seq_len
self.bb_len = 50
self.on_memory = on_memory
self.corpus_lines = corpus_lines
self.dfg_corpus_path = dfg_corpus_path
self.cfg_corpus_path = cfg_corpus_path
self.encoding = encoding
# load DFG sequences
with open(dfg_corpus_path, "r", encoding=encoding) as f:
if self.corpus_lines is None and not on_memory:
for _ in tqdm.tqdm(f, desc="Loading Dataset", total=corpus_lines):
self.corpus_lines += 1
if on_memory:
self.dfg_lines = [line[:-1].split("\t")
for line in tqdm.tqdm(f, desc="Loading Dataset", total=corpus_lines)]
self.corpus_lines = len(self.dfg_lines)
# load CFG sequences
with open(cfg_corpus_path, "r", encoding=encoding) as f:
if self.corpus_lines is None and not on_memory:
for _ in tqdm.tqdm(f, desc="Loading Dataset", total=corpus_lines):
self.corpus_lines += 1
if on_memory:
self.cfg_lines = [line[:-1].split("\t")
for line in tqdm.tqdm(f, desc="Loading Dataset", total=corpus_lines)]
if self.corpus_lines > len(self.cfg_lines):
self.corpus_lines = len(self.cfg_lines)
        if not on_memory:
            # the original streaming branch referenced an undefined `corpus_path`
            # and mis-called random.randint, so fail loudly instead
            raise NotImplementedError("BERTDataset currently requires on_memory=True")
def __len__(self):
return self.corpus_lines
def __getitem__(self, item):
c1, c2, c_label, d1, d2, d_label = self.random_sent(item)
d1_random, d1_label = self.random_word(d1)
d2_random, d2_label = self.random_word(d2)
d1 = [self.vocab.sos_index] + d1_random + [self.vocab.eos_index]
d2 = d2_random + [self.vocab.eos_index]
c1 = [self.vocab.sos_index] + [self.vocab.stoi.get(c, self.vocab.unk_index) for c in c1.split()] + [self.vocab.eos_index]
c2 = [self.vocab.stoi.get(c, self.vocab.unk_index) for c in c2.split()] + [self.vocab.eos_index]
d1_label = [self.vocab.pad_index] + d1_label + [self.vocab.pad_index]
d2_label = d2_label + [self.vocab.pad_index]
dfg_segment_label = ([1 for _ in range(len(d1))] + [2 for _ in range(len(d2))])[:self.seq_len]
cfg_segment_label = ([1 for _ in range(len(c1))] + [2 for _ in range(len(c2))])[:self.seq_len]
dfg_bert_input = (d1 + d2)[:self.seq_len]
dfg_bert_label = (d1_label + d2_label)[:self.seq_len]
cfg_bert_input = (c1 + c2)[:self.seq_len]
padding = [self.vocab.pad_index for _ in range(self.seq_len - len(dfg_bert_input))]
dfg_bert_input.extend(padding), dfg_bert_label.extend(padding), dfg_segment_label.extend(padding) #, comp_label.extend(padding)
cfg_padding = [self.vocab.pad_index for _ in range(self.seq_len - len(cfg_bert_input))]
cfg_bert_input.extend(cfg_padding), cfg_segment_label.extend(cfg_padding)
output = {"dfg_bert_input": dfg_bert_input,
"dfg_bert_label": dfg_bert_label,
"dfg_segment_label": dfg_segment_label,
"dfg_is_next": d_label,
"cfg_bert_input": cfg_bert_input,
"cfg_segment_label": cfg_segment_label,
"cfg_is_next": c_label
}
return {key: torch.tensor(value) for key, value in output.items()}
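    # NOTE: random_bb/get_index_bb below reference self.bb_pairs, which is never
    # initialized in this class; they look like leftovers from an earlier
    # basic-block pairing task and are not used by __getitem__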
def random_bb(self):
prob = random.random()
if prob > 0.5:
bb_pair = self.bb_pairs[random.choice(list(self.bb_pairs.keys()))]
return bb_pair, 1
else:
neg_keys = random.choices(list(self.bb_pairs.keys()), k=2)
bb_pair = (self.bb_pairs[neg_keys[0]][0], self.bb_pairs[neg_keys[1]][1])
return bb_pair, 0
def get_index_bb(self, bb_pair):
tokens1 = [self.vocab.sos_index]
segment1 = [1]
i = 1
for ins in bb_pair[0].split(";")[-5:]:
if ins:
for token in ins.split():
tokens1.append(self.vocab.stoi.get(token, self.vocab.unk_index))
segment1.append(i)
tokens1.append(self.vocab.eos_index)
segment1.append(i)
i += 1
tokens2 = [self.vocab.sos_index]
segment2 = [1]
j = 1
        for ins in bb_pair[1].split(";")[-5:]:
if ins:
for token in ins.split():
tokens2.append(self.vocab.stoi.get(token, self.vocab.unk_index))
segment2.append(j)
tokens2.append(self.vocab.eos_index)
segment2.append(j)
j += 1
tokens1 = tokens1[:self.bb_len]
tokens2 = tokens2[:self.bb_len]
segment1 = segment1[:self.bb_len]
segment2 = segment2[:self.bb_len]
padding1 = [self.vocab.pad_index for _ in range(self.bb_len - len(tokens1))]
padding2 = [self.vocab.pad_index for _ in range(self.bb_len - len(tokens2))]
tokens1.extend(padding1)
tokens2.extend(padding2)
segment1.extend(padding1)
segment2.extend(padding2)
return tokens1, tokens2, segment1, segment2
def random_word(self, sentence):
tokens = sentence.split()
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = self.vocab.mask_index
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.randrange(len(self.vocab))
# 10% randomly change token to current token
else:
tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
output_label.append(self.vocab.stoi.get(token, self.vocab.unk_index))
else:
tokens[i] = self.vocab.stoi.get(token, self.vocab.unk_index)
output_label.append(0)
return tokens, output_label
def random_sent(self, index):
c1, c2, d1, d2 = self.get_corpus_line(index)
dice = random.random() # TODO: should throw the dice twice here.
        if dice < 0.25:
return c1, c2, 1, d1, d2, 1
elif 0.25 <= dice < 0.5:
return c1, self.get_random_line(), 0, d1, d2, 1
elif 0.5 <= dice < 0.75:
return c1, c2, 1, d2, d1, 0
else:
return c1, self.get_random_line(), 0, d2, d1, 0
def get_corpus_line(self, item):
if self.on_memory:
return self.cfg_lines[item][0], self.cfg_lines[item][1], self.dfg_lines[item][0], self.dfg_lines[item][1]
        # now only on_memory corpora are supported
# else:
# line = self.file.__next__()
# if line is None:
# self.file.close()
# self.file = open(self.corpus_path, "r", encoding=self.encoding)
# line = self.file.__next__()
# t1, t2 = line[:-1].split("\t")
# return t1, t2
def get_random_line(self):
if self.on_memory:
l = self.cfg_lines[random.randrange(len(self.cfg_lines))]
return l[1]
        # now only on_memory corpora are supported
# line = self.file.__next__()
# if line is None:
# self.file.close()
# self.file = open(self.corpus_path, "r", encoding=self.encoding)
# for _ in range(random.randint(self.corpus_lines if self.corpus_lines < 1000 else 1000)):
# self.random_file.__next__()
# line = self.random_file.__next__()
# return line[:-1].split("\t")[1] | 8,441 | 36.189427 | 135 | py |
PalmTree | PalmTree-master/src/palmtree/dataset/vocab.py | import pickle
import tqdm
from collections import Counter
class TorchVocab(object):
"""Defines a vocabulary object that will be used to numericalize a field.
Attributes:
freqs: A collections.Counter object holding the frequencies of tokens
in the data used to build the Vocab.
        stoi: A dict mapping token strings to
            numerical identifiers.
itos: A list of token strings indexed by their numerical identifiers.
"""
def __init__(self, counter, max_size=None, min_freq=1, specials=['<pad>', '<oov>'],
vectors=None, unk_init=None, vectors_cache=None):
"""Create a Vocab object from a collections.Counter.
Arguments:
counter: collections.Counter object holding the frequencies of
each value found in the data.
max_size: The maximum size of the vocabulary, or None for no
maximum. Default: None.
min_freq: The minimum frequency needed to include a token in the
vocabulary. Values less than 1 will be set to 1. Default: 1.
specials: The list of special tokens (e.g., padding or eos) that
will be prepended to the vocabulary in addition to an <unk>
token. Default: ['<pad>']
vectors: One of either the available pretrained vectors
or custom pretrained vectors (see Vocab.load_vectors);
or a list of aforementioned vectors
unk_init (callback): by default, initialize out-of-vocabulary word vectors
to zero vectors; can be any function that takes in a Tensor and
returns a Tensor of the same size. Default: torch.Tensor.zero_
vectors_cache: directory for cached vectors. Default: '.vector_cache'
"""
self.freqs = counter
counter = counter.copy()
min_freq = max(min_freq, 1)
self.itos = list(specials)
# frequencies of special tokens are not counted when building vocabulary
# in frequency order
for tok in specials:
del counter[tok]
max_size = None if max_size is None else max_size + len(self.itos)
# sort by frequency, then alphabetically
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
for word, freq in words_and_frequencies:
if freq < min_freq or len(self.itos) == max_size:
break
self.itos.append(word)
# stoi is simply a reverse dict for itos
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
self.vectors = None
if vectors is not None:
self.load_vectors(vectors, unk_init=unk_init, cache=vectors_cache)
else:
assert unk_init is None and vectors_cache is None
def __eq__(self, other):
if self.freqs != other.freqs:
return False
if self.stoi != other.stoi:
return False
if self.itos != other.itos:
return False
if self.vectors != other.vectors:
return False
return True
def __len__(self):
return len(self.itos)
def vocab_rerank(self):
self.stoi = {word: i for i, word in enumerate(self.itos)}
def extend(self, v, sort=False):
words = sorted(v.itos) if sort else v.itos
for w in words:
if w not in self.stoi:
self.itos.append(w)
self.stoi[w] = len(self.itos) - 1
class Vocab(TorchVocab):
def __init__(self, counter, max_size=None, min_freq=1):
self.pad_index = 0
self.unk_index = 1
self.eos_index = 2
self.sos_index = 3
self.mask_index = 4
super().__init__(counter, specials=["<pad>", "<unk>", "<eos>", "<sos>", "<mask>"],
max_size=max_size, min_freq=min_freq)
def to_seq(self, sentece, seq_len, with_eos=False, with_sos=False) -> list:
pass
def from_seq(self, seq, join=False, with_pad=False):
pass
@staticmethod
def load_vocab(vocab_path: str) -> 'Vocab':
with open(vocab_path, "rb") as f:
return pickle.load(f)
def save_vocab(self, vocab_path):
with open(vocab_path, "wb") as f:
pickle.dump(self, f)
# Building Vocab with text files
class WordVocab(Vocab):
def __init__(self, texts, max_size=None, min_freq=1):
print("Building Vocab")
counter = Counter()
for t in texts:
for line in tqdm.tqdm(t):
if isinstance(line, list):
words = line
else:
words = line.replace("\n", " ").replace("\t", " ").split()
for word in words:
counter[word] += 1
super().__init__(counter, max_size=max_size, min_freq=min_freq)
def to_seq(self, sentence, seq_len=None, with_eos=False, with_sos=False, with_len=False):
if isinstance(sentence, str):
sentence = sentence.split()
seq = [self.stoi.get(word, self.unk_index) for word in sentence]
if with_eos:
seq += [self.eos_index] # this would be index 1
if with_sos:
seq = [self.sos_index] + seq
origin_seq_len = len(seq)
if seq_len is None:
pass
elif len(seq) <= seq_len:
seq += [self.pad_index for _ in range(seq_len - len(seq))]
else:
seq = seq[:seq_len]
return (seq, origin_seq_len) if with_len else seq
def from_seq(self, seq, join=False, with_pad=False):
words = [self.itos[idx]
if idx < len(self.itos)
else "<%d>" % idx
for idx in seq
                 if with_pad or idx != self.pad_index]  # keep pads only when with_pad is True
return " ".join(words) if join else words
@staticmethod
def load_vocab(vocab_path: str) -> 'WordVocab':
with open(vocab_path, "rb") as f:
return pickle.load(f)
def build():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--corpus_path", required=True, type=str)
parser.add_argument("-o", "--output_path", required=True, type=str)
parser.add_argument("-s", "--vocab_size", type=int, default=None)
parser.add_argument("-e", "--encoding", type=str, default="utf-8")
parser.add_argument("-m", "--min_freq", type=int, default=1)
args = parser.parse_args()
with open(args.corpus_path, "r", encoding=args.encoding) as f:
vocab = WordVocab(f, max_size=args.vocab_size, min_freq=args.min_freq)
print("VOCAB SIZE:", len(vocab))
vocab.save_vocab(args.output_path)
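if __name__ == '__main__':
    # hedged round-trip example (an addition, not in the original file): build
    # a vocab from two toy "instruction" lines, then encode and decode one
    v = WordVocab([["mov eax ebx", "add eax 0x1"]], max_size=100, min_freq=1)
    seq = v.to_seq("mov eax 0x1", seq_len=6, with_eos=True, with_sos=True)
    print(seq)
    print(v.from_seq(seq, join=True))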
| 6,797 | 35.352941 | 93 | py |
PalmTree | PalmTree-master/src/palmtree/dataset/__init__.py | from .dataset import BERTDataset
from .vocab import WordVocab
| 62 | 20 | 32 | py |
PalmTree | PalmTree-master/src/palmtree/model/bert.py | import torch.nn as nn
from .transformer import TransformerBlock
from .embedding import BERTEmbedding
class BERT(nn.Module):
"""
BERT model : Bidirectional Encoder Representations from Transformers.
"""
def __init__(self, vocab_size, hidden=768, n_layers=12, attn_heads=12, dropout=0.1):
"""
:param vocab_size: vocab_size of total words
:param hidden: BERT model hidden size
:param n_layers: numbers of Transformer blocks(layers)
:param attn_heads: number of attention heads
:param dropout: dropout rate
"""
super().__init__()
self.hidden = hidden
self.n_layers = n_layers
self.attn_heads = attn_heads
# paper noted they used 4*hidden_size for ff_network_hidden_size
self.feed_forward_hidden = hidden * 4
# embedding for BERT, sum of positional, segment, token embeddings
self.embedding = BERTEmbedding(vocab_size=vocab_size, embed_size=hidden)
# multi-layers transformer blocks, deep network
self.transformer_blocks = nn.ModuleList(
[TransformerBlock(hidden, attn_heads, hidden * 4, dropout) for _ in range(n_layers)])
def forward(self, x, segment_info):
# attention masking for padded token
        # boolean mask of shape [batch_size, 1, seq_len, seq_len]
mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
# embedding the indexed sequence to sequence of vectors
x = self.embedding(x, segment_info)
# running over multiple transformer blocks
for transformer in self.transformer_blocks:
x = transformer.forward(x, mask)
return x
def encode(self, x, segment_info):
mask = (x > 0).unsqueeze(1).repeat(1, x.size(1), 1).unsqueeze(1)
# embedding the indexed sequence to sequence of vectors
x = self.embedding(x, segment_info)
# running over multiple transformer blocks
for transformer in self.transformer_blocks[:-1]:
x = transformer.forward(x, mask)
return x
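if __name__ == '__main__':
    # hedged shape check (an addition; run as a module, e.g.
    # `python -m palmtree.model.bert`, so the relative imports resolve)
    import torch
    model = BERT(vocab_size=5000, hidden=128, n_layers=2, attn_heads=8)
    tokens = torch.randint(1, 5000, (2, 10))
    segments = torch.ones_like(tokens)
    print(model(tokens, segments).shape)  # torch.Size([2, 10, 128])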
| 2,088 | 29.720588 | 97 | py |
PalmTree | PalmTree-master/src/palmtree/model/transformer.py | import torch.nn as nn
from .attention import MultiHeadedAttention
from .utils import SublayerConnection, PositionwiseFeedForward
class TransformerBlock(nn.Module):
"""
Bidirectional Encoder = Transformer (self-attention)
Transformer = MultiHead_Attention + Feed_Forward with sublayer connection
"""
def __init__(self, hidden, attn_heads, feed_forward_hidden, dropout):
"""
:param hidden: hidden size of transformer
:param attn_heads: head sizes of multi-head attention
:param feed_forward_hidden: feed_forward_hidden, usually 4*hidden_size
:param dropout: dropout rate
"""
super().__init__()
self.attention = MultiHeadedAttention(h=attn_heads, d_model=hidden)
self.feed_forward = PositionwiseFeedForward(d_model=hidden, d_ff=feed_forward_hidden, dropout=dropout)
self.input_sublayer = SublayerConnection(size=hidden, dropout=dropout)
self.output_sublayer = SublayerConnection(size=hidden, dropout=dropout)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x, mask):
x = self.input_sublayer(x, lambda _x: self.attention.forward(_x, _x, _x, mask=mask))
x = self.output_sublayer(x, self.feed_forward)
return self.dropout(x)
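if __name__ == '__main__':
    # hedged shape check (an addition; run as a module so the relative imports
    # resolve): one encoder block preserves the (batch, seq, hidden) shape
    import torch
    block = TransformerBlock(hidden=128, attn_heads=8, feed_forward_hidden=512, dropout=0.1)
    x = torch.randn(2, 10, 128)
    mask = torch.ones(2, 1, 10, 10)  # all positions attend to each other
    print(block(x, mask).shape)  # torch.Size([2, 10, 128])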
| 1,276 | 38.90625 | 110 | py |
PalmTree | PalmTree-master/src/palmtree/model/__init__.py | from .bert import BERT
from .language_model import BERTLM
| 58 | 18.666667 | 34 | py |
PalmTree | PalmTree-master/src/palmtree/model/language_model.py | import torch.nn as nn
import torch
from .bert import BERT
class BERTLM(nn.Module):
"""
BERT Language Model
Next Sentence Prediction Model + Masked Language Model
"""
def __init__(self, bert: BERT, vocab_size):
"""
:param bert: BERT model which should be trained
:param vocab_size: total vocab size for masked_lm
"""
super().__init__()
self.bert = bert
        self.CWP = NextSentencePrediction(self.bert.hidden)
self.DUP = NextSentencePrediction(self.bert.hidden)
self.MLM = MaskedLanguageModel(self.bert.hidden, vocab_size)
def forward(self, d, d_segment_label, c, c_segment_label):
d = self.bert(d, d_segment_label)
c = self.bert(c, c_segment_label)
return self.DUP(d), self.CWP(c), self.MLM(d)
class NextSentencePrediction(nn.Module):
"""
From NSP task, now used for DUP and CWP
"""
def __init__(self, hidden):
"""
:param hidden: BERT model output size
"""
super().__init__()
self.linear = nn.Linear(hidden, 2)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, x):
return self.softmax(self.linear(x[:, 0]))
class MaskedLanguageModel(nn.Module):
"""
predicting origin token from masked input sequence
n-class classification problem, n-class = vocab_size
"""
def __init__(self, hidden, vocab_size):
"""
:param hidden: output size of BERT model
:param vocab_size: total vocab size
"""
super().__init__()
self.linear = nn.Linear(hidden, vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, x):
return self.softmax(self.linear(x))
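if __name__ == '__main__':
    # hedged smoke test (an addition; run as a module so the relative import
    # resolves): the three heads yield DUP/CWP logits and MLM distributions
    bert = BERT(vocab_size=1000, hidden=64, n_layers=2, attn_heads=4)
    lm = BERTLM(bert, vocab_size=1000)
    d = torch.randint(1, 1000, (2, 12))
    seg = torch.ones_like(d)
    dup, cwp, mlm = lm(d, seg, d, seg)
    print(dup.shape, cwp.shape, mlm.shape)  # (2, 2) (2, 2) (2, 12, 1000)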
| 1,740 | 24.602941 | 68 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/bert.py | import torch.nn as nn
from .token import TokenEmbedding
from .position import PositionalEmbedding
from .segment import SegmentEmbedding
class BERTEmbedding(nn.Module):
"""
BERT Embedding which is consisted with under features
1. TokenEmbedding : normal embedding matrix
2. PositionalEmbedding : adding positional information using sin, cos
        3. SegmentEmbedding : adding sentence segment info, (sent_A:1, sent_B:2)
sum of all these features are output of BERTEmbedding
"""
def __init__(self, vocab_size, embed_size, dropout=0.1):
"""
:param vocab_size: total vocab size
:param embed_size: embedding size of token embedding
:param dropout: dropout rate
"""
super().__init__()
self.token = TokenEmbedding(vocab_size=vocab_size, embed_size=embed_size)
self.position = PositionalEmbedding(d_model=self.token.embedding_dim)
self.segment = SegmentEmbedding(embed_size=self.token.embedding_dim)
self.dropout = nn.Dropout(p=dropout)
self.embed_size = embed_size
def forward(self, sequence, segment_label):
x = self.token(sequence) + self.position(sequence) + self.segment(segment_label)
return self.dropout(x)
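if __name__ == '__main__':
    # hedged shape check (an addition; run as a module so the relative imports
    # resolve): the three sub-embeddings sum into one (batch, seq, embed) tensor
    import torch
    emb = BERTEmbedding(vocab_size=1000, embed_size=64)
    tokens = torch.randint(1, 1000, (2, 10))
    segments = torch.ones_like(tokens)
    print(emb(tokens, segments).shape)  # torch.Size([2, 10, 64])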
| 1,261 | 37.242424 | 88 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/position.py | import torch.nn as nn
import torch
import math
class PositionalEmbedding(nn.Module):
def __init__(self, d_model, max_len=512):
super().__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
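# Hedged sketch (added for illustration): the encoding buffer has shape
# (1, max_len, d_model); forward() slices it to the input length so it can be
# broadcast-added onto token embeddings of shape (batch, seq_len, d_model).
if __name__ == '__main__':
    pe = PositionalEmbedding(d_model=8, max_len=16)
    x = torch.zeros(4, 10, 8)
    print(pe(x).shape)                              # torch.Size([1, 10, 8])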
| 710 | 26.346154 | 95 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/segment.py | import torch.nn as nn
class SegmentEmbedding(nn.Embedding):
def __init__(self, embed_size=512):
super().__init__(3, embed_size, padding_idx=0)
| 157 | 21.571429 | 54 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/token.py | import torch.nn as nn
class TokenEmbedding(nn.Embedding):
def __init__(self, vocab_size, embed_size=512):
super().__init__(vocab_size, embed_size, padding_idx=0)
| 176 | 24.285714 | 63 | py |
PalmTree | PalmTree-master/src/palmtree/model/embedding/__init__.py | from .bert import BERTEmbedding
| 32 | 15.5 | 31 | py |
PalmTree | PalmTree-master/src/palmtree/model/attention/multi_head.py | import torch.nn as nn
from .single import Attention
class MultiHeadedAttention(nn.Module):
"""
Take in model size and number of heads.
"""
def __init__(self, h, d_model, dropout=0.1):
super().__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
self.output_linear = nn.Linear(d_model, d_model)
self.attention = Attention()
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
batch_size = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = [l(x).view(batch_size, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linear_layers, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, attn = self.attention(query, key, value, mask=mask, dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.h * self.d_k)
return self.output_linear(x)
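# Hedged usage sketch (added for illustration; not part of the original file):
# a smoke test with random tensors. h must divide d_model (here d_k = 32 / 4).
if __name__ == '__main__':
    import torch
    mha = MultiHeadedAttention(h=4, d_model=32)
    x = torch.randn(2, 6, 32)                       # (batch, seq_len, d_model)
    print(mha(x, x, x).shape)                       # torch.Size([2, 6, 32])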
| 1,268 | 32.394737 | 91 | py |
PalmTree | PalmTree-master/src/palmtree/model/attention/__init__.py | from .multi_head import MultiHeadedAttention
from .single import Attention
| 75 | 24.333333 | 44 | py |
PalmTree | PalmTree-master/src/palmtree/model/attention/single.py | import torch.nn as nn
import torch.nn.functional as F
import torch
import math
class Attention(nn.Module):
"""
Compute 'Scaled Dot Product Attention
"""
def forward(self, query, key, value, mask=None, dropout=None):
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(query.size(-1))
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
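# Hedged sketch (added for illustration): scaled dot-product attention on
# random inputs; the softmaxed weights sum to 1 along the key dimension.
if __name__ == '__main__':
    q = k = v = torch.randn(2, 4, 6, 8)             # (batch, heads, seq, d_k)
    out, attn = Attention()(q, k, v)
    print(out.shape, attn.sum(-1)[0, 0, 0].item())  # (2, 4, 6, 8) and ~1.0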
| 596 | 21.961538 | 66 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/gelu.py | import torch.nn as nn
import torch
import math
class GELU(nn.Module):
"""
Paper Section 3.4, last paragraph notice that BERT used the GELU instead of RELU
"""
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
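# Hedged check (added for illustration): the tanh approximation above tracks
# the exact GELU, 0.5 * x * (1 + erf(x / sqrt(2))), to within about 1e-3.
if __name__ == '__main__':
    x = torch.linspace(-3, 3, 7)
    exact = 0.5 * x * (1 + torch.erf(x / math.sqrt(2)))
    print(torch.max(torch.abs(GELU()(x) - exact)).item())  # < 1e-3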
| 301 | 22.230769 | 100 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/feed_forward.py | import torch.nn as nn
from .gelu import GELU
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
self.activation = GELU()
def forward(self, x):
return self.w_2(self.dropout(self.activation(self.w_1(x))))
| 488 | 27.764706 | 67 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/sublayer.py | import torch.nn as nn
from .layer_norm import LayerNorm
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
| 565 | 28.789474 | 71 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/__init__.py | from .feed_forward import PositionwiseFeedForward
from .layer_norm import LayerNorm
from .sublayer import SublayerConnection
from .gelu import GELU
| 148 | 28.8 | 49 | py |
PalmTree | PalmTree-master/src/palmtree/model/utils/layer_norm.py | import torch.nn as nn
import torch
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
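# Hedged check (added for illustration): with the default a_2=1, b_2=0, each
# row of the output has mean ~0 (and std ~1 up to the unbiased-std correction).
if __name__ == '__main__':
    y = LayerNorm(8)(torch.randn(3, 8))
    print(y.mean(-1))                               # ~0 for each of the 3 rows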
| 519 | 27.888889 | 66 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/registers.py |
"""
The Design of Instruction Decoder
The references come from:
1. Intel® 64 and IA-32 Architectures Software Developer’s Manual: https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
2. x64_cheatsheet: https://cs.brown.edu/courses/cs033/docs/guides/x64_cheatsheet.pdf
3. ORACLE x86 assembly language Reference Manual: https://docs.oracle.com/cd/E26502_01/html/E28388/ennbz.html#scrolltoc
4. ORACLE AMD64 register information: https://docs.oracle.com/cd/E19205-01/821-2506/gituv/index.html
5. http://service.scs.carleton.ca/sivarama/asm_book_web/Student_copies/ch5_addrmodes.pdf
OPCODE is a 200 bit one-hot vector
Operand type is a 3 bit binary vector
Register is a 100 bit one-hot vector
| OPCODE | Operand type1 | Base | Index | Scale | Offset | Operand type2 |...|
| 200 bit | 3 bit | 100 bit | 100 bit | 1 bit | 17 bit |...|
| Register | |...|
| 100 bit | 0 | |...|
| string | |...|
| 16 bit | 0 | |...|
"""
OPCODE_LEN = 200
OPERAND_TYPE = 3
REGISTER_LEN = 100
# does not support SSE SSE2 and MMX instructions
OPCODE = [
# Data Movement
"mov",
"push",
"pop",
"cwtl",
"cltq",
"cqto",
# Unary Operations
"inc",
"dec",
"neg",
"not",
# Binary Operations
"lea",
"leaq",
"add",
"sub",
"imul",
"xor",
"or",
"and",
# Shift Operations
"sal",
"sar",
"shr",
# Special Arithmetic Operations
"imulq",
"mulq",
"idivq",
"divq",
# Comparison and Test Instructions
"cmp",
"test",
# Conditional Set Instructions
"sete",
"setz",
"setne",
"setnz",
"sets",
"setns",
"setg",
"setnle",
"setge",
"setnl",
"setl",
"setnge",
"setle",
"setng",
"seta",
"setnbe",
"setae",
"setnb",
"setbe",
"setna",
#Jump Instructions
"jmp",
"je",
"jz",
"jne",
"jnz",
"js",
"jns",
"jg",
"jnle",
"jge",
"jnl",
"jl",
"jnge",
"jle",
"jng",
"ja",
"jnbe",
"jae",
"jnb",
"jb",
"jnae",
"jbe",
"jna",
# Conditional Move Instructions
"cmove",
"cmovz",
"cmovne",
"cmovenz",
"cmovs",
"cmovns",
"cmovg",
"cmovnle",
"cmovge",
"cmovnl",
"cmovl",
"cmovnge",
"cmovle",
"cmovng",
"cmova",
"cmovnbe",
"cmovae",
"cmovnb",
"cmovb",
"cmovnae",
"cmovbe",
"cmovna",
# Procedure Call Instruction
"call",
"leave",
"ret",
"retn"
# String Instructions
"cmps",
"cmpsb",
"cmpsl",
"cmpsw",
"lods",
"lodsb",
"lodsl",
"lodsw",
"movs",
"movsb",
"movsl",
"movsw",
# Float point Arithmetic Instructions
"fabs",
"fadd",
"faddp",
"fchs",
"fdiv",
"fdivp",
"fdivr",
"fdivrp",
"fiadd",
"fidiv",
"fidivr",
"fimul",
"fisub",
"fisubr",
"fmul",
"fmulp",
"fprem",
"fprem1",
"frndint",
"fscale",
"fsqrt",
"fsub",
"fsubp",
"fsubr",
"fsubrp",
"fxtract",
]
REGISTER = [
"rax", "eax", "ax", "al", "ah",
"rcx", "ecx", "cx", "cl", "ch",
"rdx", "edx", "dx", "dl", "dh",
"rbx", "ebx", "bx", "bl", "bh", # 20
"rsi", "esi", "si", "sil",
"rdi", "edi", "di", "dil",
"rsp", "esp", "sp", "spl",
"rbp", "ebp", "bp", "bpl",
"r8", "r8d", "r8w", "r8b",
"r9", "r9d", "r9w", "r9b", # 44
"r10", "r10d", "r10w", "r10b",
"r11", "r11d", "r11w", "r11b",
"r12", "r12d", "r12w", "r12b",
"r13", "r13d", "r13w", "r13b",
"r14", "r14d", "r14w", "r14b",
"r15", "r15d", "r15w", "r15b",
"xmm0", "xmm1", "xmm2", "xmm3",
"xmm4", "xmm5", "xmm6", "xmm7", # 76
"st0", "st1", "st2", "st3",
"st4", "st5", "st6", "st7", # 84
"cs", "es", "os", "fs", "gs", "ss", # 90
"fcw", "fsw", "ftw", "fop", #94
"frip", "frdp", "mxcsr", "mxcsr_mask",
"rip", "rflags", # 100
#normalization
"string",
"symbol",
"address",
"shl"
]
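# Hedged sketch (added for illustration; not part of the original file):
# one-hot encoding of a mnemonic against OPCODE, matching the 200-bit opcode
# field described in the module docstring; unused tail bits stay zero.
def opcode_one_hot(mnemonic):
    vec = [0] * OPCODE_LEN
    if mnemonic in OPCODE:
        vec[OPCODE.index(mnemonic)] = 1
    return vec
# e.g. opcode_one_hot("mov") sets only bit 0 of the 200-bit vector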
SEGMENT = [
"cs",
"ss",
"ds",
"es",
"fs",
"gs"
] | 4,474 | 18.123932 | 220 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/data_loader.py | """
Here we implement a class for loading data.
"""
import torch
from torch.autograd import Variable
from vocab import *
from config import *
import numpy as np
import random
import re
np.random.seed(0)
class DataLoader:
EOS = 0 # to mean end of sentence
UNK = 1 # to mean unknown token
maxlen = MAXLEN
def __init__(self, text_file=None, sentences=None, word_dict=None):
if text_file:
sentences = []
for txt_file in text_file:
print("Loading text file at {}".format(txt_file))
with open(txt_file, "rt") as f:
text = f.readlines()
for i, line in enumerate(text):
if i % 2:
sentences.extend(line.strip().split(';'))
print("Making dictionary for these words")
word_dict = build_and_save_dictionary(sentences, source="data/instruction")
assert sentences and word_dict, "Please provide the file to extract from or give sentences and word_dict"
self.sentences = sentences
self.word_dict = word_dict
# print("Making reverse dictionary")
self.revmap = list(self.word_dict.items())
self.lengths = [len(sent) for sent in self.sentences]
def convert_sentence_to_indices(self, sentence):
sentence = re.split(',| ', sentence)
tokn_lst = []
for s in sentence:
tokn_lst.extend(re.split('([0-9A-Za-z@_.]+)', s))
tokn_lst = [t for t in tokn_lst if t]
indices = [
# assign an integer to each word, if the word is too rare assign unknown token
self.word_dict.get(w) if self.word_dict.get(w, VOCAB_SIZE + 1) < VOCAB_SIZE else self.UNK
for w in tokn_lst # split into words on spaces
][: self.maxlen - 1] # take only maxlen-1 words per sentence at the most.
# last words are EOS
indices += [self.EOS] * (self.maxlen - len(indices))
indices = np.array(indices)
indices = Variable(torch.from_numpy(indices))
return indices
def convert_indices_to_sentences(self, indices):
def convert_index_to_word(idx):
idx = idx.data.item()
if idx == 0:
return "EOS"
elif idx == 1:
return "UNK"
search_idx = idx - 2
if search_idx >= len(self.revmap):
return "NA"
word, idx_ = self.revmap[search_idx]
assert idx_ == idx
return word
words = [convert_index_to_word(idx) for idx in indices]
return " ".join(words)
def fetch_batch(self, batch_size):
first_index = random.randint(0, len(self.sentences) - batch_size)
batch = []
lengths = []
for i in range(first_index, first_index + batch_size):
sent = self.sentences[i]
ind = self.convert_sentence_to_indices(sent)
if USE_CUDA:
ind = ind.cuda(CUDA_DEVICE)
batch.append(ind)
lengths.append(min(len(sent.split()), MAXLEN))
batch = torch.stack(batch)
lengths = np.array(lengths)
return batch, lengths
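# Hedged usage sketch (added for illustration; not part of the original file):
# tokenization on a toy dictionary. The instruction text and the word ids are
# illustrative, not taken from the real PalmTree vocabulary.
if __name__ == '__main__':
    toy_dict = {'mov': 2, 'rbp': 3, 'rsp': 4}
    dl = DataLoader(sentences=['mov rbp, rsp'], word_dict=toy_dict)
    idx = dl.convert_sentence_to_indices('mov rbp, rsp')
    print(idx.tolist())  # [2, 3, 4, 0, 0, 0, 0, 0, 0, 0], EOS-padded to MAXLEN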
| 3,295 | 30.390476 | 113 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/gemini_feature_extraction_palmtree.py | import glob
import pickle
import queue
import time
import re
import os
import numpy as np
import eval_utils as utils
import binaryninja as binja
from obj import Obj
CALL_INST = {binja.LowLevelILOperation.LLIL_CALL, binja.LowLevelILOperation.LLIL_CALL_PARAM,
binja.LowLevelILOperation.LLIL_CALL_OUTPUT_SSA, binja.LowLevelILOperation.LLIL_CALL_SSA,
binja.LowLevelILOperation.LLIL_CALL_STACK_ADJUST, binja.LowLevelILOperation.LLIL_CALL_STACK_SSA}
LOGIC_INST = {binja.LowLevelILOperation.LLIL_AND, binja.LowLevelILOperation.LLIL_TEST_BIT,
binja.LowLevelILOperation.LLIL_OR, binja.LowLevelILOperation.LLIL_XOR,
binja.LowLevelILOperation.LLIL_NOT, binja.LowLevelILOperation.LLIL_ROR,
binja.LowLevelILOperation.LLIL_ROL, binja.LowLevelILOperation.LLIL_ASR,
binja.LowLevelILOperation.LLIL_LSL, binja.LowLevelILOperation.LLIL_LSR}
ARITH_INST = {binja.LowLevelILOperation.LLIL_ADD, binja.LowLevelILOperation.LLIL_ADD_OVERFLOW,
              binja.LowLevelILOperation.LLIL_SUB,
binja.LowLevelILOperation.LLIL_FSUB, binja.LowLevelILOperation.LLIL_DIVS,
binja.LowLevelILOperation.LLIL_DIVS_DP, binja.LowLevelILOperation.LLIL_DIVU,
binja.LowLevelILOperation.LLIL_DIVU_DP, binja.LowLevelILOperation.LLIL_FDIV,
binja.LowLevelILOperation.LLIL_MUL, binja.LowLevelILOperation.LLIL_MULS_DP,
binja.LowLevelILOperation.LLIL_MULU_DP, binja.LowLevelILOperation.LLIL_FMUL,
binja.LowLevelILOperation.LLIL_ADC, binja.LowLevelILOperation.LLIL_SBB,
binja.LowLevelILOperation.LLIL_BOOL_TO_INT, binja.LowLevelILOperation.LLIL_FLOAT_TO_INT,
binja.LowLevelILOperation.LLIL_ROUND_TO_INT, binja.LowLevelILOperation.LLIL_INT_TO_FLOAT}
TRANSFER_INST = {binja.LowLevelILOperation.LLIL_IF, binja.LowLevelILOperation.LLIL_GOTO}
class BasicBlockMap(dict):
def __missing__(self, key):
v = len(self)
self[key] = v
return v
def encode_str(string):
vector = [0] * 256
str_lst = list(string)
for u in str_lst:
vector[ord(u)]+=1
vector = ''.join([str(i) for i in vector])
return vector
def parse_instruction(ins, symbol_map, string_map):
    ins = re.sub(r'\s+', ', ', ins, 1)
parts = ins.split(', ')
operand = []
token_lst = []
if len(parts) > 1:
operand = parts[1:]
token_lst.append(parts[0])
for i in range(len(operand)):
symbols = re.split('([0-9A-Za-z]+)', operand[i])
symbols = [s.strip() for s in symbols if s]
for j in range(len(symbols)):
if symbols[j][:2] == '0x' and len(symbols[j]) == 8:
if int(symbols[j], 16) in symbol_map:
symbols[j] = "symbol"
elif int(symbols[j], 16) in string_map:
symbols[j] = "string"
else:
symbols[j] = "address"
token_lst.extend(symbols)
return ' '.join(token_lst)
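# Hedged worked example (added for illustration): with empty symbol/string
# maps, an 8-character hex operand is normalized to "address":
#   parse_instruction('mov eax, 0x400000', {}, {})  ->  'mov eax address'
# the same operand found in symbol_map (string_map) would become 'symbol'
# ('string') instead.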
def calc_st_embeddings(usable_encoder: utils.UsableTransformer, bv: binja.BinaryViewType, block: binja.BasicBlock, symbol_map, string_map):
text = []
idx = block.start
for inst in block:
text.append(parse_instruction(bv.get_disassembly(idx), symbol_map, string_map))
idx += inst[1]
if text:
embd = np.sum(usable_encoder.encode(text), axis=0)/len(text)
else:
embd = np.zeros(128)
return embd, text
def calc_statistics(func: binja.Function, block: binja.BasicBlock):
num_as, num_calls, num_insts, num_lis, num_tis = 0, 0, 0, 0, 0
idx = block.start
for inst in block:
llil = func.get_lifted_il_at(idx)
idx += inst[1]
num_insts += 1
if not hasattr(llil, 'operation'):
continue
if llil.operation in CALL_INST:
num_calls += 1
elif llil.operation in ARITH_INST:
num_as += 1
elif llil.operation in LOGIC_INST:
num_lis += 1
elif llil.operation in TRANSFER_INST:
num_tis += 1
return [0, 0, calc_descendents(block), num_as, num_calls, num_insts, num_lis, num_tis]
def calc_descendents(block: binja.BasicBlock):
q = queue.Queue()
q.put(block)
visited = set()
visited.add(block.start)
cnt = 0
while not q.empty():
b = q.get()
for edge in b.outgoing_edges:
target = edge.target
if target.start not in visited:
cnt += 1
q.put(target)
visited.add(target.start)
return cnt
def build_neighbors(func: binja.Function, bb_map: BasicBlockMap):
edge_list = []
for block in func:
src_id = bb_map[block.start]
for edge in block.outgoing_edges:
dst_id = bb_map[edge.target.start]
edge_list.append((src_id, dst_id))
return edge_list
def disassemble(path, function_filter):
bv = binja.BinaryViewType.get_view_of_file(path)
symbol_map = {}
string_map = {}
for sym in bv.get_symbols():
symbol_map[sym.address] = sym.full_name
for string in bv.get_strings():
string_map[string.start] = string.value
#binja.log_to_stdout(True)
usable_encoder = utils.UsableTransformer(model_path="", vocab_path="")
s = time.time()
raw_graph_list = []
filter_count = 0
for func in bv.functions:
bb_map = BasicBlockMap()
edge_list = build_neighbors(func, bb_map)
fvec_list = [0] * len(bb_map)
ins_list = []
for block in func:
# fv_list = calc_statistics(func, block)
fv_list, ins = calc_st_embeddings(usable_encoder, bv, block, symbol_map, string_map)
fvec_list[bb_map[block.start]] = fv_list
ins_list.extend(ins)
ins_text = ';'.join(ins_list)
if ins_text not in function_filter:
function_filter.append(ins_text)
acfg = Obj()
acfg.fv_list = fvec_list
acfg.funcname = func.name
acfg.edge_list = edge_list
raw_graph_list.append(acfg)
else:
filter_count += 1
acfgs = Obj()
acfgs.raw_graph_list = raw_graph_list
elapse = time.time() - s
print('-------', elapse)
print("filter out functions: ", filter_count)
return acfgs
idx = 0
function_filter = []
for parent, subdirs, files in os.walk('/path/to/trainingdataset/'):
if files:
for ida_path in files:
acfgs = disassemble(os.path.join(parent, ida_path), function_filter)
print(ida_path)
pickle.dump(acfgs, open('/path/to/train/gemini' + str(idx) +'.ida', 'wb'))
idx += 1
del acfgs
idx = 0
function_filter = []
for parent, subdirs, files in os.walk('/path/to/testingdataset/'):
if files:
for ida_path in files:
acfgs = disassemble(os.path.join(parent, ida_path), function_filter)
print(ida_path)
pickle.dump(acfgs, open('/path/to/test/gemini' + str(idx) +'.ida', 'wb'))
idx += 1
del acfgs
#
# for ida_path in glob.iglob(r'/home/ericlee/projects/Gemini/trainingdataset/*'):
# # if not os.path.splitext(ida_path)[-1]:
# start = time.time()
# acfgs = disassemble(ida_path)
# elapse = time.time() - start
# print(ida_path, elapse)
# # break
# pickle.dump(acfgs, open(ida_path + '.ida', 'wb'))
| 7,463 | 34.542857 | 135 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/obj.py | class Obj:
pass
| 20 | 6 | 10 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/vocab.py | """
This code has been taken and modified from https://github.com/ryankiros/skip-thoughts
Constructing and loading dictionaries
"""
import _pickle as pkl
from collections import OrderedDict
import argparse
import re
def build_dictionary(text):
"""
Build a dictionary
text: list of sentences (pre-tokenized)
"""
wordcount = {}
for cc in text:
words = cc.split(',')
tokn_lst = []
for s in words:
tokn_lst.extend(re.split('([0-9A-Za-z@_.]+)', s))
tokn_lst = [t for t in tokn_lst if t]
for w in tokn_lst:
if w not in wordcount:
wordcount[w] = 0
wordcount[w] += 1
sorted_words = sorted(list(wordcount.keys()), key=lambda x: wordcount[x], reverse=True)
worddict = OrderedDict()
for idx, word in enumerate(sorted_words):
worddict[word] = idx + 2 # 0: <eos>, 1: <unk>
return worddict, wordcount
def load_dictionary(loc):
"""
Load a dictionary
"""
with open(loc, 'rb') as f:
worddict = pkl.load(f)
return worddict
def save_dictionary(worddict, wordcount, loc):
"""
Save a dictionary to the specified location
"""
with open(loc, 'wb') as f:
pkl.dump(worddict, f, protocol=2)
pkl.dump(wordcount, f)
def build_and_save_dictionary(text, source):
save_loc = source+".pkl"
try:
cached = load_dictionary(save_loc)
print("Using cached dictionary at {}".format(save_loc))
return cached
except:
pass
# build again and save
print("unable to load from cached, building fresh")
worddict, wordcount = build_dictionary(text)
print("Got {} unique words".format(len(worddict)))
print("Saveing dictionary at {}".format(save_loc))
save_dictionary(worddict, wordcount, save_loc)
return worddict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("text_file", type=str)
args = parser.parse_args()
print("Extracting text from {}".format(args.text_file))
text = open(args.text_file, "rt").readlines()
print("Extracting dictionary..")
worddict, wordcount = build_dictionary(text)
out_file = args.text_file+".pkl"
print("Got {} unique words. Saving to file {}".format(len(worddict), out_file))
save_dictionary(worddict, wordcount, out_file)
print("Done.")
| 2,386 | 26.436782 | 91 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/config.py | """
Configuration file.
"""
VOCAB_SIZE = 10000
USE_CUDA = True
DEVICES = [0]
CUDA_DEVICE = DEVICES[0]
VERSION = 1
MAXLEN = 10
LEARNING_RATE=1e-5
| 147 | 10.384615 | 24 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/eval_utils.py | from model import UniSkip, Encoder
from data_loader import DataLoader
from vocab import load_dictionary
from config import *
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch
import re
import numpy as np
import pickle
class UsableTransformer:
# @profile
def __init__(self, model_path, vocab_path):
print("Loading Vocab", vocab_path)
with open(vocab_path, "rb") as f:
self.vocab = pickle.load(f)
# self.vocab = dataset.WordVocab.load_vocab(vocab_path)
print("Vocab Size: ", len(self.vocab))
self.model = torch.load(model_path)
if USE_CUDA:
self.model.cuda(CUDA_DEVICE)
# @profile
def encode(self, text, numpy=True):
segment_label = []
sequence = []
for t in text:
l = len(t.split(' ')) * [1]
s = self.vocab.to_seq(t)
if len(l) > 30:
segment_label.append(l[:30])
else:
segment_label.append(l + [0]*(30-len(l)))
if len(s) > 30:
sequence.append(s[:30])
else:
sequence.append(s + [0]*(30-len(s)))
segment_label = torch.LongTensor(segment_label)
sequence = torch.LongTensor(sequence)
if USE_CUDA:
sequence = sequence.cuda(CUDA_DEVICE)
segment_label = segment_label.cuda(CUDA_DEVICE)
encoded = self.model.encode(sequence, segment_label)
result = torch.mean(encoded.detach(), dim=1)
del encoded
if USE_CUDA:
if numpy:
return result.data.cpu().numpy()
else:
return result.to('cpu')
else:
if numpy:
return result.data.numpy()
else:
return result
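# Hedged sketch (added for illustration): encode() clips or zero-pads every
# token/segment sequence to a fixed length of 30; the same rule standalone:
if __name__ == '__main__':
    s = [5, 9, 13]                                  # illustrative token ids
    padded = s[:30] if len(s) > 30 else s + [0] * (30 - len(s))
    print(len(padded), padded[:5])                  # 30 [5, 9, 13, 0, 0]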
| 1,852 | 28.412698 | 63 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/embedding.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# to use tfdbg
# wrap session object with debugger wrapper
from tensorflow.python import debug as tf_debug
from random import shuffle
from scipy.linalg import block_diag
import tensorflow as tf
import numpy as np
import os
import operator
import time
import pickle as p
import scipy
# local library
from siamese_emb import Siamese
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('vector_size', 128, "Vector size of acfg")
flags.DEFINE_integer('emb_size', 64, "Embedding size for acfg")
flags.DEFINE_float('learning_rate', 0.001, "Learning Rate for Optimizer")
flags.DEFINE_string('data_file', 'train.pickle', "Stores the train sample after preprocessing")
flags.DEFINE_string('test_file', 'test.pickle', "Stores the test sample after preprocessing")
flags.DEFINE_integer('T', 5, "Number of times to iterate during embedding generation")
flags.DEFINE_string('emb_type', 'trans', "Embedding type")
PROJ_DIR = os.path.dirname(os.path.realpath(__file__))
class Embedding:
def __init__(self):
self.emb_model_loc = PROJ_DIR + "/model/"
# self.siamese = Siamese()
self._init_tensorflow()
self.g_embed_funcs = [self.siamese.get_embedding()]
self.g_test_similarity = self.test_similarity_internal()
def _init_tensorflow(self):
self.siamese = Siamese()
global_step = tf.Variable(0, name="global_step", trainable=False)
print("siamese model object initialized")
init_op = tf.global_variables_initializer()
# set cpu utilization
#config = tf.ConfigProto(device_count = {'CPU': 1})
#self.tf_sess = tf.Session(config=config)
# set gpu utilization %
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.001)
#self.tf_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# original
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.tf_sess = tf.Session(config=config)
# to be used later
self.tf_saver = tf.train.Saver()
# can use other optimizers
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
# optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = optimizer.minimize(self.siamese.loss)
print("defined training operations")
print("initializing global variables")
self.tf_sess.run(init_op)
self.tf_saver.restore(self.tf_sess, self.emb_model_loc + "model.ckpt")
def _close_tensorflow(self):
self.tf_sess.close()
del self.siamese
#print(self.W_1)
def train_model_all(self):
print("Retrain the model")
def embed_single_function(self, acfg_mat, acfg_nbr):
r4 = self.tf_sess.run([self.siamese.get_embedding()],
feed_dict={self.siamese.x: acfg_mat, self.siamese.n: acfg_nbr})
return r4[0]
def embed_function_by_name(self, funcname, idafile):
print("Embedding a single binary function " + funcname)
# load '.ida' file
with open(idafile, 'rb') as f:
acfgs = p.load(f)
for acfg in acfgs.raw_graph_list:
fvec_list = []
func_name = acfg.funcname
if func_name != funcname:
continue
# This loop is to delete first two elements of feature vectors
            # because they are lists and we need numeric values for our matrix;
            # if there is a method to convert those lists to values, this loop can be commented out
for fv in acfg.fv_list:
# deleting first 2 element of each feature vector
del fv[:2]
fvec_list.append(fv)
# converting to matrix form
acfg_mat = np.array(fvec_list)
# setting up neighbor matrix from edge list
num_nodes = len(fvec_list)
acfg_nbr = np.zeros((num_nodes, num_nodes))
for edge in acfg.edge_list:
acfg_nbr.itemset((edge[0], edge[1]), 1)
acfg_nbr.itemset((edge[1], edge[0]), 1)
# embeding function
embed = self.embed_single_function(acfg_mat, acfg_nbr)
return tuple([func_name, embed])
return None
def get_some_embedding(self, it, cnt=35):
mul_mat = []
acfg_mat = []
acfg_nbr_mat = []
func_name_list = []
acfg_length_list = []
while len(func_name_list) < cnt:
try:
acfg = next(it)
except StopIteration:
break
# if len(acfg.fv_list) < 5:
# continue
fvec_list = []
func_name = acfg.funcname
fsize_list = []
# test print function name
# print(func_name)
# This loop is to delete first two elements of feature vectors
            # because they are lists and we need numeric values for our matrix;
            # if there is a method to convert those lists to values, this loop can be commented out
for fv in acfg.fv_list:
# deleting first 2 element of each feature vector
fvec_list.append(fv)
fsize_list.append(len(fv))
mul_mat.append(np.ones(len(acfg.fv_list)))
# test fvec shape
# converting to matrix form
# initialize acfg_mat
acfg_mat_tmp = np.concatenate(fvec_list)
acfg_mat.append(acfg_mat_tmp)
acfg_length_list.append(fsize_list)
# matrix input acfg_mat & acfg_nbr
num_nodes = len(fvec_list)
acfg_nbr = np.zeros((num_nodes, num_nodes))
for edge in acfg.edge_list:
acfg_nbr.itemset((edge[0], edge[1]), 1)
acfg_nbr.itemset((edge[1], edge[0]), 1)
acfg_nbr_mat.append(acfg_nbr)
func_name_list.append(func_name)
if len(mul_mat) != 0:
# acfg_mat = np.vstack(acfg_mat)
acfg_mat = np.concatenate(acfg_mat)
acfg_nbr_mat = block_diag(*acfg_nbr_mat)
mul_mat = block_diag(*mul_mat)
return acfg_mat, acfg_nbr_mat, acfg_length_list, mul_mat, func_name_list
def embed_a_binary(self, idafile, target_name=None):
# counter for test first 100 function
with open(idafile, 'rb') as f:
acfgs = p.load(f)
time_embdding_start = time.time()
# print shape of acfg_mat & acfg_nbr
it = iter(acfgs.raw_graph_list)
retval = []
func_names = []
while True:
acfg_mat, acfg_nbr_mat, acfg_length_list, mul_mat, func_name_list = self.get_some_embedding(it)
if len(mul_mat) == 0:
break
idx = 0
idy = 0
merged_acfg_mat = np.ndarray((acfg_nbr_mat.shape[0], FLAGS.vector_size))
if FLAGS.emb_type != "org":
for length in acfg_length_list:
for l in length:
ins = np.expand_dims(acfg_mat[idx: idx+l], axis=0)
merged_acfg_mat[idy,:] = np.squeeze(self.tf_sess.run([self.siamese.bb_emb], feed_dict={self.siamese.ins: ins}), axis=0)
idy += 1
idx += l
# print(merged_acfg_mat.shape, acfg_nbr_mat.shape)
emb = self.tf_sess.run(self.g_embed_funcs, feed_dict={self.siamese.x: np.concatenate([merged_acfg_mat, np.transpose(mul_mat)], 1),
self.siamese.n: acfg_nbr_mat})
else:
emb = self.tf_sess.run(self.g_embed_funcs, feed_dict={self.siamese.x: np.concatenate([acfg_mat, np.transpose(mul_mat)], 1),
self.siamese.n: acfg_nbr_mat})
retval.extend(emb[0])
func_names.extend(func_name_list)
time_embdding_end = time.time()
# print("embedding duration: ", time_embdding_end-time_embdding_start)
if target_name is not None:
return retval[func_names.index(target_name)]
# return embedding_list
return func_names, retval
def embed_multiple_function(self, acfg_mat, acfg_nbr):
r5 = self.tf_sess.run(self.g_embed_funcs,
feed_dict={self.siamese.x: acfg_mat, self.siamese.n: acfg_nbr})
return r5[0]
def test_similarity_internal(self):
self.funca = tf.placeholder(tf.float32, (None, None))
self.funcb = tf.placeholder(tf.float32, (None, None))
mul = tf.matmul(self.funca, self.funcb, transpose_b=True)
na = tf.norm(self.funca, axis=1, keepdims=True)
nb = tf.norm(self.funcb, axis=1, keepdims=True)
return mul / tf.matmul(na, nb, transpose_b=True)
def test_similarity(self, funca, funcb):
# funca: embeddings of list a
# funcb : embeddings of list b
# ret: predicted value
return self.tf_sess.run(self.g_test_similarity, feed_dict={self.funca: funca, self.funcb: funcb})
def gen_pca(self, emb, dims_rescaled_data=2):
emb = np.array(emb).T
emb -= emb.mean(axis=0)
r = np.cov(emb, rowvar=False)
evals, evecs = scipy.linalg.eigh(r)
idx = np.argsort(evals)[::-1]
evecs = evecs[:, idx]
evecs = evecs[:, :dims_rescaled_data]
return np.dot(emb, evecs).T.reshape(dims_rescaled_data*64)
'''
def train_siamese(self):
# Training
# ==================================================
print("starting graph def")
with tf.Graph().as_default():
#init class
siamese = Siamese()
global_step = tf.Variable(0, name="global_step", trainable=False)
print("siamese model object initialized")
init_op = tf.global_variables_initializer()
print("started session")
sess = tf.Session()
#to be used later
saver = tf.train.Saver()
with sess.as_default() as sess:
#can use other optimizers
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
#optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = optimizer.minimize(siamese.loss)
print("defined training operations")
print("initializing global variables")
sess.run(init_op)
saver.restore(sess, "/tmp/model.ckpt")
#Implement AUC
#this part can be parallelized for better embedding generation speed
print("generating embedding...")
emb_list = []
#generating embedding for all acfg in the test sample
for i, item in enumerate(pair_sample):
if i%100 == 0:
print("calucating :", i)
#print(item)
r4 = sess.run([siamese.get_embedding()],feed_dict = {siamese.x: item[1], siamese.n: item[2]})
#print(r4[0])
#appending generated embedding and name of the function
emb_list.append((item[0],r4[0]))
#just for testing small sample
#if i == 10:
# break
'''
| 9,614 | 30.628289 | 134 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/siamese_emb.py | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
flags = tf.app.flags
FLAGS = flags.FLAGS
class Siamese:
#calculate embedding
def emb_generation(self, x, n):
mul_mat = x[:, FLAGS.vector_size:]
x = x[:, :FLAGS.vector_size]
# tf.reset_default_graph()
# embeddings to be calculated
#print("x shape:" , tf.shape(x))
#print("n shape:" , tf.shape(n))
#print(self.W_1)
mu_val = tf.zeros_like(x, name="mu_val")
# tf.Variable(mu_v, name="mu_val", validate_shape=False, trainable=False)
# Running T times
for t in range(FLAGS.T):
#calculating summation of neighbour vertexes
mu_val = tf.matmul(n, mu_val, name="neighbour_summation")
# print("mu_val:", mu_val)
            # non-linear transformation
sig_1 = tf.nn.relu(tf.matmul(mu_val, self.P_1), name="relu_op_lv1")
sig_2 = tf.nn.relu(tf.matmul(sig_1, self.P_2), name="relu_op_lv2")
#new embedding value
mu_val = tf.nn.tanh(tf.matmul(x, self.W_1) + sig_2, name="new_mu_value")
#summation across column
#print("mu_valu shape", tf.shape(mu_val))
#print(mu_val)
if mul_mat.shape[1] == 0:
mu_summ = tf.reduce_sum(mu_val, axis=0, name="cumm_column_sum")
g_embedding = tf.matmul(tf.reshape(mu_summ,[1,FLAGS.vector_size]), self.W_2,name="embedding_calc")
else:
mu_summ = tf.matmul(mul_mat, mu_val, True)
g_embedding = tf.matmul(mu_summ, self.W_2,name="embedding_calc")
print("mu_summ shape",tf.shape(mu_summ))
# print(g_embedding)
return g_embedding
#loss function
def loss_with_spring(self):
margin = 5.0
#true labels
labels_t = self.y_
#fail labels
labels_f = tf.subtract(1.0, self.y_, name="1-y_i")
        # calculating Euclidean distance
eucd2 = tf.pow(tf.subtract(self.o1, self.o2),2)
eucd2 = tf.reduce_sum(eucd2, 1)
eucd = tf.sqrt(eucd2+1e-6, name="eucd")
C = tf.constant(margin, name="C")
# ( y_i * ||CNN(p1_i) - CNN(p2_i)||^2 ) + ( 1-y_i * (max(0, C - ||CNN(p1_i) - CNN(p2_i)||))^2 )
pos = tf.multiply(labels_t, eucd2, name="y_i_x_eucd2")
neg = tf.multiply(labels_f, tf.pow(tf.maximum(0.0, tf.subtract(C, eucd)), 2), name="Ny_i_x_C-eucd_xx_2")
cumm_losses = tf.add(pos, neg, name="cumm_losses")
loss = tf.reduce_mean(cumm_losses, name="loss")
return loss
    # loss function with step
def loss_with_step(self):
margin = 5.0
#true labels
labels_t = self.y_
#fail labels
labels_f = tf.subtract(1.0, self.y_, name="1-y_i")
        # calculating Euclidean distance
eucd2 = tf.pow(tf.subtract(self.o1, self.o2),2)
eucd2 = tf.reduce_sum(eucd2, 1)
eucd = tf.sqrt(eucd2+1e-6, name="eucd")
C = tf.constant(margin, name="C")
pos = tf.multiply(labels_t, eucd, name="y_x_eucd")
        neg = tf.multiply(labels_f, tf.maximum(0.0, tf.subtract(C, eucd)), name="Ny_x_C-eucd")
cumm_losses = tf.add(pos, neg, name="cumm_losses")
loss = tf.reduce_mean(cumm_losses, name="loss")
return loss
def l2_norm(self, x, eps=1e-12):
return tf.sqrt( tf.reduce_sum(tf.square(x), axis=1) + eps )
def cosine_norm(self, x, eps=1e-12):
return tf.sqrt( tf.reduce_sum(tf.matmul(x,tf.transpose(x)), axis=1) + eps )
def emb_gen(self):
#creating nn using inputs
with tf.variable_scope("acfg_embedding") as siam_scope:
#Left embedding
self.e1 = self.emb_generation(self.x1, self.n1)
#print("-->siamese left tensor", self.e1)
siam_scope.reuse_variables()
#Right embedding
self.e2 = self.emb_generation(self.x2, self.n2)
#siamese cosine loss
#math
#[\frac{l \cdot r}{l2_norm(l) \cdot l2_norm(right)}]
def siamese_cosine_loss(self):
_y = tf.cast(self.y, tf.float32)
#trying reset default graph
#tf.reset_default_graph()
self.emb_gen()
#cast true value to float type
#predict value from left and right tensors using cosine formula
pred_y = tf.reduce_sum(tf.multiply(self.e1, self.e2) , axis=1)/ (self.cosine_norm(self.e1) * self.cosine_norm(self.e2))
#print(tf.nn.l2_loss(y-pred)/ tf.cast(tf.shape(left)[0], tf.float32))
#return tf.nn.l2_loss(y-pred)/ tf.cast(tf.shape(left)[0], tf.float32)
return tf.nn.l2_loss(pred_y - _y)
# return self.constrastive_loss(self.e1, self.e2, _y, 0.9)
#generate embedding of single given acfg
def get_embedding(self):
#x
self.x = tf.placeholder(dtype=tf.float32, shape=(None, None), name="test_x")
#x neighbours value
self.shape_x = self.x.get_shape().as_list()
self.n = tf.placeholder(dtype=tf.float32, shape=(None, self.shape_x[0]), name="test_neighbours_x")
emb_val = self.emb_generation(self.x, self.n)
return emb_val
#Function to be used to predict the similarity between two given ACFG using thier Embedding
def siamese_pred(self):
#left embedding
self.test_e1 = tf.placeholder(dtype=tf.float32, shape=(1, FLAGS.emb_size), name="test_e1")
#right embedding
self.test_e2 = tf.placeholder(dtype=tf.float32, shape=(1, FLAGS.emb_size), name="test_e2")
#predict value from left and right tensors using cosine formula
pred = tf.reduce_sum(tf.multiply(self.test_e1, self.test_e2) , axis=1)/ (self.cosine_norm(self.test_e1) * self.cosine_norm(self.test_e2))
return pred
    # contrastive loss
def constrastive_loss(self, left, right, y, margin):
with tf.name_scope("constrative-loss"):
d = tf.sqrt(tf.reduce_sum( tf.pow( left-right, 2), axis=1, keep_dims=True))
tmp = y * tf.square(d)
tmp2 = (1 - y) * tf.square( tf.maximum((margin - d), 0))
return tf.reduce_mean(tmp + tmp2)/2
#create model
def __init__(self):
#with tf.name_scope("input"):
# self.n_input = self.nbr_input()
#input vector/acfg's
if FLAGS.emb_type != "manual": #and FLAGS.emb_type != "cfg_bert" and FLAGS.emb_type != 'mlm_only':
with tf.name_scope("basicblocks-rnn"):
if FLAGS.emb_type == '1hot':
self.ins = tf.placeholder(dtype=tf.float32, shape=(1, None), name="input_ins")
self.length = tf.placeholder(dtype=tf.float32, shape=(None), name="input_length")
self.rnncell = tf.compat.v1.nn.rnn_cell.GRUCell(FLAGS.vector_size)
self.embedding = layers.Embedding(5001, FLAGS.vector_size)
self.emb_ins =keras.activations.tanh(self.embedding(self.ins))
else:
self.ins = tf.placeholder(dtype=tf.float32, shape=(None, None, FLAGS.vector_size), name="input_ins")
self.length = tf.placeholder(dtype=tf.float32, shape=(None), name="input_length")
self.rnncell = tf.compat.v1.nn.rnn_cell.GRUCell(FLAGS.vector_size)
self.emb_ins = tf.layers.dense(self.ins, FLAGS.vector_size, activation=tf.nn.elu)
_, self.bb_emb = tf.nn.dynamic_rnn(self.rnncell, self.ins, dtype=tf.float32)
with tf.name_scope("acfgs-siamese"):
#Input 1
self.x1 = tf.placeholder(dtype=tf.float32, shape=(None, FLAGS.vector_size), name="input_x1")
#Input 2
self.x2 = tf.placeholder(dtype=tf.float32, shape=(None, FLAGS.vector_size), name="input_x2")
#Resulting Label
self.y = tf.placeholder(dtype=tf.int32, name="input_y")
#x1 neighbour value
self.shape_x1 = self.x1.get_shape().as_list()
self.n1 = tf.placeholder(dtype=tf.float32, shape=(None, self.shape_x1[0]), name="neighbours_x1")
#x2 neighbour value
self.shape_x2 = self.x2.get_shape().as_list()
self.n2 = tf.placeholder(dtype=tf.float32, shape=(None, self.shape_x2[0]), name="neighbours_x2")
w_init = tf.truncated_normal_initializer(stddev=0.1)
#learnable parameters
self.W_1 = tf.get_variable(name='W_1', dtype=tf.float32, shape=(FLAGS.vector_size, FLAGS.vector_size), initializer=w_init)
self.W_2 = tf.get_variable(name='W_2', dtype=tf.float32, shape=(FLAGS.vector_size, FLAGS.emb_size), initializer=w_init)
self.P_1 = tf.get_variable(name='P1_relu', dtype=tf.float32, shape=(FLAGS.vector_size, FLAGS.vector_size), initializer=w_init)
self.P_2 = tf.get_variable(name='P2_relu', dtype=tf.float32, shape=(FLAGS.vector_size, FLAGS.vector_size), initializer=w_init)
with tf.name_scope("loss"):
self.loss = self.siamese_cosine_loss()
#create loss
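# Hedged numeric example (added for illustration) of loss_with_spring for one
# pair: with d = ||o1 - o2|| and margin C = 5,
#   loss = y * d^2 + (1 - y) * max(0, C - d)^2
# so a dissimilar pair (y=0) at d=3 costs (5-3)^2 = 4, while a similar pair
# (y=1) at d=3 costs 3^2 = 9.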
| 8,987 | 43.49505 | 145 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/dataset.py | import glob
import random
from collections import defaultdict
import tensorflow as tf
import numpy as np
import pickle as p
from numpy.random import choice, permutation
from itertools import combinations
import util
import os
import sys
import re
import operator
from functools import reduce
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir) + "/coogleconfig")
from random import shuffle
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from raw_graphs import *
flags = tf.app.flags
FLAGS = flags.FLAGS
# Added to allow standalone testing of this file
# To be commented if testing with NN Code
# flags.DEFINE_string('data_file', 'train.pickle', "Stores the train sample after preprocessing")
# flags.DEFINE_string('test_file', 'test.pickle', "Stores the test sample after preprocessing")
class BatchGenerator():
# __slots__=('filter_size', 'train_sample', 'test_sample')
def __init__(self, training_dataset, filter_size=0):
np.random.seed()
self.filter_size = filter_size
self.train_sample = defaultdict(list)
self.create_sample_space(training_dataset)
# for testing
# need to commented out while training
# g, g1, g2 = self.get_train_acfg()
# print("g: ", g)
# print("g1: ", g1)
# print("g2: ", g2)
# create sample space from all the available '.ida' files
def create_sample_space(self, training_dataset):
for ida_path in glob.iglob(training_dataset):
# load '.ida' file
print("train:",ida_path)
acfgs = p.load(open(ida_path, 'rb'))
filter_cnt = 0
for acfg in acfgs.raw_graph_list:
# if len(reduce(operator.add, acfg.fv_list)) < self.filter_size:
if len(acfg.fv_list) < self.filter_size:
filter_cnt += 1
continue
fvec_list = []
fsize_list = []
func_name = acfg.funcname
# This loop is to delete first two elements of feature vectors
                # because they are lists and we need numeric values for our matrix;
                # if there is a method to convert those lists to values, this loop can be commented out
for fv in acfg.fv_list:
# deleting first 2 element of each feature vector
# del fv[:2]
fvec_list.append(fv)
if FLAGS.emb_type != 'org':
fsize_list.append(len(fv))
else:
fsize_list.append(1)
# converting to matrix form
if FLAGS.emb_type == "manual":
acfg_mat = np.array(fvec_list)
else:
acfg_mat = np.concatenate(fvec_list)
# func_data = tuple(acfg_mat.ravel().tolist())
# if FLAGS.emb_type == 'org' and func_name in func_name_filter and func_data in func_filter:
# filter_cnt += 1
# continue
# if func_data in func_filter:
# filter_cnt += 1
# continue
# if FLAGS.emb_type == 'manual':
# func_name_filter.add(func_name)
# func_filter.add(func_data)
# setting up neighbor matrix from edge list
num_nodes = len(fsize_list)
acfg_nbr = np.zeros((num_nodes, num_nodes))
for edge in acfg.edge_list:
acfg_nbr.itemset((edge[0], edge[1]), 1)
acfg_nbr.itemset((edge[1], edge[0]), 1)
self.train_sample[func_name].append((func_name, acfg_mat, acfg_nbr, fsize_list))
print(filter_cnt, len(acfgs.raw_graph_list))
# # divide the training and testing data
# test_func_filter = set()
# test_func_name_filter = set()
# for test_ida_path in glob.iglob(testing_dataset):
# # load '.ida' file
# print("test:", test_ida_path)
# test_acfgs = p.load(open(test_ida_path, 'rb'))
# test_filter_cnt = 0
# for acfg in test_acfgs.raw_graph_list:
# # if len(reduce(operator.add, acfg.fv_list)) < self.filter_size:
# if len(acfg.fv_list) < self.filter_size:
# test_filter_cnt += 1
# continue
# fvec_list = []
# fsize_list = []
# func_name = acfg.funcname
# # This loop is to delete first two elements of feature vectors
# # because they are list and we need numeric valure for our matrix
# # if there is method to convert those list to values this loop can be commented out
# for fv in acfg.fv_list:
# # deleting first 2 element of each feature vector
# # del fv[:2]
# fvec_list.append(fv)
# fsize_list.append(len(fv))
# # converting to matrix form
# if FLAGS.emb_type == "manual":
# acfg_mat = np.array(fvec_list)
# else:
# acfg_mat = np.concatenate(fvec_list)
# func_data = tuple(acfg_mat.ravel().tolist())
# # if FLAGS.emb_type == 'org' and (func_data in test_func_filter) and (func_name in test_func_name_filter):
# # test_filter_cnt += 1
# # continue
# if func_data in test_func_filter:
# test_filter_cnt += 1
# continue
# if FLAGS.emb_type == 'manual':
# test_func_name_filter.add(func_name)
# test_func_filter.add(func_data)
# # setting up neighbor matrix from edge list
# num_nodes = len(fsize_list)
# acfg_nbr = np.zeros((num_nodes, num_nodes))
# for edge in acfg.edge_list:
# acfg_nbr.itemset((edge[0], edge[1]), 1)
# acfg_nbr.itemset((edge[1], edge[0]), 1)
# self.test_sample[func_name].append((func_name, acfg_mat, acfg_nbr, fsize_list))
# print(test_filter_cnt, len(test_acfgs.raw_graph_list))
# get train acfg
def get_train_acfg(self):
return self.get_acfg_pairs(self.train_sample)
# get test acfg
def get_test_acfg(self):
return self.get_acfg_pairs(self.test_sample)
# get randomly selected acgf pair sampled from sample list
def get_acfg_pairs(self, sample):
while True:
k1, k2 = np.random.choice(list(sample.keys()), 2, False)
if len(sample[k1]) > 1:
break
idx1, idx2 = np.random.choice(len(sample[k1]), 2, False)
g, g1 = sample[k1][idx1], sample[k1][idx2]
g2 = random.choice(sample[k2])
return g, g1, g2
def split_function_name(self, s):
        s = re.sub(r'\d', ' ', s)
s = re.sub('_', ' ', s)
tokens = s.split(' ')
tokens_f = []
for t in tokens:
res_list = re.findall('[A-Z][^A-Z]+', t)
if len(res_list) > 0:
tokens_f.extend(res_list)
if len(res_list) == 0 and len(t) != 0:
tokens_f.append(t)
return tokens_f
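    # Hedged worked example (added for illustration):
    #   split_function_name('getFileName_v2') -> ['File', 'Name', 'v']
    # digits and underscores become separators, and the camel-case regex keeps
    # only capitalized runs, so the lowercase prefix 'get' is dropped.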
# Divide sample space into training and testing sample
# def divide_sample_space(self):
# sample_size = sum([len(v) for v in self.sample.values()])
# train_size = int(sample_size * .5)
# keys = list(self.sample.keys())
# shuffle(keys)
# train_sample = defaultdict(list)
# test_sample = defaultdict(list)
# it = iter(keys)
# total_len = 0
# while True:
# k = next(it)
# train_sample[k] = self.sample[k]
# total_len += len(self.sample[k])
# if total_len >= train_size:
# break
# for k in it:
# test_sample[k] = self.sample[k]
# return train_sample, test_sample
if __name__ == '__main__':
sample_gen = BatchGenerator()
| 8,285 | 35.342105 | 124 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/obj.py | class Obj:
pass
| 20 | 6 | 10 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/util.py | import os
import sys
#getting all file list in a directory
def get_files(directory):
file_list = []
for root, dirc, files in os.walk(directory):
for file in files:
file_list.append(os.path.join(root, file))
return file_list | 234 | 18.583333 | 45 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/__init__.py | 0 | 0 | 0 | py |
|
PalmTree | PalmTree-master/src/extrinsic_evaluation/gemini/embedding/emb_train.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Siamese graph embedding implementaition using tensorflow
By:
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle as pkl
import time
import random
import nltk
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from scipy.linalg import block_diag
from sklearn import metrics
# from embedding import Embedding
from dataset import BatchGenerator
# local library
from siamese_emb import Siamese
# to use tfdbg
# wrap session object with debugger wrapper
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('vector_size',128, "Vector size of acfg")
flags.DEFINE_integer('emb_size', 64, "Embedding size for acfg")
flags.DEFINE_float('learning_rate', 0.0001, "Learning Rate for Optimizer")
flags.DEFINE_string('data_file', 'train.pickle', "Stores the train sample after preprocessing")
flags.DEFINE_string('test_file', 'test.pickle', "Stores the test sample after preprocessing")
flags.DEFINE_integer('T', 5, "Number of times to iterate during embedding generation")
flags.DEFINE_string('emb_type', 'mlm_only', "Embedding type")
FILTER_SIZE = 2
def get_some_embedding(it, cnt=35):
acfg_mat = []
acfg_nbr_mat = []
acfg_length_list = []
mul_mat = []
func_name_list = []
while len(func_name_list) < cnt:
try:
data = next(it)
# data = it
except StopIteration:
break
func_name = data[0]
acfg = data[1]
if len(acfg) < FILTER_SIZE:
continue
acfg_nbr = data[2]
acfg_length = data[3]
func_name_list.append(func_name)
acfg_mat.append(acfg)
acfg_length_list.append(acfg_length)
acfg_nbr_mat.append(acfg_nbr)
mul_mat.append(np.ones(len(acfg_nbr)))
if len(mul_mat) != 0:
# acfg_mat = np.vstack(acfg_mat)
acfg_mat = np.concatenate(acfg_mat)
acfg_nbr_mat = block_diag(*acfg_nbr_mat)
mul_mat = block_diag(*mul_mat)
return acfg_mat, acfg_nbr_mat, acfg_length_list, mul_mat, func_name_list
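# Hedged note (added for illustration): block_diag stitches the per-function
# adjacency matrices into one batched graph, e.g.
#   block_diag(np.ones((2, 2)), np.ones((1, 1)))
# gives a 3x3 matrix with the two blocks on the diagonal and zeros elsewhere.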
class Training:
def __init__(self):
self.g_test_similarity = self.test_similarity_internal()
def test_similarity_internal(self):
self.funca = tf.placeholder(tf.float32, (None, None))
self.funcb = tf.placeholder(tf.float32, (None, None))
mul = tf.matmul(self.funca, self.funcb, transpose_b=True)
na = tf.norm(self.funca, axis=1, keepdims=True)
nb = tf.norm(self.funcb, axis=1, keepdims=True)
return mul / tf.matmul(na, nb, transpose_b=True)
def test_similarity(self, sess, funca, funcb):
# funca: embeddings of list a
# funcb : embeddings of list b
# ret: predicted value
return sess.run(self.g_test_similarity, feed_dict={self.funca: funca, self.funcb: funcb})
def train_siamese(num_of_iterations):
# Training part
print("starting graph def")
with tf.Graph().as_default():
# init class
siamese = Siamese()
data_gen = BatchGenerator(r'/home/administrator/zixiang/gemini/{}/train/*.ida'.format(FLAGS.emb_type,FLAGS.emb_type), FILTER_SIZE)
global_step = tf.Variable(0, name="global_step", trainable=False)
print("siamese model object initialized")
print("started session")
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.5)))
saver = tf.train.Saver()
with sess.as_default() as sess:
# can use other optimizers
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
# optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = optimizer.minimize(siamese.loss)
init_op = tf.global_variables_initializer()
print("defined training operations")
print("initializing global variables")
sess.run(init_op)
            # Training parameters
TRAIN_ITER = num_of_iterations # number of iterations in each training
# model saved path
SAVEPATH = "./model/model.ckpt"
## start of counting training time
time_train_start = time.time()
print("model training start:")
# Temporary loss value
temp_loss_r1 = 10
temp_loss_r3 = 10
for i in range(1, TRAIN_ITER): ## default 1k, set to 100 for test
g, g1, g2 = data_gen.get_train_acfg()
if FLAGS.emb_type == 'manual': #or FLAGS.emb_type == 'cfg_bert' or FLAGS.emb_type == 'mlm_only':
bb = g[1]
bb1 = g1[1]
bb2 = g2[1]
else:
with tf.variable_scope("acfg_embedding") as siam_scope:
idx = 0
bb = []
for length in g[3]:
ins = np.expand_dims(g[1][idx: idx+length], axis=0)
bb.append(sess.run([siamese.bb_emb], feed_dict={siamese.ins: ins}))
idx += length
bb = np.reshape(np.array(bb),(-1, FLAGS.vector_size))
siam_scope.reuse_variables()
idx = 0
bb1 = []
for length in g1[3]:
ins = np.expand_dims(g1[1][idx: idx+length], axis=0)
bb1.append(sess.run([siamese.bb_emb], feed_dict={siamese.ins: ins}))
idx += length
bb1 = np.reshape(np.array(bb1),(-1, FLAGS.vector_size))
siam_scope.reuse_variables()
idx = 0
bb2 = []
for length in g2[3]:
ins = np.expand_dims(g2[1][idx: idx+length], axis=0)
bb2.append(sess.run([siamese.bb_emb], feed_dict={siamese.ins: ins}))
idx += length
bb2 = np.reshape(np.array(bb2),(-1, FLAGS.vector_size))
#import pdb; pdb.set_trace()
#print("x: ", bb.shape)
#print("n: ", g[2].shape)
r0, r1 = sess.run([train_op, siamese.loss],
feed_dict={siamese.x1: bb, siamese.x2: bb1, siamese.y: 1,
siamese.n1: g[2], siamese.n2: g1[2]})
r2, r3 = sess.run([train_op, siamese.loss],
feed_dict={siamese.x1: bb, siamese.x2: bb2, siamese.y: -1,
siamese.n1: g[2], siamese.n2: g2[2]})
# currently saving for best loss modify to save for best AUC
if i% 10 == 0:
# Save the variables to disk.
if r3 < temp_loss_r3:
saver.save(sess, SAVEPATH)
print("Model saved", i, r1 , r3)
temp_loss_r3 = r3
continue
if r1 < temp_loss_r1:
saver.save(sess, SAVEPATH)
print("Model saved", i, r1 , r3)
temp_loss_r1 = r1
continue
## Restore variables from disk for least loss.
## To be changed for best AUC
# end of counting training time
time_train_end = time.time()
# get total training time
print("traing duration: ", time_train_end - time_train_start)
            # evaluation part
saver.restore(sess, SAVEPATH)
print("generating embedding for test samples")
emb_list = []
name_list = []
test_list = []
data_gen = BatchGenerator(r'/home/administrator/zixiang/gemini/{}/test/*.ida'.format(FLAGS.emb_type,FLAGS.emb_type), FILTER_SIZE)
for k, v in data_gen.train_sample.items():
if len(v) >= 2:
rd = random.sample(v, 2)
test_list.extend(rd)
it = iter(test_list)
emb_func = [siamese.get_embedding()]
while True:
acfg_mat, acfg_nbr_mat, acfg_length_list, mul_mat, func_name_list = get_some_embedding(it)
if len(mul_mat) == 0:
break
idx = 0
idy = 0
merged_acfg_mat = np.ndarray((acfg_nbr_mat.shape[0], FLAGS.vector_size))
if FLAGS.emb_type != "manual" and FLAGS.emb_type != "albert_avg":
for length in acfg_length_list:
for l in length:
ins = np.expand_dims(acfg_mat[idx: idx+l], axis=0)
merged_acfg_mat[idy,:] = np.squeeze(sess.run([siamese.bb_emb], feed_dict={siamese.ins: ins}), axis=0)
idy += 1
idx += l
# print(merged_acfg_mat.shape, acfg_nbr_mat.shape)
emb = sess.run(emb_func, feed_dict={siamese.x: np.concatenate([merged_acfg_mat, np.transpose(mul_mat)], 1),
siamese.n: acfg_nbr_mat})
else:
emb = sess.run(emb_func, feed_dict={siamese.x: np.concatenate([acfg_mat, np.transpose(mul_mat)], 1),
siamese.n: acfg_nbr_mat})
emb_list.extend(emb[0])
name_list.extend(func_name_list)
print("evaluating prediction values")
# AUC_Matrix = []
# done_pred = []
training = Training()
resultMat = training.test_similarity(sess, emb_list, emb_list)
rank_index_list = []
to_sort_list = tf.placeholder(tf.float32, (None, None))
sort_func = tf.contrib.framework.argsort(to_sort_list, direction='DESCENDING')
time_eval_start = time.time()
for i in range(0, len(resultMat), 5000):
ret = sess.run(sort_func, feed_dict={to_sort_list: resultMat[i:i + 5000]})
rank_index_list.extend(ret)
time_eval_end = time.time()
print("sort duration: ", time_eval_end - time_eval_start)
# for i in range(len(resultMat)):
# sample = resultMat[i]
# if (name_list[i] != name_list[rank_index_list[i][0]] and i != rank_index_list[i][0]) or (name_list[i] != name_list[rank_index_list[i][1]] and i == rank_index_list[i][0]):
# sample = np.sort(sample)
# print(sample[-50:])
# print("target:", name_list[i])
# print("candidate: ", [name_list[j] for j in rank_index_list[i][:50]])
del resultMat
func_counts = len(rank_index_list)
print("func_counts: ", func_counts)
total_tp = []
total_fp = []
for func in range(func_counts):
real_name = name_list[func]
tp = [0]
fp = [0]
for rank, idx in enumerate(rank_index_list[func]):
if func == idx:
assert name_list[idx] == real_name
continue
if name_list[idx] == real_name:
#print(rank)
tp.append(1)
fp.append(fp[-1])
else:
tp.append(max(tp[-1], 0))
fp.append(fp[-1] + 1)
total_tp.append(tp[1:])
total_fp.append(fp[1:])
# num_positive = sum(len(v) * len(v) for k, v in data_gen.test_sample.items())
num_positive = len(test_list)
num_negative = func_counts * func_counts - num_positive - func_counts
            total_tp = np.sum(total_tp, axis=0, dtype=np.float64) / func_counts
            total_fp = np.sum(total_fp, axis=0, dtype=np.float64) / num_negative
time_eval_end = time.time()
print("eval duration: ", time_eval_end - time_eval_start)
return total_fp, total_tp
def plot_eval_siamese(total_fp, total_tp):
plt.figure(1)
plt.title('ROC')
plt.plot(total_fp, total_tp, '-', label='ROC')
plt.legend(loc='lower right')
plt.xlim([-0.1, 1.1])
plt.ylim([-0.1, 1.1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
if __name__ == "__main__":
total_fp, total_tp = train_siamese(40001)
plot_eval_siamese(total_fp, total_tp)
with open('./{}_total_fp.txt'.format(FLAGS.emb_type), 'wb') as f:
pkl.dump(total_fp, f)
with open('./{}_total_tp.txt'.format(FLAGS.emb_type), 'wb') as f:
pkl.dump(total_tp, f)
print(metrics.auc(total_fp, total_tp))
| 13,215 | 38.687688 | 188 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/embedding/save_embeddings.py | #!/usr/bin/python
import argparse
import os
import pickle
import sqlite3
import sys
import base64
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec
embeddings = {}
def get_config():
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--embed_pickle_path', dest='embed_pickle_path', help='The file saving all training parameters', type=str, required=True)
parser.add_argument('-i', '--int2insn_map_path', dest='int2insn_path', help='The pickle file saving int -> instruction mapping.', type=str, required=False, default='int2insn.map')
parser.add_argument('-m', '--model_path', dest='model_path', help='The file saving the trained embedding model', type=str, required=True)
parser.add_argument('-o', '--output_file', dest='output_file', help='The file saving the embedding vector for each instruction', type=str, required=False, default='embed.pkl')
args = parser.parse_args()
config_info = {
'embed_pickle_path': args.embed_pickle_path,
'model_path': args.model_path,
'output_file': args.output_file,
'int2insn_path': args.int2insn_path
}
return config_info
def main():
config_info = get_config()
embed_pickle_path = config_info['embed_pickle_path']
model_path = config_info['model_path']
output_file = config_info['output_file']
int2insn_path = config_info['int2insn_path']
with tf.Graph().as_default(), tf.Session() as sess:
print "Loading model..."
saver = tf.train.import_meta_graph(model_path + ".meta")
a = saver.restore(sess, model_path)
print "Model loaded"
print "Loading embed input data..."
input_data = pickle.load(open(embed_pickle_path))
print "Embed input data loaded"
print "Loading int to instruction map..."
int2insn_map = pickle.load(open(int2insn_path))
int2insn_map['UNK'] = 'UNK'
print "Int to instruction map loaded"
w_out = [v for v in tf.global_variables() if v.name == "w_out:0"][0]
num = 0
total_num = len(input_data['word2id'])
ids = []
vectors = {}
error_num = 0
for word in input_data['word2id']:
word_id = input_data['word2id'][word]
ids.append(word_id)
if len(ids) == 1000:
part_vector = tf.nn.embedding_lookup(w_out, ids).eval()
for i in range(len(ids)):
word_id = ids[i]
word = input_data['id2word'][word_id]
if word != 'UNK':
word = int(word)
vector = part_vector[i]
insn = int2insn_map[word]
embeddings[str(insn)] = {'vector': vector}
ids = []
num += 1000
if num % 1000 == 0:
print "{} computed ({}%)".format(num, 100.0 * num / total_num)
if len(ids) > 0:
part_vector = tf.nn.embedding_lookup(w_out, ids).eval()
for i in range(len(ids)):
word_id = ids[i]
word = input_data['id2word'][word_id]
if word != 'UNK':
word = int(word)
vector = part_vector[i]
insn = int2insn_map[word]
embeddings[str(insn)] = {'vector': vector}
print "{} Errors".format(error_num)
pickle.dump(embeddings, open(output_file, "w"))
print "Done"
if __name__ == '__main__':
main()
| 3,548 | 30.6875 | 183 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/embedding/insn_int.py | '''
transfer instructions (lists of byte values) to integers,
in either big-endian or little-endian (inverse) byte order
input: int list (one byte value per element)
output: one integer
'''
def insn2int_inverse(insn_list):
    '''
    transfer the instruction to an integer with inverse (little-endian) byte order
    example:
        [72,137,229] ==> 15042888 (72 + 137*256 + 229*256**2)
        [243, 15, 16, 13, 205, 0, 0, 0] ==> 880687452147
            (243 + 15*256 + 16*256**2 + 13*256**3 + 205*256**4 + 0*256**5 + 0*256**6 + 0*256**7)
    :param insn_list:
    :return insn_int:
    '''
insn_int=0
for idx, value in enumerate(insn_list):
insn_int = insn_int + value*(256**idx)
return insn_int
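# A minimal, hedged sanity check of the little-endian encoding above; the
# byte lists are illustrative ([72, 137, 229] is the docstring example,
# i.e. mov rbp, rsp; [85] is push rbp).
def _insn2int_inverse_demo():
    assert insn2int_inverse([72, 137, 229]) == 72 + 137*256 + 229*256**2  # 15042888
    assert insn2int_inverse([85]) == 85  # single-byte instruction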
def insn2int(insn_list):
    '''
    transfer the instruction to an integer with big-endian byte order (most significant byte first)
    example:
        [72,137,229] ==> 4753893 (72*256**2 + 137*256 + 229)
        [243, 15, 16, 13, 205, 0, 0, 0] ==> 1.751423513*10^19
            (243*256**7 + 15*256**6 + 16*256**5 + 13*256**4 + 205*256**3 + 0*256**2 + 0*256 + 0)
    :param insn_list:
    :return insn_int:
    '''
insn_len=len(insn_list)-1
insn_int=0
for idx, value in enumerate(insn_list):
insn_int = insn_int + value*(256**(insn_len- idx))
return insn_int | 1,096 | 28.648649 | 134 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/embedding/prep_embed_input.py | '''
Input: the whole dataset (in order to build the whole vocabulary)
Output:
    output_path: the input corpus for the embedding model
    error_path: saves all error information (especially when two distinct instructions map to the same integer)
    int2insn_map_path: the mapping information (int -> insn (int list))
'''
import pickle
import argparse
import sys
import os
import insn_int
def get_file_path(folder_path, tag):
path_list=[]
file_list=os.listdir(folder_path)
'''initial path list'''
for file_name in file_list:
path_list.append(os.path.join(folder_path, file_name))
final_path_list=[]
tag_len = len(tag)
'''get all specific path'''
while(len(path_list) > 0):
source_path=path_list[0]
path_list.remove(source_path)
if not os.path.isdir(source_path) and (source_path[-tag_len-1] == '.') and (source_path[-tag_len:] == tag):
final_path_list.append(source_path)
elif os.path.isdir(source_path):
file_list=os.listdir(source_path)
for file_name in file_list:
path_list.append(os.path.join(source_path, file_name))
else:
pass
return final_path_list
class GetVocab(object):
def __init__(self, config):
self.config = config
self.path_list=get_file_path(self.config['input_folder_path'], 'pkl')
self.int2insn_map=dict()
self.get_embed_input()
def get_embed_input(self):
cnt = 0
for file_path in self.path_list:
temp=pickle.load(open(file_path))
insn2int_list = []
for func_name in temp['functions']:
for insn in temp['functions'][func_name]['inst_bytes']:
int_value=insn_int.insn2int_inverse(insn)
if int_value in self.int2insn_map:
if self.int2insn_map[int_value] != insn:
error_str='[ERROR] different insns map to same integer!!!!'
print(error_str)
with open(self.config['error_path'], 'a') as f:
f.write(error_str+'\n')
f.write('format: [int_value] insn1 # insn2\n')
f.write('[%d] %s # %s\n' % (int_value, str(self.int2insn_map[int_value]), str(insn)))
else:
self.int2insn_map[int_value]=insn
insn2int_list.append(str(int_value))
with open(self.config['output_path'], 'ab') as f:
if cnt == 0:
pass
else:
f.write(' ')
f.write(' '.join(insn2int_list))
cnt+=1
'''print the process'''
if cnt % 100==0:
print('[embed_input] Unpickle files: %d' % cnt)
else:
pass
print('[embed_input] Got the input for training the embedding model!')
with open(self.config['int2insn_map_path'], 'w') as f:
pickle.dump(self.int2insn_map, f)
print('[embed_input] Saved the integer-insn mapping information!')
print('[embed_input] END!')
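# Sketch of a produced corpus line (the instructions are hypothetical):
# inst_bytes such as [85] (push rbp) and [72, 137, 229] (mov rbp, rsp) become
# their little-endian integers, so the output file holds one long
# space-separated stream like "85 15042888 ..." that word2vec consumes as text.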
def get_config():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_folder_path', dest='input_folder_path', help='The data folder saving binaries information.', type=str, required=True)
parser.add_argument('-o', '--output_path', dest='output_path' ,help='The file saving the input for embedding model.', type=str, required=False, default='embed_input')
parser.add_argument('-e', '--error_path', dest='error_path' ,help='The file saving all error information. ', type=str, required=False, default='error_log')
parser.add_argument('-m', '--int2insn_map_path', dest='int2insn_map_path', help='The file saving the map information (int -> instruction (int list)).', type=str, required=False, default='int2insn.map')
args = parser.parse_args()
config_info = {
'input_folder_path': args.input_folder_path,
'output_path': args.output_path,
'error_path': args.error_path,
'int2insn_map_path': args.int2insn_map_path
}
return config_info
def main():
config_info = get_config()
my_vocab=GetVocab(config_info)
if __name__ == '__main__':
main() | 4,326 | 39.064815 | 205 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/embedding/train_embed.py | '''
train the embedding model in order to get a vector representing each instruction;
the vectors embed the semantic information of the instruction
input: the output of prep_embed_input (output_path)
output: the embedding model and the mapping from integers to vectors
'''
import os
import sys
import argparse
import tensorflow as tf
import threading
import pickle
import time
from tensorflow.models.embedding import gen_word2vec as word2vec
from six.moves import xrange
class Options(object):
def __init__(self):
pass
class TrainEmbed(object):
def __init__(self, args, session):
self._session = session
self._word2id = {}
self._id2word = []
self.global_epoch=0
self.num_threads = int(args['thread_num'])
self.data_file_path = args['input_path']
self.read_config(args)
self.get_output_path(args)
self.build_graph()
def read_config(self, config):
self._options = Options()
self._options.train_data = config['input_path']
self._options.emb_dim = int(config['embedding_size'])
self._options.batch_size = int(config['batch_size'])
self._options.window_size = int(config['window_size'])
self._options.min_count = int(config['min_count'])
self._options.subsample = float(config['subsample'])
self._options.epochs_to_train = int(config['num_epochs'])
self._options.learning_rate = float(config['learning_rate'])
self._options.num_samples = int(config['num_neg_samples'])
def get_output_path(self, config):
if os.path.isdir(config['output_dir']):
'''embedding mapping path'''
cnt = 1
self.output_path = os.path.join(config['output_dir'], 'embed_%d.emb' % cnt)
while(os.path.exists(self.output_path)):
cnt += 1
self.output_path = os.path.join(config['output_dir'], 'embed_%d.emb' % cnt)
'''folder for saving embedding model'''
self.model_folder = os.path.join(config['output_dir'], 'model_%d' % cnt)
os.mkdir(self.model_folder)
else:
error_str = '[ERROR] the output folder does not exist! ... %s' % config['output_dir']
sys.exit(error_str)
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def build_graph(self):
"""Build the model graph."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, current_epoch, total_words_processed,
examples, labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
# Declare all variables we need.
# Input words embedding: [vocab_size, emb_dim]
w_in = tf.Variable(
tf.random_uniform(
[opts.vocab_size,
opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
name="w_in")
        # Output words embedding: [vocab_size, emb_dim]
w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
# Global step: []
global_step = tf.Variable(0, name="global_step")
# Linear learning rate decay.
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001,
1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
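        # e.g. with learning_rate=0.001 (hypothetical), lr shrinks linearly as
        # words are processed and is floored at 0.0001 * 0.001 once
        # total_words_processed approaches words_to_train.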
# Training nodes.
inc = global_step.assign_add(1)
with tf.control_dependencies([inc]):
train = word2vec.neg_train(w_in,
w_out,
examples,
labels,
lr,
vocab_count=opts.vocab_counts.tolist(),
num_negative_samples=opts.num_samples)
self._w_in = w_in
self._examples = examples
self._labels = labels
self._lr = lr
self._train = train
self.step = global_step
self._epoch = current_epoch
self._words = total_words_processed
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver(max_to_keep=100,)
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
workers = []
for _ in xrange(self.num_threads):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time = initial_words, time.time()
while True:
            time.sleep(5)  # Report progress once in a while.
(epoch, step, words,
lr) = self._session.run([self._epoch, self.step, self._words, self._lr])
self.global_epoch = epoch
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (now - last_time)
print("Epoch %4d Step %8d: lr = %12.10f words/sec = %8.0f" % (epoch, step, lr, rate))
sys.stdout.flush()
if epoch != initial_epoch:
break
for t in workers:
t.join()
def save(self):
# save the model and the corresponding training parameters
insn_embed = {}
# with tempfile.NamedTemporaryFile() as temp_file:
ckpt_name = os.path.join(self.model_folder, 'model_%d.ckpt' % self.global_epoch)
self.saver.save(self._session, ckpt_name)
insn_embed['vocab_size'] = self._options.vocab_size
insn_embed['embedding_size'] = self._options.emb_dim
insn_embed['word2id'] = self._word2id
insn_embed['id2word'] = self._id2word
insn_embed['num_epochs'] = self._options.epochs_to_train
insn_embed['learning_rate'] = self._options.learning_rate
insn_embed['num_neg_samples'] = self._options.num_samples
insn_embed['batch_size'] = self._options.batch_size
insn_embed['window_size'] = self._options.window_size
insn_embed['min_count'] = self._options.min_count
insn_embed['subsample'] = self._options.subsample
if os.path.exists(self.output_path):
os.remove(self.output_path)
else:
pass
pickle.dump(insn_embed, open(self.output_path, 'wb'))
print('Saved word embedding network as %s.' % self.output_path)
def get_config():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path', dest='input_path', help='The input file for training embedding model', type=str, required=True)
parser.add_argument('-o', '--output_dir', dest='output_dir', help='The output folder saving the trained embedding information', type=str, required=False, default='embed_output')
parser.add_argument('-tn', '--thread_num', dest='thread_num', help='Number of threads', type=int, required=False, default=40)
parser.add_argument('-sw', '--save_window', dest='save_window', help='Saving the trained information every save_window epoch', type=int, required=False, default=5)
parser.add_argument('-e', '--embed_dim', dest='embed_dim', help='Dimension of the embedding vector for each instruction', type=int, required=False, default=256)
parser.add_argument('-ne', '--num_epochs', dest='num_epochs', help='Number of epochs for training the embedding model', type=int, required=False, default=100)
parser.add_argument('-l', '--learning_rate', dest='learning_rate', help='Learning rate', type=float, required=False, default=0.001)
    parser.add_argument('-nn', '--num_neg_samples', dest='num_neg_samples', help='Number of negative samples', type=int, required=False, default=25)
parser.add_argument('-b', '--batch_size', dest='batch_size', help='Batch size', type=int, required=False, default=512)
parser.add_argument('-ws', '--window_size', dest='window_size', help='Window size', type=int, required=False, default=5)
parser.add_argument('-mc', '--min_count', dest='min_count', help='Ignoring all words with total frequency lower than this', type=int, required=False, default=1)
parser.add_argument('-s', '--subsample', dest='subsample', help='Subsampling threshold', type=float, required=False, default=0.01)
args = parser.parse_args()
config_info = {
'input_path': args.input_path,
'output_dir': args.output_dir,
'thread_num': args.thread_num,
'save_window': args.save_window,
'embedding_size': args.embed_dim,
'num_epochs': args.num_epochs,
'learning_rate': args.learning_rate,
'num_neg_samples': args.num_neg_samples,
'batch_size': args.batch_size,
'window_size': args.window_size,
'min_count': args.min_count,
'subsample': args.subsample
}
return config_info
def main():
config_info = get_config()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
'''create the training graph for embedding model'''
my_embed = TrainEmbed(config_info, session)
for _ in xrange(my_embed._options.epochs_to_train):
my_embed.train()
if my_embed.global_epoch % int(config_info['save_window']) == 0 :
my_embed.save()
else:
pass
my_embed.save()
if __name__ == '__main__':
main() | 10,473 | 40.729084 | 181 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/test/dataset.py | import pickle
import os
import numpy as np
from multiprocessing import Pool
embed_info = {}
type_info = {
'char': 0,
'int': 1,
'float': 2,
'pointer': 3,
'enum': 4,
'struct': 5,
'union': 6
}
def approximate_type(type_str):
int_list = ['_Bool', 'unsigned int', 'int', 'long long int', 'long long unsigned int', 'unsigned short',
'short unsigned int', 'short', 'long unsigned int', 'short int', 'long int']
char_list = ['char', 'unsigned char', 'signed char']
if type_str[-1] == '*' or type_str == 'func_ptr' or type_str.split()[0][-1] == '*':
return 'pointer'
elif type_str in int_list:
return 'int'
elif type_str[:5] == 'enum ':
return 'enum'
elif type_str in char_list:
return 'char'
elif type_str[:7] == 'struct ':
return 'struct'
elif type_str[:6] == 'union ':
return 'union'
elif type_str == 'double' or type_str == 'long double':
return 'float'
else:
return type_str
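# Hedged illustrations of the coarsening above (the inputs are hypothetical
# DWARF type strings, not drawn from the dataset):
def _approximate_type_examples():
    assert approximate_type('char *') == 'pointer'
    assert approximate_type('unsigned int') == 'int'
    assert approximate_type('struct stat') == 'struct'
    assert approximate_type('long double') == 'float'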
def one_hot_encoding(label_id, class_num):
temp = np.zeros(class_num)
temp[label_id] = 1
return temp
def get_single_num_args(folder_path, file_name, func_list, embed_dim, max_length, class_num):
file_path = os.path.join(folder_path, file_name)
extract_info = {}
with open(file_path) as f:
file_info = pickle.load(f)
for func_name in func_list:
func_tag = '%s#%s' % (file_name, func_name)
extract_info[func_tag] = {}
inst_bytes = file_info['functions'][func_name]['inst_bytes']
temp_data = []
for inst in inst_bytes:
if str(inst) in embed_info:
temp_data.append(embed_info[str(inst)]['vector'])
else:
temp_data.append([0.0] * embed_dim)
if len(temp_data) >= max_length:
break
temp_data = np.asarray(temp_data)
if temp_data.shape[0] < max_length:
extract_info[func_tag]['length'] = temp_data.shape[0]
temp_zero = np.zeros((max_length - temp_data.shape[0], embed_dim))
temp_data = np.concatenate((temp_data, temp_zero), axis=0)
else:
extract_info[func_tag]['length'] = temp_data.shape[0]
extract_info[func_tag]['data'] = temp_data
extract_info[func_tag]['label'] = one_hot_encoding(file_info['functions'][func_name]['num_args'], class_num)
return extract_info
def get_single_args_type(folder_path, file_name, func_list, embed_dim, max_length, class_num, arg_no):
file_path = os.path.join(folder_path, file_name)
extract_info = {}
with open(file_path) as f:
file_info = pickle.load(f)
for func_name in func_list:
func_tag = '%s#%s' % (file_name, func_name)
extract_info[func_tag] = {}
inst_bytes = file_info['functions'][func_name]['inst_bytes']
temp_data = []
for inst in inst_bytes:
if str(inst) in embed_info:
temp_data.append(embed_info[str(inst)]['vector'])
else:
temp_data.append([0.0] * embed_dim)
if len(temp_data) >= max_length:
break
temp_data = np.asarray(temp_data)
if temp_data.shape[0] < max_length:
extract_info[func_tag]['length'] = temp_data.shape[0]
temp_zero = np.zeros((max_length - temp_data.shape[0], embed_dim))
temp_data = np.concatenate((temp_data, temp_zero), axis=0)
else:
extract_info[func_tag]['length'] = temp_data.shape[0]
extract_info[func_tag]['data'] = temp_data
temp_type = approximate_type(file_info['functions'][func_name]['args_type'][arg_no])
extract_info[func_tag]['label'] = one_hot_encoding(type_info[temp_type], class_num)
return extract_info
class Dataset(object):
def __init__(self, data_folder, func_path, embed_path, thread_num, embed_dim, max_length, class_num, tag):
global embed_info
self.data_folder = data_folder
self.tag = tag #num_args or type#0
if self.tag == 'num_args':
pass
else:
self.arg_no = int(self.tag.split('#')[-1])
self.thread_num = thread_num
self.embed_dim = embed_dim
self.max_length = max_length
self.class_num = class_num
with open(func_path) as f:
func_info = pickle.load(f)
self.func_list = np.asarray(func_info['test'])
self.func_num = len(self.func_list)
        print('Loaded test function information ... %s' % func_path)
        print('Test Function Number: %d' % self.func_num)
with open(embed_path) as f:
embed_info = pickle.load(f)
print('Loaded embed information ... %s' % embed_path)
self.test_tag = True
self._index_in_test = 0
def get_batch_data(self, batch_func_list):
func_list = sorted(batch_func_list)
binary_name = ''
input_func_list = []
batch_info = {}
pool = Pool(self.thread_num)
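        # func_list is sorted, so functions from the same binary are adjacent;
        # each contiguous run is dispatched as one async job that unpickles
        # that binary once and extracts all of its requested functions.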
if self.tag == 'num_args':
for whole_func_name in func_list:
if binary_name == '':
binary_name = whole_func_name.split('#')[0]
input_func_list.append(whole_func_name.split('#')[1])
else:
if binary_name == whole_func_name.split('#')[0]:
input_func_list.append(whole_func_name.split('#')[1])
else:
pool.apply_async(
get_single_num_args,
args= (self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num),
callback= batch_info.update
)
binary_name = whole_func_name.split('#')[0]
input_func_list = [whole_func_name.split('#')[1]]
if len(input_func_list) == 0:
pass
else:
pool.apply_async(
get_single_num_args,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num),
callback=batch_info.update
)
else: #self.tag == 'type#0'
for whole_func_name in func_list:
if binary_name == '':
binary_name = whole_func_name.split('#')[0]
input_func_list.append(whole_func_name.split('#')[1])
else:
if binary_name == whole_func_name.split('#')[0]:
input_func_list.append(whole_func_name.split('#')[1])
else:
pool.apply_async(
get_single_args_type,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length,
self.class_num, self.arg_no),
callback= batch_info.update
)
binary_name = whole_func_name.split('#')[0]
input_func_list = [whole_func_name.split('#')[1]]
if len(input_func_list) == 0:
pass
else:
pool.apply_async(
get_single_args_type,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num, self.arg_no),
callback=batch_info.update
)
pool.close()
pool.join()
new_batch_data = {
'data': [],
'label': [],
'length': [],
'func_name':[]
}
for full_func_name in batch_info:
new_batch_data['data'].append(batch_info[full_func_name]['data'])
new_batch_data['label'].append(batch_info[full_func_name]['label'])
new_batch_data['length'].append(batch_info[full_func_name]['length'])
new_batch_data['func_name'].append(full_func_name)
batch_info = {
'data': np.asarray(new_batch_data['data'], dtype=np.float32),
'label': np.asarray(new_batch_data['label'], dtype=np.float32),
'length': np.asarray(new_batch_data['length'], dtype=np.float32),
'func_name': np.asarray(new_batch_data['func_name'])
}
return batch_info
def get_batch(self, batch_size):
start = self._index_in_test
if start + batch_size >= self.func_num:
self.test_tag = False
func_list_batch = self.func_list[start:]
test_batch = self.get_batch_data(func_list_batch)
return test_batch
else:
self._index_in_test += batch_size
end = self._index_in_test
func_list_batch = self.func_list[start: end]
test_batch = self.get_batch_data(func_list_batch)
return test_batch
| 9,019 | 38.911504 | 136 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/test/eval.py | import tensorflow as tf
import dataset
import dataset_caller
import os
import sys
import argparse
import functools
import pickle
import inspect
def lazy_property(function):
attribute = '_' + function.__name__
@property
@functools.wraps(function)
def wrapper(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return wrapper
def placeholder_inputs(class_num, max_length= 500, embedding_dim= 256):
data_placeholder = tf.placeholder(tf.float32, [None, max_length, embedding_dim])
label_placeholder = tf.placeholder(tf.float32, [None, class_num])
length_placeholder = tf.placeholder(tf.int32, [None,])
keep_prob_placeholder = tf.placeholder(tf.float32) # dropout (keep probability)
return data_placeholder, label_placeholder, length_placeholder, keep_prob_placeholder
def fill_feed_dict(data_set, batch_size, data_tag, keep_prob, data_pl, label_pl, length_pl, keep_prob_pl):
data_batch = data_set.get_batch(batch_size=batch_size)
feed_dict = {
data_pl: data_batch['data'],
label_pl: data_batch['label'],
length_pl: data_batch['length'],
keep_prob_pl: keep_prob
}
return feed_dict, data_batch['func_name']
class Model(object):
def __init__(self, session, my_data, config_info, data_pl, label_pl, length_pl, keep_prob_pl):
self.session = session
self.datasets = my_data
self.emb_dim = int(config_info['embed_dim'])
self.dropout = float(config_info['dropout'])
self.num_layers = int(config_info['num_layers'])
self.num_classes = int(config_info['num_classes'])
self.batch_size = int(config_info['batch_size'])
self._data = data_pl
self._label = label_pl
self._length = length_pl
self._keep_prob = keep_prob_pl
self.run_count = 0
self.build_graph()
@lazy_property
def probability(self):
def lstm_cell():
if 'reuse' in inspect.getargspec(tf.contrib.rnn.GRUCell.__init__).args:
return tf.contrib.rnn.GRUCell(self.emb_dim, reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.GRUCell(self.emb_dim)
attn_cell = lstm_cell
if self.dropout < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=self._keep_prob)
single_cell = tf.contrib.rnn.MultiRNNCell([attn_cell() for _ in range(self.num_layers)], state_is_tuple=True)
output, state = tf.nn.dynamic_rnn(single_cell, self._data, dtype=tf.float32,
sequence_length=self._length)
weight = tf.Variable(tf.truncated_normal([self.emb_dim, self.num_classes], stddev=0.01))
bias = tf.Variable(tf.constant(0.1, shape=[self.num_classes]))
self.output = output
probability = tf.matmul(self.last_relevant(output, self._length), weight) + bias
return probability
def last_relevant(self, output, length):
batch_size = tf.shape(output)[0]
max_len = int(output.get_shape()[1])
output_size = int(output.get_shape()[2])
index = tf.range(0, batch_size) * max_len + (length - 1)
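        # e.g. with batch_size=2 and max_len=3 (hypothetical), output flattens
        # to 6 rows; lengths [2, 3] select rows 0*3+(2-1)=1 and 1*3+(3-1)=5,
        # i.e. the last valid timestep of each sequence.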
flat = tf.reshape(output, [-1, output_size])
relevant = tf.gather(flat, index)
return relevant
@lazy_property
def cost_list(self):
prediction = self.probability
target = self._label
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=target)
return cross_entropy
@lazy_property
def cost(self):
cross_entropy = tf.reduce_mean(self.cost_list)
return cross_entropy
@lazy_property
def optimize(self):
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = tf.train.AdamOptimizer().minimize(self.cost, global_step)
return train_op
@lazy_property
def calc_accuracy(self):
true_probability = tf.nn.softmax(self.probability)
correct_pred = tf.equal(tf.argmax(true_probability, 1), tf.argmax(self._label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('acc', accuracy)
return accuracy
@lazy_property
def pred_label(self):
true_probability = tf.nn.softmax(self.probability)
pred_output = tf.argmax(true_probability, 1)
label_output = tf.argmax(self._label, 1)
output_result = {
'pred': pred_output,
'label': label_output
}
return output_result
def build_graph(self):
self.optimize
self.calc_accuracy
self.pred_label
self.saver = tf.train.Saver(tf.trainable_variables())
tf.global_variables_initializer().run()
def test(self):
total_result = {
'cost': [],
'pred': [],
'func_name': []
}
while self.datasets.test_tag:
feed_dict, func_name_list = fill_feed_dict(self.datasets, self.batch_size, 'test', 1.0,
self._data, self._label, self._length, self._keep_prob)
cost_result, pred_result = self.session.run(
[self.cost_list, self.pred_label],
feed_dict = feed_dict
)
total_result['cost'].append(cost_result)
total_result['pred'].append(pred_result)
total_result['func_name'].append(func_name_list)
return total_result
def get_model_id_list(folder_path):
file_list = os.listdir(folder_path)
model_id_set = set()
for file_name in file_list:
if file_name[:6] == 'model-':
model_id_set.add(int(file_name.split('.')[0].split('-')[-1]))
else:
pass
model_id_list = sorted(list(model_id_set))
return model_id_list
def testing(config_info):
data_folder = config_info['data_folder']
func_path = config_info['func_path']
embed_path = config_info['embed_path']
tag = config_info['tag']
data_tag = config_info['data_tag']
process_num = int(config_info['process_num'])
embed_dim = int(config_info['embed_dim'])
max_length = int(config_info['max_length'])
num_classes = int(config_info['num_classes'])
model_dir = config_info['model_dir']
output_dir = config_info['output_dir']
'''create model & log folder'''
if os.path.exists(output_dir):
pass
else:
os.mkdir(output_dir)
print('Created all folders!')
'''load dataset'''
if data_tag == 'callee':
my_data = dataset.Dataset(data_folder, func_path, embed_path, process_num, embed_dim, max_length, num_classes, tag)
else: # caller
my_data = dataset_caller.Dataset(data_folder, func_path, embed_path, process_num, embed_dim, max_length, num_classes, tag)
print('Created the dataset!')
'''get model id list'''
# model_id_list = sorted(get_model_id_list(model_dir), reverse=True)
model_id_list = sorted(get_model_id_list(model_dir))
with tf.Graph().as_default(), tf.Session() as session:
# generate placeholder
data_pl, label_pl, length_pl, keep_prob_pl = placeholder_inputs(num_classes, max_length, embed_dim)
# generate model
model = Model(session, my_data, config_info, data_pl, label_pl, length_pl, keep_prob_pl)
print('Created the model!')
for model_id in model_id_list:
result_path = os.path.join(output_dir, 'test_result_%d.pkl' % model_id)
if os.path.exists(result_path):
continue
else:
pass
model_path = os.path.join(model_dir, 'model-%d' % model_id)
model.saver.restore(session, model_path)
total_result = model.test()
my_data._index_in_test = 0
my_data.test_tag = True
with open(result_path, 'w') as f:
pickle.dump(total_result, f)
print('Save the test result !!! ... %s' % result_path)
def get_config():
'''
get config information
'''
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data_folder', dest='data_folder', help='The data folder of testing dataset.', type=str, required=True)
parser.add_argument('-f', '--split_func_path', dest='func_path', help='The path of file saving the training & testing function names.', type=str, required=True)
parser.add_argument('-e', '--embed_path', dest='embed_path', help='The path of file saving embedding vectors.', type=str, required=True)
parser.add_argument('-o', '--output_dir', dest='output_dir', help='The directory to saved the evaluation result.', type=str, required=True)
parser.add_argument('-m', '--model_dir', dest='model_dir', help='The directory saved the models.', type=str, required=True)
parser.add_argument('-t', '--label_tag', dest='tag', help='The type of labels. Possible value: num_args, type#0, type#1, ...', type=str, required=False, default='num_args')
parser.add_argument('-dt', '--data_tag', dest='data_tag', help='The type of input data.', type=str, required=False, choices=['caller', 'callee'], default='callee')
parser.add_argument('-pn', '--process_num', dest='process_num', help='Number of processes.', type=int, required=False, default=40)
parser.add_argument('-ed', '--embedding_dim', dest='embed_dim', help='The dimension of embedding vector.', type=int, required=False, default=256)
    parser.add_argument('-ml', '--max_length', dest='max_length', help='The maximum length of input sequences.', type=int, required=False, default=500)
parser.add_argument('-nc', '--num_classes', dest='num_classes', help='The number of classes', type=int, required=False, default=16)
parser.add_argument('-do', '--dropout', dest='dropout', help='The dropout value.', type=float, required=False, default=1.0)
parser.add_argument('-nl', '--num_layers', dest='num_layers', help='Number of layers in RNN.', type=int, required=False, default=3)
parser.add_argument('-b', '--batch_size', dest='batch_size', help='The size of batch.', type=int, required=False, default=256)
args = parser.parse_args()
config_info = {
'data_folder': args.data_folder,
'func_path': args.func_path,
'embed_path': args.embed_path,
'tag': args.tag,
'data_tag': args.data_tag,
'process_num': args.process_num,
'embed_dim': args.embed_dim,
'max_length': args.max_length,
'num_classes': args.num_classes,
'output_dir': args.output_dir,
'model_dir': args.model_dir,
'dropout': args.dropout,
'num_layers': args.num_layers,
'batch_size': args.batch_size
}
return config_info
def main():
config_info = get_config()
testing(config_info)
if __name__ == '__main__':
main() | 11,011 | 38.188612 | 176 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/test/dataset_caller.py | import pickle
import os
import numpy as np
from multiprocessing import Pool
embed_info = {}
type_info = {
'char': 0,
'int': 1,
'float': 2,
'pointer': 3,
'enum': 4,
'struct': 5,
'union': 6
}
def approximate_type(type_str):
int_list = ['_Bool', 'unsigned int', 'int', 'long long int', 'long long unsigned int', 'unsigned short',
'short unsigned int', 'short', 'long unsigned int', 'short int', 'long int']
char_list = ['char', 'unsigned char', 'signed char']
if type_str[-1] == '*' or type_str == 'func_ptr' or type_str.split()[0][-1] == '*':
return 'pointer'
elif type_str in int_list:
return 'int'
elif type_str[:5] == 'enum ':
return 'enum'
elif type_str in char_list:
return 'char'
elif type_str[:7] == 'struct ':
return 'struct'
elif type_str[:6] == 'union ':
return 'union'
elif type_str == 'double' or type_str == 'long double':
return 'float'
else:
return type_str
def one_hot_encoding(label_id, class_num):
temp = np.zeros(class_num)
temp[label_id] = 1
return temp
def get_single_num_args(folder_path, file_name, func_list, embed_dim, max_length, class_num):
file_path = os.path.join(folder_path, file_name)
extract_info = {}
with open(file_path) as f:
file_info = pickle.load(f)
for whole_func_name in func_list:
'''callee_name#caller_name#indice'''
temp = whole_func_name.split('#')
callee_name = temp[0]
caller_name = temp[1]
indice = int(temp[2])
func_tag = '%s#%s' % (file_name, whole_func_name)
extract_info[func_tag] = {}
# inst_bytes = file_info['functions'][caller_name]['inst_bytes'][:indice]
temp_data = []
indice_list = sorted(range(indice), reverse=True)
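        # Caller context is consumed backwards: instructions are walked from
        # just before the call site toward the function entry, so the
        # instruction nearest the call comes first in the sequence.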
for indice_id in indice_list:
inst = file_info['functions'][caller_name]['inst_bytes'][indice_id]
if str(inst) in embed_info:
temp_data.append(embed_info[str(inst)]['vector'])
else:
temp_data.append([0.0] * embed_dim)
if len(temp_data) >= max_length:
break
temp_data = np.asarray(temp_data)
if temp_data.shape[0] < max_length:
extract_info[func_tag]['length'] = temp_data.shape[0]
temp_zero = np.zeros((max_length - temp_data.shape[0], embed_dim))
temp_data = np.concatenate((temp_data, temp_zero), axis=0)
else:
extract_info[func_tag]['length'] = temp_data.shape[0]
extract_info[func_tag]['data'] = temp_data
extract_info[func_tag]['label'] = one_hot_encoding(file_info['functions'][callee_name]['num_args'], class_num)
return extract_info
def get_single_args_type(folder_path, file_name, func_list, embed_dim, max_length, class_num, arg_no):
file_path = os.path.join(folder_path, file_name)
extract_info = {}
with open(file_path) as f:
file_info = pickle.load(f)
for whole_func_name in func_list:
'''callee_name#caller_name#indice'''
temp = whole_func_name.split('#')
callee_name = temp[0]
caller_name = temp[1]
indice = int(temp[2])
func_tag = '%s#%s' % (file_name, whole_func_name)
extract_info[func_tag] = {}
# inst_bytes = file_info['functions'][caller_name]['inst_bytes'][:indice]
temp_data = []
indice_list = sorted(range(indice), reverse=True)
for indice_id in indice_list:
inst = file_info['functions'][caller_name]['inst_bytes'][indice_id]
if str(inst) in embed_info:
temp_data.append(embed_info[str(inst)]['vector'])
else:
temp_data.append([0.0] * embed_dim)
if len(temp_data) >= max_length:
break
temp_data = np.asarray(temp_data)
if temp_data.shape[0] < max_length:
extract_info[func_tag]['length'] = temp_data.shape[0]
temp_zero = np.zeros((max_length - temp_data.shape[0], embed_dim))
temp_data = np.concatenate((temp_data, temp_zero), axis=0)
else:
extract_info[func_tag]['length'] = temp_data.shape[0]
extract_info[func_tag]['data'] = temp_data
temp_type = approximate_type(file_info['functions'][callee_name]['args_type'][arg_no])
extract_info[func_tag]['label'] = one_hot_encoding(type_info[temp_type], class_num)
return extract_info
class Dataset(object):
def __init__(self, data_folder, func_path, embed_path, thread_num, embed_dim, max_length, class_num, tag):
global embed_info
self.data_folder = data_folder
self.tag = tag #num_args or type#0
if self.tag == 'num_args':
pass
else:
self.arg_no = int(self.tag.split('#')[-1])
self.thread_num = thread_num
self.embed_dim = embed_dim
self.max_length = max_length
self.class_num = class_num
with open(func_path) as f:
func_info = pickle.load(f)
self.func_list = np.asarray(func_info['test'])
self.func_num = len(self.func_list)
print('Loaded test function information ... %s' % func_path)
print('Test Function Number: %d' % self.func_num)
with open(embed_path) as f:
embed_info = pickle.load(f)
print('Loaded embed information ... %s' % embed_path)
self._index_in_test = 0
self.test_tag = True
def get_batch_data(self, batch_func_list):
func_list = sorted(batch_func_list)
binary_name = ''
input_func_list = []
batch_info = {}
pool = Pool(self.thread_num)
if self.tag == 'num_args':
for whole_func_name in func_list:
if binary_name == '':
binary_name = whole_func_name.split('#')[0]
input_func_list.append('#'.join(whole_func_name.split('#')[1:]))
else:
if binary_name == whole_func_name.split('#')[0]:
input_func_list.append('#'.join(whole_func_name.split('#')[1:]))
else:
pool.apply_async(
get_single_num_args,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length,
self.class_num),
callback=batch_info.update
)
binary_name = whole_func_name.split('#')[0]
input_func_list = ['#'.join(whole_func_name.split('#')[1:])]
if len(input_func_list) == 0:
pass
else:
pool.apply_async(
get_single_num_args,
args=(
self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num),
callback=batch_info.update
)
else: # self.tag == 'type#0'
for whole_func_name in func_list:
if binary_name == '':
binary_name = whole_func_name.split('#')[0]
input_func_list.append('#'.join(whole_func_name.split('#')[1:]))
else:
if binary_name == whole_func_name.split('#')[0]:
input_func_list.append('#'.join(whole_func_name.split('#')[1:]))
else:
pool.apply_async(
get_single_args_type,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length,
self.class_num, self.arg_no),
callback=batch_info.update
)
binary_name = whole_func_name.split('#')[0]
input_func_list = ['#'.join(whole_func_name.split('#')[1:])]
if len(input_func_list) == 0:
pass
else:
pool.apply_async(
get_single_args_type,
args=(
self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num,
self.arg_no),
callback=batch_info.update
)
pool.close()
pool.join()
new_batch_data = {
'data': [],
'label': [],
'length': [],
'func_name': []
}
for full_func_name in batch_info:
new_batch_data['data'].append(batch_info[full_func_name]['data'])
new_batch_data['label'].append(batch_info[full_func_name]['label'])
            new_batch_data['length'].append(batch_info[full_func_name]['length'])
new_batch_data['func_name'].append(full_func_name)
batch_info = {
'data': np.asarray(new_batch_data['data'], dtype=np.float32),
'label': np.asarray(new_batch_data['label'], dtype=np.float32),
'length': np.asarray(new_batch_data['length'], dtype=np.float32),
'func_name': np.asarray(new_batch_data['func_name'])
}
return batch_info
def get_batch(self, batch_size):
start = self._index_in_test
if start + batch_size >= self.func_num:
self.test_tag = False
func_list_batch = self.func_list[start:]
test_batch = self.get_batch_data(func_list_batch)
return test_batch
else:
self._index_in_test += batch_size
end = self._index_in_test
func_list_batch = self.func_list[start: end]
test_batch = self.get_batch_data(func_list_batch)
return test_batch | 9,864 | 40.104167 | 118 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/split_function_path_gen.py | import os
import pickle
import random
import time
def get_file_path(folder_path, tag):
path_list=[]
file_list=os.listdir(folder_path)
'''initial path list'''
for file_name in file_list:
path_list.append(os.path.join(folder_path, file_name))
final_path_list=[]
tag_len = len(tag)
'''get all specific path'''
while(len(path_list) > 0):
source_path=path_list[0]
path_list.remove(source_path)
if not os.path.isdir(source_path) and (source_path[-tag_len-1] == '.') and (source_path[-tag_len:] == tag):
final_path_list.append(source_path)
elif os.path.isdir(source_path):
file_list=os.listdir(source_path)
for file_name in file_list:
path_list.append(os.path.join(source_path, file_name))
else:
pass
return final_path_list
def main():
random.seed(time.time())
splitFuncDict = {}
train = []
test = []
path_list = get_file_path('smalldata/pickles','pkl')
for file_path in path_list[0:10]:
temp=pickle.load(open(file_path))
for func_name in temp['functions']:
(filepath,filename) = os.path.split(file_path)
if random.random() >= 0.3:
train.append(filename + '#' + func_name)
else:
test.append(filename + '#' + func_name)
splitFuncDict['train'] = train
splitFuncDict['test'] = test
with open('outputs/split_func.pkl', 'wb') as f:
pickle.dump(splitFuncDict, f)
if __name__ == '__main__':
main() | 1,570 | 29.211538 | 115 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/train_rnn.py | import argparse
import functools
import inspect
import os
import sys
import pickle
import dataset
import dataset_caller
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
def lazy_property(function):
attribute = '_' + function.__name__
@property
@functools.wraps(function)
def wrapper(self):
if not hasattr(self, attribute):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return wrapper
def placeholder_inputs(class_num, max_length= 500, embedding_dim= 256):
data_placeholder = tf.placeholder(tf.float32, [None, max_length, embedding_dim])
label_placeholder = tf.placeholder(tf.float32, [None, class_num])
length_placeholder = tf.placeholder(tf.int32, [None,])
keep_prob_placeholder = tf.placeholder(tf.float32) # dropout (keep probability)
return data_placeholder, label_placeholder, length_placeholder, keep_prob_placeholder
def fill_feed_dict(data_set, batch_size, keep_prob, data_pl, label_pl, length_pl, keep_prob_pl):
data_batch = data_set.get_batch(batch_size=batch_size)
feed_dict = {
data_pl: data_batch['data'],
label_pl: data_batch['label'],
length_pl: data_batch['length'],
keep_prob_pl: keep_prob
}
return feed_dict
def fill_test_dict(data_set, batch_size, data_tag, keep_prob, data_pl, label_pl, length_pl, keep_prob_pl):
data_batch = data_set.get_test_batch(batch_size=batch_size)
feed_dict = {
data_pl: data_batch['data'],
label_pl: data_batch['label'],
length_pl: data_batch['length'],
keep_prob_pl: keep_prob
}
return feed_dict, data_batch['func_name']
class Model(object):
def __init__(self, session, my_data, config_info, data_pl, label_pl, length_pl, keep_prob_pl):
self.session = session
self.datasets = my_data
self.emb_dim = int(config_info['embed_dim'])
self.dropout = float(config_info['dropout'])
self.num_layers = int(config_info['num_layers'])
self.num_classes = int(config_info['num_classes'])
self.max_to_save = int(config_info['max_to_save'])
self.output_dir = config_info['log_path']
self.batch_size = int(config_info['batch_size'])
self.summary_frequency = int(config_info['summary_frequency'])
self.embd_type = config_info['embedding_type']
self._data = data_pl
self._label = label_pl
self._length = length_pl
self._keep_prob = keep_prob_pl
self.run_count = 0
self.build_graph()
@lazy_property
def probability(self):
def lstm_cell():
if 'reuse' in inspect.getargspec(tf.contrib.rnn.GRUCell.__init__).args:
return tf.contrib.rnn.GRUCell(self.emb_dim, reuse=tf.get_variable_scope().reuse)
else:
return tf.contrib.rnn.GRUCell(self.emb_dim)
attn_cell = lstm_cell
if self.dropout < 1:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=self._keep_prob)
single_cell = tf.contrib.rnn.MultiRNNCell([attn_cell() for _ in range(self.num_layers)], state_is_tuple=True)
if self.embd_type == '1hot':
embedding = layers.Embedding(5001, 128)
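            # Assumed layout for the '1hot' branch: the data tensor carries
            # integer token ids of shape (batch, maxlen, 1); Embedding maps
            # them to 128-d vectors and the singleton axis is squeezed away
            # before the GRU.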
self.emb_ins = keras.activations.tanh(embedding(self._data))
self.emb_ins = tf.squeeze(self.emb_ins, axis=2)
output, state = tf.nn.dynamic_rnn(single_cell, self.emb_ins, dtype=tf.float32,
sequence_length=self._length)
else:
output, state = tf.nn.dynamic_rnn(single_cell, self._data, dtype=tf.float32,
sequence_length=self._length)
weight = tf.Variable(tf.truncated_normal([self.emb_dim, self.num_classes], stddev=0.01))
bias = tf.Variable(tf.constant(0.1, shape=[self.num_classes]))
self.output = output
probability = tf.matmul(self.last_relevant(output, self._length), weight) + bias
return probability
def last_relevant(self, output, length):
batch_size = tf.shape(output)[0]
max_len = int(output.get_shape()[1])
output_size = int(output.get_shape()[2])
index = tf.range(0, batch_size) * max_len + (length - 1)
flat = tf.reshape(output, [-1, output_size])
relevant = tf.gather(flat, index)
return relevant
@lazy_property
def cost_list(self):
prediction = self.probability
target = self._label
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=target)
return cross_entropy
@lazy_property
def cost(self):
cross_entropy = tf.reduce_mean(self.cost_list)
tf.summary.scalar('cross_entropy', cross_entropy)
return cross_entropy
@lazy_property
def optimize(self):
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = tf.train.AdamOptimizer().minimize(self.cost, global_step)
return train_op
@lazy_property
def calc_accuracy(self):
true_probability = tf.nn.softmax(self.probability)
correct_pred = tf.equal(tf.argmax(true_probability, 1), tf.argmax(self._label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('acc', accuracy)
return accuracy
@lazy_property
def pred_label(self):
true_probability = tf.nn.softmax(self.probability)
pred_output = tf.argmax(true_probability, 1)
label_output = tf.argmax(self._label, 1)
output_result = {
'pred': pred_output,
'label': label_output
}
return output_result
def build_graph(self):
self.optimize
self.calc_accuracy
self.pred_label
self.merged = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(self.output_dir + '/train', self.session.graph)
self.test_writer = tf.summary.FileWriter(self.output_dir + '/test')
self.saver = tf.train.Saver(tf.trainable_variables(),
max_to_keep=self.max_to_save)
tf.global_variables_initializer().run()
def train(self):
feed_dict = fill_feed_dict(self.datasets, self.batch_size, self.dropout,
self._data, self._label, self._length, self._keep_prob)
if self.run_count % self.summary_frequency == 0:
cost, acc, summary, _ = self.session.run(
[self.cost, self.calc_accuracy, self.merged, self.optimize],
feed_dict = feed_dict
)
self.train_writer.add_summary(summary, self.run_count)
print('[Batch %d][Epoch %d] cost: %.3f; accuracy: %.3f' % (self.run_count,
self.datasets._complete_epochs,
cost,
acc))
else:
self.session.run(self.optimize, feed_dict = feed_dict)
self.run_count += 1
def test(self):
total_result = {
'cost': [],
'pred': [],
'func_name': [],
'acc':[]
}
while self.datasets.test_tag:
feed_dict, func_name_list = fill_test_dict(self.datasets, self.batch_size, 'test', 1.0,
self._data, self._label, self._length, self._keep_prob)
cost_result, pred_result, acc = self.session.run(
[self.cost_list, self.pred_label, self.calc_accuracy],
feed_dict = feed_dict
)
print(acc)
total_result['cost'].append(cost_result)
total_result['pred'].append(pred_result)
total_result['func_name'].append(func_name_list)
total_result['acc'].append(acc)
return total_result
def training(config_info):
data_folder = config_info['data_folder']
func_path = config_info['func_path']
embed_path = config_info['embed_path']
tag = config_info['tag']
data_tag = config_info['data_tag']
process_num = int(config_info['process_num'])
embed_dim = int(config_info['embed_dim'])
max_length = int(config_info['max_length'])
num_classes = int(config_info['num_classes'])
epoch_num = int(config_info['epoch_num'])
save_batch_num = int(config_info['save_batchs'])
output_dir = config_info['output_dir']
embd_type = config_info['embedding_type']
'''create model & log folder'''
if os.path.exists(output_dir):
pass
else:
os.mkdir(output_dir)
model_basedir = os.path.join(output_dir, 'model')
if os.path.exists(model_basedir):
pass
else:
os.mkdir(model_basedir)
log_basedir = os.path.join(output_dir, 'log')
if tf.gfile.Exists(log_basedir):
tf.gfile.DeleteRecursively(log_basedir)
tf.gfile.MakeDirs(log_basedir)
config_info['log_path'] = log_basedir
print('Created all folders!')
'''load dataset'''
if data_tag == 'callee':
my_data = dataset.Dataset(data_folder, func_path, embed_path, process_num, embed_dim, max_length, num_classes, tag, embd_type)
else: #caller
my_data = dataset_caller.Dataset(data_folder, func_path, embed_path, process_num, embed_dim, max_length, num_classes, tag)
print('Created the dataset!')
with tf.Graph().as_default(), tf.Session() as session:
# generate placeholder
data_pl, label_pl, length_pl, keep_prob_pl = placeholder_inputs(num_classes, max_length, embed_dim)
# generate model
model = Model(session, my_data, config_info, data_pl, label_pl, length_pl, keep_prob_pl)
print('Created the model!')
while my_data._complete_epochs < epoch_num:
model.train()
if model.run_count % save_batch_num == 0:
model.saver.save(session, os.path.join(model_basedir, 'model'), global_step = model.run_count)
print('Saved the model ... %d' % model.run_count)
else:
pass
model.train_writer.close()
model.test_writer.close()
def get_model_id_list(folder_path):
file_list = os.listdir(folder_path)
model_id_set = set()
for file_name in file_list:
if file_name[:6] == 'model-':
model_id_set.add(int(file_name.split('.')[0].split('-')[-1]))
else:
pass
model_id_list = sorted(list(model_id_set))
return model_id_list
def testing(config_info):
data_folder = config_info['data_folder']
func_path = config_info['func_path']
embed_path = config_info['embed_path']
tag = config_info['tag']
data_tag = config_info['data_tag']
process_num = int(config_info['process_num'])
embed_dim = int(config_info['embed_dim'])
max_length = int(config_info['max_length'])
num_classes = int(config_info['num_classes'])
model_dir = config_info['model_dir']
output_dir = config_info['output_dir']
embd_type = config_info['embedding_type']
'''create model & log folder'''
log_basedir = os.path.join(output_dir, 'log')
if tf.gfile.Exists(log_basedir):
# tf.gfile.DeleteRecursively(log_basedir)
os.system("rm -rf "+log_basedir)
tf.gfile.MakeDirs(log_basedir)
config_info['log_path'] = log_basedir
if os.path.exists(output_dir):
pass
else:
os.mkdir(output_dir)
print('Created all folders!')
'''load dataset'''
if data_tag == 'callee':
my_data = dataset.Dataset(data_folder, func_path, embed_path, process_num, embed_dim, max_length, num_classes, tag, embd_type)
else: # caller
my_data = dataset_caller.Dataset(data_folder, func_path, embed_path, process_num, embed_dim, max_length, num_classes, tag)
print('Created the dataset!')
'''get model id list'''
# model_id_list = sorted(get_model_id_list(model_dir), reverse=True)
model_id_list = sorted(get_model_id_list(model_dir))
with tf.Graph().as_default(), tf.Session() as session:
# generate placeholder
data_pl, label_pl, length_pl, keep_prob_pl = placeholder_inputs(num_classes, max_length, embed_dim)
# generate model
model = Model(session, my_data, config_info, data_pl, label_pl, length_pl, keep_prob_pl)
print('Created the model!')
for model_id in model_id_list:
result_path = os.path.join(output_dir, 'test_result_%d.pkl' % model_id)
if os.path.exists(result_path):
continue
else:
pass
model_path = os.path.join(model_dir, 'model-%d' % model_id)
model.saver.restore(session, model_path)
total_result = model.test()
my_data._index_in_test = 0
my_data.test_tag = True
print(total_result['acc'])
with open(result_path, 'w') as f:
pickle.dump(total_result, f)
print('Save the test result !!! ... %s' % result_path)
def get_config():
'''
get config information from command line
'''
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--data_folder', dest='data_folder', help='The data folder of training dataset.', type=str, required=True)
parser.add_argument('-o', '--output_dir', dest='output_dir', help='The directory to saved the log information & models.', type=str, required=True)
parser.add_argument('-f', '--split_func_path', dest='func_path', help='The path of file saving the training & testing function names.', type=str, required=True)
parser.add_argument('-e', '--embed_path', dest='embed_path', help='The path of saved embedding vectors.', type=str, required=True)
parser.add_argument('-m', '--model_dir', dest='model_dir', help='The directory saved the models.', type=str, required=True)
parser.add_argument('-t', '--label_tag', dest='tag', help='The type of labels. Possible value: num_args, type#0, type#1, ...', type=str, required=False, default='num_args')
parser.add_argument('-dt', '--data_tag', dest='data_tag', help='The type of input data.', type=str, required=False, choices=['caller', 'callee'], default='callee')
parser.add_argument('-pn', '--process_num', dest='process_num', help='Number of processes.', type=int, required=False, default=40)
parser.add_argument('-ed', '--embedding_dim', dest='embed_dim', help='The dimension of embedding vector.', type=int, required=False, default=256)
parser.add_argument('-ml', '--max_length', dest='max_length', help='The maximum length of input sequences.', type=int, required=False, default=500)
parser.add_argument('-nc', '--num_classes', dest='num_classes', help='The number of classes', type=int, required=False, default=16)
parser.add_argument('-en', '--epoch_num', dest='epoch_num', help='The number of epoch.', type=int, required=False, default=20)
parser.add_argument('-s', '--save_frequency', dest='save_batchs', help='The frequency for saving the trained model.', type=int, required=False, default=100)
parser.add_argument('-do', '--dropout', dest='dropout', help='The dropout value.', type=float, required=False, default=0.8)
parser.add_argument('-nl', '--num_layers', dest='num_layers', help='Number of layers in RNN.', type=int, required=False, default=3)
parser.add_argument('-ms', '--max_to_save', dest='max_to_save', help='Maximum number of models saved in the directory.', type=int, required=False, default=100)
parser.add_argument('-b', '--batch_size', dest='batch_size', help='The size of batch.', type=int, required=False, default=256)
    parser.add_argument('-p', '--summary_frequency', dest='summary_frequency', help='The frequency of showing the accuracy & cost value.', type=int, required=False, default=20)
    # Restored flag: config_info below reads args.embedding_type, but no such
    # argument was defined; the flag name and 'w2v' default are assumptions
    # ('1hot' selects the one-hot branch in Model.probability).
    parser.add_argument('-et', '--embedding_type', dest='embedding_type', help='The type of instruction embedding. Possible value: w2v, 1hot', type=str, required=False, default='w2v')
args = parser.parse_args()
config_info = {
'data_folder': args.data_folder,
'output_dir': args.output_dir,
'func_path': args.func_path,
'embed_path': args.embed_path,
'tag': args.tag,
'model_dir': args.model_dir,
'data_tag': args.data_tag,
'process_num': args.process_num,
'embed_dim': args.embed_dim,
'max_length': args.max_length,
'num_classes': args.num_classes,
'epoch_num': args.epoch_num,
'save_batchs': args.save_batchs,
'dropout': args.dropout,
'num_layers': args.num_layers,
'max_to_save': args.max_to_save,
'batch_size': args.batch_size,
'summary_frequency': args.summary_frequency,
'embedding_type': args.embedding_type
}
return config_info
def main():
config_info = get_config()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
training(config_info)
testing(config_info)
if __name__ == '__main__':
main()
| 17,058 | 39.811005 | 176 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/data_loader.py | """
Here we implement a class for loading data.
"""
import torch
from torch.autograd import Variable
from vocab import *
from config import *
import numpy as np
import random
import re
np.random.seed(0)
class DataLoader:
EOS = 0 # to mean end of sentence
UNK = 1 # to mean unknown token
maxlen = MAXLEN
def __init__(self, text_file=None, sentences=None, word_dict=None):
if text_file:
sentences = []
for txt_file in text_file:
print("Loading text file at {}".format(txt_file))
with open(txt_file, "rt") as f:
text = f.readlines()
for i, line in enumerate(text):
if i % 2:
sentences.extend(line.strip().split(';'))
print("Making dictionary for these words")
word_dict = build_and_save_dictionary(sentences, source="data/instruction")
assert sentences and word_dict, "Please provide the file to extract from or give sentences and word_dict"
self.sentences = sentences
self.word_dict = word_dict
# print("Making reverse dictionary")
self.revmap = list(self.word_dict.items())
self.lengths = [len(sent) for sent in self.sentences]
def convert_sentence_to_indices(self, sentence):
sentence = re.split(',| ', sentence)
tokn_lst = []
for s in sentence:
tokn_lst.extend(re.split('([0-9A-Za-z@_.]+)', s))
tokn_lst = [t for t in tokn_lst if t]
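        # e.g. the (hypothetical) line "mov eax, 0x10" tokenizes here to
        # ['mov', 'eax', '0x10'] before dictionary lookup.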
indices = [
# assign an integer to each word, if the word is too rare assign unknown token
self.word_dict.get(w) if self.word_dict.get(w, VOCAB_SIZE + 1) < VOCAB_SIZE else self.UNK
for w in tokn_lst # split into words on spaces
][: self.maxlen - 1] # take only maxlen-1 words per sentence at the most.
# last words are EOS
indices += [self.EOS] * (self.maxlen - len(indices))
indices = np.array(indices)
indices = Variable(torch.from_numpy(indices))
return indices
def convert_indices_to_sentences(self, indices):
def convert_index_to_word(idx):
idx = idx.data.item()
if idx == 0:
return "EOS"
elif idx == 1:
return "UNK"
search_idx = idx - 2
if search_idx >= len(self.revmap):
return "NA"
word, idx_ = self.revmap[search_idx]
assert idx_ == idx
return word
words = [convert_index_to_word(idx) for idx in indices]
return " ".join(words)
def fetch_batch(self, batch_size):
first_index = random.randint(0, len(self.sentences) - batch_size)
batch = []
lengths = []
for i in range(first_index, first_index + batch_size):
sent = self.sentences[i]
ind = self.convert_sentence_to_indices(sent)
if USE_CUDA:
ind = ind.cuda(CUDA_DEVICE)
batch.append(ind)
lengths.append(min(len(sent.split()), MAXLEN))
batch = torch.stack(batch)
lengths = np.array(lengths)
return batch, lengths
| 3,294 | 30.682692 | 113 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/model.py | """
This file implements the Skip-Thought architecture.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from config import *
import math
import numpy as np
class Encoder(nn.Module):
thought_size = 128
word_size = 256
@staticmethod
def reverse_variable(var):
idx = [i for i in range(var.size(0) - 1, -1, -1)]
idx = Variable(torch.LongTensor(idx))
if USE_CUDA:
idx = idx.cuda(CUDA_DEVICE)
inverted_var = var.index_select(0, idx)
return inverted_var
def __init__(self):
super(Encoder, self).__init__()
# self.rnn = nn.LSTM(self.word_size, self.thought_size)
self.rnn = nn.GRU(self.word_size, self.thought_size, bidirectional=False)
def forward(self, embeddings):
# sentences = (batch_size, maxlen), with padding on the right.
# sentences = sentences.transpose(0, 1) # (maxlen, batch_size)
# word_embeddings = torch.tanh(self.word2embd(sentences)) # (maxlen, batch_size, word_size)
output, thoughts = self.rnn(embeddings)
# _, thoughts = self.rnn(embeddings)
return output, thoughts
class Attn(nn.Module):
def __init__(self, hidden_size):
super(Attn, self).__init__()
self.hidden_size = hidden_size
self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
self.v = nn.Parameter(torch.rand(hidden_size))
stdv = 1. / math.sqrt(self.v.size(0))
self.v.data.normal_(mean=0, std=stdv)
def forward(self, hidden, encoder_outputs):
'''
:param hidden:
previous hidden state of the decoder, in shape (layers*directions,B,H)
:param encoder_outputs:
encoder outputs from Encoder, in shape (T,B,H)
:return
attention energies in shape (B,T)
'''
max_len = encoder_outputs.size(0)
this_batch_size = encoder_outputs.size(1)
H = hidden.repeat(max_len,1,1).transpose(0,1)
encoder_outputs = encoder_outputs.transpose(0,1) # [B*T*H]
attn_energies = self.score(H,encoder_outputs) # compute attention score
return F.softmax(attn_energies, dim=1).unsqueeze(1) # normalize with softmax
def score(self, hidden, encoder_outputs):
        energy = torch.tanh(self.attn(torch.cat([hidden, encoder_outputs], 2))) # [B*T*2H]->[B*T*H]
energy = energy.transpose(2,1) # [B*H*T]
v = self.v.repeat(encoder_outputs.data.shape[0],1).unsqueeze(1) #[B*1*H]
energy = torch.bmm(v,energy) # [B*1*T]
return energy.squeeze(1) #[B*T]
class Decoder(nn.Module):
word_size = Encoder.word_size
def __init__(self, hidden_size, attention):
super(Decoder,self).__init__()
# self.rnn = nn.GRU(input_size= Encoder.word_size+Encoder.thought_size, hidden_size=Encoder.thought_size, bidirectional=False)
# self.worder = nn.Linear(Encoder.thought_size, VOCAB_SIZE)
self.attention = attention
if attention:
self.rnn = nn.GRU(input_size= Encoder.word_size+hidden_size, hidden_size=hidden_size, bidirectional=False)
self.worder = nn.Linear(hidden_size*2, VOCAB_SIZE)
self.attn = Attn(hidden_size)
else:
self.rnn = nn.GRU(input_size= Encoder.word_size, hidden_size=hidden_size, bidirectional=False)
self.worder = nn.Linear(hidden_size, VOCAB_SIZE)
def forward(self, thoughts, word_embedded, encoder_outputs, decoder_context):
word_embedded = word_embedded.view(1, encoder_outputs.size(1), Encoder.word_size)
# attn_weights = self.attn(thoughts, encoder_outputs)
# context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # (B,1,V)
# context = context.transpose(0, 1) # (1,B,V)
# rnn_input = torch.cat((word_embedded, context), 2)
# output, hidden = self.rnn(rnn_input, thoughts)
# word = F.log_softmax(self.worder(output), dim=2)
# word = word.transpose(0, 1).contiguous()
if self.attention:
rnn_input = torch.cat((word_embedded, decoder_context), 2)
output, hidden = self.rnn(rnn_input, thoughts)
attn_weights = self.attn(output.squeeze(0), encoder_outputs)
context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # (B,1,V)
context = context.transpose(0, 1) # (1,B,V)
word = F.log_softmax(self.worder(torch.cat([output,context],dim=2)), dim=2)
else:
output, hidden = self.rnn(word_embedded, thoughts)
word = F.log_softmax(self.worder(output), dim=2)
context = None
word = word.transpose(0, 1).contiguous()
return word, hidden, context
class UniSkip(nn.Module):
def __init__(self, model_type='skip-inst', attention=False):
super(UniSkip, self).__init__()
self.model_type = model_type
if self.model_type == 'skip-inst':
self.word2embd = nn.Embedding(VOCAB_SIZE, Encoder.word_size)
self.encoder = Encoder()
self.prev_decoder = Decoder(Encoder.thought_size, attention=attention)
self.next_decoder = Decoder(Encoder.thought_size, attention=attention)
elif self.model_type == 'cbo-inst':
self.word2embd = nn.Embedding(VOCAB_SIZE, Encoder.word_size)
self.encoder = Encoder()
self.decoder = Decoder(2*Encoder.thought_size, attention=attention)
else:
self.word2embd = nn.Embedding(VOCAB_SIZE, Encoder.word_size)
self.encoder = Encoder()
self.context_encoder = Encoder()
self.outputembd = nn.Embedding(VOCAB_SIZE, Encoder.word_size)
def forward(self, positive_samples, positive_context, negative_context):
sentences = positive_samples
if self.model_type == 'skip-inst':
sentences = sentences.transpose(0, 1) # (maxlen, batch_size)
word_embeddings = torch.tanh(self.word2embd(sentences))
output, thoughts = self.encoder(word_embeddings[:,1:-1,:])
prev_context = Variable(torch.zeros([1,sentences.size(1)-2,Encoder.word_size]))
next_context = Variable(torch.zeros([1,sentences.size(1)-2,Encoder.word_size]))
prev_decoder_context = Variable(torch.zeros([1,sentences.size(1)-2,Encoder.thought_size]))
next_decoder_context = Variable(torch.zeros([1,sentences.size(1)-2,Encoder.thought_size]))
prev_hidden = thoughts
next_hidden = thoughts
if USE_CUDA:
prev_context = prev_context.cuda(CUDA_DEVICE)
next_context = next_context.cuda(CUDA_DEVICE)
prev_decoder_context = prev_decoder_context.cuda(CUDA_DEVICE)
next_decoder_context = next_decoder_context.cuda(CUDA_DEVICE)
prev_word = []
next_word = []
for i in range(MAXLEN):
prev_context, prev_hidden, prev_decoder_context = self.prev_decoder(prev_hidden, prev_context, output, prev_decoder_context) # both = (batch-1, maxlen, VOCAB_SIZE)
next_context, next_hidden, next_decoder_context = self.next_decoder(next_hidden, next_context, output, next_decoder_context)
prev_word.append(prev_context)
next_word.append(next_context)
                prev_context = torch.tanh(self.word2embd(prev_context.max(2)[1]))
                next_context = torch.tanh(self.word2embd(next_context.max(2)[1]))
# print(prev_word.size())
prev_word = torch.cat(prev_word, dim=1)
next_word = torch.cat(next_word, dim=1)
# print(prev_word.size())
prev_loss = F.cross_entropy(prev_word.view(-1, VOCAB_SIZE), sentences.transpose(0, 1)[:-2, :].view(-1))
next_loss = F.cross_entropy(next_word.view(-1, VOCAB_SIZE), sentences.transpose(0, 1)[2:, :].view(-1))
loss = prev_loss + next_loss
return loss, sentences.transpose(0, 1), prev_word
elif self.model_type == 'cbo-inst':
sentences = sentences.transpose(0, 1) # (maxlen, batch_size)
word_embeddings = torch.tanh(self.word2embd(sentences))
prev_output, prev_thoughts = self.encoder(word_embeddings[:,:-2,:])
next_output, next_thoughts = self.encoder(word_embeddings[:,2:,:])
hidden = torch.cat([prev_thoughts, next_thoughts], dim=2)
encoder_outputs = torch.cat([prev_output, next_output],dim=2)
context = Variable(torch.zeros([1,sentences.size(1)-2,Encoder.word_size]))
decoder_context = Variable(torch.zeros([1,sentences.size(1)-2,Encoder.thought_size*2]))
if USE_CUDA:
context = context.cuda(CUDA_DEVICE)
decoder_context = decoder_context.cuda(CUDA_DEVICE)
word = []
for i in range(MAXLEN):
if i == 0:
embd = Variable(torch.zeros([1,sentences.size(1)-2,Encoder.word_size]))
if USE_CUDA:
embd = embd.cuda(CUDA_DEVICE)
else:
embd = torch.tanh(self.word2embd(sentences[i-1,1:-1]))
# print(embd.size())
# context, hidden, decoder_context = self.decoder(hidden, context, encoder_outputs, decoder_context)
context, _, decoder_context = self.decoder(hidden, embd, encoder_outputs, decoder_context)
word.append(context)
# context = torch.tanh(self.word2embd(context.max(2)[1]))
word = torch.cat(word, dim=1)
# print(word.size())
loss = F.cross_entropy(word.view(-1, VOCAB_SIZE), sentences.transpose(0,1)[1:-1, :].view(-1))
return loss, sentences.transpose(0,1), word
elif self.model_type == 'quick-thought':
# sentences = sentences.transpose(0, 1) # (maxlen, batch_size)
# samples = samples.transpose(0,1)
# batch_size = sentences.size(1)-1
# word_embeddings = torch.tanh(self.word2embd(sentences))
# sample_embeddings = torch.tanh(self.word2embd(samples))
# _, thoughts = self.encoder(word_embeddings)
# thoughts = thoughts.squeeze()
# _, sample_thoughts = self.encoder(sample_embeddings)
# sample_thoughts = sample_thoughts.squeeze()
# positive_samples = torch.sum(torch.mul(thoughts[:-1,:], thoughts[1:,:]),dim=1)
# negative_samples = torch.sum(torch.mul(thoughts[:-1,:], sample_thoughts[1:,:]), dim=1)
# positive_target = torch.ones(batch_size, device="cuda:0")
# negative_target = torch.zeros(batch_size, device="cuda:0")
# loss_sunc = nn.BCEWithLogitsLoss(reduction='mean')
# pos_loss = loss_sunc(positive_samples, positive_target)
# neg_los = loss_sunc(negative_samples, negative_target)
# loss = 0.7*pos_loss + 0.3*neg_los
positive_samples = positive_samples.transpose(0, 1)
positive_context = positive_context.transpose(0, 1)
negative_context = negative_context.transpose(0, 1)
batch_size = negative_context.size(1)
positive_emb = torch.tanh(self.word2embd(positive_samples))
positive_ctxt_emb = torch.tanh(self.outputembd(positive_context))
negative_ctxt_emb = torch.tanh(self.outputembd(negative_context))
_, pos_thought = self.encoder(positive_emb)
pos_thought = pos_thought.squeeze()
_, pos_ctxt_thought = self.context_encoder(positive_ctxt_emb)
pos_ctxt_thought = pos_ctxt_thought.squeeze()
_, neg_ctxt_thought = self.context_encoder(negative_ctxt_emb)
neg_ctxt_thought = neg_ctxt_thought.squeeze()
positive_samples = torch.sum(torch.mul(pos_thought, pos_ctxt_thought),dim=1)
negative_samples = torch.sum(torch.mul(pos_thought, neg_ctxt_thought),dim=1)
positive_target = torch.ones(batch_size, device="cuda:0")
negative_target = torch.zeros(batch_size, device="cuda:0")
            loss_func = nn.BCEWithLogitsLoss(reduction='mean')
            pos_loss = loss_func(positive_samples, positive_target)
            neg_loss = loss_func(negative_samples, negative_target)
            loss = pos_loss + neg_loss
return loss, None, None
# scores = torch.matmul(thoughts[:-1,:], torch.t(thoughts[1:,:]))
# scores[range(len(scores)), range(len(scores))] = torch.zeros(batch_size, device='cuda:0')
# targets_np = np.zeros((batch_size, batch_size))
# ctxt_sent_pos = [-1,1]
# for ctxt_pos in ctxt_sent_pos:
# targets_np += np.eye(batch_size, k=ctxt_pos)
# targets_np_sum = np.sum(targets_np, axis=1, keepdims=True)
# targets_np = targets_np/targets_np_sum
# targets = torch.tensor(targets_np, dtype=torch.float32, requires_grad=True, device='cuda:0')
# loss_sunc = nn.BCEWithLogitsLoss(reduce=True, reduction='mean')
| 13,287 | 42.710526 | 180 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/dataset.py | import pickle
import os
import numpy as np
import re
from multiprocessing import Pool
import instruction2vec
import eval_utils as utils
from collections import Counter
embed_info = {}
type_info = {
'char': 0,
'int': 1,
'float': 2,
'pointer': 3,
'enum': 4,
'struct': 5,
'union': 6
}
def approximate_type(type_str):
int_list = ['_Bool', 'unsigned int', 'int', 'long long int', 'long long unsigned int', 'unsigned short',
'short unsigned int', 'short', 'long unsigned int', 'short int', 'long int']
char_list = ['char', 'unsigned char', 'signed char']
if type_str[-1] == '*' or type_str == 'func_ptr' or type_str.split()[0][-1] == '*':
return 'pointer'
elif type_str in int_list:
return 'int'
elif type_str[:5] == 'enum ':
return 'enum'
elif type_str in char_list:
return 'char'
elif type_str[:7] == 'struct ':
return 'struct'
elif type_str[:6] == 'union ':
return 'union'
elif type_str == 'double' or type_str == 'long double':
return 'float'
else:
return type_str
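# Example mappings: approximate_type('unsigned char') -> 'char',
# approximate_type('struct stat') -> 'struct', approximate_type('char *') -> 'pointer'.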
def one_hot_encoding(label_id, class_num):
temp = np.zeros(class_num)
temp[label_id] = 1
return temp
def get_single_num_args(folder_path, file_name, func_list, embed_dim, max_length, class_num, embd_type, vocab=None):
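    # Encodes each function of one pickled binary into a fixed-size
    # (max_length x embed_dim) matrix of PalmTree instruction embeddings
    # (zero-padded or truncated) plus a one-hot label for its argument count.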
def parse_instruction(ins):
import re
ins = re.sub('\s+', ', ', ins, 1)
parts = ins.split(', ')
return ','.join(parts)
def parse_instruction_trans(ins):
import re
ins = re.sub('\s+', ', ', ins, 1)
parts = ins.split(', ')
operand = []
token_lst = []
if len(parts) > 1:
operand = parts[1:]
token_lst.append(parts[0])
for i in range(len(operand)):
symbols = re.split('([0-9A-Za-z]+)', operand[i])
symbols = [s.strip() for s in symbols if s != '']
token_lst.extend(symbols)
token_lst = [t for t in token_lst if t]
return ' '.join(token_lst)
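    # e.g. parse_instruction_trans("mov eax, [rax+0x1]") -> "mov eax [ rax + 0x1 ]"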
usable_encoder = utils.UsableTransformer(model_path="/path/to/palmtree/model", vocab_path="/path/to/palmtree/vocabulary")
file_path = os.path.join(folder_path, file_name)
extract_info = {}
    with open(file_path, 'rb') as f:
file_info = pickle.load(f)
for func_name in func_list:
func_tag = '%s#%s' % (file_name, func_name)
extract_info[func_tag] = {}
inst_bytes = file_info['functions'][func_name]['inst_bytes']
inst_strings = file_info['functions'][func_name]['inst_strings']
temp_data = []
inst_strings = [parse_instruction_trans(str(ins)) for ins in inst_strings]
if len(inst_strings) >= max_length:
inst_strings = inst_strings[:max_length]
temp_data = usable_encoder.encode(inst_strings)
if temp_data.shape[0] < max_length:
extract_info[func_tag]['length'] = temp_data.shape[0]
temp_zero = np.zeros((max_length - temp_data.shape[0], embed_dim))
temp_data = np.concatenate((temp_data, temp_zero), axis=0)
else:
extract_info[func_tag]['length'] = temp_data.shape[0]
extract_info[func_tag]['data'] = temp_data
extract_info[func_tag]['label'] = one_hot_encoding(file_info['functions'][func_name]['num_args'], class_num)
return extract_info
def get_single_args_type(folder_path, file_name, func_list, embed_dim, max_length, class_num, arg_no):
file_path = os.path.join(folder_path, file_name)
extract_info = {}
    with open(file_path, 'rb') as f:
file_info = pickle.load(f)
for func_name in func_list:
func_tag = '%s#%s' % (file_name, func_name)
extract_info[func_tag] = {}
inst_bytes = file_info['functions'][func_name]['inst_bytes']
temp_data = []
for inst in inst_bytes:
if str(inst) in embed_info:
temp_data.append(embed_info[str(inst)]['vector'])
else:
temp_data.append([0.0] * embed_dim)
if len(temp_data) >= max_length:
break
temp_data = np.asarray(temp_data)
if temp_data.shape[0] < max_length:
extract_info[func_tag]['length'] = temp_data.shape[0]
temp_zero = np.zeros((max_length - temp_data.shape[0], embed_dim))
temp_data = np.concatenate((temp_data, temp_zero), axis=0)
else:
extract_info[func_tag]['length'] = temp_data.shape[0]
extract_info[func_tag]['data'] = temp_data
temp_type = approximate_type(file_info['functions'][func_name]['args_type'][arg_no])
extract_info[func_tag]['label'] = one_hot_encoding(type_info[temp_type], class_num)
return extract_info
class Dataset(object):
def __init__(self, data_folder, func_path, embed_path, thread_num, embed_dim, max_length, class_num, tag, embd_type):
global embed_info
self.data_folder = data_folder
self.tag = tag #num_args or type#0
if self.tag == 'num_args':
pass
else:
self.arg_no = int(self.tag.split('#')[-1])
self.thread_num = thread_num
self.embed_dim = embed_dim
self.max_length = max_length
self.class_num = class_num
self.embd_type = embd_type
print(func_path)
        with open(func_path, 'rb') as f:
func_info = pickle.load(f)
self.train_func_list = np.asarray(func_info['train'])
self.train_num = len(self.train_func_list)
print('Loaded train function information ... %s' % func_path)
print('Train Function Number: %d' % self.train_num)
self.func_list = np.asarray(func_info['test'])
self.func_num = len(self.func_list)
        print('Loaded test function information ... %s' % func_path)
        print('Test Function Number: %d' % self.func_num)
        with open(embed_path, 'rb') as f:
embed_info = pickle.load(f)
print('Loaded embed information ... %s' % embed_path)
self.test_tag = True
self._index_in_epoch = 0
self._index_in_test = 0
self._complete_epochs = 0
# get vocabulary
if self.embd_type == '1hot':
counter = Counter()
binaries = os.listdir(self.data_folder)
for binary in binaries:
print(binary)
file_path = os.path.join(self.data_folder, binary)
                with open(file_path, 'rb') as f:
file_info = pickle.load(f)
print(len(file_info['functions']))
for func in file_info['functions'].values():
counter.update(func['inst_strings'])
self.vocabulary = sorted(counter, key=counter.get, reverse=True)[:5000]
self.vocabulary.append('UNK')
else:
self.vocabulary = None
def get_batch_data(self, batch_func_list):
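        # Sorting puts functions from the same binary next to each other; each
        # contiguous group is dispatched to the pool as one task, so every
        # pickled binary file is opened only once per batch.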
func_list = sorted(batch_func_list)
binary_name = ''
input_func_list = []
batch_info = {}
pool = Pool(self.thread_num)
if self.tag == 'num_args':
for whole_func_name in func_list:
if binary_name == '':
binary_name = whole_func_name.split('#')[0]
input_func_list.append(whole_func_name.split('#')[1])
else:
if binary_name == whole_func_name.split('#')[0]:
input_func_list.append(whole_func_name.split('#')[1])
else:
pool.apply_async(
get_single_num_args,
args= (self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num, self.embd_type, self.vocabulary),
callback= batch_info.update
)
binary_name = whole_func_name.split('#')[0]
input_func_list = [whole_func_name.split('#')[1]]
if len(input_func_list) == 0:
pass
else:
pool.apply_async(
get_single_num_args,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num, self.embd_type, self.vocabulary),
callback=batch_info.update
)
else: #self.tag == 'type#0'
for whole_func_name in func_list:
if binary_name == '':
binary_name = whole_func_name.split('#')[0]
input_func_list.append(whole_func_name.split('#')[1])
else:
if binary_name == whole_func_name.split('#')[0]:
input_func_list.append(whole_func_name.split('#')[1])
else:
pool.apply_async(
get_single_args_type,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length,
self.class_num, self.arg_no),
callback= batch_info.update
)
binary_name = whole_func_name.split('#')[0]
input_func_list = [whole_func_name.split('#')[1]]
if len(input_func_list) == 0:
pass
else:
pool.apply_async(
get_single_args_type,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num, self.arg_no),
callback=batch_info.update
)
pool.close()
pool.join()
new_batch_data = {
'data': [],
'label': [],
'length': [],
'func_name':[]
}
for full_func_name in batch_info:
new_batch_data['data'].append(batch_info[full_func_name]['data'])
new_batch_data['label'].append(batch_info[full_func_name]['label'])
new_batch_data['length'].append(batch_info[full_func_name]['length'])
new_batch_data['func_name'].append(full_func_name)
batch_info = {
'data': np.asarray(new_batch_data['data'], dtype=np.float32),
'label': np.asarray(new_batch_data['label'], dtype=np.float32),
'length': np.asarray(new_batch_data['length'], dtype=np.float32),
'func_name': np.asarray(new_batch_data['func_name'])
}
return batch_info
def get_batch(self, batch_size):
start = self._index_in_epoch
# shuffle for the first round
if self._complete_epochs == 0 and self._index_in_epoch == 0:
perm0 = np.arange(self.train_num)
np.random.shuffle(perm0)
self.train_func_list = self.train_func_list[perm0]
# go to the next epoch
if start + batch_size > self.train_num:
self._complete_epochs += 1
rest_example_num = self.train_num - start
rest_func_list = self.train_func_list[start:self.train_num]
# shuffle for the new epoch
perm = np.arange(self.train_num)
np.random.shuffle(perm)
self.train_func_list = self.train_func_list[perm]
# start a new epoch
start = 0
self._index_in_epoch = batch_size - rest_example_num
end = self._index_in_epoch
new_func_list = self.train_func_list[start:end]
func_list_batch = np.concatenate((rest_func_list, new_func_list), axis=0)
train_batch = self.get_batch_data(func_list_batch)
return train_batch
else: # process current epoch
self._index_in_epoch += batch_size
end = self._index_in_epoch
func_list_batch = self.train_func_list[start:end]
train_batch = self.get_batch_data(func_list_batch)
return train_batch
def get_test_batch(self, batch_size):
start = self._index_in_test
if start + batch_size >= self.func_num:
self.test_tag = False
func_list_batch = self.func_list[start:]
test_batch = self.get_batch_data(func_list_batch)
return test_batch
else:
self._index_in_test += batch_size
end = self._index_in_test
func_list_batch = self.func_list[start: end]
test_batch = self.get_batch_data(func_list_batch)
return test_batch
| 12,586 | 40.268852 | 165 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/vocab.py | """
This code has been taken and modified from https://github.com/ryankiros/skip-thoughts
Constructing and loading dictionaries
"""
import pickle as pkl
from collections import OrderedDict
import argparse
import re
def build_dictionary(text):
"""
Build a dictionary
text: list of sentences (pre-tokenized)
"""
wordcount = {}
for cc in text:
words = cc.split(',')
tokn_lst = []
for s in words:
tokn_lst.extend(re.split('([0-9A-Za-z@_.]+)', s))
tokn_lst = [t for t in tokn_lst if t]
for w in tokn_lst:
if w not in wordcount:
wordcount[w] = 0
wordcount[w] += 1
sorted_words = sorted(list(wordcount.keys()), key=lambda x: wordcount[x], reverse=True)
worddict = OrderedDict()
for idx, word in enumerate(sorted_words):
worddict[word] = idx + 2 # 0: <eos>, 1: <unk>
return worddict, wordcount
def load_dictionary(loc='./data/book_dictionary_large.pkl'):
"""
Load a dictionary
"""
with open(loc, 'rb') as f:
worddict = pkl.load(f)
return worddict
def save_dictionary(worddict, wordcount, loc='./data/book_dictionary_large.pkl'):
"""
Save a dictionary to the specified location
"""
with open(loc, 'wb') as f:
pkl.dump(worddict, f, protocol=2)
pkl.dump(wordcount, f)
def build_and_save_dictionary(text, source):
save_loc = source+".pkl"
try:
cached = load_dictionary(save_loc)
print("Using cached dictionary at {}".format(save_loc))
return cached
except:
pass
# build again and save
print("unable to load from cached, building fresh")
worddict, wordcount = build_dictionary(text)
print("Got {} unique words".format(len(worddict)))
print("Saveing dictionary at {}".format(save_loc))
save_dictionary(worddict, wordcount, save_loc)
return worddict
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("text_file", type=str)
args = parser.parse_args()
print("Extracting text from {}".format(args.text_file))
text = open(args.text_file, "rt").readlines()
print("Extracting dictionary..")
worddict, wordcount = build_dictionary(text)
out_file = args.text_file+".pkl"
print("Got {} unique words. Saving to file {}".format(len(worddict), out_file))
save_dictionary(worddict, wordcount, out_file)
print("Done.")
| 2,454 | 27.218391 | 91 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/config.py | """
Configuration file.
"""
VOCAB_SIZE = 5000
USE_CUDA = False
DEVICES = [0]
CUDA_DEVICE = DEVICES[0]
VERSION = 1
MAXLEN = 10
LEARNING_RATE=1e-5
| 147 | 10.384615 | 24 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/transformer.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.autograd import Variable
from config import *
import numpy as np
import bert_pytorch
from bert_pytorch import dataset
from bert_pytorch import trainer
import pickle as pkl
vocab_path = "data/test_vocab"
train_dataset = "data/training/dfg/temp.txt"
test_dataset = "data/training/dfg/temp.txt"
output_path = "saved_models/transformer"
with open(train_dataset, "r", encoding="utf-8") as f:
vocab = dataset.WordVocab(f, max_size=VOCAB_SIZE, min_freq=1)
print("VOCAB SIZE:", len(vocab))
vocab.save_vocab(vocab_path)
print("Loading Vocab", vocab_path)
vocab = dataset.WordVocab.load_vocab(vocab_path)
print("Vocab Size: ", len(vocab))
print(vocab.itos)
print("Loading Train Dataset", train_dataset)
train_dataset = dataset.BERTDataset(train_dataset, vocab, seq_len=MAXLEN,
corpus_lines=None, on_memory=True)
print("Loading Test Dataset", test_dataset)
test_dataset = bert_pytorch.dataset.BERTDataset(test_dataset, vocab, seq_len=MAXLEN, on_memory=True) \
if test_dataset is not None else None
print("Creating Dataloader")
train_data_loader = DataLoader(train_dataset, batch_size=8, num_workers=4)
test_data_loader = DataLoader(test_dataset, batch_size=64, num_workers=4) \
if test_dataset is not None else None
print("Building BERT model")
bert = bert_pytorch.BERT(len(vocab), hidden=128, n_layers=3, attn_heads=3, dropout=0.0)
print("Creating BERT Trainer")
trainer = trainer.BERTTrainer(bert, len(vocab), train_dataloader=train_data_loader, test_dataloader=test_data_loader,
lr=1e-5, betas=(0.9, 0.999), weight_decay=0,
with_cuda=True, cuda_devices=CUDA_DEVICE, log_freq=10)
print("Training Start")
for epoch in range(20):
trainer.train(epoch)
trainer.save(epoch, output_path)
if test_data_loader is not None:
trainer.test(epoch) | 1,971 | 31.866667 | 117 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/dataset_caller.py | import pickle
import os
import numpy as np
from multiprocessing import Pool
embed_info = {}
type_info = {
'char': 0,
'int': 1,
'float': 2,
'pointer': 3,
'enum': 4,
'struct': 5,
'union': 6
}
def approximate_type(type_str):
int_list = ['_Bool', 'unsigned int', 'int', 'long long int', 'long long unsigned int', 'unsigned short',
'short unsigned int', 'short', 'long unsigned int', 'short int', 'long int']
char_list = ['char', 'unsigned char', 'signed char']
if type_str[-1] == '*' or type_str == 'func_ptr' or type_str.split()[0][-1] == '*':
return 'pointer'
elif type_str in int_list:
return 'int'
elif type_str[:5] == 'enum ':
return 'enum'
elif type_str in char_list:
return 'char'
elif type_str[:7] == 'struct ':
return 'struct'
elif type_str[:6] == 'union ':
return 'union'
elif type_str == 'double' or type_str == 'long double':
return 'float'
else:
return type_str
def one_hot_encoding(label_id, class_num):
temp = np.zeros(class_num)
temp[label_id] = 1
return temp
def get_single_num_args(folder_path, file_name, func_list, embed_dim, max_length, class_num):
file_path = os.path.join(folder_path, file_name)
extract_info = {}
    with open(file_path, 'rb') as f:
file_info = pickle.load(f)
for whole_func_name in func_list:
'''callee_name#caller_name#indice'''
temp = whole_func_name.split('#')
callee_name = temp[0]
caller_name = temp[1]
indice = int(temp[2])
func_tag = '%s#%s' % (file_name, whole_func_name)
extract_info[func_tag] = {}
# inst_bytes = file_info['functions'][caller_name]['inst_bytes'][:indice]
temp_data = []
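        # Walk backwards from the call site: the instruction nearest the call
        # is embedded first, up to max_length instructions.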
indice_list = sorted(range(indice), reverse=True)
for indice_id in indice_list:
inst = file_info['functions'][caller_name]['inst_bytes'][indice_id]
if str(inst) in embed_info:
temp_data.append(embed_info[str(inst)]['vector'])
else:
temp_data.append([0.0] * embed_dim)
if len(temp_data) >= max_length:
break
temp_data = np.asarray(temp_data)
if temp_data.shape[0] < max_length:
extract_info[func_tag]['length'] = temp_data.shape[0]
temp_zero = np.zeros((max_length - temp_data.shape[0], embed_dim))
temp_data = np.concatenate((temp_data, temp_zero), axis=0)
else:
extract_info[func_tag]['length'] = temp_data.shape[0]
extract_info[func_tag]['data'] = temp_data
extract_info[func_tag]['label'] = one_hot_encoding(file_info['functions'][callee_name]['num_args'], class_num)
return extract_info
def get_single_args_type(folder_path, file_name, func_list, embed_dim, max_length, class_num, arg_no):
file_path = os.path.join(folder_path, file_name)
extract_info = {}
    with open(file_path, 'rb') as f:
file_info = pickle.load(f)
for whole_func_name in func_list:
'''callee_name#caller_name#indice'''
temp = whole_func_name.split('#')
callee_name = temp[0]
caller_name = temp[1]
indice = int(temp[2])
func_tag = '%s#%s' % (file_name, whole_func_name)
extract_info[func_tag] = {}
# inst_bytes = file_info['functions'][caller_name]['inst_bytes'][:indice]
temp_data = []
indice_list = sorted(range(indice), reverse=True)
for indice_id in indice_list:
inst = file_info['functions'][caller_name]['inst_bytes'][indice_id]
if str(inst) in embed_info:
temp_data.append(embed_info[str(inst)]['vector'])
else:
temp_data.append([0.0] * embed_dim)
if len(temp_data) >= max_length:
break
temp_data = np.asarray(temp_data)
if temp_data.shape[0] < max_length:
extract_info[func_tag]['length'] = temp_data.shape[0]
temp_zero = np.zeros((max_length - temp_data.shape[0], embed_dim))
temp_data = np.concatenate((temp_data, temp_zero), axis=0)
else:
extract_info[func_tag]['length'] = temp_data.shape[0]
extract_info[func_tag]['data'] = temp_data
temp_type = approximate_type(file_info['functions'][callee_name]['args_type'][arg_no])
extract_info[func_tag]['label'] = one_hot_encoding(type_info[temp_type], class_num)
return extract_info
class Dataset(object):
def __init__(self, data_folder, func_path, embed_path, thread_num, embed_dim, max_length, class_num, tag):
global embed_info
self.data_folder = data_folder
self.tag = tag #num_args or type#0
if self.tag == 'num_args':
pass
else:
self.arg_no = int(self.tag.split('#')[-1])
self.thread_num = thread_num
self.embed_dim = embed_dim
self.max_length = max_length
self.class_num = class_num
        with open(func_path, 'rb') as f:
func_info = pickle.load(f)
self.train_func_list = np.asarray(func_info['train'])
self.train_num = len(self.train_func_list)
print('Loaded train function information ... %s' % func_path)
print('Train Function Number: %d' % self.train_num)
        with open(embed_path, 'rb') as f:
embed_info = pickle.load(f)
print('Loaded embed information ... %s' % embed_path)
self._index_in_epoch = 0
self._complete_epochs = 0
def get_batch_data(self, batch_func_list):
func_list = sorted(batch_func_list)
binary_name = ''
input_func_list = []
batch_info = {}
pool = Pool(self.thread_num)
if self.tag == 'num_args':
for whole_func_name in func_list:
if binary_name == '':
binary_name = whole_func_name.split('#')[0]
input_func_list.append('#'.join(whole_func_name.split('#')[1:]))
else:
if binary_name == whole_func_name.split('#')[0]:
input_func_list.append('#'.join(whole_func_name.split('#')[1:]))
else:
pool.apply_async(
get_single_num_args,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length,
self.class_num),
callback=batch_info.update
)
binary_name = whole_func_name.split('#')[0]
input_func_list = ['#'.join(whole_func_name.split('#')[1:])]
if len(input_func_list) == 0:
pass
else:
pool.apply_async(
get_single_num_args,
args=(
self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num),
callback=batch_info.update
)
else: # self.tag == 'type#0'
for whole_func_name in func_list:
if binary_name == '':
binary_name = whole_func_name.split('#')[0]
input_func_list.append('#'.join(whole_func_name.split('#')[1:]))
else:
if binary_name == whole_func_name.split('#')[0]:
input_func_list.append('#'.join(whole_func_name.split('#')[1:]))
else:
pool.apply_async(
get_single_args_type,
args=(self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length,
self.class_num, self.arg_no),
callback=batch_info.update
)
binary_name = whole_func_name.split('#')[0]
input_func_list = ['#'.join(whole_func_name.split('#')[1:])]
if len(input_func_list) == 0:
pass
else:
pool.apply_async(
get_single_args_type,
args=(
self.data_folder, binary_name, input_func_list, self.embed_dim, self.max_length, self.class_num,
self.arg_no),
callback=batch_info.update
)
pool.close()
pool.join()
new_batch_data = {
'data': [],
'label': [],
'length': []
}
for full_func_name in batch_info:
new_batch_data['data'].append(batch_info[full_func_name]['data'])
new_batch_data['label'].append(batch_info[full_func_name]['label'])
new_batch_data['length'].append(batch_info[full_func_name]['length'])
batch_info = {
'data': np.asarray(new_batch_data['data'], dtype=np.float32),
'label': np.asarray(new_batch_data['label'], dtype=np.float32),
'length': np.asarray(new_batch_data['length'], dtype=np.float32)
}
return batch_info
def get_batch(self, batch_size):
start = self._index_in_epoch
# shuffle for the first round
if self._complete_epochs == 0 and self._index_in_epoch == 0:
perm0 = np.arange(self.train_num)
np.random.shuffle(perm0)
self.train_func_list = self.train_func_list[perm0]
# go to the next epoch
if start + batch_size > self.train_num:
self._complete_epochs += 1
rest_example_num = self.train_num - start
rest_func_list = self.train_func_list[start:self.train_num]
# shuffle for the new epoch
perm = np.arange(self.train_num)
np.random.shuffle(perm)
self.train_func_list = self.train_func_list[perm]
# start a new epoch
start = 0
self._index_in_epoch = batch_size - rest_example_num
end = self._index_in_epoch
new_func_list = self.train_func_list[start:end]
func_list_batch = np.concatenate((rest_func_list, new_func_list), axis=0)
train_batch = self.get_batch_data(func_list_batch)
return train_batch
else: # process current epoch
self._index_in_epoch += batch_size
end = self._index_in_epoch
func_list_batch = self.train_func_list[start:end]
train_batch = self.get_batch_data(func_list_batch)
print(train_batch)
return train_batch | 10,646 | 40.589844 | 118 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/train.py | import os
try:
os.chdir(os.path.join(os.getcwd(), 'src/skip-thoughts'))
print(os.getcwd())
except:
pass
import torch
from torch import nn
from torch.autograd import Variable
import re
import pickle
import random
import numpy as np
from data_loader import DataLoader
from model import UniSkip
from config import *
from datetime import datetime, timedelta
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders import str2enc
from onmt.model_builder import *
from onmt.decoders import str2dec
from onmt.modules import Embeddings, VecEmbedding, CopyGenerator
from onmt.modules.util_class import Cast
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
from onmt.utils.parse import ArgumentParser
from sklearn.metrics import pairwise_distances
data_list = ['data/training/dfg/dfg-seq' + str(i) + '.txt' for i in range(200)]
d = DataLoader(data_list)
mod = UniSkip(model_type='quick-thought', attention=False)
if USE_CUDA:
mod.cuda(CUDA_DEVICE)
lr = 1e-5
optimizer = torch.optim.Adam(params=mod.parameters(), lr=lr)
loss_trail = []
last_best_loss = None
current_time = datetime.utcnow()
# def fix_key(s):
# s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
# r'\1.layer_norm\2.bias', s)
# s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
# r'\1.layer_norm\2.weight', s)
# return s
def debug(i, loss):
global loss_trail
global last_best_loss
global current_time
this_loss = loss.data.item()
loss_trail.append(this_loss)
loss_trail = loss_trail[-20:]
new_current_time = datetime.utcnow()
time_elapsed = str(new_current_time - current_time)
current_time = new_current_time
print("Iteration {}: time = {} last_best_loss = {}, this_loss = {}".format(
i, time_elapsed, last_best_loss, this_loss))
# for i in range(3,12):
# _, pred_ids = pred[i+1].max(1)
# print("current = {}\npred = {}".format(
# d.convert_indices_to_sentences(prev[i]),
# d.convert_indices_to_sentences(pred_ids)
# ))
# print("=============================================")
try:
trail_loss = sum(loss_trail)/len(loss_trail)
if last_best_loss is None or last_best_loss > trail_loss:
print("Loss improved from {} to {}".format(last_best_loss, trail_loss))
save_loc = "./saved_models/skip-best".format(lr, VOCAB_SIZE)
print("saving model at {}".format(save_loc))
torch.save(mod.state_dict(), save_loc)
last_best_loss = trail_loss
#save embeddings:
except Exception as e:
print("Couldn't save model because {}".format(e))
print("Starting training...")
for i in range(0, 400000):
positive_samples, positive_context, negative_context = d.fetch_batch_w_neg_sampling(128)
loss, prev, pred = mod(positive_samples, positive_context, negative_context)
if i % 2000 == 0:
debug(i, loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
| 3,112 | 27.824074 | 92 | py |
PalmTree | PalmTree-master/src/extrinsic_evaluation/EKLAVYA/code/RNN/train/eval_utils.py | # from model import UniSkip, Encoder
from data_loader import DataLoader
from vocab import load_dictionary
from config import *
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch
import re
import numpy as np
import pickle
class UsableTransformer:
# @profile
def __init__(self, model_path, vocab_path):
with open(vocab_path, "rb") as f:
self.vocab = pickle.load(f)
# self.vocab = dataset.WordVocab.load_vocab(vocab_path)
self.model = torch.load(model_path)
if USE_CUDA:
self.model.cuda(CUDA_DEVICE)
# @profile
def encode(self, text, numpy=True):
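        # Map each instruction string to a fixed-length (20) token-id sequence
        # with a matching segment mask, run the transformer, and mean-pool over
        # the token dimension to obtain one vector per instruction.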
segment_label = []
sequence = []
for t in text:
l = len(t.split(' ')) * [1]
s = self.vocab.to_seq(t)
if len(l) > 20:
segment_label.append(l[:20])
else:
segment_label.append(l + [0]*(20-len(l)))
if len(s) > 20:
sequence.append(s[:20])
else:
sequence.append(s + [0]*(20-len(s)))
segment_label = torch.LongTensor(segment_label)
sequence = torch.LongTensor(sequence)
if USE_CUDA:
sequence = sequence.cuda(CUDA_DEVICE)
segment_label = segment_label.cuda(CUDA_DEVICE)
encoded = self.model.forward(sequence, segment_label)
result = torch.mean(encoded.detach(), dim=1)
del encoded
if USE_CUDA:
if numpy:
return result.data.cpu().numpy()
else:
return result.to('cpu')
else:
if numpy:
return result.data.numpy()
else:
return result
| 1,766 | 29.465517 | 63 | py |
PalmTree | PalmTree-master/src/data_generator/dataflow_gen.py | from binaryninja import *
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from itertools import product
from sklearn.decomposition import PCA
import random
import os
import re
from tqdm import tqdm
import pickle
from collections import Counter
from memory_profiler import profile
import gc
def parse_instruction(ins, symbol_map, string_map):
ins = re.sub('\s+', ', ', ins, 1)
parts = ins.split(', ')
operand = []
if len(parts) > 1:
operand = parts[1:]
for i in range(len(operand)):
symbols = re.split('([0-9A-Za-z]+)', operand[i])
for j in range(len(symbols)):
if symbols[j][:2] == '0x' and len(symbols[j]) >= 6:
if int(symbols[j], 16) in symbol_map:
symbols[j] = "symbol" # function names
elif int(symbols[j], 16) in string_map:
symbols[j] = "string" # constant strings
else:
symbols[j] = "address" # addresses
operand[i] = ' '.join(symbols)
opcode = parts[0]
return ' '.join([opcode]+operand)
def random_walk(g,length, symbol_map, string_map):
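    # From every real node (the synthetic entry node -1 is skipped), follow
    # successor edges at random for up to `length` steps and emit the
    # normalized instruction text of each visited node.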
sequence = []
for n in g:
        if n != -1 and g.nodes[n]['text'] is not None:
s = []
l = 0
            s.append(parse_instruction(g.nodes[n]['text'], symbol_map, string_map))
cur = n
while l < length:
nbs = list(g.successors(cur))
if len(nbs):
cur = random.choice(nbs)
                    s.append(parse_instruction(g.nodes[cur]['text'], symbol_map, string_map))
l += 1
else:
break
sequence.append(s)
return sequence
def process_file(f):
symbol_map = {}
string_map = {}
print(f)
bv = BinaryViewType.get_view_of_file(f)
# encode strings
for sym in bv.get_symbols():
symbol_map[sym.address] = sym.full_name
for string in bv.get_strings():
string_map[string.start] = string.value
function_graphs = {}
for func in bv.functions:
G = nx.DiGraph()
G.add_node(-1, text='entry_point')
line = 0
label_dict = {}
label_dict[-1] = 'entry_point'
for block in func.mlil:
for ins in block:
G.add_node(ins.address, text=bv.get_disassembly(ins.address))
label_dict[ins.address] = bv.get_disassembly(ins.address)
depd = []
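                # Def-use edges: link the definition site of every variable this
                # instruction reads to this instruction, and this instruction to
                # every later use of the variables it writes.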
for var in ins.vars_read:
                    depd += [(func.mlil[i].address, ins.address)
                             for i in func.mlil.get_var_definitions(var)
                             if func.mlil[i].address != ins.address]
for var in ins.vars_written:
depd += [(ins.address, func.mlil[i].address)
for i in func.mlil.get_var_uses(var)
if func.mlil[i].address != ins.address]
if depd:
G.add_edges_from(depd)
for node in G.nodes:
if not G.in_degree(node):
G.add_edge(-1, node)
if len(G.nodes) > 2:
function_graphs[func.name] = G
with open('dfg_train.txt', 'a') as w:
for name, graph in function_graphs.items():
sequence = random_walk(graph, 40, symbol_map, string_map)
for s in sequence:
if len(s) >= 2:
for idx in range(1, len(s)):
w.write(s[idx-1] +'\t' + s[idx] + '\n')
gc.collect()
def process_string(f):
str_lst = []
bv = BinaryViewType.get_view_of_file(f)
for sym in bv.get_symbols():
str_lst.extend(re.findall('([0-9A-Za-z]+)', sym.full_name))
return str_lst
def main():
bin_folder = '/path/to/binaries'
file_lst = []
str_counter = Counter()
for parent, subdirs, files in os.walk(bin_folder):
if files:
for f in files:
file_lst.append(os.path.join(parent,f))
for f in tqdm(file_lst):
process_file(f)
if __name__ == "__main__":
main()
| 4,212 | 29.092857 | 92 | py |
PalmTree | PalmTree-master/src/data_generator/control_flow_gen.py | from binaryninja import *
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import CountVectorizer
from itertools import product
from sklearn.decomposition import PCA
from collections import Counter
import random
import os
import re
import pickle
import math
def parse_instruction(ins, symbol_map, string_map):
ins = re.sub('\s+', ', ', ins, 1)
parts = ins.split(', ')
operand = []
if len(parts) > 1:
operand = parts[1:]
for i in range(len(operand)):
symbols = re.split('([0-9A-Za-z]+)', operand[i])
for j in range(len(symbols)):
if symbols[j][:2] == '0x' and len(symbols[j]) >= 6:
if int(symbols[j], 16) in symbol_map:
symbols[j] = "symbol"
elif int(symbols[j], 16) in string_map:
symbols[j] = "string"
else:
symbols[j] = "address"
operand[i] = ' '.join(symbols)
opcode = parts[0]
return ' '.join([opcode]+operand)
def random_walk(g,length, symbol_map, string_map):
sequence = []
for n in g:
        if n != -1 and 'text' in g.nodes[n]:
            s = []
            l = 0
            s.append(parse_instruction(g.nodes[n]['text'], symbol_map, string_map))
            cur = n
            while l < length:
                nbs = list(g.successors(cur))
                if len(nbs):
                    cur = random.choice(nbs)
                    if 'text' in g.nodes[cur]:
                        s.append(parse_instruction(g.nodes[cur]['text'], symbol_map, string_map))
l += 1
else:
break
else:
break
sequence.append(s)
if len(sequence) > 5000:
print("early stop")
return sequence[:5000]
return sequence
def process_file(f, window_size):
symbol_map = {}
string_map = {}
print(f)
bv = BinaryViewType.get_view_of_file(f)
for sym in bv.get_symbols():
symbol_map[sym.address] = sym.full_name
for string in bv.get_strings():
string_map[string.start] = string.value
function_graphs = {}
for func in bv.functions:
G = nx.DiGraph()
label_dict = {}
add_map = {}
for block in func:
# print(block.disassembly_text)
curr = block.start
predecessor = curr
for inst in block:
label_dict[curr] = bv.get_disassembly(curr)
G.add_node(curr, text=bv.get_disassembly(curr))
if curr != block.start:
G.add_edge(predecessor, curr)
predecessor = curr
curr += inst[1]
for edge in block.outgoing_edges:
G.add_edge(predecessor, edge.target.start)
if len(G.nodes) > 2:
function_graphs[func.name] = G
with open('cfg_train.txt', 'a') as w:
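        # Each output line is a tab-separated instruction pair drawn from the
        # same control-flow walk, at most `window_size` hops apart.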
for name, graph in function_graphs.items():
sequence = random_walk(graph, 40, symbol_map, string_map)
for s in sequence:
if len(s) >= 4:
for idx in range(0, len(s)):
for i in range(1, window_size+1):
if idx - i > 0:
w.write(s[idx-i] +'\t' + s[idx] + '\n')
if idx + i < len(s):
w.write(s[idx] +'\t' + s[idx+i] + '\n')
# gc.collect()
def main():
bin_folder = '/path/to/binaries'
file_lst = []
str_counter = Counter()
    window_size = 1
for parent, subdirs, files in os.walk(bin_folder):
if files:
for f in files:
file_lst.append(os.path.join(parent,f))
i=0
for f in file_lst:
print(i,'/', len(file_lst))
process_file(f, window_size)
i+=1
if __name__ == "__main__":
main() | 3,984 | 31.663934 | 96 | py |
PalmTree | PalmTree-master/pre-trained_model/vocab.py | import pickle
import tqdm
from collections import Counter
class TorchVocab(object):
"""Defines a vocabulary object that will be used to numericalize a field.
Attributes:
freqs: A collections.Counter object holding the frequencies of tokens
in the data used to build the Vocab.
stoi: A collections.defaultdict instance mapping token strings to
numerical identifiers.
itos: A list of token strings indexed by their numerical identifiers.
"""
def __init__(self, counter, max_size=None, min_freq=1, specials=['<pad>', '<oov>'],
vectors=None, unk_init=None, vectors_cache=None):
"""Create a Vocab object from a collections.Counter.
Arguments:
counter: collections.Counter object holding the frequencies of
each value found in the data.
max_size: The maximum size of the vocabulary, or None for no
maximum. Default: None.
min_freq: The minimum frequency needed to include a token in the
vocabulary. Values less than 1 will be set to 1. Default: 1.
specials: The list of special tokens (e.g., padding or eos) that
will be prepended to the vocabulary in addition to an <unk>
token. Default: ['<pad>']
vectors: One of either the available pretrained vectors
or custom pretrained vectors (see Vocab.load_vectors);
or a list of aforementioned vectors
unk_init (callback): by default, initialize out-of-vocabulary word vectors
to zero vectors; can be any function that takes in a Tensor and
returns a Tensor of the same size. Default: torch.Tensor.zero_
vectors_cache: directory for cached vectors. Default: '.vector_cache'
"""
self.freqs = counter
counter = counter.copy()
min_freq = max(min_freq, 1)
self.itos = list(specials)
# frequencies of special tokens are not counted when building vocabulary
# in frequency order
for tok in specials:
del counter[tok]
max_size = None if max_size is None else max_size + len(self.itos)
# sort by frequency, then alphabetically
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
for word, freq in words_and_frequencies:
if freq < min_freq or len(self.itos) == max_size:
break
self.itos.append(word)
# stoi is simply a reverse dict for itos
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
self.vectors = None
if vectors is not None:
self.load_vectors(vectors, unk_init=unk_init, cache=vectors_cache)
else:
assert unk_init is None and vectors_cache is None
def __eq__(self, other):
if self.freqs != other.freqs:
return False
if self.stoi != other.stoi:
return False
if self.itos != other.itos:
return False
if self.vectors != other.vectors:
return False
return True
def __len__(self):
return len(self.itos)
def vocab_rerank(self):
self.stoi = {word: i for i, word in enumerate(self.itos)}
def extend(self, v, sort=False):
words = sorted(v.itos) if sort else v.itos
for w in words:
if w not in self.stoi:
self.itos.append(w)
self.stoi[w] = len(self.itos) - 1
class Vocab(TorchVocab):
def __init__(self, counter, max_size=None, min_freq=1):
self.pad_index = 0
self.unk_index = 1
self.eos_index = 2
self.sos_index = 3
self.mask_index = 4
super().__init__(counter, specials=["<pad>", "<unk>", "<eos>", "<sos>", "<mask>"],
max_size=max_size, min_freq=min_freq)
def to_seq(self, sentece, seq_len, with_eos=False, with_sos=False) -> list:
pass
def from_seq(self, seq, join=False, with_pad=False):
pass
@staticmethod
def load_vocab(vocab_path: str) -> 'Vocab':
with open(vocab_path, "rb") as f:
return pickle.load(f)
def save_vocab(self, vocab_path):
with open(vocab_path, "wb") as f:
pickle.dump(self, f)
# Building Vocab with text files
class WordVocab(Vocab):
def __init__(self, texts, max_size=None, min_freq=1):
print("Building Vocab")
counter = Counter()
for line in tqdm.tqdm(texts):
if isinstance(line, list):
words = line
else:
words = line.replace("\n", " ").replace("\t", " ").split()[:4]
for word in words:
counter[word] += 1
super().__init__(counter, max_size=max_size, min_freq=min_freq)
def to_seq(self, sentence, seq_len=None, with_eos=False, with_sos=False, with_len=False):
if isinstance(sentence, str):
sentence = sentence.split()
seq = [self.stoi.get(word, self.unk_index) for word in sentence]
if with_eos:
            seq += [self.eos_index]  # this would be index 2
if with_sos:
seq = [self.sos_index] + seq
origin_seq_len = len(seq)
if seq_len is None:
pass
elif len(seq) <= seq_len:
seq += [self.pad_index for _ in range(seq_len - len(seq))]
else:
seq = seq[:seq_len]
return (seq, origin_seq_len) if with_len else seq
def from_seq(self, seq, join=False, with_pad=False):
words = [self.itos[idx]
if idx < len(self.itos)
else "<%d>" % idx
for idx in seq
if not with_pad or idx != self.pad_index]
return " ".join(words) if join else words
@staticmethod
def load_vocab(vocab_path: str) -> 'WordVocab':
with open(vocab_path, "rb") as f:
return pickle.load(f)
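# A small usage sketch (toy corpus; real indices depend on the training text):
#   vocab = WordVocab(["mov eax ebx", "push rbp"])
#   seq = vocab.to_seq("mov eax", seq_len=4)    # -> [7, 5, 0, 0] with <pad>=0
#   back = vocab.from_seq(seq, with_pad=True)   # -> ['mov', 'eax'] (pads filtered)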
def build():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--corpus_path", required=True, type=str)
parser.add_argument("-o", "--output_path", required=True, type=str)
parser.add_argument("-s", "--vocab_size", type=int, default=None)
parser.add_argument("-e", "--encoding", type=str, default="utf-8")
parser.add_argument("-m", "--min_freq", type=int, default=1)
args = parser.parse_args()
with open(args.corpus_path, "r", encoding=args.encoding) as f:
vocab = WordVocab(f, max_size=args.vocab_size, min_freq=args.min_freq)
print("VOCAB SIZE:", len(vocab))
vocab.save_vocab(args.output_path)
| 6,753 | 35.311828 | 93 | py |
PalmTree | PalmTree-master/pre-trained_model/config.py | """
Configuration file.
"""
VOCAB_SIZE = 5000
USE_CUDA = True
DEVICES = [0]
CUDA_DEVICE = DEVICES[0]
VERSION = 1
MAXLEN = 10
LEARNING_RATE=1e-5
| 146 | 10.307692 | 24 | py |
PalmTree | PalmTree-master/pre-trained_model/how2use.py | import os
from config import *
from torch import nn
from scipy.ndimage.filters import gaussian_filter1d
from torch.autograd import Variable
import torch
import numpy as np
import eval_utils as utils
palmtree = utils.UsableTransformer(model_path="./palmtree/transformer.ep19", vocab_path="./palmtree/vocab")
# tokens have to be separated by spaces.
text = ["mov rbp rdi",
"mov ebx 0x1",
"mov rdx rbx",
"call memcpy",
"mov [ rcx + rbx ] 0x0",
"mov rcx rax",
"mov [ rax ] 0x2e"]
# it is better to make batches as large as possible.
embeddings = palmtree.encode(text)
print("usable embedding of this basicblock:", embeddings)
print("the shape of output tensor: ", embeddings.shape)
| 736 | 26.296296 | 107 | py |
PalmTree | PalmTree-master/pre-trained_model/eval_utils.py | from torch.autograd import Variable
import torch
import re
import numpy
from torch import nn
import torch.nn.functional as F
from config import *
import vocab
# this function is how I parse and pre-process instructions for palmtree. It is very simple and based on regular expressions.
# If I had used IDA Pro or angr instead of Binary Ninja, I could have come up with a better solution.
def parse_instruction(ins, symbol_map, string_map):
# arguments:
# ins: string e.g. "mov, eax, [rax+0x1]"
# symbol_map: a dict that contains symbols the key is the address and the value is the symbol
    # string_map : same structure as symbol_map; in Binary Ninja, constant strings are collected into string_map
    #              and other meaningful strings, such as function names, go into symbol_map.
    #              You do not have to keep them separate; this is just one of many possible normalization strategies.
ins = re.sub('\s+', ', ', ins, 1)
parts = ins.split(', ')
operand = []
token_lst = []
if len(parts) > 1:
operand = parts[1:]
token_lst.append(parts[0])
for i in range(len(operand)):
# print(operand)
symbols = re.split('([0-9A-Za-z]+)', operand[i])
symbols = [s.strip() for s in symbols if s]
processed = []
for j in range(len(symbols)):
if symbols[j][:2] == '0x' and len(symbols[j]) > 6 and len(symbols[j]) < 15:
                # I use a very crude rule here: numbers longer than 6 but shorter than 15 digits are treated as addresses;
                # the others are constant numbers and will not be normalized.
if int(symbols[j], 16) in symbol_map:
processed.append("symbol")
elif int(symbols[j], 16) in string_map:
processed.append("string")
else:
processed.append("address")
else:
processed.append(symbols[j])
processed = [p for p in processed if p]
token_lst.extend(processed)
# the output will be like "mov eax [ rax + 0x1 ]"
return ' '.join(token_lst)
class UsableTransformer:
def __init__(self, model_path, vocab_path):
print("Loading Vocab", vocab_path)
self.vocab = vocab.WordVocab.load_vocab(vocab_path)
print("Vocab Size: ", len(self.vocab))
self.model = torch.load(model_path)
self.model.eval()
if USE_CUDA:
self.model.cuda(CUDA_DEVICE)
    def encode(self, text, numpy=True):
segment_label = []
sequence = []
for t in text:
l = (len(t.split(' '))+2) * [1]
s = self.vocab.to_seq(t)
# print(t, s)
s = [3] + s + [2]
if len(l) > 20:
segment_label.append(l[:20])
else:
segment_label.append(l + [0]*(20-len(l)))
if len(s) > 20:
sequence.append(s[:20])
else:
sequence.append(s + [0]*(20-len(s)))
segment_label = torch.LongTensor(segment_label)
sequence = torch.LongTensor(sequence)
if USE_CUDA:
sequence = sequence.cuda(CUDA_DEVICE)
segment_label = segment_label.cuda(CUDA_DEVICE)
encoded = self.model.forward(sequence, segment_label)
result = torch.mean(encoded.detach(), dim=1)
del encoded
if USE_CUDA:
if numpy:
return result.data.cpu().numpy()
else:
return result.to('cpu')
else:
if numpy:
return result.data.numpy()
else:
return result | 3,712 | 34.361905 | 125 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/main.py | import sys
from timeit import default_timer as timer
from utils.cli_parser import parse_cli_overides
from utils.config import get_dataset
from learning.preprocess import Preprocess
from utils.ddp_init import cleanup, spawn_nproc, setup
import torch
from utils.common import prepare_train_id
from learning import initialize, train, infer
feature_type = {
'mic': ['salsalite', 'logmelgcc', 'logmel'],
'foa': ['salsa', 'logmelIV', 'logmel']
}
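# Audio features that are valid for each array format ('mic' vs 'foa');
# main() asserts that cfg['data']['audio_feature'] is listed under cfg['data']['type'].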
def training(rank, args, cfg, dataset):
# Init DDP
setup(rank=rank, args=args)
train_initializer = initialize.init_train(args, cfg, dataset)
train.train(cfg, **train_initializer)
def main(args, cfg):
"""Execute a task based on the given command-line arguments.
This function is the main entry-point of the program. It allows the
user to extract features, train a model, infer predictions, and
evaluate predictions using the command-line interface.
Args:
args: command line arguments.
cfg: configurations.
Return:
0: successful termination
'any nonzero value': abnormal termination
"""
assert cfg['data']['audio_feature'] in feature_type[cfg['data']['type']], \
'{} is not the feature of {} signals.'.format(cfg['data']['audio_feature'], cfg['data']['type'])
# Dataset initialization
dataset = get_dataset(root_dir=cfg['dataset_dir'], cfg=cfg, args=args)
# Preprocess
if args.mode == 'preprocess':
preprocessor = Preprocess(args, cfg, dataset)
if args.preproc_mode == 'extract_data':
preprocessor.extract_data()
elif args.preproc_mode == 'extract_mic_features':
preprocessor.extract_mic_features()
elif args.preproc_mode == 'extract_pit_label':
preprocessor.extract_PIT_label()
elif args.preproc_mode == 'extract_indexes':
preprocessor.extract_index()
elif args.preproc_mode == 'extract_scalar':
preprocessor.extract_scalar()
elif args.preproc_mode == 'extract_adpit_label':
preprocessor.extract_ADPIT_label()
# Train
if args.mode == 'train':
prepare_train_id(args, cfg)
spawn_nproc(training, args, cfg, dataset)
# Inference
elif args.mode == 'infer':
infer_initializer = initialize.init_infer(args, cfg, dataset)
infer.infer(cfg, dataset, **infer_initializer)
if __name__ == '__main__':
args, cfg = parse_cli_overides()
sys.exit(main(args, cfg))
| 2,506 | 33.342466 | 104 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/compute_seld_metrics.py | import os
from methods.utils.SELD_metrics import SELDMetrics
from methods.utils.data_utilities import *
from pathlib import Path
from ruamel.yaml import YAML
import argparse
from scipy import stats
import re
def jackknife_estimation(global_value, partial_estimates, significance_level=0.05):
"""
Compute jackknife statistics from a global value and partial estimates.
Original function by Nicolas Turpault
:param global_value: Value calculated using all (N) examples
:param partial_estimates: Partial estimates using N-1 examples at a time
:param significance_level: Significance value used for t-test
:return:
estimate: estimated value using partial estimates
bias: Bias computed between global value and the partial estimates
std_err: Standard deviation of partial estimates
conf_interval: Confidence interval obtained after t-test
"""
mean_jack_stat = np.mean(partial_estimates)
n = len(partial_estimates)
bias = (n - 1) * (mean_jack_stat - global_value)
std_err = np.sqrt(
(n - 1) * np.mean((partial_estimates - mean_jack_stat) * (partial_estimates - mean_jack_stat), axis=0)
)
# bias-corrected "jackknifed estimate"
estimate = global_value - bias
# jackknife confidence interval
if not (0 < significance_level < 1):
raise ValueError("confidence level must be in (0, 1).")
t_value = stats.t.ppf(1 - significance_level / 2, n - 1)
# t-test
conf_interval = estimate + t_value * np.array((-std_err, std_err))
return estimate, bias, std_err, conf_interval
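# Illustrative call with made-up numbers: a global F-score of 0.62 and five
# leave-one-out partial estimates.
#   est, bias, std_err, ci = jackknife_estimation(
#       0.62, np.array([0.60, 0.63, 0.61, 0.64, 0.62]))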
class ComputeSELDResults(object):
def __init__(
self, ref_files_folder=None, use_polar_format=True, average='macro', doa_thresh=20, nb_classes=13
):
self._use_polar_format = use_polar_format
self._desc_dir = Path(ref_files_folder)
self._doa_thresh = doa_thresh
self._nb_classes = nb_classes
# Load feature class
# collect reference files
self._ref_meta_list = sorted(self._desc_dir.glob('**/*.csv'))
self._ref_labels = {}
for file in self._ref_meta_list:
fn = file.stem
gt_dict = load_output_format_file(file)
nb_ref_frames = max(list(gt_dict.keys()))
self._ref_labels[fn] = [to_metrics_format(gt_dict, nb_ref_frames, label_resolution=0.1), nb_ref_frames, gt_dict]
self._nb_ref_files = len(self._ref_labels)
self._average = average
@staticmethod
def get_nb_files(file_list, tag='all'):
        '''
        Given the file_list, this function returns a subset of files corresponding to the tag.
        Tags supported
        'all' - all predicted files pooled together
        'room' - predicted files grouped by room id
        :param file_list: complete list of predicted files
        :param tag: Supports two tags 'all', 'room'
        :return: Subset of files according to chosen tag
        '''
_cnt_dict = {}
for _filename in file_list:
if tag == 'all':
_ind = 0
else:
_ind = int(re.findall(r"(?<=room)\d+", str(_filename))[0])
if _ind not in _cnt_dict:
_cnt_dict[_ind] = []
_cnt_dict[_ind].append(_filename)
return _cnt_dict
def get_SELD_Results(self, pred_files_path, is_jackknife=False):
# collect predicted files info
pred_file_list = sorted(Path(pred_files_path).glob('*.csv'))
pred_labels_dict = {}
eval = SELDMetrics(nb_classes=self._nb_classes, doa_threshold=self._doa_thresh, average=self._average)
for pred_cnt, pred_file in enumerate(pred_file_list):
# Load predicted output format file
fn = pred_file.stem
pred_dict = load_output_format_file(pred_file)
pred_labels = to_metrics_format(pred_dict, self._ref_labels[fn][1], label_resolution=0.1)
# Calculated scores
eval.update_seld_scores(pred_labels, self._ref_labels[fn][0])
if is_jackknife:
pred_labels_dict[fn] = pred_labels
# Overall SED and DOA scores
ER, F, LE, LR, seld_scr, classwise_results = eval.compute_seld_scores()
if is_jackknife:
global_values = [ER, F, LE, LR, seld_scr]
if len(classwise_results):
global_values.extend(classwise_results.reshape(-1).tolist())
partial_estimates = []
# Calculate partial estimates by leave-one-out method
for leave_file in pred_file_list:
leave_one_out_list = pred_file_list[:]
leave_one_out_list.remove(leave_file)
eval = SELDMetrics(nb_classes=self._nb_classes, doa_threshold=self._doa_thresh, average=self._average)
for pred_cnt, pred_file in enumerate(leave_one_out_list):
# Calculated scores
fn = pred_file.stem
eval.update_seld_scores(pred_labels_dict[fn], self._ref_labels[fn][0])
ER, F, LE, LR, seld_scr, classwise_results = eval.compute_seld_scores()
leave_one_out_est = [ER, F, LE, LR, seld_scr]
if len(classwise_results):
leave_one_out_est.extend(classwise_results.reshape(-1).tolist())
# Overall SED and DOA scores
partial_estimates.append(leave_one_out_est)
partial_estimates = np.array(partial_estimates)
estimate, bias, std_err, conf_interval = [-1]*len(global_values), [-1]*len(global_values), [-1]*len(global_values), [-1]*len(global_values)
for i in range(len(global_values)):
estimate[i], bias[i], std_err[i], conf_interval[i] = jackknife_estimation(
global_value=global_values[i],
partial_estimates=partial_estimates[:, i],
significance_level=0.05
)
return [ER, conf_interval[0]], [F, conf_interval[1]], [LE, conf_interval[2]], [LR, conf_interval[3]], [seld_scr, conf_interval[4]], [classwise_results, np.array(conf_interval)[5:].reshape(5,13,2) if len(classwise_results) else []]
else:
return ER, F, LE, LR, seld_scr, classwise_results
def get_consolidated_SELD_results(self, pred_files_path, score_type_list=['all', 'room']):
'''
Get all categories of results.
        :param score_type_list: Supported
'all' - all the predicted files
'room' - for individual rooms
'''
# collect predicted files info
pred_file_list = sorted(Path(pred_files_path).glob('*.csv'))
nb_pred_files = len(pred_file_list)
# Calculate scores for different splits, overlapping sound events, and impulse responses (reverberant scenes)
print('Number of predicted files: {}\nNumber of reference files: {}'.format(nb_pred_files, self._nb_ref_files))
for score_type in score_type_list:
print('\n\n---------------------------------------------------------------------------------------------------')
print('------------------------------------ {} ---------------------------------------------'.format('Total score' if score_type=='all' else 'score per {}'.format(score_type)))
print('---------------------------------------------------------------------------------------------------')
split_cnt_dict = self.get_nb_files(pred_file_list, tag=score_type) # collect files corresponding to score_type
# Calculate scores across files for a given score_type
for split_key in np.sort(list(split_cnt_dict)):
# Load evaluation metric class
eval = SELDMetrics(nb_classes=self._nb_classes, doa_threshold=self._doa_thresh, average=self._average)
samples_per_class = [0] * self._nb_classes
for pred_cnt, pred_file in enumerate(split_cnt_dict[split_key]):
# Load predicted output format file
fn = pred_file.stem
pred_dict = load_output_format_file(pred_file)
pred_labels = to_metrics_format(pred_dict, self._ref_labels[fn][1], label_resolution=0.1)
# Count samples of each class per room
for frame_ind in self._ref_labels[fn][2].keys():
for event in self._ref_labels[fn][2][frame_ind]:
samples_per_class[event[0]] += 1
# Calculated scores
eval.update_seld_scores(pred_labels, self._ref_labels[fn][0])
# Overall SED and DOA scores
ER, F, LE, LR, seld_scr, classwise_test_scr = eval.compute_seld_scores()
print('\nAverage score for {} {} data using {} coordinates'.format(score_type, 'fold' if score_type=='all' else split_key, 'Polar' if self._use_polar_format else 'Cartesian' ))
print('SELD score (early stopping metric): {:0.3f}'.format(seld_scr))
print('SED metrics: Error rate: {:0.3f}, F-score:{:0.1f}'.format(ER, 100*F))
print('DOA metrics: Localization error: {:0.1f}, Localization Recall: {:0.1f}'.format(LE, 100*LR))
# print('Samples of each class for {}: {}'.format('all rooms' if score_type=='all' else 'room ' + str(split_key), samples_per_class))
            for cls_cnt in range(self._nb_classes):
words = '{}\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{}'.format(cls_cnt, classwise_test_scr[0][cls_cnt], classwise_test_scr[1][cls_cnt], classwise_test_scr[2][cls_cnt],\
classwise_test_scr[3][cls_cnt], classwise_test_scr[4][cls_cnt], samples_per_class[cls_cnt])
print(words)
def reshape_3Dto2D(A):
return A.reshape(A.shape[0] * A.shape[1], A.shape[2])
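# Hedged usage sketch (directory paths are placeholders): compute the overall
# DCASE metrics for a folder of predicted CSVs against reference labels.
def _demo_compute_results(gt_csv_dir='path/to/ref_csvs', pred_csv_dir='path/to/pred_csvs'):
    score_obj = ComputeSELDResults(ref_files_folder=gt_csv_dir, nb_classes=13, doa_thresh=20)
    ER, F, LE, LR, seld_scr, _ = score_obj.get_SELD_Results(pred_csv_dir)
    print('SELD: {:0.3f} | ER: {:0.3f}, F: {:0.1f}, LE: {:0.1f}, LR: {:0.1f}'
          .format(seld_scr, ER, 100 * F, LE, 100 * LR))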
if __name__ == "__main__":
nb_classes = 13
spatial_threshold = 20
parser = argparse.ArgumentParser(
description='Event Independent Network for DCASE2022.',
add_help=False
)
parser.add_argument('-c', '--config_file', default='./configs/ein_seld/seld.yaml', help='Specify config file', metavar='FILE')
parser.add_argument('--dataset', default='STARSS22', type=str)
parser.add_argument('--use_jackknife', action='store_true', help='Use jackknife.')
    parser.add_argument('--consolidated_score', action='store_true', help='Compute consolidated SELD score.')
args = parser.parse_args()
yaml = YAML()
with open(args.config_file, 'r') as f:
cfg = yaml.load(f)
use_jackknife = args.use_jackknife
results_dir = Path(cfg['workspace_dir']).joinpath('results')
out_infer = results_dir.joinpath('out_infer')
pred_csv_dir = out_infer.joinpath(cfg['method']).joinpath(cfg['inference']['infer_id']).joinpath('submissions')
gt_csv_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('label','frame').joinpath(args.dataset)
out_evaluate = results_dir.joinpath('out_evaluate').joinpath(cfg['method'])
score_obj = ComputeSELDResults(ref_files_folder=gt_csv_dir, nb_classes=nb_classes, doa_thresh=spatial_threshold)
# Compute just the DCASE final results
if not use_jackknife:
if not args.consolidated_score:
# Save as file
if not out_evaluate.is_dir():
out_evaluate.mkdir(parents=True, exist_ok=True)
path = out_evaluate.joinpath(cfg['inference']['infer_id']+'.tsv')
if path.is_file():
os.unlink(path)
#### Macro ####
score_obj._average = 'macro'
# score_obj = ComputeSELDResults(ref_files_folder=gt_csv_dir, average=average, nb_classes=nb_classes, doa_thresh=spatial_threshold)
ER, F, LE, LR, seld_scr, classwise_test_scr = score_obj.get_SELD_Results(pred_csv_dir)
print('#### Classwise results on unseen test data ####')
words = 'Class\tER\tF\tLE\tLR\tSELD_score'
print(words)
f = open(path, 'a')
f.writelines(words+'\n')
for cls_cnt in range(nb_classes):
words = '{}\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{:0.3f}'\
.format(cls_cnt, classwise_test_scr[0][cls_cnt], classwise_test_scr[1][cls_cnt], classwise_test_scr[2][cls_cnt], classwise_test_scr[3][cls_cnt], classwise_test_scr[4][cls_cnt])
print(words)
f.writelines(words+'\n')
words = 'Sum_macro\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{:0.3f}'.format(ER, F, LE, LR, seld_scr)
f.writelines(words+'\n')
print('######## MACRO ########')
print('SELD score (early stopping metric): {:0.3f}'.format(seld_scr))
print('SED metrics: Error rate: {:0.3f}, F-score:{:0.1f}'.format(ER, 100*F))
print('DOA metrics: Localization error: {:0.1f}, Localization Recall: {:0.1f}'.format(LE, 100*LR))
#### Micro ####
score_obj._average = 'micro'
ER, F, LE, LR, seld_scr, _ = score_obj.get_SELD_Results(pred_csv_dir)
words = 'Sum_micro\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{:0.3f}\t{:0.3f}'.format(ER, F, LE, LR, seld_scr)
f.writelines(words+'\n')
f.close()
print('######## MICRO ########')
print('SELD score (early stopping metric): {:0.3f}'.format(seld_scr))
print('SED metrics: Error rate: {:0.3f}, F-score:{:0.1f}'.format(ER, 100*F))
print('DOA metrics: Localization error: {:0.1f}, Localization Recall: {:0.1f}'.format(LE, 100*LR))
else:
score_obj.get_consolidated_SELD_results(pred_csv_dir)
else:
ER, F, LE, LR, seld_scr, classwise_test_scr = score_obj.get_SELD_Results(pred_csv_dir,is_jackknife=use_jackknife )
print('SELD score (early stopping metric): {:0.3f} {}'.format(seld_scr[0], '[{:0.3f}, {:0.3f}]'.format(seld_scr[1][0], seld_scr[1][1]) ))
print('SED metrics: Error rate: {:0.3f} {}, F-score: {:0.1f} {}'.format(ER[0] , '[{:0.3f}, {:0.3f}]'\
.format(ER[1][0], ER[1][1]) , 100*F[0], '[{:0.3f}, {:0.3f}]'.format(100*F[1][0], 100*F[1][1]) ))
print('DOA metrics: Localization error: {:0.1f} {}, Localization Recall: {:0.1f} {}'\
.format(LE[0], '[{:0.3f}, {:0.3f}]'.format(LE[1][0], LE[1][1]) , 100*LR[0],'[{:0.3f}, {:0.3f}]'.format(100*LR[1][0], 100*LR[1][1]) ))
print('Classwise results on unseen test data')
print('Class\tER\tF\tLE\tLR\tSELD_score')
for cls_cnt in range(nb_classes):
print('{}\t{:0.3f} {}\t{:0.3f} {}\t{:0.3f} {}\t{:0.3f} {}\t{:0.3f} {}'.format(
cls_cnt,
classwise_test_scr[0][0][cls_cnt], '[{:0.3f}, {:0.3f}]'\
.format(classwise_test_scr[1][0][cls_cnt][0], classwise_test_scr[1][0][cls_cnt][1]),
classwise_test_scr[0][1][cls_cnt], '[{:0.3f}, {:0.3f}]'\
.format(classwise_test_scr[1][1][cls_cnt][0], classwise_test_scr[1][1][cls_cnt][1]),
classwise_test_scr[0][2][cls_cnt], '[{:0.3f}, {:0.3f}]'\
.format(classwise_test_scr[1][2][cls_cnt][0], classwise_test_scr[1][2][cls_cnt][1]) ,
classwise_test_scr[0][3][cls_cnt], '[{:0.3f}, {:0.3f}]'\
.format(classwise_test_scr[1][3][cls_cnt][0], classwise_test_scr[1][3][cls_cnt][1]) ,
classwise_test_scr[0][4][cls_cnt], '[{:0.3f}, {:0.3f}]'\
.format(classwise_test_scr[1][4][cls_cnt][0], classwise_test_scr[1][4][cls_cnt][1])))
| 15,630 | 50.587459 | 242 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/inference.py | class BaseInferer:
""" Base inferer class
"""
def infer(self, *args, **kwargs):
""" Perform an inference on test data.
"""
raise NotImplementedError
def fusion(self, submissions_dir, preds):
""" Ensamble predictions.
"""
raise NotImplementedError
| 336 | 15.047619 | 46 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/training.py | class BaseTrainer:
""" Base trainer class
"""
def train_step(self, *args, **kwargs):
""" Perform a training step.
"""
raise NotImplementedError
def validate_step(self, *args, **kwargs):
""" Perform a validation step
"""
raise NotImplementedError
| 317 | 15.736842 | 45 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/data.py | from pathlib import Path
import os
import pandas as pd
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
from utils.common import int16_samples_to_float32
class BaseDataset(Dataset):
""" User defined datset
"""
def __init__(self, args, cfg, dataset):
"""
Args:
args: input args
cfg: configurations
dataset: dataset used
"""
super().__init__()
self.args = args
self.sample_rate = cfg['data']['sample_rate']
self.data_type = 'wav' if cfg['data']['audio_feature'] in ['logmelIV', 'logmel'] else 'feature'
        # Chunklen, hoplen and segmentation differ from the training settings.
hdf5_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset'])
main_data_dir = hdf5_dir.joinpath('data').joinpath('{}fs'.format(cfg['data']['sample_rate'])).joinpath(self.data_type)
if self.data_type == 'feature':
self.data_dir = main_data_dir.joinpath('dev').joinpath(cfg['data']['audio_feature'])
self.points_per_predictions = int(dataset.label_resolution / (cfg['data']['hoplen'] / cfg['data']['sample_rate']))
else:
self.data_dir = main_data_dir.joinpath('dev').joinpath(cfg['data']['type'])
self.points_per_predictions = cfg['data']['sample_rate'] * dataset.label_resolution
# Data path
indexes_path = main_data_dir.joinpath('devset_{}sChunklen_{}sHoplen_train.csv'\
.format(cfg['data']['train_chunklen_sec'], cfg['data']['train_hoplen_sec']))
segments_indexes = pd.read_csv(indexes_path, header=None).values
dataset_list = str(cfg['dataset_synth']).split(',')
dataset_list.append('STARSS22')
segments_indexes = [segment for segment in segments_indexes for _dataset in dataset_list if _dataset in segment[0]]
self.segments_list = segments_indexes
self.num_segments = len(self.segments_list)
def __len__(self):
"""Get length of the dataset
"""
return len(self.segments_list)
def __getitem__(self, idx):
"""
Read features from the dataset
"""
clip_indexes = self.segments_list[idx]
fn, segments = clip_indexes[0], clip_indexes[1:]
data_path = self.data_dir.joinpath(fn)
index_begin = segments[0]
index_end = segments[1]
pad_width_before = segments[2]
pad_width_after = segments[3]
if self.data_type == 'wav':
with h5py.File(data_path, 'r') as hf:
x = int16_samples_to_float32(hf['waveform'][:, index_begin: index_end])
pad_width = ((0, 0), (pad_width_before, pad_width_after))
else:
with h5py.File(data_path, 'r') as hf:
x = hf['feature'][:, index_begin: index_end]
pad_width = ((0, 0), (pad_width_before, pad_width_after), (0, 0))
x = np.pad(x, pad_width, mode='constant')
sample = {
'waveform': x
}
return sample
class PinMemCustomBatch:
def __init__(self, batch_dict):
batch_x = []
for n in range(len(batch_dict)):
batch_x.append(batch_dict[n]['waveform'])
batch_x = np.stack(batch_x, axis=0)
self.batch_out_dict = {
'waveform': torch.tensor(batch_x, dtype=torch.float32),
}
def pin_memory(self):
self.batch_out_dict['waveform'] = self.batch_out_dict['waveform'].pin_memory()
return self.batch_out_dict
def collate_fn(batch_dict):
"""
Merges a list of samples to form a mini-batch
Pin memory for customized dataset
"""
return PinMemCustomBatch(batch_dict)
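# Hedged usage sketch (assumes valid args/cfg/dataset objects as used elsewhere
# in this repo): BaseDataset is consumed through a DataLoader together with the
# pin-memory collate function above.
def _demo_dataloader(args, cfg, dataset):
    from torch.utils.data import DataLoader
    test_set = BaseDataset(args, cfg, dataset)
    loader = DataLoader(test_set, batch_size=8, shuffle=False,
                        num_workers=4, collate_fn=collate_fn, pin_memory=True)
    for batch in loader:
        print(batch['waveform'].shape)  # (batch_size, channels, chunk_length)
        break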
| 3,767 | 34.54717 | 126 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/metrics.py | from methods.utils.SELD_metrics import *
from utils.ddp_init import reduce_value
class Metrics(object):
"""Metrics for evaluation
"""
def __init__(self, dataset):
# self.metrics = []
self.names = ['ER_macro', 'F_macro', 'LE_macro', 'LR_macro', 'SELD_scr_macro', 'ER_micro', 'F_micro', 'LE_micro', 'LR_micro', 'SELD_scr_micro']
self.num_classes = dataset.num_classes
self.doa_threshold = 20 # in deg
self.num_frames_1s = int(1 / dataset.label_resolution)
self.metrics = SELDMetrics(nb_classes=self.num_classes, doa_threshold=self.doa_threshold)
def update(self, pred_dict, gt_dict):
self.metrics.update_seld_scores(pred_dict, gt_dict)
def calculate(self):
# ER: error rate, F: F1-score, LE: Location error, LR: Location recall
self.metrics._average = 'macro'
ER_macro, F_macro, LE_macro, LR_macro, seld_score_macro, _ = self.metrics.compute_seld_scores()
self.metrics._average = 'micro'
ER_micro, F_micro, LE_micro, LR_micro, seld_score_micro, _ = self.metrics.compute_seld_scores()
self.metrics = SELDMetrics(nb_classes=self.num_classes, doa_threshold=self.doa_threshold)
metrics_scores_macro = {
'ER_macro': ER_macro,
'F_macro': F_macro,
'LE_macro': LE_macro,
'LR_macro': LR_macro,
'seld_macro': seld_score_macro,
}
metrics_scores_micro = {
'ER_micro': ER_micro,
'F_micro': F_micro,
'LE_micro': LE_micro,
'LR_micro': LR_micro,
'seld_micro': seld_score_micro,
}
metrics_scores = {
'macro': metrics_scores_macro,
'micro': metrics_scores_micro,
}
return metrics_scores
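# Hedged usage sketch (assumes pred_dict/gt_dict already in the SELD metrics
# format, e.g. produced by to_metrics_format in data_utilities):
def _demo_metrics(dataset, pred_dict, gt_dict):
    metrics = Metrics(dataset)
    metrics.update(pred_dict, gt_dict)
    scores = metrics.calculate()
    print('macro SELD: {:0.3f}, micro SELD: {:0.3f}'.format(
        scores['macro']['seld_macro'], scores['micro']['seld_micro']))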
| 1,807 | 35.897959 | 151 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/__init__.py | 0 | 0 | 0 | py |
|
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/feature.py | import torch
import torch.nn as nn
import librosa
import numpy as np
from methods.utils.stft import (STFT, LogmelFilterBank, intensityvector,
spectrogram_STFTInput)
import math
def nCr(n, r):
return math.factorial(n) // math.factorial(r) // math.factorial(n-r)
class LogmelIntensity_Extractor(nn.Module):
def __init__(self, cfg):
super().__init__()
data = cfg['data']
sample_rate, n_fft, hop_length, window, n_mels = \
data['sample_rate'], data['nfft'], data['hoplen'], data['window'], data['n_mels']
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# STFT extractor
self.stft_extractor = STFT(n_fft=n_fft, hop_length=hop_length, win_length=n_fft,
window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Spectrogram extractor
self.spectrogram_extractor = spectrogram_STFTInput
# Logmel extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=n_fft,
n_mels=n_mels, fmin=20, fmax=sample_rate/2, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Intensity vector extractor
self.intensityVector_extractor = intensityvector
def forward(self, x):
"""
input:
(batch_size, channels=4, data_length)
output:
(batch_size, channels, time_steps, freq_bins) freq_bins->mel_bins
"""
if x.ndim != 3:
raise ValueError("x shape must be (batch_size, num_channels, data_length)\n \
Now it is {}".format(x.shape))
x = self.stft_extractor(x)
logmel = self.logmel_extractor(self.spectrogram_extractor(x))
intensity_vector = self.intensityVector_extractor(x, self.logmel_extractor.melW)
out = torch.cat((logmel, intensity_vector), dim=1)
return out
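# Hedged usage sketch (illustrative config values, not the repo defaults):
# extract log-mel + intensity-vector features from a random FOA clip.
def _demo_logmel_intensity():
    cfg = {'data': {'sample_rate': 24000, 'nfft': 1024, 'hoplen': 240,
                    'window': 'hann', 'n_mels': 128}}
    extractor = LogmelIntensity_Extractor(cfg)
    x = torch.randn(2, 4, 24000)  # (batch_size, channels, data_length)
    out = extractor(x)
    print(out.shape)  # (2, 7, time_steps, 128): 4 logmel + 3 intensity channels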
class Logmel_Extractor(nn.Module):
def __init__(self, cfg):
super().__init__()
data = cfg['data']
sample_rate, n_fft, hop_length, window, n_mels = \
data['sample_rate'], data['nfft'], data['hoplen'], data['window'], data['n_mels']
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# STFT extractor
self.stft_extractor = STFT(n_fft=n_fft, hop_length=hop_length, win_length=n_fft,
window=window, center=center, pad_mode=pad_mode,
)
# Spectrogram extractor
self.spectrogram_extractor = spectrogram_STFTInput
# Logmel extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=n_fft,
n_mels=n_mels, fmin=20, fmax=sample_rate/2, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
def forward(self, x):
"""
input:
(batch_size, channels=4, data_length)
output:
(batch_size, channels, time_steps, freq_bins) freq_bins->mel_bins
"""
if x.ndim != 3:
raise ValueError("x shape must be (batch_size, num_channels, data_length)\n \
Now it is {}".format(x.shape))
x = self.stft_extractor(x)
logmel = self.logmel_extractor(self.spectrogram_extractor(x))
out = logmel
return out
class Features_Extractor_MIC():
    def __init__(self, cfg):
        self.cfg = cfg
        self.fs = cfg['data']['sample_rate']
self.n_fft = cfg['data']['nfft']
self.n_mels = cfg['data']['n_mels']
self.hoplen = cfg['data']['hoplen']
self.mel_bank = librosa.filters.mel(sr=self.fs, n_fft=self.n_fft, n_mels=self.n_mels).T
if cfg['data']['audio_feature'] == 'salsalite':
# Initialize the spatial feature constants
c = 343
self.fmin_doa = cfg['data']['salsalite']['fmin_doa']
self.fmax_doa = cfg['data']['salsalite']['fmax_doa']
self.fmax_spectra = cfg['data']['salsalite']['fmax_spectra']
            self.lower_bin = int(np.floor(self.fmin_doa * self.n_fft / float(self.fs)))
            self.lower_bin = np.max((self.lower_bin, 1))
            self.upper_bin = int(np.floor(self.fmax_doa * self.n_fft / float(self.fs)))
            self.cutoff_bin = int(np.floor(self.fmax_spectra * self.n_fft / float(self.fs)))
            assert self.upper_bin <= self.cutoff_bin, 'Upper bin for doa feature is higher than cutoff bin for spectrogram!'
# Normalization factor for salsalite
self.delta = 2 * np.pi * self.fs / (self.n_fft * c)
self.freq_vector = np.arange(self.n_fft // 2 + 1)
self.freq_vector[0] = 1
self.freq_vector = self.freq_vector[None, :, None]
def _spectrogram(self, audio_input, _nb_frames):
_nb_ch = audio_input.shape[1]
spectra = []
for ch_cnt in range(_nb_ch):
stft_ch = librosa.core.stft(np.asfortranarray(audio_input[:, ch_cnt]), n_fft=self.n_fft, hop_length=self.hoplen,
win_length=self.n_fft, window=self.cfg['data']['window'])
spectra.append(stft_ch[:, :_nb_frames])
return np.array(spectra).T
def _get_logmel_spectrogram(self, linear_spectra):
logmel_feat = np.zeros((linear_spectra.shape[0], self.n_mels, linear_spectra.shape[-1]))
for ch_cnt in range(linear_spectra.shape[-1]):
mag_spectra = np.abs(linear_spectra[:, :, ch_cnt])**2
mel_spectra = np.dot(mag_spectra, self.mel_bank)
logmel_spectra = librosa.power_to_db(mel_spectra)
logmel_feat[:, :, ch_cnt] = logmel_spectra
return logmel_feat
def _get_gcc(self, linear_spectra):
gcc_channels = nCr(linear_spectra.shape[-1], 2)
gcc_feat = np.zeros((linear_spectra.shape[0], self.n_mels, gcc_channels))
cnt = 0
for m in range(linear_spectra.shape[-1]):
for n in range(m+1, linear_spectra.shape[-1]):
R = np.conj(linear_spectra[:, :, m]) * linear_spectra[:, :, n]
cc = np.fft.irfft(np.exp(1.j*np.angle(R)))
cc = np.concatenate((cc[:, -self.n_mels//2:], cc[:, :self.n_mels//2]), axis=-1)
gcc_feat[:, :, cnt] = cc
cnt += 1
return gcc_feat
def _get_salsalite(self, linear_spectra):
# Adapted from the official SALSA repo- https://github.com/thomeou/SALSA
# spatial features
phase_vector = np.angle(linear_spectra[:, :, 1:] * np.conj(linear_spectra[:, :, 0, None]))
phase_vector = phase_vector / (self.delta * self.freq_vector)
phase_vector = phase_vector[:, self.lower_bin:self.cutoff_bin, :]
phase_vector[:, self.upper_bin:, :] = 0
phase_vector = phase_vector.transpose((2, 0, 1))
# spectral features
linear_spectra = np.abs(linear_spectra)**2
for ch_cnt in range(linear_spectra.shape[-1]):
linear_spectra[:, :, ch_cnt] = librosa.power_to_db(linear_spectra[:, :, ch_cnt], ref=1.0, amin=1e-10, top_db=None)
linear_spectra = linear_spectra[:, self.lower_bin:self.cutoff_bin, :]
linear_spectra = linear_spectra.transpose((2, 0, 1))
return np.concatenate((linear_spectra, phase_vector), axis=0) | 7,445 | 42.040462 | 127 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/stft.py | import math
import librosa
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from librosa import ParameterError
from torch.nn.parameter import Parameter
eps = torch.finfo(torch.float32).eps
class DFTBase(nn.Module):
def __init__(self):
"""Base class for DFT and IDFT matrix"""
super().__init__()
def dft_matrix(self, n):
(x, y) = np.meshgrid(np.arange(n), np.arange(n))
omega = np.exp(-2 * np.pi * 1j / n)
W = np.power(omega, x * y)
return W
def idft_matrix(self, n):
(x, y) = np.meshgrid(np.arange(n), np.arange(n))
omega = np.exp(2 * np.pi * 1j / n)
W = np.power(omega, x * y)
return W
class DFT(DFTBase):
def __init__(self, n, norm):
"""Calculate DFT, IDFT, RDFT, IRDFT.
Args:
n: fft window size
norm: None | 'ortho'
"""
super().__init__()
self.W = self.dft_matrix(n)
self.inv_W = self.idft_matrix(n)
self.W_real = torch.Tensor(np.real(self.W))
self.W_imag = torch.Tensor(np.imag(self.W))
self.inv_W_real = torch.Tensor(np.real(self.inv_W))
self.inv_W_imag = torch.Tensor(np.imag(self.inv_W))
self.n = n
self.norm = norm
def dft(self, x_real, x_imag):
"""Calculate DFT of signal.
Args:
x_real: (n,), signal real part
x_imag: (n,), signal imag part
Returns:
z_real: (n,), output real part
z_imag: (n,), output imag part
"""
z_real = torch.matmul(x_real, self.W_real) - torch.matmul(x_imag, self.W_imag)
z_imag = torch.matmul(x_imag, self.W_real) + torch.matmul(x_real, self.W_imag)
if self.norm is None:
pass
elif self.norm == 'ortho':
z_real /= math.sqrt(self.n)
z_imag /= math.sqrt(self.n)
return z_real, z_imag
def idft(self, x_real, x_imag):
"""Calculate IDFT of signal.
Args:
x_real: (n,), signal real part
x_imag: (n,), signal imag part
Returns:
z_real: (n,), output real part
z_imag: (n,), output imag part
"""
z_real = torch.matmul(x_real, self.inv_W_real) - torch.matmul(x_imag, self.inv_W_imag)
z_imag = torch.matmul(x_imag, self.inv_W_real) + torch.matmul(x_real, self.inv_W_imag)
if self.norm is None:
z_real /= self.n
elif self.norm == 'ortho':
z_real /= math.sqrt(self.n)
z_imag /= math.sqrt(self.n)
return z_real, z_imag
def rdft(self, x_real):
"""Calculate right DFT of signal.
Args:
x_real: (n,), signal real part
x_imag: (n,), signal imag part
Returns:
z_real: (n // 2 + 1,), output real part
z_imag: (n // 2 + 1,), output imag part
"""
n_rfft = self.n // 2 + 1
z_real = torch.matmul(x_real, self.W_real[..., 0 : n_rfft])
z_imag = torch.matmul(x_real, self.W_imag[..., 0 : n_rfft])
if self.norm is None:
pass
elif self.norm == 'ortho':
z_real /= math.sqrt(self.n)
z_imag /= math.sqrt(self.n)
return z_real, z_imag
def irdft(self, x_real, x_imag):
"""Calculate inverse right DFT of signal.
Args:
x_real: (n // 2 + 1,), signal real part
x_imag: (n // 2 + 1,), signal imag part
Returns:
z_real: (n,), output real part
z_imag: (n,), output imag part
"""
n_rfft = self.n // 2 + 1
flip_x_real = torch.flip(x_real, dims=(-1,))
x_real = torch.cat((x_real, flip_x_real[..., 1 : n_rfft - 1]), dim=-1)
flip_x_imag = torch.flip(x_imag, dims=(-1,))
x_imag = torch.cat((x_imag, -1. * flip_x_imag[..., 1 : n_rfft - 1]), dim=-1)
z_real = torch.matmul(x_real, self.inv_W_real) - torch.matmul(x_imag, self.inv_W_imag)
if self.norm is None:
z_real /= self.n
elif self.norm == 'ortho':
z_real /= math.sqrt(self.n)
return z_real
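# Hedged usage sketch (not part of the original file): the DFT helper mirrors
# numpy's fft/rfft on 1-D tensors; irdft inverts rdft.
def _demo_dft():
    obj = DFT(n=8, norm=None)
    x = torch.arange(8, dtype=torch.float32)
    z_real, z_imag = obj.rdft(x)
    print(z_real.shape, z_imag.shape)  # torch.Size([5]) torch.Size([5])
    x_rec = obj.irdft(z_real, z_imag)
    print(torch.allclose(x_rec, x, atol=1e-4))  # True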
class STFT(DFTBase):
def __init__(self, n_fft=2048, hop_length=None, win_length=None,
window='hann', center=True, pad_mode='reflect', freeze_parameters=True):
"""Implementation of STFT with Conv1d. The function has the same output
        as librosa.core.stft
"""
super().__init__()
assert pad_mode in ['constant', 'reflect']
self.n_fft = n_fft
self.center = center
self.pad_mode = pad_mode
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
fft_window = librosa.filters.get_window(window, win_length, fftbins=True)
# Pad the window out to n_fft size
fft_window = librosa.util.pad_center(fft_window, n_fft)
# DFT & IDFT matrix
self.W = self.dft_matrix(n_fft)
out_channels = n_fft // 2 + 1
self.conv_real = nn.Conv1d(in_channels=1, out_channels=out_channels,
kernel_size=n_fft, stride=hop_length, padding=0, dilation=1,
groups=1, bias=False)
self.conv_imag = nn.Conv1d(in_channels=1, out_channels=out_channels,
kernel_size=n_fft, stride=hop_length, padding=0, dilation=1,
groups=1, bias=False)
self.conv_real.weight.data = torch.Tensor(
np.real(self.W[:, 0 : out_channels] * fft_window[:, None]).T)[:, None, :]
# (n_fft // 2 + 1, 1, n_fft)
self.conv_imag.weight.data = torch.Tensor(
np.imag(self.W[:, 0 : out_channels] * fft_window[:, None]).T)[:, None, :]
# (n_fft // 2 + 1, 1, n_fft)
if freeze_parameters:
for param in self.parameters():
param.requires_grad = False
def forward(self, input):
"""input: (batch_size, num_channels, data_length)
Returns:
real: (batch_size, num_channels, time_steps, n_fft // 2 + 1)
imag: (batch_size, num_channels, time_steps, n_fft // 2 + 1)
"""
_, num_channels, _ = input.shape
real_out = []
imag_out = []
for n in range(num_channels):
x = input[:, n, :][:, None, :]
# (batch_size, 1, data_length)
if self.center:
x = F.pad(x, pad=(self.n_fft // 2, self.n_fft // 2), mode=self.pad_mode)
real = self.conv_real(x)
imag = self.conv_imag(x)
# (batch_size, n_fft // 2 + 1, time_steps)
real = real[:, None, :, :].transpose(2, 3)
imag = imag[:, None, :, :].transpose(2, 3)
# (batch_size, 1, time_steps, n_fft // 2 + 1)
real_out.append(real)
imag_out.append(imag)
real_out = torch.cat(real_out, dim=1)
imag_out = torch.cat(imag_out, dim=1)
return real_out, imag_out
def magphase(real, imag):
mag = (real ** 2 + imag ** 2) ** 0.5
cos = real / torch.clamp(mag, 1e-10, np.inf)
sin = imag / torch.clamp(mag, 1e-10, np.inf)
return mag, cos, sin
class ISTFT(DFTBase):
def __init__(self, n_fft=2048, hop_length=None, win_length=None,
window='hann', center=True, pad_mode='reflect', freeze_parameters=True):
"""Implementation of ISTFT with Conv1d. The function has the same output
        as librosa.core.istft
"""
super().__init__()
assert pad_mode in ['constant', 'reflect']
self.n_fft = n_fft
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.center = center
self.pad_mode = pad_mode
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
ifft_window = librosa.filters.get_window(window, win_length, fftbins=True)
# Pad the window out to n_fft size
ifft_window = librosa.util.pad_center(ifft_window, n_fft)
# DFT & IDFT matrix
self.W = self.idft_matrix(n_fft) / n_fft
self.conv_real = nn.Conv1d(in_channels=n_fft, out_channels=n_fft,
kernel_size=1, stride=1, padding=0, dilation=1,
groups=1, bias=False)
self.conv_imag = nn.Conv1d(in_channels=n_fft, out_channels=n_fft,
kernel_size=1, stride=1, padding=0, dilation=1,
groups=1, bias=False)
self.conv_real.weight.data = torch.Tensor(
np.real(self.W * ifft_window[None, :]).T)[:, :, None]
        # (n_fft, n_fft, 1)
self.conv_imag.weight.data = torch.Tensor(
np.imag(self.W * ifft_window[None, :]).T)[:, :, None]
        # (n_fft, n_fft, 1)
if freeze_parameters:
for param in self.parameters():
param.requires_grad = False
def forward(self, real_stft, imag_stft, length):
"""input: (batch_size, num_channels, time_steps, n_fft // 2 + 1)
Returns:
real: (batch_size, num_channels, data_length)
"""
assert real_stft.ndimension() == 4 and imag_stft.ndimension() == 4
device = next(self.parameters()).device
batch_size, num_channels, _, _ = real_stft.shape
wav_out = []
        for n in range(num_channels):
            # Use per-channel views so the 4-D inputs are not overwritten
            # across channel iterations (rebinding real_stft/imag_stft in the
            # loop broke multi-channel reconstruction).
            real_ch = real_stft[:, n, :, :].transpose(1, 2)
            imag_ch = imag_stft[:, n, :, :].transpose(1, 2)
            # (batch_size, n_fft // 2 + 1, time_steps)
            # Full stft
            full_real_stft = torch.cat((real_ch, torch.flip(real_ch[:, 1 : -1, :], dims=[1])), dim=1)
            full_imag_stft = torch.cat((imag_ch, - torch.flip(imag_ch[:, 1 : -1, :], dims=[1])), dim=1)
            # Reserve space for reconstructed waveform
            if length:
                if self.center:
                    padded_length = length + int(self.n_fft)
                else:
                    padded_length = length
                n_frames = min(
                    real_ch.shape[2], int(np.ceil(padded_length / self.hop_length)))
            else:
                n_frames = real_ch.shape[2]
            expected_signal_len = self.n_fft + self.hop_length * (n_frames - 1)
            y = torch.zeros(batch_size, expected_signal_len).to(device)
            # IDFT
            s_real = self.conv_real(full_real_stft) - self.conv_imag(full_imag_stft)
            # Overlap add
            for i in range(n_frames):
                y[:, i * self.hop_length : i * self.hop_length + self.n_fft] += s_real[:, :, i]
            ifft_window_sum = librosa.filters.window_sumsquare(self.window, n_frames,
                win_length=self.win_length, n_fft=self.n_fft, hop_length=self.hop_length)
            approx_nonzero_indices = np.where(ifft_window_sum > librosa.util.tiny(ifft_window_sum))[0]
            approx_nonzero_indices = torch.LongTensor(approx_nonzero_indices).to(device)
            ifft_window_sum = torch.Tensor(ifft_window_sum).to(device)
            y[:, approx_nonzero_indices] /= ifft_window_sum[approx_nonzero_indices][None, :]
            # Trim or pad to length
            if length is None:
                if self.center:
                    y = y[:, self.n_fft // 2 : -self.n_fft // 2]
            else:
                if self.center:
                    start = self.n_fft // 2
                else:
                    start = 0
                y = y[:, start : start + length]
                (batch_size, len_y) = y.shape
                if y.shape[-1] < length:
                    y = torch.cat((y, torch.zeros(batch_size, length - len_y).to(device)), dim=-1)
            wav_out.append(y)
        # Stack channels back: (batch_size, num_channels, data_length)
        wav_out = torch.stack(wav_out, dim=1)
        return wav_out
def spectrogram_STFTInput(input, power=2.0):
"""
Input:
real: (batch_size, num_channels, time_steps, n_fft // 2 + 1)
imag: (batch_size, num_channels, time_steps, n_fft // 2 + 1)
Returns:
spectrogram: (batch_size, num_channels, time_steps, n_fft // 2 + 1)
"""
(real, imag) = input
    # (batch_size, num_channels, time_steps, n_fft // 2 + 1)
spectrogram = real ** 2 + imag ** 2
if power == 2.0:
pass
else:
spectrogram = spectrogram ** (power / 2.0)
return spectrogram
class Spectrogram(nn.Module):
def __init__(self, n_fft=2048, hop_length=None, win_length=None,
window='hann', center=True, pad_mode='reflect', power=2.0,
freeze_parameters=True):
"""Calculate spectrogram using pytorch. The STFT is implemented with
        Conv1d. The function has the same output as librosa.core.stft
"""
super().__init__()
self.power = power
self.stft = STFT(n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center,
pad_mode=pad_mode, freeze_parameters=True)
def forward(self, input):
"""input: (batch_size, num_channels, data_length)
Returns:
spectrogram: (batch_size, num_channels, time_steps, n_fft // 2 + 1)
"""
(real, imag) = self.stft.forward(input)
        # (batch_size, num_channels, time_steps, n_fft // 2 + 1)
spectrogram = real ** 2 + imag ** 2
if self.power == 2.0:
pass
else:
spectrogram = spectrogram ** (self.power / 2.0)
return spectrogram
class LogmelFilterBank(nn.Module):
def __init__(self, sr=32000, n_fft=2048, n_mels=64, fmin=50, fmax=14000, is_log=True,
ref=1.0, amin=1e-10, top_db=80.0, freeze_parameters=True):
"""Calculate logmel spectrogram using pytorch. The mel filter bank is
        the pytorch implementation of librosa.filters.mel
"""
super().__init__()
self.is_log = is_log
self.ref = ref
self.amin = amin
self.top_db = top_db
self.melW = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels,
fmin=fmin, fmax=fmax).T
# (n_fft // 2 + 1, mel_bins)
self.melW = nn.Parameter(torch.Tensor(self.melW))
if freeze_parameters:
for param in self.parameters():
param.requires_grad = False
def forward(self, input):
"""input: (batch_size, num_channels, time_steps, freq_bins)
Output: (batch_size, num_channels, time_steps, mel_bins)
"""
# Mel spectrogram
mel_spectrogram = torch.matmul(input, self.melW)
# Logmel spectrogram
if self.is_log:
output = self.power_to_db(mel_spectrogram)
else:
output = mel_spectrogram
return output
def power_to_db(self, input):
"""Power to db, this function is the pytorch implementation of
        librosa.core.power_to_db
"""
ref_value = self.ref
log_spec = 10.0 * torch.log10(torch.clamp(input, min=self.amin, max=np.inf))
log_spec -= 10.0 * np.log10(np.maximum(self.amin, ref_value))
if self.top_db is not None:
if self.top_db < 0:
raise ParameterError('top_db must be non-negative')
log_spec = torch.clamp(log_spec, min=log_spec.max().item() - self.top_db, max=np.inf)
return log_spec
def intensityvector(input, melW):
"""Calculate intensity vector. Input is four channel stft of the signals.
input: (stft_real, stft_imag)
stft_real: (batch_size, 4, time_steps, freq_bins)
stft_imag: (batch_size, 4, time_steps, freq_bins)
out:
intenVec: (batch_size, 3, time_steps, freq_bins)
"""
sig_real, sig_imag = input[0], input[1]
Pref_real, Pref_imag = sig_real[:,0,...], sig_imag[:,0,...]
Px_real, Px_imag = sig_real[:,1,...], sig_imag[:,1,...]
Py_real, Py_imag = sig_real[:,2,...], sig_imag[:,2,...]
Pz_real, Pz_imag = sig_real[:,3,...], sig_imag[:,3,...]
IVx = Pref_real * Px_real + Pref_imag * Px_imag
IVy = Pref_real * Py_real + Pref_imag * Py_imag
IVz = Pref_real * Pz_real + Pref_imag * Pz_imag
normal = torch.sqrt(IVx**2 + IVy**2 + IVz**2) + eps
IVx_mel = torch.matmul(IVx / normal, melW)
IVy_mel = torch.matmul(IVy / normal, melW)
IVz_mel = torch.matmul(IVz / normal, melW)
intenVec = torch.stack([IVx_mel, IVy_mel, IVz_mel], dim=1)
return intenVec
class Enframe(nn.Module):
def __init__(self, frame_length=2048, hop_length=512):
"""Enframe a time sequence. This function is the pytorch implementation
of librosa.util.frame
"""
super().__init__()
'''
self.enframe_conv = nn.Conv1d(in_channels=1, out_channels=frame_length,
kernel_size=frame_length, stride=hop_length,
padding=frame_length // 2, bias=False)
'''
self.enframe_conv = nn.Conv1d(in_channels=1, out_channels=frame_length,
kernel_size=frame_length, stride=hop_length,
padding=0, bias=False)
self.enframe_conv.weight.data = torch.Tensor(torch.eye(frame_length)[:, None, :])
self.enframe_conv.weight.requires_grad = False
def forward(self, input):
"""input: (batch_size, num_channels, samples)
Output: (batch_size, num_channels, window_length, frames_num)
"""
_, num_channels, _ = input.shape
output = []
for n in range(num_channels):
output.append(self.enframe_conv(input[:, n, :][:, None, :]))
output = torch.cat(output, dim=1)
return output
class Scalar(nn.Module):
def __init__(self, scalar, freeze_parameters):
super().__init__()
self.scalar_mean = Parameter(torch.Tensor(scalar['mean']))
self.scalar_std = Parameter(torch.Tensor(scalar['std']))
if freeze_parameters:
for param in self.parameters():
param.requires_grad = False
def forward(self, input):
return (input - self.scalar_mean) / self.scalar_std
def debug(select, device):
"""Compare numpy + librosa and pytorch implementation result. For debug.
Args:
        select: 'dft' | 'stft' | 'logmel' | 'enframe' | 'logmel&iv'
device: 'cpu' | 'cuda'
"""
if select == 'dft':
n = 10
norm = None # None | 'ortho'
np.random.seed(0)
# Data
np_data = np.random.uniform(-1, 1, n)
pt_data = torch.Tensor(np_data)
# Numpy FFT
np_fft = np.fft.fft(np_data, norm=norm)
np_ifft = np.fft.ifft(np_fft, norm=norm)
np_rfft = np.fft.rfft(np_data, norm=norm)
np_irfft = np.fft.ifft(np_rfft, norm=norm)
# Pytorch FFT
obj = DFT(n, norm)
pt_dft = obj.dft(pt_data, torch.zeros_like(pt_data))
pt_idft = obj.idft(pt_dft[0], pt_dft[1])
pt_rdft = obj.rdft(pt_data)
pt_irdft = obj.irdft(pt_rdft[0], pt_rdft[1])
print('Comparing librosa and pytorch implementation of DFT. All numbers '
'below should be close to 0.')
print(np.mean((np.abs(np.real(np_fft) - pt_dft[0].cpu().numpy()))))
print(np.mean((np.abs(np.imag(np_fft) - pt_dft[1].cpu().numpy()))))
print(np.mean((np.abs(np.real(np_ifft) - pt_idft[0].cpu().numpy()))))
print(np.mean((np.abs(np.imag(np_ifft) - pt_idft[1].cpu().numpy()))))
print(np.mean((np.abs(np.real(np_rfft) - pt_rdft[0].cpu().numpy()))))
print(np.mean((np.abs(np.imag(np_rfft) - pt_rdft[1].cpu().numpy()))))
print(np.mean(np.abs(np_data - pt_irdft.cpu().numpy())))
elif select == 'stft':
data_length = 32000
device = torch.device(device)
np.random.seed(0)
sample_rate = 16000
n_fft = 1024
hop_length = 250
win_length = 1024
window = 'hann'
center = True
dtype = np.complex64
pad_mode = 'reflect'
# Data
np_data = np.random.uniform(-1, 1, data_length)
pt_data = torch.Tensor(np_data).to(device)
# Numpy stft matrix
np_stft_matrix = librosa.core.stft(y=np_data, n_fft=n_fft,
hop_length=hop_length, window=window, center=center).T
# Pytorch stft matrix
pt_stft_extractor = STFT(n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
pt_stft_extractor.to(device)
(pt_stft_real, pt_stft_imag) = pt_stft_extractor.forward(pt_data[None, None, :])
print('Comparing librosa and pytorch implementation of stft. All numbers '
'below should be close to 0.')
print(np.mean(np.abs(np.real(np_stft_matrix) - pt_stft_real.data.cpu().numpy()[0, 0])))
print(np.mean(np.abs(np.imag(np_stft_matrix) - pt_stft_imag.data.cpu().numpy()[0, 0])))
# Numpy istft
np_istft_s = librosa.core.istft(stft_matrix=np_stft_matrix.T,
hop_length=hop_length, window=window, center=center, length=data_length)
# Pytorch istft
pt_istft_extractor = ISTFT(n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
pt_istft_extractor.to(device)
# Recover from real and imag part
pt_istft_s = pt_istft_extractor.forward(pt_stft_real, pt_stft_imag, data_length)[0, :]
# Recover from magnitude and phase
(pt_stft_mag, cos, sin) = magphase(pt_stft_real, pt_stft_imag)
pt_istft_s2 = pt_istft_extractor.forward(pt_stft_mag * cos, pt_stft_mag * sin, data_length)[0, :]
print(np.mean(np.abs(np_istft_s - pt_istft_s.data.cpu().numpy())))
print(np.mean(np.abs(np_data - pt_istft_s.data.cpu().numpy())))
print(np.mean(np.abs(np_data - pt_istft_s2.data.cpu().numpy())))
elif select == 'logmel':
data_length = 4*32000
norm = None # None | 'ortho'
device = torch.device(device)
np.random.seed(0)
# Spectrogram parameters
sample_rate = 32000
n_fft = 1024
hop_length = 320
win_length = 1024
window = 'hann'
center = True
dtype = np.complex64
pad_mode = 'reflect'
# Mel parameters
n_mels = 128
fmin = 50
fmax = 14000
ref = 1.0
amin = 1e-10
top_db = None
# Data
np_data = np.random.uniform(-1, 1, data_length)
pt_data = torch.Tensor(np_data).to(device)
print('Comparing librosa and pytorch implementation of logmel '
'spectrogram. All numbers below should be close to 0.')
# Numpy librosa
np_stft_matrix = librosa.core.stft(y=np_data, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center, dtype=dtype,
pad_mode=pad_mode)
np_pad = np.pad(np_data, int(n_fft // 2), mode=pad_mode)
np_melW = librosa.filters.mel(sr=sample_rate, n_fft=n_fft, n_mels=n_mels,
fmin=fmin, fmax=fmax).T
np_mel_spectrogram = np.dot(np.abs(np_stft_matrix.T) ** 2, np_melW)
np_logmel_spectrogram = librosa.core.power_to_db(
np_mel_spectrogram, ref=ref, amin=amin, top_db=top_db)
# Pytorch
stft_extractor = STFT(n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=n_fft,
n_mels=n_mels, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db, freeze_parameters=True)
stft_extractor.to(device)
logmel_extractor.to(device)
pt_pad = F.pad(pt_data[None, None, :], pad=(n_fft // 2, n_fft // 2), mode=pad_mode)[0, 0]
print(np.mean(np.abs(np_pad - pt_pad.cpu().numpy())))
pt_stft_matrix_real = stft_extractor.conv_real(pt_pad[None, None, :])[0]
pt_stft_matrix_imag = stft_extractor.conv_imag(pt_pad[None, None, :])[0]
print(np.mean(np.abs(np.real(np_stft_matrix) - pt_stft_matrix_real.data.cpu().numpy())))
print(np.mean(np.abs(np.imag(np_stft_matrix) - pt_stft_matrix_imag.data.cpu().numpy())))
# Spectrogram
spectrogram_extractor = Spectrogram(n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
spectrogram_extractor.to(device)
pt_spectrogram = spectrogram_extractor.forward(pt_data[None, None, :])
pt_mel_spectrogram = torch.matmul(pt_spectrogram, logmel_extractor.melW)
print(np.mean(np.abs(np_mel_spectrogram - pt_mel_spectrogram.data.cpu().numpy()[0, 0])))
# Log mel spectrogram
pt_logmel_spectrogram = logmel_extractor.forward(pt_spectrogram)
print(np.mean(np.abs(np_logmel_spectrogram - pt_logmel_spectrogram[0, 0].data.cpu().numpy())))
elif select == 'enframe':
data_length = 32000
device = torch.device(device)
np.random.seed(0)
# Spectrogram parameters
hop_length = 250
win_length = 1024
# Data
np_data = np.random.uniform(-1, 1, data_length)
pt_data = torch.Tensor(np_data).to(device)
print('Comparing librosa and pytorch implementation of '
'librosa.util.frame. All numbers below should be close to 0.')
# Numpy librosa
np_frames = librosa.util.frame(np_data, frame_length=win_length,
hop_length=hop_length)
# Pytorch
pt_frame_extractor = Enframe(frame_length=win_length, hop_length=hop_length)
pt_frame_extractor.to(device)
pt_frames = pt_frame_extractor(pt_data[None, None, :])
print(np.mean(np.abs(np_frames - pt_frames.data.cpu().numpy())))
elif select == 'logmel&iv':
data_size = (1, 4, 24000*3)
device = torch.device(device)
np.random.seed(0)
# Stft parameters
sample_rate = 24000
n_fft = 1024
hop_length = 240
win_length = 1024
window = 'hann'
center = True
dtype = np.complex64
pad_mode = 'reflect'
# Mel parameters
n_mels = 128
fmin = 50
fmax = 10000
ref = 1.0
amin = 1e-10
top_db = None
# Data
np_data = np.random.uniform(-1, 1, data_size)
pt_data = torch.Tensor(np_data).to(device)
# Numpy stft matrix
np_stft_matrix = []
for chn in range(np_data.shape[1]):
np_stft_matrix.append(librosa.core.stft(y=np_data[0,chn,:], n_fft=n_fft,
hop_length=hop_length, window=window, center=center).T)
np_stft_matrix = np.array(np_stft_matrix)[None,...]
# Pytorch stft matrix
pt_stft_extractor = STFT(n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
pt_stft_extractor.to(device)
(pt_stft_real, pt_stft_imag) = pt_stft_extractor(pt_data)
print('Comparing librosa and pytorch implementation of intensity vector. All numbers '
'below should be close to 0.')
print(np.mean(np.abs(np.real(np_stft_matrix) - pt_stft_real.cpu().detach().numpy())))
print(np.mean(np.abs(np.imag(np_stft_matrix) - pt_stft_imag.cpu().detach().numpy())))
# Numpy logmel
np_pad = np.pad(np_data, ((0,0), (0,0), (int(n_fft // 2),int(n_fft // 2))), mode=pad_mode)
np_melW = librosa.filters.mel(sr=sample_rate, n_fft=n_fft, n_mels=n_mels,
fmin=fmin, fmax=fmax).T
np_mel_spectrogram = np.dot(np.abs(np_stft_matrix) ** 2, np_melW)
np_logmel_spectrogram = librosa.core.power_to_db(
np_mel_spectrogram, ref=ref, amin=amin, top_db=top_db)
# Pytorch logmel
pt_logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=n_fft,
n_mels=n_mels, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db, freeze_parameters=True)
pt_logmel_extractor.to(device)
pt_pad = F.pad(pt_data, pad=(n_fft // 2, n_fft // 2), mode=pad_mode)
print(np.mean(np.abs(np_pad - pt_pad.cpu().numpy())))
pt_spectrogram = spectrogram_STFTInput((pt_stft_real, pt_stft_imag))
pt_mel_spectrogram = torch.matmul(pt_spectrogram, pt_logmel_extractor.melW)
print(np.mean(np.abs(np_mel_spectrogram - pt_mel_spectrogram.cpu().detach().numpy())))
pt_logmel_spectrogram = pt_logmel_extractor(pt_spectrogram)
print(np.mean(np.abs(np_logmel_spectrogram - pt_logmel_spectrogram.cpu().detach().numpy())))
# Numpy intensity
Pref = np_stft_matrix[:,0,...]
Px = np_stft_matrix[:,1,...]
Py = np_stft_matrix[:,2,...]
Pz = np_stft_matrix[:,3,...]
IVx = np.real(np.conj(Pref) * Px)
IVy = np.real(np.conj(Pref) * Py)
IVz = np.real(np.conj(Pref) * Pz)
normal = np.sqrt(IVx**2 + IVy**2 + IVz**2) + np.finfo(np.float32).eps
IVx_mel = np.dot(IVx / normal, np_melW)
IVy_mel = np.dot(IVy / normal, np_melW)
IVz_mel = np.dot(IVz / normal, np_melW)
np_IV = np.stack([IVx_mel, IVy_mel, IVz_mel], axis=1)
# Pytorch intensity
pt_IV = intensityvector((pt_stft_real, pt_stft_imag), pt_logmel_extractor.melW)
print(np.mean(np.abs(np_IV - pt_IV.cpu().detach().numpy())))
if __name__ == '__main__':
data_length = 12800
norm = None # None | 'ortho'
device = 'cuda' # 'cuda' | 'cpu'
np.random.seed(0)
# Spectrogram parameters
sample_rate = 32000
n_fft = 1024
hop_length = 320
win_length = 1024
window = 'hann'
center = True
dtype = np.complex64
pad_mode = 'reflect'
# Mel parameters
n_mels = 128
fmin = 50
fmax = 14000
ref = 1.0
amin = 1e-10
top_db = None
# Data
np_data = np.random.uniform(-1, 1, data_length)
pt_data = torch.Tensor(np_data).to(device)
# Pytorch
spectrogram_extractor = Spectrogram(n_fft=n_fft, hop_length=hop_length,
win_length=win_length, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=n_fft,
n_mels=n_mels, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
spectrogram_extractor.to(device)
logmel_extractor.to(device)
# Spectrogram
pt_spectrogram = spectrogram_extractor.forward(pt_data[None, None, :])
# Log mel spectrogram
pt_logmel_spectrogram = logmel_extractor.forward(pt_spectrogram)
# Uncomment for debug
if True:
debug(select='dft', device=device)
debug(select='stft', device=device)
debug(select='logmel', device=device)
debug(select='enframe', device=device)
debug(select='logmel&iv', device=device)
| 31,480 | 34.174302 | 107 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/model_utilities.py | import numpy as np
import torch
import torch.nn as nn
def init_layer(layer, nonlinearity='leaky_relu'):
'''
Initialize a layer
'''
classname = layer.__class__.__name__
if (classname.find('Conv') != -1) or (classname.find('Linear') != -1):
nn.init.kaiming_uniform_(layer.weight, nonlinearity=nonlinearity)
if hasattr(layer, 'bias'):
if layer.bias is not None:
nn.init.constant_(layer.bias, 0.0)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(layer.weight, 1.0, 0.02)
nn.init.constant_(layer.bias, 0.0)
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels,
kernel_size=(3,3), stride=(1,1), padding=(1,1),
dilation=1, bias=False):
super().__init__()
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
# nn.LeakyReLU(negative_slope=0.1, inplace=True),
nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, bias=bias),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
# nn.LeakyReLU(negative_slope=0.1, inplace=True),
)
self.init_weights()
def init_weights(self):
for layer in self.double_conv:
init_layer(layer)
def forward(self, x):
x = self.double_conv(x)
return x
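# Hedged usage sketch (illustrative shapes): a DoubleConv block keeps the
# time-frequency resolution and only changes the channel count.
def _demo_double_conv():
    block = DoubleConv(in_channels=7, out_channels=64)
    x = torch.randn(2, 7, 50, 128)  # (batch, channels, time, freq)
    print(block(x).shape)  # torch.Size([2, 64, 50, 128])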
class PositionalEncoding(nn.Module):
def __init__(self, pos_len, d_model=512, pe_type='t', dropout=0.0):
""" Positional encoding using sin and cos
Args:
pos_len: positional length
d_model: number of feature maps
pe_type: 't' | 'f' , time domain, frequency domain
dropout: dropout probability
"""
super().__init__()
self.pe_type = pe_type
pe = torch.zeros(pos_len, d_model)
pos = torch.arange(0, pos_len).float().unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
pe[:, 0::2] = 0.1 * torch.sin(pos * div_term)
pe[:, 1::2] = 0.1 * torch.cos(pos * div_term)
pe = pe.unsqueeze(0).transpose(1, 2) # (N, C, T)
self.register_buffer('pe', pe)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
# x is (N, C, T, F) or (N, C, T) or (N, C, F)
if x.ndim == 4:
if self.pe_type == 't':
pe = self.pe.unsqueeze(3)
x += pe[:, :, :x.shape[2]]
elif self.pe_type == 'f':
pe = self.pe.unsqueeze(2)
x += pe[:, :, :, :x.shape[3]]
elif x.ndim == 3:
x += self.pe[:, :, :x.shape[2]]
return self.dropout(x)
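# Hedged usage sketch (illustrative shapes): add a time-axis positional
# encoding to a (N, C, T, F) feature map; the shape is unchanged.
def _demo_positional_encoding():
    pe = PositionalEncoding(pos_len=100, d_model=64, pe_type='t')
    x = torch.randn(2, 64, 50, 128)  # (batch, channels, time, freq)
    print(pe(x).shape)  # torch.Size([2, 64, 50, 128])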
| 3,157 | 33.703297 | 96 | py |
DCASE2022-TASK3 | DCASE2022-TASK3-main/code/methods/utils/loss_utilities.py | import torch
import torch.nn as nn
import torch.nn.functional as F
eps = torch.finfo(torch.float32).eps
class MSELoss:
def __init__(self, reduction='mean'):
self.reduction = reduction
self.name = 'loss_MSE'
if self.reduction != 'PIT':
self.loss = nn.MSELoss(reduction='mean')
else:
self.loss = nn.MSELoss(reduction='none')
def calculate_loss(self, pred, target):
if self.reduction != 'PIT':
return self.loss(pred, target)
else:
return self.loss(pred, target).mean(dim=tuple(range(2, pred.ndim)))
class BCEWithLogitsLoss:
def __init__(self, reduction='mean', pos_weight=None):
self.reduction = reduction
self.name = 'loss_BCEWithLogits'
if self.reduction != 'PIT':
self.loss = nn.BCEWithLogitsLoss(reduction=self.reduction, pos_weight=pos_weight)
else:
self.loss = nn.BCEWithLogitsLoss(reduction='none', pos_weight=pos_weight)
def calculate_loss(self, pred, target):
if self.reduction != 'PIT':
return self.loss(pred, target)
else:
return self.loss(pred, target).mean(dim=tuple(range(2, pred.ndim)))
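# Hedged usage sketch (illustrative shapes): with reduction='PIT' the loss is
# averaged over all but the first two axes, leaving a per-(batch, track)
# matrix that a permutation-invariant training wrapper can minimize over
# track permutations.
def _demo_pit_loss():
    loss = MSELoss(reduction='PIT')
    pred = torch.randn(4, 2, 10, 3)    # (batch, tracks, frames, xyz)
    target = torch.randn(4, 2, 10, 3)
    print(loss.calculate_loss(pred, target).shape)  # torch.Size([4, 2])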
| 1,222 | 31.184211 | 93 | py |