SyNet | SyNet-master/CenterNet/src/lib/models/networks/DCNv2/setup.py
#!/usr/bin/env python
import os
import glob
import torch
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
from setuptools import find_packages
from setuptools import setup
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "src")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
os.environ["CC"] = "g++"
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
#raise NotImplementedError('Cuda is not available')
pass
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"_ext",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
setup(
name="DCNv2",
version="0.1",
author="charlesshang",
url="https://github.com/charlesshang/DCNv2",
description="deformable convolutional networks",
packages=find_packages(exclude=("configs", "tests",)),
# install_requires=requirements,
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
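A usage sketch for the setup script above (not part of the repo): the extension is normally built in place with `python setup.py build_ext --inplace` and then imported as `_ext`. The helper below only mirrors the CPU/CUDA branch in `get_extensions()`, so it runs without compiling anything; the function name is an illustration, not repo code.

```python
# Hypothetical helper mirroring get_extensions(): CUDA sources (src/cuda/*.cu) are
# compiled only when PyTorch can see a CUDA toolkit at build time.
import torch
from torch.utils.cpp_extension import CUDA_HOME

def planned_extension():
    if torch.cuda.is_available() and CUDA_HOME is not None:
        return "CUDAExtension (WITH_CUDA defined, nvcc flags added)"
    return "CppExtension (CPU-only sources)"

if __name__ == "__main__":
    print(planned_extension())
```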
SyNet | SyNet-master/CenterNet/src/lib/trains/exdet.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
import cv2
import sys
import time
from utils.debugger import Debugger
from models.data_parallel import DataParallel
from models.losses import FocalLoss, RegL1Loss
from models.decode import agnex_ct_decode, exct_decode
from models.utils import _sigmoid
from .base_trainer import BaseTrainer
class ExdetLoss(torch.nn.Module):
def __init__(self, opt):
super(ExdetLoss, self).__init__()
self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
self.crit_reg = RegL1Loss()
self.opt = opt
self.parts = ['t', 'l', 'b', 'r', 'c']
def forward(self, outputs, batch):
opt = self.opt
hm_loss, reg_loss = 0, 0
for s in range(opt.num_stacks):
output = outputs[s]
for p in self.parts:
tag = 'hm_{}'.format(p)
output[tag] = _sigmoid(output[tag])
hm_loss += self.crit(output[tag], batch[tag]) / opt.num_stacks
if p != 'c' and opt.reg_offset and opt.off_weight > 0:
reg_loss += self.crit_reg(output['reg_{}'.format(p)],
batch['reg_mask'],
batch['ind_{}'.format(p)],
batch['reg_{}'.format(p)]) / opt.num_stacks
loss = opt.hm_weight * hm_loss + opt.off_weight * reg_loss
loss_stats = {'loss': loss, 'off_loss': reg_loss, 'hm_loss': hm_loss}
return loss, loss_stats
class ExdetTrainer(BaseTrainer):
def __init__(self, opt, model, optimizer=None):
super(ExdetTrainer, self).__init__(opt, model, optimizer=optimizer)
self.decode = agnex_ct_decode if opt.agnostic_ex else exct_decode
def _get_losses(self, opt):
loss_states = ['loss', 'hm_loss', 'off_loss']
loss = ExdetLoss(opt)
return loss_states, loss
def debug(self, batch, output, iter_id):
opt = self.opt
detections = self.decode(output['hm_t'], output['hm_l'],
output['hm_b'], output['hm_r'],
output['hm_c']).detach().cpu().numpy()
detections[:, :, :4] *= opt.input_res / opt.output_res
for i in range(1):
debugger = Debugger(
dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme)
pred_hm = np.zeros((opt.input_res, opt.input_res, 3), dtype=np.uint8)
gt_hm = np.zeros((opt.input_res, opt.input_res, 3), dtype=np.uint8)
img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.opt.std + self.opt.mean) * 255.).astype(np.uint8)
for p in self.parts:
tag = 'hm_{}'.format(p)
pred = debugger.gen_colormap(output[tag][i].detach().cpu().numpy())
gt = debugger.gen_colormap(batch[tag][i].detach().cpu().numpy())
if p != 'c':
pred_hm = np.maximum(pred_hm, pred)
gt_hm = np.maximum(gt_hm, gt)
if p == 'c' or opt.debug > 2:
debugger.add_blend_img(img, pred, 'pred_{}'.format(p))
debugger.add_blend_img(img, gt, 'gt_{}'.format(p))
debugger.add_blend_img(img, pred_hm, 'pred')
debugger.add_blend_img(img, gt_hm, 'gt')
debugger.add_img(img, img_id='out')
for k in range(len(detections[i])):
if detections[i, k, 4] > 0.1:
debugger.add_coco_bbox(detections[i, k, :4], detections[i, k, -1],
detections[i, k, 4], img_id='out')
if opt.debug == 4:
debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
else:
      debugger.show_all_imgs(pause=True)
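A self-contained sketch of the accumulation pattern in `ExdetLoss.forward` above, using plain MSE (the `opt.mse_loss` branch) and random tensors so it runs without the repo's `FocalLoss` or option parser; the shapes and the number of stacks are assumptions.

```python
import torch

# Five extreme-point heatmaps per stack: top, left, bottom, right, center.
parts = ['t', 'l', 'b', 'r', 'c']
num_stacks, num_classes, res = 2, 3, 64           # assumed sizes
outputs = [{'hm_' + p: torch.randn(1, num_classes, res, res) for p in parts}
           for _ in range(num_stacks)]
batch = {'hm_' + p: torch.rand(1, num_classes, res, res) for p in parts}

crit = torch.nn.MSELoss()                         # stands in for FocalLoss
hm_loss = 0
for s in range(num_stacks):
    output = outputs[s]
    for p in parts:
        tag = 'hm_' + p
        output[tag] = torch.sigmoid(output[tag])  # stands in for models.utils._sigmoid
        hm_loss += crit(output[tag], batch[tag]) / num_stacks
print(float(hm_loss))
```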
SyNet | SyNet-master/CenterNet/src/lib/trains/ctdet.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from models.losses import FocalLoss
from models.losses import RegL1Loss, RegLoss, NormRegL1Loss, RegWeightedL1Loss
from models.decode import ctdet_decode
from models.utils import _sigmoid
from utils.debugger import Debugger
from utils.post_process import ctdet_post_process
from utils.oracle_utils import gen_oracle_map
from .base_trainer import BaseTrainer
class CtdetLoss(torch.nn.Module):
def __init__(self, opt):
super(CtdetLoss, self).__init__()
self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
RegLoss() if opt.reg_loss == 'sl1' else None
self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
NormRegL1Loss() if opt.norm_wh else \
RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
self.opt = opt
def forward(self, outputs, batch):
opt = self.opt
hm_loss, wh_loss, off_loss = 0, 0, 0
for s in range(opt.num_stacks):
output = outputs[s]
if not opt.mse_loss:
output['hm'] = _sigmoid(output['hm'])
if opt.eval_oracle_hm:
output['hm'] = batch['hm']
if opt.eval_oracle_wh:
output['wh'] = torch.from_numpy(gen_oracle_map(
batch['wh'].detach().cpu().numpy(),
batch['ind'].detach().cpu().numpy(),
output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
if opt.eval_oracle_offset:
output['reg'] = torch.from_numpy(gen_oracle_map(
batch['reg'].detach().cpu().numpy(),
batch['ind'].detach().cpu().numpy(),
output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)
hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
if opt.wh_weight > 0:
if opt.dense_wh:
mask_weight = batch['dense_wh_mask'].sum() + 1e-4
wh_loss += (
self.crit_wh(output['wh'] * batch['dense_wh_mask'],
batch['dense_wh'] * batch['dense_wh_mask']) /
mask_weight) / opt.num_stacks
elif opt.cat_spec_wh:
wh_loss += self.crit_wh(
output['wh'], batch['cat_spec_mask'],
batch['ind'], batch['cat_spec_wh']) / opt.num_stacks
else:
wh_loss += self.crit_reg(
output['wh'], batch['reg_mask'],
batch['ind'], batch['wh']) / opt.num_stacks
if opt.reg_offset and opt.off_weight > 0:
off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
batch['ind'], batch['reg']) / opt.num_stacks
loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
opt.off_weight * off_loss
loss_stats = {'loss': loss, 'hm_loss': hm_loss,
'wh_loss': wh_loss, 'off_loss': off_loss}
return loss, loss_stats
class CtdetTrainer(BaseTrainer):
def __init__(self, opt, model, optimizer=None):
super(CtdetTrainer, self).__init__(opt, model, optimizer=optimizer)
def _get_losses(self, opt):
loss_states = ['loss', 'hm_loss', 'wh_loss', 'off_loss']
loss = CtdetLoss(opt)
return loss_states, loss
def debug(self, batch, output, iter_id):
opt = self.opt
reg = output['reg'] if opt.reg_offset else None
dets = ctdet_decode(
output['hm'], output['wh'], reg=reg,
cat_spec_wh=opt.cat_spec_wh, K=opt.K)
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
dets[:, :, :4] *= opt.down_ratio
dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
dets_gt[:, :, :4] *= opt.down_ratio
for i in range(1):
debugger = Debugger(
dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme)
img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
img = np.clip(((
img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
debugger.add_blend_img(img, gt, 'gt_hm')
debugger.add_img(img, img_id='out_pred')
for k in range(len(dets[i])):
if dets[i, k, 4] > opt.center_thresh:
debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
dets[i, k, 4], img_id='out_pred')
debugger.add_img(img, img_id='out_gt')
for k in range(len(dets_gt[i])):
if dets_gt[i, k, 4] > opt.center_thresh:
debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
dets_gt[i, k, 4], img_id='out_gt')
if opt.debug == 4:
debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
else:
debugger.show_all_imgs(pause=True)
def save_result(self, output, batch, results):
reg = output['reg'] if self.opt.reg_offset else None
dets = ctdet_decode(
output['hm'], output['wh'], reg=reg,
cat_spec_wh=self.opt.cat_spec_wh, K=self.opt.K)
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
dets_out = ctdet_post_process(
dets.copy(), batch['meta']['c'].cpu().numpy(),
batch['meta']['s'].cpu().numpy(),
output['hm'].shape[2], output['hm'].shape[3], output['hm'].shape[1])
    results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
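The `crit_reg` calls above (`RegL1Loss`) regress sparse per-object targets against dense prediction maps addressed by the flat indices in `batch['ind']`. The sketch below re-implements that gather-and-mask pattern with plain torch as an illustration; it is not the repo's exact `RegL1Loss`, and the shapes are assumptions.

```python
import torch
import torch.nn.functional as F

def gather_feat(feat, ind):
    # feat: (B, H*W, C), ind: (B, K) -> gathered (B, K, C)
    ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), feat.size(2))
    return feat.gather(1, ind)

def masked_reg_l1(pred_map, mask, ind, target):
    # pred_map: (B, C, H, W); mask/ind: (B, K); target: (B, K, C)
    b, c, h, w = pred_map.shape
    feat = pred_map.view(b, c, h * w).permute(0, 2, 1).contiguous()
    pred = gather_feat(feat, ind)                 # predictions at object centers
    m = mask.unsqueeze(2).float()
    return F.l1_loss(pred * m, target * m, reduction='sum') / (m.sum() + 1e-4)

b, c, h, w, k = 2, 2, 128, 128, 8                 # assumed shapes
loss = masked_reg_l1(torch.randn(b, c, h, w), torch.ones(b, k),
                     torch.randint(0, h * w, (b, k)), torch.randn(b, k, c))
print(float(loss))
```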
SyNet | SyNet-master/CenterNet/src/lib/trains/ddd.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from models.losses import FocalLoss, L1Loss, BinRotLoss
from models.decode import ddd_decode
from models.utils import _sigmoid
from utils.debugger import Debugger
from utils.post_process import ddd_post_process
from utils.oracle_utils import gen_oracle_map
from .base_trainer import BaseTrainer
class DddLoss(torch.nn.Module):
def __init__(self, opt):
super(DddLoss, self).__init__()
self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
self.crit_reg = L1Loss()
self.crit_rot = BinRotLoss()
self.opt = opt
def forward(self, outputs, batch):
opt = self.opt
hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0
wh_loss, off_loss = 0, 0
for s in range(opt.num_stacks):
output = outputs[s]
output['hm'] = _sigmoid(output['hm'])
output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
if opt.eval_oracle_dep:
output['dep'] = torch.from_numpy(gen_oracle_map(
batch['dep'].detach().cpu().numpy(),
batch['ind'].detach().cpu().numpy(),
opt.output_w, opt.output_h)).to(opt.device)
hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
if opt.dep_weight > 0:
dep_loss += self.crit_reg(output['dep'], batch['reg_mask'],
batch['ind'], batch['dep']) / opt.num_stacks
if opt.dim_weight > 0:
dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],
batch['ind'], batch['dim']) / opt.num_stacks
if opt.rot_weight > 0:
rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],
batch['ind'], batch['rotbin'],
batch['rotres']) / opt.num_stacks
if opt.reg_bbox and opt.wh_weight > 0:
wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],
batch['ind'], batch['wh']) / opt.num_stacks
if opt.reg_offset and opt.off_weight > 0:
off_loss += self.crit_reg(output['reg'], batch['rot_mask'],
batch['ind'], batch['reg']) / opt.num_stacks
loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \
opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \
opt.wh_weight * wh_loss + opt.off_weight * off_loss
loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,
'dim_loss': dim_loss, 'rot_loss': rot_loss,
'wh_loss': wh_loss, 'off_loss': off_loss}
return loss, loss_stats
class DddTrainer(BaseTrainer):
def __init__(self, opt, model, optimizer=None):
super(DddTrainer, self).__init__(opt, model, optimizer=optimizer)
def _get_losses(self, opt):
loss_states = ['loss', 'hm_loss', 'dep_loss', 'dim_loss', 'rot_loss',
'wh_loss', 'off_loss']
loss = DddLoss(opt)
return loss_states, loss
def debug(self, batch, output, iter_id):
opt = self.opt
wh = output['wh'] if opt.reg_bbox else None
reg = output['reg'] if opt.reg_offset else None
dets = ddd_decode(output['hm'], output['rot'], output['dep'],
output['dim'], wh=wh, reg=reg, K=opt.K)
# x, y, score, r1-r8, depth, dim1-dim3, cls
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
calib = batch['meta']['calib'].detach().numpy()
# x, y, score, rot, depth, dim1, dim2, dim3
# if opt.dataset == 'gta':
# dets[:, 12:15] /= 3
dets_pred = ddd_post_process(
dets.copy(), batch['meta']['c'].detach().numpy(),
batch['meta']['s'].detach().numpy(), calib, opt)
dets_gt = ddd_post_process(
batch['meta']['gt_det'].detach().numpy().copy(),
batch['meta']['c'].detach().numpy(),
batch['meta']['s'].detach().numpy(), calib, opt)
#for i in range(input.size(0)):
for i in range(1):
debugger = Debugger(dataset=opt.dataset, ipynb=(opt.debug==3),
theme=opt.debugger_theme)
img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
img = ((img * self.opt.std + self.opt.mean) * 255.).astype(np.uint8)
pred = debugger.gen_colormap(
output['hm'][i].detach().cpu().numpy())
gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'hm_pred')
debugger.add_blend_img(img, gt, 'hm_gt')
# decode
debugger.add_ct_detection(
img, dets[i], show_box=opt.reg_bbox, center_thresh=opt.center_thresh,
img_id='det_pred')
debugger.add_ct_detection(
img, batch['meta']['gt_det'][i].cpu().numpy().copy(),
show_box=opt.reg_bbox, img_id='det_gt')
debugger.add_3d_detection(
batch['meta']['image_path'][i], dets_pred[i], calib[i],
center_thresh=opt.center_thresh, img_id='add_pred')
debugger.add_3d_detection(
batch['meta']['image_path'][i], dets_gt[i], calib[i],
center_thresh=opt.center_thresh, img_id='add_gt')
# debugger.add_bird_view(
# dets_pred[i], center_thresh=opt.center_thresh, img_id='bird_pred')
# debugger.add_bird_view(dets_gt[i], img_id='bird_gt')
debugger.add_bird_views(
dets_pred[i], dets_gt[i],
center_thresh=opt.center_thresh, img_id='bird_pred_gt')
# debugger.add_blend_img(img, pred, 'out', white=True)
debugger.compose_vis_add(
batch['meta']['image_path'][i], dets_pred[i], calib[i],
opt.center_thresh, pred, 'bird_pred_gt', img_id='out')
# debugger.add_img(img, img_id='out')
if opt.debug ==4:
debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
else:
debugger.show_all_imgs(pause=True)
def save_result(self, output, batch, results):
opt = self.opt
wh = output['wh'] if opt.reg_bbox else None
reg = output['reg'] if opt.reg_offset else None
dets = ddd_decode(output['hm'], output['rot'], output['dep'],
output['dim'], wh=wh, reg=reg, K=opt.K)
# x, y, score, r1-r8, depth, dim1-dim3, cls
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
calib = batch['meta']['calib'].detach().numpy()
# x, y, score, rot, depth, dim1, dim2, dim3
dets_pred = ddd_post_process(
dets.copy(), batch['meta']['c'].detach().numpy(),
batch['meta']['s'].detach().numpy(), calib, opt)
img_id = batch['meta']['img_id'].detach().numpy()[0]
results[img_id] = dets_pred[0]
for j in range(1, opt.num_classes + 1):
keep_inds = (results[img_id][j][:, -1] > opt.center_thresh)
      results[img_id][j] = results[img_id][j][keep_inds]
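A small illustration of the depth parameterization used in `DddLoss.forward` above: the raw `'dep'` channel is squashed with a sigmoid and inverted, so strongly negative activations map to large depths and strongly positive ones approach zero. The input values are made up.

```python
import torch

raw = torch.tensor([-4.0, -2.0, 0.0, 2.0, 4.0])
depth = 1. / (torch.sigmoid(raw) + 1e-6) - 1.     # same transform applied to output['dep']
for r, d in zip(raw.tolist(), depth.tolist()):
    print('raw {:+.1f} -> depth {:6.2f}'.format(r, d))
```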
SyNet | SyNet-master/CenterNet/src/lib/trains/multi_pose.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import numpy as np
from models.losses import FocalLoss, RegL1Loss, RegLoss, RegWeightedL1Loss
from models.decode import multi_pose_decode
from models.utils import _sigmoid, flip_tensor, flip_lr_off, flip_lr
from utils.debugger import Debugger
from utils.post_process import multi_pose_post_process
from utils.oracle_utils import gen_oracle_map
from .base_trainer import BaseTrainer
class MultiPoseLoss(torch.nn.Module):
def __init__(self, opt):
super(MultiPoseLoss, self).__init__()
self.crit = FocalLoss()
self.crit_hm_hp = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
self.crit_kp = RegWeightedL1Loss() if not opt.dense_hp else \
torch.nn.L1Loss(reduction='sum')
self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
RegLoss() if opt.reg_loss == 'sl1' else None
self.opt = opt
def forward(self, outputs, batch):
opt = self.opt
hm_loss, wh_loss, off_loss = 0, 0, 0
hp_loss, off_loss, hm_hp_loss, hp_offset_loss = 0, 0, 0, 0
for s in range(opt.num_stacks):
output = outputs[s]
output['hm'] = _sigmoid(output['hm'])
if opt.hm_hp and not opt.mse_loss:
output['hm_hp'] = _sigmoid(output['hm_hp'])
if opt.eval_oracle_hmhp:
output['hm_hp'] = batch['hm_hp']
if opt.eval_oracle_hm:
output['hm'] = batch['hm']
if opt.eval_oracle_kps:
if opt.dense_hp:
output['hps'] = batch['dense_hps']
else:
output['hps'] = torch.from_numpy(gen_oracle_map(
batch['hps'].detach().cpu().numpy(),
batch['ind'].detach().cpu().numpy(),
opt.output_res, opt.output_res)).to(opt.device)
if opt.eval_oracle_hp_offset:
output['hp_offset'] = torch.from_numpy(gen_oracle_map(
batch['hp_offset'].detach().cpu().numpy(),
batch['hp_ind'].detach().cpu().numpy(),
opt.output_res, opt.output_res)).to(opt.device)
hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks
if opt.dense_hp:
mask_weight = batch['dense_hps_mask'].sum() + 1e-4
hp_loss += (self.crit_kp(output['hps'] * batch['dense_hps_mask'],
batch['dense_hps'] * batch['dense_hps_mask']) /
mask_weight) / opt.num_stacks
else:
hp_loss += self.crit_kp(output['hps'], batch['hps_mask'],
batch['ind'], batch['hps']) / opt.num_stacks
if opt.wh_weight > 0:
wh_loss += self.crit_reg(output['wh'], batch['reg_mask'],
batch['ind'], batch['wh']) / opt.num_stacks
if opt.reg_offset and opt.off_weight > 0:
off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
batch['ind'], batch['reg']) / opt.num_stacks
if opt.reg_hp_offset and opt.off_weight > 0:
hp_offset_loss += self.crit_reg(
output['hp_offset'], batch['hp_mask'],
batch['hp_ind'], batch['hp_offset']) / opt.num_stacks
if opt.hm_hp and opt.hm_hp_weight > 0:
hm_hp_loss += self.crit_hm_hp(
output['hm_hp'], batch['hm_hp']) / opt.num_stacks
loss = opt.hm_weight * hm_loss + opt.wh_weight * wh_loss + \
opt.off_weight * off_loss + opt.hp_weight * hp_loss + \
opt.hm_hp_weight * hm_hp_loss + opt.off_weight * hp_offset_loss
loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'hp_loss': hp_loss,
'hm_hp_loss': hm_hp_loss, 'hp_offset_loss': hp_offset_loss,
'wh_loss': wh_loss, 'off_loss': off_loss}
return loss, loss_stats
class MultiPoseTrainer(BaseTrainer):
def __init__(self, opt, model, optimizer=None):
super(MultiPoseTrainer, self).__init__(opt, model, optimizer=optimizer)
def _get_losses(self, opt):
loss_states = ['loss', 'hm_loss', 'hp_loss', 'hm_hp_loss',
'hp_offset_loss', 'wh_loss', 'off_loss']
loss = MultiPoseLoss(opt)
return loss_states, loss
def debug(self, batch, output, iter_id):
opt = self.opt
reg = output['reg'] if opt.reg_offset else None
hm_hp = output['hm_hp'] if opt.hm_hp else None
hp_offset = output['hp_offset'] if opt.reg_hp_offset else None
dets = multi_pose_decode(
output['hm'], output['wh'], output['hps'],
reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=opt.K)
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
dets[:, :, :4] *= opt.input_res / opt.output_res
dets[:, :, 5:39] *= opt.input_res / opt.output_res
dets_gt = batch['meta']['gt_det'].numpy().reshape(1, -1, dets.shape[2])
dets_gt[:, :, :4] *= opt.input_res / opt.output_res
dets_gt[:, :, 5:39] *= opt.input_res / opt.output_res
for i in range(1):
debugger = Debugger(
dataset=opt.dataset, ipynb=(opt.debug==3), theme=opt.debugger_theme)
img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)
img = np.clip(((
img * opt.std + opt.mean) * 255.), 0, 255).astype(np.uint8)
pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hm')
debugger.add_blend_img(img, gt, 'gt_hm')
debugger.add_img(img, img_id='out_pred')
for k in range(len(dets[i])):
if dets[i, k, 4] > opt.center_thresh:
debugger.add_coco_bbox(dets[i, k, :4], dets[i, k, -1],
dets[i, k, 4], img_id='out_pred')
debugger.add_coco_hp(dets[i, k, 5:39], img_id='out_pred')
debugger.add_img(img, img_id='out_gt')
for k in range(len(dets_gt[i])):
if dets_gt[i, k, 4] > opt.center_thresh:
debugger.add_coco_bbox(dets_gt[i, k, :4], dets_gt[i, k, -1],
dets_gt[i, k, 4], img_id='out_gt')
debugger.add_coco_hp(dets_gt[i, k, 5:39], img_id='out_gt')
if opt.hm_hp:
pred = debugger.gen_colormap_hp(output['hm_hp'][i].detach().cpu().numpy())
gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())
debugger.add_blend_img(img, pred, 'pred_hmhp')
debugger.add_blend_img(img, gt, 'gt_hmhp')
if opt.debug == 4:
debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
else:
debugger.show_all_imgs(pause=True)
def save_result(self, output, batch, results):
reg = output['reg'] if self.opt.reg_offset else None
hm_hp = output['hm_hp'] if self.opt.hm_hp else None
hp_offset = output['hp_offset'] if self.opt.reg_hp_offset else None
dets = multi_pose_decode(
output['hm'], output['wh'], output['hps'],
reg=reg, hm_hp=hm_hp, hp_offset=hp_offset, K=self.opt.K)
dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])
dets_out = multi_pose_post_process(
dets.copy(), batch['meta']['c'].cpu().numpy(),
batch['meta']['s'].cpu().numpy(),
output['hm'].shape[2], output['hm'].shape[3])
    results[batch['meta']['img_id'].cpu().numpy()[0]] = dets_out[0]
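The slicing above (`dets[:, :, :4]` and `dets[:, :, 5:39]`) assumes a fixed per-detection layout: box, score, 17 flattened (x, y) keypoints, then the class id. The snippet below spells that layout out on a dummy row and applies the same input/output rescaling; the resolutions and values are assumptions.

```python
import numpy as np

num_joints = 17
det = np.zeros(40, dtype=np.float32)              # one decoded detection row
det[0:4] = [10, 20, 50, 80]                       # bbox at output resolution
det[4] = 0.9                                      # center score
det[5:5 + 2 * num_joints] = np.arange(2 * num_joints)  # flattened keypoints (x1, y1, ...)
det[39] = 0                                       # class id

scale = 512.0 / 128.0                             # assumed input_res / output_res
det[0:4] *= scale
det[5:39] *= scale                                # keypoints rescaled the same way
print(det[0:4], det[5:9], det[39])
```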
SyNet | SyNet-master/CenterNet/src/lib/trains/base_trainer.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
from progress.bar import Bar
from models.data_parallel import DataParallel
from utils.utils import AverageMeter
class ModelWithLoss(torch.nn.Module):
def __init__(self, model, loss):
super(ModelWithLoss, self).__init__()
self.model = model
self.loss = loss
def forward(self, batch):
outputs = self.model(batch['input'])
loss, loss_stats = self.loss(outputs, batch)
return outputs[-1], loss, loss_stats
class BaseTrainer(object):
def __init__(
self, opt, model, optimizer=None):
self.opt = opt
self.optimizer = optimizer
self.loss_stats, self.loss = self._get_losses(opt)
self.model_with_loss = ModelWithLoss(model, self.loss)
def set_device(self, gpus, chunk_sizes, device):
if len(gpus) > 1:
self.model_with_loss = DataParallel(
self.model_with_loss, device_ids=gpus,
chunk_sizes=chunk_sizes).to(device)
else:
self.model_with_loss = self.model_with_loss.to(device)
for state in self.optimizer.state.values():
for k, v in state.items():
if isinstance(v, torch.Tensor):
state[k] = v.to(device=device, non_blocking=True)
def run_epoch(self, phase, epoch, data_loader):
model_with_loss = self.model_with_loss
if phase == 'train':
model_with_loss.train()
else:
if len(self.opt.gpus) > 1:
model_with_loss = self.model_with_loss.module
model_with_loss.eval()
torch.cuda.empty_cache()
opt = self.opt
results = {}
data_time, batch_time = AverageMeter(), AverageMeter()
avg_loss_stats = {l: AverageMeter() for l in self.loss_stats}
num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
end = time.time()
for iter_id, batch in enumerate(data_loader):
if iter_id >= num_iters:
break
data_time.update(time.time() - end)
for k in batch:
if k != 'meta':
batch[k] = batch[k].to(device=opt.device, non_blocking=True)
output, loss, loss_stats = model_with_loss(batch)
loss = loss.mean()
if phase == 'train':
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(
epoch, iter_id, num_iters, phase=phase,
total=bar.elapsed_td, eta=bar.eta_td)
for l in avg_loss_stats:
avg_loss_stats[l].update(
loss_stats[l].mean().item(), batch['input'].size(0))
Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
if not opt.hide_data_time:
Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
'|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
if opt.print_iter > 0:
if iter_id % opt.print_iter == 0:
print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
else:
bar.next()
if opt.debug > 0:
self.debug(batch, output, iter_id)
if opt.test:
self.save_result(output, batch, results)
del output, loss, loss_stats
bar.finish()
ret = {k: v.avg for k, v in avg_loss_stats.items()}
ret['time'] = bar.elapsed_td.total_seconds() / 60.
return ret, results
def debug(self, batch, output, iter_id):
raise NotImplementedError
def save_result(self, output, batch, results):
raise NotImplementedError
def _get_losses(self, opt):
raise NotImplementedError
def val(self, epoch, data_loader):
return self.run_epoch('val', epoch, data_loader)
def train(self, epoch, data_loader):
    return self.run_epoch('train', epoch, data_loader)
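A self-contained sketch of the `ModelWithLoss` wrapper defined above: bundling the loss into the forward pass lets `DataParallel` scatter the loss computation across GPUs together with the model. The toy model, loss, and batch below are stand-ins, not the repo's.

```python
import torch

class ToyLoss(torch.nn.Module):
    # Stand-in for CtdetLoss etc.: consumes the list of stack outputs and the batch.
    def forward(self, outputs, batch):
        loss = torch.nn.functional.mse_loss(outputs[-1], batch['target'])
        return loss, {'loss': loss}

class ModelWithLoss(torch.nn.Module):
    def __init__(self, model, loss):
        super(ModelWithLoss, self).__init__()
        self.model = model
        self.loss = loss
    def forward(self, batch):
        # The repo's models return one output dict per stack; the toy Linear returns a
        # tensor, so it is wrapped in a list to keep the same interface.
        outputs = [self.model(batch['input'])]
        loss, loss_stats = self.loss(outputs, batch)
        return outputs[-1], loss, loss_stats

wrapper = ModelWithLoss(torch.nn.Linear(4, 2), ToyLoss())
batch = {'input': torch.randn(3, 4), 'target': torch.randn(3, 2)}
out, loss, stats = wrapper(batch)
loss.backward()
print(out.shape, float(stats['loss']))
```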
SyNet | SyNet-master/CenterNet/src/lib/datasets/sample/exdet.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
import math
class EXDetDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def __getitem__(self, index):
img_id = self.images[index]
img_info = self.coco.loadImgs(ids=[img_id])[0]
img_path = os.path.join(self.img_dir, img_info['file_name'])
img = cv2.imread(img_path)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
s = max(img.shape[0], img.shape[1]) * 1.0
flipped = False
if self.split == 'train':
if not self.opt.not_rand_crop:
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self._get_border(128, img.shape[1])
h_border = self._get_border(128, img.shape[0])
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
c[0] += img.shape[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += img.shape[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
if np.random.random() < self.opt.flip:
flipped = True
img = img[:, ::-1, :]
trans_input = get_affine_transform(
c, s, 0, [self.opt.input_res, self.opt.input_res])
inp = cv2.warpAffine(img, trans_input,
(self.opt.input_res, self.opt.input_res),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
if self.split == 'train' and not self.opt.no_color_aug:
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_res = self.opt.output_res
num_classes = self.opt.num_classes
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
num_hm = 1 if self.opt.agnostic_ex else num_classes
hm_t = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_l = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_b = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_r = np.zeros((num_hm, output_res, output_res), dtype=np.float32)
hm_c = np.zeros((num_classes, output_res, output_res), dtype=np.float32)
reg_t = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_l = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_b = np.zeros((self.max_objs, 2), dtype=np.float32)
reg_r = np.zeros((self.max_objs, 2), dtype=np.float32)
ind_t = np.zeros((self.max_objs), dtype=np.int64)
ind_l = np.zeros((self.max_objs), dtype=np.int64)
ind_b = np.zeros((self.max_objs), dtype=np.int64)
ind_r = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
draw_umich_gaussian
for k in range(num_objs):
ann = anns[k]
# bbox = self._coco_box_to_bbox(ann['bbox'])
# tlbr
pts = np.array(ann['extreme_points'], dtype=np.float32).reshape(4, 2)
# cls_id = int(self.cat_ids[ann['category_id']] - 1) # bug
cls_id = int(self.cat_ids[ann['category_id']])
hm_id = 0 if self.opt.agnostic_ex else cls_id
if flipped:
pts[:, 0] = width - pts[:, 0] - 1
pts[1], pts[3] = pts[3].copy(), pts[1].copy()
for j in range(4):
pts[j] = affine_transform(pts[j], trans_output)
pts = np.clip(pts, 0, self.opt.output_res - 1)
h, w = pts[2, 1] - pts[0, 1], pts[3, 0] - pts[1, 0]
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
pt_int = pts.astype(np.int32)
draw_gaussian(hm_t[hm_id], pt_int[0], radius)
draw_gaussian(hm_l[hm_id], pt_int[1], radius)
draw_gaussian(hm_b[hm_id], pt_int[2], radius)
draw_gaussian(hm_r[hm_id], pt_int[3], radius)
reg_t[k] = pts[0] - pt_int[0]
reg_l[k] = pts[1] - pt_int[1]
reg_b[k] = pts[2] - pt_int[2]
reg_r[k] = pts[3] - pt_int[3]
ind_t[k] = pt_int[0, 1] * output_res + pt_int[0, 0]
ind_l[k] = pt_int[1, 1] * output_res + pt_int[1, 0]
ind_b[k] = pt_int[2, 1] * output_res + pt_int[2, 0]
ind_r[k] = pt_int[3, 1] * output_res + pt_int[3, 0]
ct = [int((pts[3, 0] + pts[1, 0]) / 2), int((pts[0, 1] + pts[2, 1]) / 2)]
draw_gaussian(hm_c[cls_id], ct, radius)
reg_mask[k] = 1
ret = {'input': inp, 'hm_t': hm_t, 'hm_l': hm_l, 'hm_b': hm_b,
'hm_r': hm_r, 'hm_c': hm_c}
if self.opt.reg_offset:
ret.update({'reg_mask': reg_mask,
'reg_t': reg_t, 'reg_l': reg_l, 'reg_b': reg_b, 'reg_r': reg_r,
'ind_t': ind_t, 'ind_l': ind_l, 'ind_b': ind_b, 'ind_r': ind_r})
    return ret
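A quick, runnable illustration of the `_get_border` helper used by the random-crop branch above: it shrinks the 128-pixel border until a valid center-sampling range exists, which only matters for images narrower than roughly twice the border.

```python
def get_border(border, size):
    # Same logic as EXDetDataset._get_border / CTDetDataset._get_border.
    i = 1
    while size - border // i <= border // i:
        i *= 2
    return border // i

for size in (100, 200, 300, 1024):
    print(size, '->', get_border(128, size))
# 100 -> 32, 200 -> 64, 300 and larger -> 128
```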
SyNet | SyNet-master/CenterNet/src/lib/datasets/sample/ctdet.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
class CTDetDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def __getitem__(self, index):
img_id = self.images[index]
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = os.path.join(self.img_dir, file_name)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
img = cv2.imread(img_path)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
if self.opt.keep_res:
input_h = (height | self.opt.pad) + 1
input_w = (width | self.opt.pad) + 1
s = np.array([input_w, input_h], dtype=np.float32)
else:
s = max(img.shape[0], img.shape[1]) * 1.0
input_h, input_w = self.opt.input_h, self.opt.input_w
flipped = False
if self.split == 'train':
if not self.opt.not_rand_crop:
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self._get_border(128, img.shape[1])
h_border = self._get_border(128, img.shape[0])
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
if np.random.random() < self.opt.flip:
flipped = True
img = img[:, ::-1, :]
c[0] = width - c[0] - 1
trans_input = get_affine_transform(
c, s, 0, [input_w, input_h])
inp = cv2.warpAffine(img, trans_input,
(input_w, input_h),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
if self.split == 'train' and not self.opt.no_color_aug:
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_h = input_h // self.opt.down_ratio
output_w = input_w // self.opt.down_ratio
num_classes = self.num_classes
trans_output = get_affine_transform(c, s, 0, [output_w, output_h])
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
dense_wh = np.zeros((2, output_h, output_w), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
cat_spec_wh = np.zeros((self.max_objs, num_classes * 2), dtype=np.float32)
cat_spec_mask = np.zeros((self.max_objs, num_classes * 2), dtype=np.uint8)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
cls_id = int(self.cat_ids[ann['category_id']])
if flipped:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
radius = self.opt.hm_gauss if self.opt.mse_loss else radius
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(hm[cls_id], ct_int, radius)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
cat_spec_wh[k, cls_id * 2: cls_id * 2 + 2] = wh[k]
cat_spec_mask[k, cls_id * 2: cls_id * 2 + 2] = 1
if self.opt.dense_wh:
draw_dense_reg(dense_wh, hm.max(axis=0), ct_int, wh[k], radius)
gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh}
if self.opt.dense_wh:
hm_a = hm.max(axis=0, keepdims=True)
dense_wh_mask = np.concatenate([hm_a, hm_a], axis=0)
ret.update({'dense_wh': dense_wh, 'dense_wh_mask': dense_wh_mask})
del ret['wh']
elif self.opt.cat_spec_wh:
ret.update({'cat_spec_wh': cat_spec_wh, 'cat_spec_mask': cat_spec_mask})
del ret['wh']
if self.opt.reg_offset:
ret.update({'reg': reg})
if self.opt.debug > 0 or not self.split == 'train':
gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
np.zeros((1, 6), dtype=np.float32)
meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}
ret['meta'] = meta
    return ret
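A small, self-contained recap of the per-object targets built in `CTDetDataset.__getitem__` above: each box contributes its size in `wh`, a flat heatmap index `ind = y * output_w + x` at the integer center, and the sub-pixel remainder in `reg`. The box coordinates are made up.

```python
import numpy as np

output_w = 128
bbox = np.array([10.3, 22.7, 55.9, 80.2], dtype=np.float32)   # x1, y1, x2, y2 (assumed)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]

ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)

wh = np.array([w, h], dtype=np.float32)           # regression target for the size head
ind = ct_int[1] * output_w + ct_int[0]            # flat index into the output grid
reg = ct - ct_int                                 # sub-pixel offset target
print(wh, ind, reg)
```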
SyNet | SyNet-master/CenterNet/src/lib/datasets/sample/ddd.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
class DddDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _convert_alpha(self, alpha):
return math.radians(alpha + 45) if self.alpha_in_degree else alpha
def __getitem__(self, index):
img_id = self.images[index]
img_info = self.coco.loadImgs(ids=[img_id])[0]
img_path = os.path.join(self.img_dir, img_info['file_name'])
img = cv2.imread(img_path)
if 'calib' in img_info:
calib = np.array(img_info['calib'], dtype=np.float32)
else:
calib = self.calib
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.])
if self.opt.keep_res:
s = np.array([self.opt.input_w, self.opt.input_h], dtype=np.int32)
else:
s = np.array([width, height], dtype=np.int32)
aug = False
if self.split == 'train' and np.random.random() < self.opt.aug_ddd:
aug = True
sf = self.opt.scale
cf = self.opt.shift
s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
c[0] += img.shape[1] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += img.shape[0] * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
trans_input = get_affine_transform(
c, s, 0, [self.opt.input_w, self.opt.input_h])
inp = cv2.warpAffine(img, trans_input,
(self.opt.input_w, self.opt.input_h),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
# if self.split == 'train' and not self.opt.no_color_aug:
# color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
num_classes = self.opt.num_classes
trans_output = get_affine_transform(
c, s, 0, [self.opt.output_w, self.opt.output_h])
hm = np.zeros(
(num_classes, self.opt.output_h, self.opt.output_w), dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
dep = np.zeros((self.max_objs, 1), dtype=np.float32)
rotbin = np.zeros((self.max_objs, 2), dtype=np.int64)
rotres = np.zeros((self.max_objs, 2), dtype=np.float32)
dim = np.zeros((self.max_objs, 3), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
rot_mask = np.zeros((self.max_objs), dtype=np.uint8)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
cls_id = int(self.cat_ids[ann['category_id']])
if cls_id <= -99:
continue
# if flipped:
# bbox[[0, 2]] = width - bbox[[2, 0]] - 1
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.opt.output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.opt.output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius = gaussian_radius((h, w))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
if cls_id < 0:
ignore_id = [_ for _ in range(num_classes)] \
if cls_id == - 1 else [- cls_id - 2]
if self.opt.rect_mask:
hm[ignore_id, int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1] = 0.9999
else:
for cc in ignore_id:
draw_gaussian(hm[cc], ct, radius)
hm[ignore_id, ct_int[1], ct_int[0]] = 0.9999
continue
draw_gaussian(hm[cls_id], ct, radius)
wh[k] = 1. * w, 1. * h
gt_det.append([ct[0], ct[1], 1] + \
self._alpha_to_8(self._convert_alpha(ann['alpha'])) + \
[ann['depth']] + (np.array(ann['dim']) / 1).tolist() + [cls_id])
if self.opt.reg_bbox:
gt_det[-1] = gt_det[-1][:-1] + [w, h] + [gt_det[-1][-1]]
# if (not self.opt.car_only) or cls_id == 1: # Only estimate ADD for cars !!!
if 1:
alpha = self._convert_alpha(ann['alpha'])
# print('img_id cls_id alpha rot_y', img_path, cls_id, alpha, ann['rotation_y'])
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
rotbin[k, 0] = 1
rotres[k, 0] = alpha - (-0.5 * np.pi)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
rotbin[k, 1] = 1
rotres[k, 1] = alpha - (0.5 * np.pi)
dep[k] = ann['depth']
dim[k] = ann['dim']
# print(' cat dim', cls_id, dim[k])
ind[k] = ct_int[1] * self.opt.output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1 if not aug else 0
rot_mask[k] = 1
# print('gt_det', gt_det)
# print('')
ret = {'input': inp, 'hm': hm, 'dep': dep, 'dim': dim, 'ind': ind,
'rotbin': rotbin, 'rotres': rotres, 'reg_mask': reg_mask,
'rot_mask': rot_mask}
if self.opt.reg_bbox:
ret.update({'wh': wh})
if self.opt.reg_offset:
ret.update({'reg': reg})
if self.opt.debug > 0 or not ('train' in self.split):
gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
np.zeros((1, 18), dtype=np.float32)
meta = {'c': c, 's': s, 'gt_det': gt_det, 'calib': calib,
'image_path': img_path, 'img_id': img_id}
ret['meta'] = meta
return ret
def _alpha_to_8(self, alpha):
# return [alpha, 0, 0, 0, 0, 0, 0, 0]
ret = [0, 0, 0, 1, 0, 0, 0, 1]
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
r = alpha - (-0.5 * np.pi)
ret[1] = 1
ret[2], ret[3] = np.sin(r), np.cos(r)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
r = alpha - (0.5 * np.pi)
ret[5] = 1
ret[6], ret[7] = np.sin(r), np.cos(r)
return ret
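A runnable restatement of the two-bin orientation target assembled in the loop above (and mirrored by `_alpha_to_8`): bin 0 covers angles away from +pi/2 with residuals measured from -pi/2, bin 1 covers angles away from -pi/2 with residuals measured from +pi/2, and most angles fall into both bins. The sample angles are arbitrary.

```python
import numpy as np

def rot_target(alpha):
    # Same bin conditions as in DddDataset.__getitem__.
    rotbin, rotres = [0, 0], [0.0, 0.0]
    if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
        rotbin[0], rotres[0] = 1, alpha - (-0.5 * np.pi)
    if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
        rotbin[1], rotres[1] = 1, alpha - (0.5 * np.pi)
    return rotbin, rotres

for a in (-np.pi / 2, 0.0, np.pi / 2):
    print(round(a, 3), rot_target(a))
```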
SyNet | SyNet-master/CenterNet/src/lib/datasets/sample/multi_pose.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import numpy as np
import torch
import json
import cv2
import os
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian, draw_msra_gaussian
from utils.image import draw_dense_reg
import math
class MultiPoseDataset(data.Dataset):
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def __getitem__(self, index):
img_id = self.images[index]
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = os.path.join(self.img_dir, file_name)
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_objs)
img = cv2.imread(img_path)
height, width = img.shape[0], img.shape[1]
c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
s = max(img.shape[0], img.shape[1]) * 1.0
rot = 0
flipped = False
if self.split == 'train':
if not self.opt.not_rand_crop:
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self._get_border(128, img.shape[1])
h_border = self._get_border(128, img.shape[0])
c[0] = np.random.randint(low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(low=h_border, high=img.shape[0] - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
s = s * np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
if np.random.random() < self.opt.aug_rot:
rf = self.opt.rotate
rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)
if np.random.random() < self.opt.flip:
flipped = True
img = img[:, ::-1, :]
c[0] = width - c[0] - 1
trans_input = get_affine_transform(
c, s, rot, [self.opt.input_res, self.opt.input_res])
inp = cv2.warpAffine(img, trans_input,
(self.opt.input_res, self.opt.input_res),
flags=cv2.INTER_LINEAR)
inp = (inp.astype(np.float32) / 255.)
if self.split == 'train' and not self.opt.no_color_aug:
color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
inp = (inp - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_res = self.opt.output_res
num_joints = self.num_joints
trans_output_rot = get_affine_transform(c, s, rot, [output_res, output_res])
trans_output = get_affine_transform(c, s, 0, [output_res, output_res])
hm = np.zeros((self.num_classes, output_res, output_res), dtype=np.float32)
hm_hp = np.zeros((num_joints, output_res, output_res), dtype=np.float32)
dense_kps = np.zeros((num_joints, 2, output_res, output_res),
dtype=np.float32)
dense_kps_mask = np.zeros((num_joints, output_res, output_res),
dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
kps = np.zeros((self.max_objs, num_joints * 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
kps_mask = np.zeros((self.max_objs, self.num_joints * 2), dtype=np.uint8)
hp_offset = np.zeros((self.max_objs * num_joints, 2), dtype=np.float32)
hp_ind = np.zeros((self.max_objs * num_joints), dtype=np.int64)
hp_mask = np.zeros((self.max_objs * num_joints), dtype=np.int64)
draw_gaussian = draw_msra_gaussian if self.opt.mse_loss else \
draw_umich_gaussian
gt_det = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
cls_id = int(ann['category_id']) - 1
pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3)
if flipped:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
pts[:, 0] = width - pts[:, 0] - 1
for e in self.flip_idx:
pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox = np.clip(bbox, 0, output_res - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if (h > 0 and w > 0) or (rot != 0):
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = self.opt.hm_gauss if self.opt.mse_loss else max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_res + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
num_kpts = pts[:, 2].sum()
if num_kpts == 0:
hm[cls_id, ct_int[1], ct_int[0]] = 0.9999
reg_mask[k] = 0
hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
hp_radius = self.opt.hm_gauss \
if self.opt.mse_loss else max(0, int(hp_radius))
for j in range(num_joints):
if pts[j, 2] > 0:
pts[j, :2] = affine_transform(pts[j, :2], trans_output_rot)
if pts[j, 0] >= 0 and pts[j, 0] < output_res and \
pts[j, 1] >= 0 and pts[j, 1] < output_res:
kps[k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int
kps_mask[k, j * 2: j * 2 + 2] = 1
pt_int = pts[j, :2].astype(np.int32)
hp_offset[k * num_joints + j] = pts[j, :2] - pt_int
hp_ind[k * num_joints + j] = pt_int[1] * output_res + pt_int[0]
hp_mask[k * num_joints + j] = 1
if self.opt.dense_hp:
# must be before draw center hm gaussian
draw_dense_reg(dense_kps[j], hm[cls_id], ct_int,
pts[j, :2] - ct_int, radius, is_offset=True)
draw_gaussian(dense_kps_mask[j], ct_int, radius)
draw_gaussian(hm_hp[j], pt_int, hp_radius)
draw_gaussian(hm[cls_id], ct_int, radius)
gt_det.append([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1] +
pts[:, :2].reshape(num_joints * 2).tolist() + [cls_id])
if rot != 0:
hm = hm * 0 + 0.9999
reg_mask *= 0
kps_mask *= 0
ret = {'input': inp, 'hm': hm, 'reg_mask': reg_mask, 'ind': ind, 'wh': wh,
'hps': kps, 'hps_mask': kps_mask}
if self.opt.dense_hp:
dense_kps = dense_kps.reshape(num_joints * 2, output_res, output_res)
dense_kps_mask = dense_kps_mask.reshape(
num_joints, 1, output_res, output_res)
dense_kps_mask = np.concatenate([dense_kps_mask, dense_kps_mask], axis=1)
dense_kps_mask = dense_kps_mask.reshape(
num_joints * 2, output_res, output_res)
ret.update({'dense_hps': dense_kps, 'dense_hps_mask': dense_kps_mask})
del ret['hps'], ret['hps_mask']
if self.opt.reg_offset:
ret.update({'reg': reg})
if self.opt.hm_hp:
ret.update({'hm_hp': hm_hp})
if self.opt.reg_hp_offset:
ret.update({'hp_offset': hp_offset, 'hp_ind': hp_ind, 'hp_mask': hp_mask})
if self.opt.debug > 0 or not self.split == 'train':
gt_det = np.array(gt_det, dtype=np.float32) if len(gt_det) > 0 else \
np.zeros((1, 40), dtype=np.float32)
meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_id}
ret['meta'] = meta
return ret
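A short sketch of the horizontal-flip handling above: x-coordinates are mirrored and left/right joints are swapped via `flip_idx` pairs. The pair list below is the usual COCO one and is an assumption here, since the real list lives on the dataset class.

```python
import numpy as np

flip_idx = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]]  # assumed
width, num_joints = 640, 17
pts = np.random.rand(num_joints, 3).astype(np.float32)  # random keypoints for illustration
pts[:, 0] *= width

pts[:, 0] = width - pts[:, 0] - 1                  # mirror the x-coordinates
for e in flip_idx:
    pts[e[0]], pts[e[1]] = pts[e[1]].copy(), pts[e[0]].copy()   # swap left/right joints
print(pts[:3, 0])
```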
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/kitti.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch.utils.data as data
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
class KITTI(data.Dataset):
num_classes = 3
default_resolution = [384, 1280]
mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(KITTI, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'kitti')
self.img_dir = os.path.join(self.data_dir, 'images', 'trainval')
if opt.trainval:
split = 'trainval' if split == 'train' else 'test'
self.img_dir = os.path.join(self.data_dir, 'images', split)
self.annot_path = os.path.join(
self.data_dir, 'annotations', 'kitti_{}.json').format(split)
else:
self.annot_path = os.path.join(self.data_dir,
'annotations', 'kitti_{}_{}.json').format(opt.kitti_split, split)
self.max_objs = 50
self.class_name = [
'__background__', 'Pedestrian', 'Car', 'Cyclist']
self.cat_ids = {1:0, 2:1, 3:2, 4:-3, 5:-3, 6:-2, 7:-99, 8:-99, 9:-1}
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self.split = split
self.opt = opt
self.alpha_in_degree = False
print('==> initializing kitti {}, {} data.'.format(opt.kitti_split, split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def __len__(self):
return self.num_samples
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
pass
def save_results(self, results, save_dir):
results_dir = os.path.join(save_dir, 'results')
if not os.path.exists(results_dir):
os.mkdir(results_dir)
for img_id in results.keys():
out_path = os.path.join(results_dir, '{:06d}.txt'.format(img_id))
f = open(out_path, 'w')
for cls_ind in results[img_id]:
for j in range(len(results[img_id][cls_ind])):
class_name = self.class_name[cls_ind]
f.write('{} 0.0 0'.format(class_name))
for i in range(len(results[img_id][cls_ind][j])):
f.write(' {:.2f}'.format(results[img_id][cls_ind][j][i]))
f.write('\n')
f.close()
def run_eval(self, results, save_dir):
self.save_results(results, save_dir)
os.system('./tools/kitti_eval/evaluate_object_3d_offline ' + \
'../data/kitti/training/label_val ' + \
'{}/results/'.format(save_dir))
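`save_results` above writes one KITTI-style text file per image id (`{:06d}.txt`), each row starting with the class name and two placeholder fields, followed by the detection values formatted to two decimals. The snippet below builds a single line with the same formatting; the numbers are made up purely for illustration.

```python
class_name = 'Car'
values = [0.5, 0, 388.66, 181.1, 424.0, 203.12, 1.52, 1.63, 3.88, -8.4, 1.9, 35.5, 0.88]  # made up
line = '{} 0.0 0'.format(class_name)              # class, then placeholder truncation/occlusion
for v in values:
    line += ' {:.2f}'.format(v)
print(line)
```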
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/visdrone.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class Visdrone(data.Dataset):
num_classes = 10
default_resolution = [512, 512]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(Visdrone, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'coco')
self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 16
self.class_name = ["pedestrian", "people", "bicycle", "car", "van", "truck", "tricycle", "awning-tricycle", "bus", "motor"]
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
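`convert_eval_format` above turns decoder output into COCO-style result dicts: corner boxes become `[x, y, w, h]`, class indices map through `_valid_ids`, and scores are rounded before being dumped to `results.json`. The snippet below walks one detection through that conversion with made-up numbers.

```python
_valid_ids = list(range(1, 11))                    # Visdrone category ids
bbox = [100.0, 50.0, 180.0, 210.0, 0.87]           # x1, y1, x2, y2, score (made up)
cls_ind = 4                                        # 1-based index into _valid_ids

bbox[2] -= bbox[0]                                 # convert corners to width/height
bbox[3] -= bbox[1]
detection = {
    "image_id": 1,
    "category_id": int(_valid_ids[cls_ind - 1]),
    "bbox": [float("{:.2f}".format(x)) for x in bbox[0:4]],
    "score": float("{:.2f}".format(bbox[4])),
}
print(detection)
```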
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/coco_hp.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class COCOHP(data.Dataset):
num_classes = 13
default_resolution = [512, 512]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
    super(COCOHP, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'coco')
self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 128
self.class_name = ['short sleeve top', 'long sleeve top', 'short sleeve outwear', 'long sleeve outwear', 'vest',
'sling', 'shorts', 'trousers', 'skirt', 'short sleeve dress', 'long sleeve dress',
'vest dress', 'sling dress']
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
| 4,644 | 39.745614 | 120 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/pascal.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
import numpy as np
import torch
import json
import os
import torch.utils.data as data
class PascalVOC(data.Dataset):
num_classes = 20
default_resolution = [384, 384]
mean = np.array([0.485, 0.456, 0.406],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.229, 0.224, 0.225],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(PascalVOC, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'voc')
self.img_dir = os.path.join(self.data_dir, 'images')
_ann_name = {'train': 'trainval0712', 'val': 'test2007'}
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'pascal_{}.json').format(_ann_name[split])
self.max_objs = 50
self.class_name = ['__background__', "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
"horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
"train", "tvmonitor"]
self._valid_ids = np.arange(1, 21, dtype=np.int32)
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
self.split = split
self.opt = opt
print('==> initializing pascal {} data.'.format(_ann_name[split]))
self.coco = coco.COCO(self.annot_path)
self.images = sorted(self.coco.getImgIds())
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
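    # Builds a nested list indexed as detections[class_index][image_index],
    # each entry holding that image's [x1, y1, x2, y2, score] boxes; this is
    # the layout handed to the VOC evaluation script (tools/reval.py) by run_eval.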
detections = [[[] for __ in range(self.num_samples)] \
for _ in range(self.num_classes + 1)]
for i in range(self.num_samples):
img_id = self.images[i]
for j in range(1, self.num_classes + 1):
if isinstance(all_bboxes[img_id][j], np.ndarray):
detections[j][i] = all_bboxes[img_id][j].tolist()
else:
detections[j][i] = all_bboxes[img_id][j]
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
os.system('python tools/reval.py ' + \
'{}/results.json'.format(save_dir))
| 3,032 | 35.542169 | 80 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/fashion.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class Fashion(data.Dataset):
num_classes = 13
default_resolution = [512, 512]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(Fashion, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'coco')
self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 16
self.class_name = ['short sleeve top', 'long sleeve top', 'short sleeve outwear', 'long sleeve outwear', 'vest', 'sling', 'shorts', 'trousers', 'skirt', 'short sleeve dress', 'long sleeve dress', 'vest dress', 'sling dress']
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
| 4,151 | 36.405405 | 228 | py |
SyNet | SyNet-master/CenterNet/src/lib/datasets/dataset/coco.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class COCO(data.Dataset):
num_classes = 80
default_resolution = [512, 512]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
def __init__(self, opt, split):
super(COCO, self).__init__()
self.data_dir = os.path.join(opt.data_dir, 'coco')
self.img_dir = os.path.join(self.data_dir, '{}2017'.format(split))
if split == 'test':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'image_info_test-dev2017.json').format(split)
else:
if opt.task == 'exdet':
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_extreme_{}2017.json').format(split)
else:
self.annot_path = os.path.join(
self.data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.max_objs = 128
self.class_name = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
self._valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 90]
self.cat_ids = {v: i for i, v in enumerate(self._valid_ids)}
self.voc_color = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) \
for v in range(1, self.num_classes + 1)]
self._data_rng = np.random.RandomState(123)
self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
self._eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
# self.mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
# self.std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
self.split = split
self.opt = opt
print('==> initializing coco 2017 {} data.'.format(split))
self.coco = coco.COCO(self.annot_path)
self.images = self.coco.getImgIds()
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
# import pdb; pdb.set_trace()
detections = []
for image_id in all_bboxes:
for cls_ind in all_bboxes[image_id]:
category_id = self._valid_ids[cls_ind - 1]
for bbox in all_bboxes[image_id][cls_ind]:
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
score = bbox[4]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(score))
}
if len(bbox) > 5:
extreme_points = list(map(self._to_float, bbox[5:13]))
detection["extreme_points"] = extreme_points
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
# result_json = os.path.join(save_dir, "results.json")
# detections = self.convert_eval_format(results)
# json.dump(detections, open(result_json, "w"))
self.save_results(results, save_dir)
coco_dets = self.coco.loadRes('{}/results.json'.format(save_dir))
coco_eval = COCOeval(self.coco, coco_dets, "bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
| 5,214 | 39.115385 | 78 | py |
SyNet | SyNet-master/CenterNet/src/lib/utils/utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
if self.count > 0:
self.avg = self.sum / self.count | 542 | 22.608696 | 59 | py |
SyNet | SyNet-master/tensorpack/examples/FasterRCNN/config.py | # -*- coding: utf-8 -*-
# File: config.py
import numpy as np
import os
import pprint
import six
from tensorpack.utils import logger
from tensorpack.utils.gpu import get_num_gpu
__all__ = ['config', 'finalize_configs']
class AttrDict():
_freezed = False
""" Avoid accidental creation of new hierarchies. """
def __getattr__(self, name):
if self._freezed:
raise AttributeError(name)
if name.startswith('_'):
# Do not mess with internals. Otherwise copy/pickle will fail
raise AttributeError(name)
ret = AttrDict()
setattr(self, name, ret)
return ret
def __setattr__(self, name, value):
if self._freezed and name not in self.__dict__:
raise AttributeError(
"Config was freezed! Unknown config: {}".format(name))
super().__setattr__(name, value)
def __str__(self):
return pprint.pformat(self.to_dict(), indent=1, width=100, compact=True)
__repr__ = __str__
def to_dict(self):
"""Convert to a nested dict. """
return {k: v.to_dict() if isinstance(v, AttrDict) else v
for k, v in self.__dict__.items() if not k.startswith('_')}
def from_dict(self, d):
self.freeze(False)
for k, v in d.items():
self_v = getattr(self, k)
if isinstance(self_v, AttrDict):
self_v.from_dict(v)
else:
setattr(self, k, v)
def update_args(self, args):
"""Update from command line args. """
for cfg in args:
keys, v = cfg.split('=', maxsplit=1)
keylist = keys.split('.')
dic = self
for i, k in enumerate(keylist[:-1]):
assert k in dir(dic), "Unknown config key: {}".format(keys)
dic = getattr(dic, k)
key = keylist[-1]
oldv = getattr(dic, key)
if not isinstance(oldv, str):
v = eval(v)
setattr(dic, key, v)
def freeze(self, freezed=True):
self._freezed = freezed
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.freeze(freezed)
# avoid silent bugs
def __eq__(self, _):
raise NotImplementedError()
def __ne__(self, _):
raise NotImplementedError()
config = AttrDict()
_C = config     # short alias to avoid typing the full name everywhere
# mode flags ---------------------
_C.TRAINER = 'replicated' # options: 'horovod', 'replicated'
_C.MODE_MASK = True # Faster R-CNN or Mask R-CNN
_C.MODE_FPN = True
# dataset -----------------------
_C.DATA.BASEDIR = '/path/to/your/DATA/DIR'
# All available dataset names are defined in `dataset/coco.py:register_coco`.
# All TRAIN dataset will be concatenated for training.
_C.DATA.TRAIN = ('coco_train2017',) # i.e. trainval35k
# Each VAL dataset will be evaluated separately (instead of concatenated)
_C.DATA.VAL = ('coco_val2017',) # AKA minival2014
# These two configs will be populated later inside `finalize_configs`.
_C.DATA.NUM_CATEGORY = -1 # without the background class (e.g., 80 for COCO)
_C.DATA.CLASS_NAMES = [] # NUM_CLASS (NUM_CATEGORY+1) strings, the first is "BG".
# whether the coordinates in your registered dataset are
# absolute pixel values in range [0, W or H] or relative values in [0, 1]
_C.DATA.ABSOLUTE_COORD = True
# Filter Negative Samples from dataset
_C.DATA.FILTER_EMPTY_ANNOTATIONS = True
# Number of data loading workers.
# In case of horovod training, this is the number of workers per-GPU (so you may want to use a smaller number).
# Set to 0 to disable parallel data loading
_C.DATA.NUM_WORKERS = 10
# backbone ----------------------
_C.BACKBONE.WEIGHTS = ''
# To train from scratch, set it to empty, and set FREEZE_AT to 0
# To train from ImageNet pre-trained models, use the one that matches your
# architecture from http://models.tensorpack.com under the 'FasterRCNN' section.
# To train from an existing COCO model, use the path to that file, and change
# the other configurations according to that model.
_C.BACKBONE.RESNET_NUM_BLOCKS = [3, 4, 6, 3] # for resnet50
# RESNET_NUM_BLOCKS = [3, 4, 23, 3] # for resnet101
_C.BACKBONE.FREEZE_AFFINE = False # do not train affine parameters inside norm layers
_C.BACKBONE.NORM = 'FreezeBN' # options: FreezeBN, SyncBN, GN, None
_C.BACKBONE.FREEZE_AT = 2 # options: 0, 1, 2. How many stages in backbone to freeze (not training)
# Use a base model with TF-preferred padding mode,
# which may pad more pixels on right/bottom than top/left.
# See https://github.com/tensorflow/tensorflow/issues/18213
# In tensorpack model zoo, ResNet models with TF_PAD_MODE=False are marked with "-AlignPadding".
# All other models under `ResNet/` in the model zoo are using TF_PAD_MODE=True.
# Using either one should probably give the same performance.
# We use the "AlignPadding" one just to be consistent with caffe2.
_C.BACKBONE.TF_PAD_MODE = False
_C.BACKBONE.STRIDE_1X1 = False # True for MSRA models
# schedule -----------------------
_C.TRAIN.NUM_GPUS = None # by default, will be set from code
_C.TRAIN.WEIGHT_DECAY = 1e-4
_C.TRAIN.BASE_LR = 1e-2 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.WARMUP = 1000 # in terms of iterations. This is not affected by #GPUs
_C.TRAIN.WARMUP_INIT_LR = 1e-5 # defined for total batch size=8. Otherwise it will be adjusted automatically
_C.TRAIN.STEPS_PER_EPOCH = 500
_C.TRAIN.STARTING_EPOCH = 1 # the first epoch to start with, useful to continue a training
# LR_SCHEDULE means equivalent steps when the total batch size is 8.
# It can be either a string like "3x" that refers to standard convention, or a list of int.
# LR_SCHEDULE=3x is the same as LR_SCHEDULE=[420000, 500000, 540000], which
# means to decrease LR at steps 420k and 500k and stop training at 540k.
# When the total bs!=8, the actual iterations to decrease learning rate, and
# the base learning rate are computed from BASE_LR and LR_SCHEDULE.
# Therefore, there is *no need* to modify the config if you only change the number of GPUs.
_C.TRAIN.LR_SCHEDULE = "1x" # "1x" schedule in detectron
_C.TRAIN.EVAL_PERIOD = 50 # period (epochs) to run evaluation
_C.TRAIN.CHECKPOINT_PERIOD = 20 # period (epochs) to save model
# preprocessing --------------------
# Alternative old (worse & faster) setting: 600
_C.PREPROC.TRAIN_SHORT_EDGE_SIZE = [800, 800] # [min, max] to sample from
_C.PREPROC.TEST_SHORT_EDGE_SIZE = 800
_C.PREPROC.MAX_SIZE = 1333
# mean and std in RGB order.
# Un-scaled version: [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
_C.PREPROC.PIXEL_MEAN = [123.675, 116.28, 103.53]
_C.PREPROC.PIXEL_STD = [58.395, 57.12, 57.375]
# anchors -------------------------
_C.RPN.ANCHOR_STRIDE = 16
_C.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512) # sqrtarea of the anchor box
_C.RPN.ANCHOR_RATIOS = (0.5, 1., 2.)
_C.RPN.POSITIVE_ANCHOR_THRESH = 0.7
_C.RPN.NEGATIVE_ANCHOR_THRESH = 0.3
# rpn training -------------------------
_C.RPN.FG_RATIO = 0.5 # fg ratio among selected RPN anchors
_C.RPN.BATCH_PER_IM = 256 # total (across FPN levels) number of anchors that are marked valid
_C.RPN.MIN_SIZE = 0
_C.RPN.PROPOSAL_NMS_THRESH = 0.7
# Anchors which overlap with a crowd box (IOA larger than threshold) will be ignored.
# Setting this to a value larger than 1.0 will disable the feature.
# It is disabled by default because Detectron does not do this.
_C.RPN.CROWD_OVERLAP_THRESH = 9.99
_C.RPN.HEAD_DIM = 1024 # used in C4 only
# RPN proposal selection -------------------------------
# for C4
_C.RPN.TRAIN_PRE_NMS_TOPK = 12000
_C.RPN.TRAIN_POST_NMS_TOPK = 2000
_C.RPN.TEST_PRE_NMS_TOPK = 6000
_C.RPN.TEST_POST_NMS_TOPK = 1000 # if you encounter OOM in inference, set this to a smaller number
# for FPN, #proposals per-level and #proposals after merging are (for now) the same
# if FPN.PROPOSAL_MODE = 'Joint', these options have no effect
_C.RPN.TRAIN_PER_LEVEL_NMS_TOPK = 2000
_C.RPN.TEST_PER_LEVEL_NMS_TOPK = 1000
# fastrcnn training ---------------------
_C.FRCNN.BATCH_PER_IM = 512
_C.FRCNN.BBOX_REG_WEIGHTS = [10., 10., 5., 5.] # Slightly better setting: 20, 20, 10, 10
_C.FRCNN.FG_THRESH = 0.5
_C.FRCNN.FG_RATIO = 0.25 # fg ratio in a ROI batch
# FPN -------------------------
_C.FPN.ANCHOR_STRIDES = (4, 8, 16, 32, 64) # strides for each FPN level. Must be the same length as ANCHOR_SIZES
_C.FPN.PROPOSAL_MODE = 'Level' # 'Level', 'Joint'
_C.FPN.NUM_CHANNEL = 256
_C.FPN.NORM = 'None' # 'None', 'GN'
# The head option is only used in FPN. For C4 models, the head is C5
_C.FPN.FRCNN_HEAD_FUNC = 'fastrcnn_2fc_head'
# choices: fastrcnn_2fc_head, fastrcnn_4conv1fc_{,gn_}head
_C.FPN.FRCNN_CONV_HEAD_DIM = 256
_C.FPN.FRCNN_FC_HEAD_DIM = 1024
_C.FPN.MRCNN_HEAD_FUNC = 'maskrcnn_up4conv_head' # choices: maskrcnn_up4conv_{,gn_}head
# Mask R-CNN
_C.MRCNN.HEAD_DIM = 256
_C.MRCNN.ACCURATE_PASTE = True # slightly more aligned results, but very slow on numpy
# Cascade R-CNN, only available in FPN mode
_C.FPN.CASCADE = False
_C.CASCADE.IOUS = [0.5, 0.6, 0.7]
_C.CASCADE.BBOX_REG_WEIGHTS = [[10., 10., 5., 5.], [20., 20., 10., 10.], [30., 30., 15., 15.]]
# testing -----------------------
_C.TEST.FRCNN_NMS_THRESH = 0.5
# Smaller threshold value gives significantly better mAP. But we use 0.05 for consistency with Detectron.
# mAP with 1e-4 threshold can be found at https://github.com/tensorpack/tensorpack/commit/26321ae58120af2568bdbf2269f32aa708d425a8#diff-61085c48abee915b584027e1085e1043 # noqa
_C.TEST.RESULT_SCORE_THRESH = 0.05
_C.TEST.RESULT_SCORE_THRESH_VIS = 0.5 # only visualize confident results
_C.TEST.RESULTS_PER_IM = 100
_C.freeze() # avoid typo / wrong config keys
def finalize_configs(is_training):
"""
Run some sanity checks, and populate some configs from others
"""
_C.freeze(False) # populate new keys now
if isinstance(_C.DATA.VAL, six.string_types): # support single string (the typical case) as well
_C.DATA.VAL = (_C.DATA.VAL, )
if isinstance(_C.DATA.TRAIN, six.string_types): # support single string
_C.DATA.TRAIN = (_C.DATA.TRAIN, )
# finalize dataset definitions ...
from dataset import DatasetRegistry
datasets = list(_C.DATA.TRAIN) + list(_C.DATA.VAL)
_C.DATA.CLASS_NAMES = DatasetRegistry.get_metadata(datasets[0], "class_names")
_C.DATA.NUM_CATEGORY = len(_C.DATA.CLASS_NAMES) - 1
assert _C.BACKBONE.NORM in ['FreezeBN', 'SyncBN', 'GN', 'None'], _C.BACKBONE.NORM
if _C.BACKBONE.NORM != 'FreezeBN':
assert not _C.BACKBONE.FREEZE_AFFINE
assert _C.BACKBONE.FREEZE_AT in [0, 1, 2]
_C.RPN.NUM_ANCHOR = len(_C.RPN.ANCHOR_SIZES) * len(_C.RPN.ANCHOR_RATIOS)
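    # = 15 anchors per location with the default 5 sizes x 3 aspect ratios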
assert len(_C.FPN.ANCHOR_STRIDES) == len(_C.RPN.ANCHOR_SIZES)
# image size into the backbone has to be multiple of this number
_C.FPN.RESOLUTION_REQUIREMENT = _C.FPN.ANCHOR_STRIDES[3] # [3] because we build FPN with features r2,r3,r4,r5
if _C.MODE_FPN:
size_mult = _C.FPN.RESOLUTION_REQUIREMENT * 1.
_C.PREPROC.MAX_SIZE = np.ceil(_C.PREPROC.MAX_SIZE / size_mult) * size_mult
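        # e.g. the default MAX_SIZE=1333 becomes ceil(1333 / 32) * 32 = 1344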
assert _C.FPN.PROPOSAL_MODE in ['Level', 'Joint']
assert _C.FPN.FRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.MRCNN_HEAD_FUNC.endswith('_head')
assert _C.FPN.NORM in ['None', 'GN']
if _C.FPN.CASCADE:
# the first threshold is the proposal sampling threshold
assert _C.CASCADE.IOUS[0] == _C.FRCNN.FG_THRESH
assert len(_C.CASCADE.BBOX_REG_WEIGHTS) == len(_C.CASCADE.IOUS)
if is_training:
train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE
if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100:
# don't autotune if augmentation is on
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
os.environ['TF_AUTOTUNE_THRESHOLD'] = '1'
assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER
lr = _C.TRAIN.LR_SCHEDULE
if isinstance(lr, six.string_types):
if lr.endswith("x"):
LR_SCHEDULE_KITER = {
"{}x".format(k):
[180 * k - 120, 180 * k - 40, 180 * k]
for k in range(2, 10)}
LR_SCHEDULE_KITER["1x"] = [120, 160, 180]
_C.TRAIN.LR_SCHEDULE = [x * 1000 for x in LR_SCHEDULE_KITER[lr]]
else:
_C.TRAIN.LR_SCHEDULE = eval(lr)
# setup NUM_GPUS
if _C.TRAINER == 'horovod':
import horovod.tensorflow as hvd
ngpu = hvd.size()
logger.info("Horovod Rank={}, Size={}, LocalRank={}".format(
hvd.rank(), hvd.size(), hvd.local_rank()))
else:
assert 'OMPI_COMM_WORLD_SIZE' not in os.environ
ngpu = get_num_gpu()
assert ngpu > 0, "Has to train with GPU!"
assert ngpu % 8 == 0 or 8 % ngpu == 0, "Can only train with 1,2,4 or >=8 GPUs, but found {} GPUs".format(ngpu)
else:
# autotune is too slow for inference
os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
ngpu = get_num_gpu()
if _C.TRAIN.NUM_GPUS is None:
_C.TRAIN.NUM_GPUS = ngpu
else:
if _C.TRAINER == 'horovod':
assert _C.TRAIN.NUM_GPUS == ngpu
else:
assert _C.TRAIN.NUM_GPUS <= ngpu
_C.freeze()
logger.info("Config: ------------------------------------------\n" + str(_C))
| 13,461 | 40.678019 | 176 | py |
SyNet | SyNet-master/tensorpack/tests/benchmark-serializer.py | #!/usr/bin/env python3
import numpy as np
import argparse
import pyarrow as pa
from tabulate import tabulate
import operator
from tensorpack.utils import logger
from tensorpack.utils.serialize import (
MsgpackSerializer,
PyarrowSerializer,
PickleSerializer,
ForkingPickler,
)
from tensorpack.utils.timer import Timer
def benchmark_serializer(dumps, loads, data, num):
buf = dumps(data)
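    # the call above is a warm-up; the loop below re-times dumps()/loads()
    # with paused/resumed timers and reports the per-iteration average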
enc_timer = Timer()
dec_timer = Timer()
enc_timer.pause()
dec_timer.pause()
for k in range(num):
enc_timer.resume()
buf = dumps(data)
enc_timer.pause()
dec_timer.resume()
loads(buf)
dec_timer.pause()
dumps_time = enc_timer.seconds() / num
loads_time = dec_timer.seconds() / num
return dumps_time, loads_time
def display_results(name, results):
logger.info("Encoding benchmark for {}:".format(name))
data = sorted(((x, y[0]) for x, y in results), key=operator.itemgetter(1))
print(tabulate(data, floatfmt='.5f'))
logger.info("Decoding benchmark for {}:".format(name))
data = sorted(((x, y[1]) for x, y in results), key=operator.itemgetter(1))
print(tabulate(data, floatfmt='.5f'))
def benchmark_all(name, serializers, data, num=30):
logger.info("Benchmarking {} ...".format(name))
results = []
for serializer_name, dumps, loads in serializers:
results.append((serializer_name, benchmark_serializer(dumps, loads, data, num=num)))
display_results(name, results)
def fake_json_data():
return {
'words': """
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Mauris adipiscing adipiscing placerat.
Vestibulum augue augue,
pellentesque quis sollicitudin id, adipiscing.
""" * 100,
'list': list(range(100)) * 500,
'dict': {str(i): 'a' for i in range(50000)},
'dict2': {i: 'a' for i in range(50000)},
'int': 3000,
'float': 100.123456
}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("task")
args = parser.parse_args()
serializers = [
("msgpack", MsgpackSerializer.dumps, MsgpackSerializer.loads),
("pyarrow-buf", PyarrowSerializer.dumps, PyarrowSerializer.loads),
("pyarrow-bytes", PyarrowSerializer.dumps_bytes, PyarrowSerializer.loads),
("pickle", PickleSerializer.dumps, PickleSerializer.loads),
("forking-pickle", ForkingPickler.dumps, ForkingPickler.loads),
]
if args.task == "numpy":
numpy_data = [np.random.rand(64, 224, 224, 3).astype("float32"), np.random.rand(64).astype('int32')]
benchmark_all("numpy data", serializers, numpy_data)
elif args.task == "json":
benchmark_all("json data", serializers, fake_json_data(), num=50)
elif args.task == "torch":
import torch
from pyarrow.lib import _default_serialization_context
pa.register_torch_serialization_handlers(_default_serialization_context)
torch_data = [torch.rand(64, 224, 224, 3), torch.rand(64).to(dtype=torch.int32)]
benchmark_all("torch data", serializers[1:], torch_data)
| 3,180 | 31.131313 | 108 | py |
SyNet | SyNet-master/tensorpack/docs/conf.py | # -*- coding: utf-8 -*-
# flake8: noqa
# tensorpack documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 27 01:41:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re
import mock
import inspect
from sphinx.domains import Domain
class GithubURLDomain(Domain):
"""
Resolve certain links in markdown files to github source.
"""
name = "githuburl"
ROOT = "https://github.com/tensorpack/tensorpack/blob/master/"
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
github_url = None
if ".html" not in target:
if target.startswith("../../") and not target.startswith("../../modules"):
url = target.replace("../", "")
github_url = url
if github_url is not None:
if github_url.endswith("README"):
# bug of recommonmark.
# https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155
github_url += ".md"
print("Ref {} resolved to github:{}".format(target, github_url))
contnode["refuri"] = self.ROOT + github_url
return [("githuburl:any", contnode)]
else:
return []
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
os.environ['DOC_BUILDING'] = '1'
ON_RTD = (os.environ.get('READTHEDOCS') == 'True')
MOCK_MODULES = ['tabulate', 'h5py',
'cv2', 'zmq', 'lmdb',
'msgpack', 'msgpack_numpy', 'pyarrow',
'sklearn', 'sklearn.datasets',
'scipy', 'scipy.misc', 'scipy.io',
'tornado', 'tornado.concurrent',
'horovod', 'horovod.tensorflow',
'subprocess32', 'functools32', 'psutil']
# it's better to have tensorflow installed (for some docs to show)
# but it's OK to mock it as well
try:
import tensorflow
except ImportError:
mod = sys.modules['tensorflow'] = mock.Mock(name='tensorflow')
mod.__version__ = mod.VERSION = '1.12'
MOCK_MODULES.extend(['tensorflow.python.training.monitored_session'])
MOCK_MODULES.extend(['tensorflow.python.training'])
MOCK_MODULES.extend(['tensorflow.python.client'])
MOCK_MODULES.extend(['tensorflow.python.framework'])
MOCK_MODULES.extend(['tensorflow.python.platform'])
MOCK_MODULES.extend(['tensorflow.python.tools'])
MOCK_MODULES.extend(['tensorflow.contrib.graph_editor'])
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock(name=mod_name)
sys.modules['cv2'].__version__ = '3.2.1' # fake version
sys.modules['msgpack'].version = (0, 5, 2)
import tensorpack
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '3.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
#'sphinx.ext.autosectionlabel',
#'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# -- Configurations for plugins ------------
napoleon_google_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_special_with_doc = True
napoleon_numpy_docstring = False
napoleon_use_rtype = False
if ON_RTD:
intersphinx_timeout = 10
else:
# skip this when building locally
intersphinx_timeout = 0.1
intersphinx_mapping = {
'python': ('https://docs.python.org/3.6', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
}
# -------------------------
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'tensorpack'
copyright = u'2015 - 2020, Yuxin Wu, et al.'
author = u'Yuxin Wu, et al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = tensorpack.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', 'README.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# 'tensorpack.' prefix was removed by js
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['tensorpack.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# avoid li fonts being larger
# TODO but li indices fonts are still larger
html_compact_lists = False
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'tensorpackdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tensorpack.tex', u'tensorpack documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorpack', u'tensorpack documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tensorpack', u'tensorpack documentation',
author, 'tensorpack', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
suppress_warnings = ['image.nonlocal_uri']
#autodoc_member_order = 'bysource'
def process_signature(app, what, name, obj, options, signature,
return_annotation):
if signature:
# replace Mock function names
signature = re.sub('<Mock name=\'([^\']+)\'.*>', '\g<1>', signature)
signature = re.sub('tensorflow', 'tf', signature)
# add scope name to layer signatures:
if hasattr(obj, 'use_scope'):
if obj.use_scope:
signature = signature[0] + 'variable_scope_name, ' + signature[1:]
elif obj.use_scope is None:
signature = signature[0] + '[variable_scope_name,] ' + signature[1:]
# signature: arg list
return signature, return_annotation
_DEPRECATED_NAMES = set([
# deprecated stuff:
'QueueInputTrainer',
'dump_dataflow_to_process_queue',
'DistributedTrainerReplicated',
'DistributedTrainerParameterServer',
'Augmentor',
"get_model_loader",
# renamed items that should not appear in docs
'DumpTensor',
'DumpParamAsImage',
'get_nr_gpu',
'TrainingMonitor',
'PeakMemoryTracker',
'TowerFuncWrapper',
'PrefetchData',
'MultiProcessPrefetchData',
'PrefetchDataZMQ',
'MultiThreadPrefetchData',
# deprecated or renamed symbolic code
'Deconv2D',
# shouldn't appear in doc:
'l2_regularizer', 'l1_regularizer',
# internal only
'execute_only_once',
'humanize_time_delta',
'SessionUpdate',
'get_checkpoint_path',
'IterSpeedCounter'
])
def autodoc_skip_member(app, what, name, obj, skip, options):
# we hide something deliberately
if getattr(obj, '__HIDE_SPHINX_DOC__', False):
return True
if name == '__init__':
if obj.__doc__ and skip:
# include_init_with_doc doesn't work well for decorated init
# https://github.com/sphinx-doc/sphinx/issues/4258
return False
# Hide some names that are deprecated or not intended to be used
if name in _DEPRECATED_NAMES:
return True
if name in ['__iter__', '__len__', 'reset_state', 'get_data', 'size']:
# skip these methods with empty docstring
if not obj.__doc__ and inspect.isfunction(obj):
# https://stackoverflow.com/questions/3589311/get-defining-class-of-unbound-method-object-in-python-3
cls = getattr(inspect.getmodule(obj),
obj.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if issubclass(cls, tensorpack.DataFlow):
return True
return None
def setup(app):
from recommonmark.transform import AutoStructify
app.add_domain(GithubURLDomain)
app.connect('autodoc-process-signature', process_signature)
app.connect('autodoc-skip-member', autodoc_skip_member)
app.add_config_value(
'recommonmark_config',
{'auto_toc_tree_section': 'Contents',
'enable_math': True,
'enable_inline_math': True,
'enable_eval_rst': True
}, True)
app.add_transform(AutoStructify)
| 15,709 | 32.283898 | 140 | py |
SyNet | SyNet-master/tensorpack/tensorpack/compat/__init__.py | #!/usr/bin/env python
import tensorflow as tf
def backport_tensor_spec():
if hasattr(tf, 'TensorSpec'):
return tf.TensorSpec
try:
# available since 1.7
from tensorflow.python.framework.tensor_spec import TensorSpec
except ImportError:
pass
else:
tf.TensorSpec = TensorSpec
return TensorSpec
from .tensor_spec import TensorSpec
tf.TensorSpec = TensorSpec
return TensorSpec
def is_tfv2():
try:
from tensorflow.python import tf2
return tf2.enabled()
except Exception:
return False
if is_tfv2():
tfv1 = tf.compat.v1
if not hasattr(tf, 'layers'):
# promised at https://github.com/tensorflow/community/pull/24#issuecomment-440453886
tf.layers = tf.keras.layers
else:
try:
tfv1 = tf.compat.v1 # this will silent some warnings
except AttributeError:
tfv1 = tf
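
# Note (illustrative, not part of the original file): model code in this repo
# imports the alias as `from ..compat import tfv1 as tf`, so graph-mode APIs
# keep resolving to tf.compat.v1 when TF2 behavior is enabled.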
| 917 | 21.390244 | 92 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/fc.py | # -*- coding: utf-8 -*-
# File: fc.py
import numpy as np
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..tfutils.common import get_tf_version_tuple
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args, rename_get_variable
__all__ = ['FullyConnected']
def batch_flatten(x):
"""
Flatten the tensor except the first dimension.
"""
shape = x.get_shape().as_list()[1:]
if None not in shape:
return tf.reshape(x, [-1, int(np.prod(shape))])
return tf.reshape(x, tf.stack([tf.shape(x)[0], -1]))
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['units'],
name_mapping={'out_dim': 'units'})
def FullyConnected(
inputs,
units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None):
"""
A wrapper around `tf.layers.Dense`.
One difference to maintain backward-compatibility:
Default weight initializer is variance_scaling_initializer(2.0).
Variable Names:
* ``W``: weights of shape [in_dim, out_dim]
* ``b``: bias
"""
if kernel_initializer is None:
if get_tf_version_tuple() <= (1, 12):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0) # deprecated
else:
kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')
inputs = batch_flatten(inputs)
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Dense(
units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
_reuse=tf.get_variable_scope().reuse)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
return ret
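

# Illustrative usage, not part of the original file: because of @layer_register,
# the call takes a variable scope name as its first argument, e.g.
#   logits = FullyConnected('fc1', features, 1024, activation=tf.nn.relu)
# which creates variables 'fc1/W' and 'fc1/b' as described in the docstring.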
| 2,337 | 30.594595 | 110 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/batch_norm.py | # Copyright (c) Tensorpack Contributors. All Rights Reserved
# -*- coding: utf-8 -*-
# File: batch_norm.py
import re
from ..compat import tfv1 as tf # this should be avoided first in model code
from tensorflow.python.training import moving_averages
from ..tfutils.collection import backup_collection, restore_collection
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.tower import get_current_tower_context
from ..utils import logger
from ..utils.argtools import get_data_format, log_once
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args, rename_get_variable
from .utils import disable_autograph
__all__ = ['BatchNorm', 'BatchRenorm']
# decay: being too close to 1 leads to slow start-up. torch uses 0.9.
# eps: torch: 1e-5. Lasagne: 1e-4
def get_bn_variables(n_out, use_scale, use_bias, beta_init, gamma_init):
if use_bias:
beta = tf.get_variable('beta', [n_out], initializer=beta_init)
else:
beta = tf.zeros([n_out], name='beta')
if use_scale:
gamma = tf.get_variable('gamma', [n_out], initializer=gamma_init)
else:
gamma = tf.ones([n_out], name='gamma')
# x * gamma + beta
moving_mean = tf.get_variable('mean/EMA', [n_out],
initializer=tf.constant_initializer(), trainable=False)
moving_var = tf.get_variable('variance/EMA', [n_out],
initializer=tf.constant_initializer(1.0), trainable=False)
if get_current_tower_context().is_main_training_tower:
for v in [moving_mean, moving_var]:
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
return beta, gamma, moving_mean, moving_var
def internal_update_bn_ema(xn, batch_mean, batch_var,
moving_mean, moving_var, decay):
update_op1 = moving_averages.assign_moving_average(
moving_mean, batch_mean, decay, zero_debias=False,
name='mean_ema_op')
update_op2 = moving_averages.assign_moving_average(
moving_var, batch_var, decay, zero_debias=False,
name='var_ema_op')
# When sync_statistics is True, always enable internal_update.
# Otherwise the update ops (only executed on main tower)
# will hang when some BatchNorm layers are unused (https://github.com/tensorpack/tensorpack/issues/1078)
with tf.control_dependencies([update_op1, update_op2]):
return tf.identity(xn, name='output')
def get_sync_bn_mean_var(inputs, red_axis, sync_statistics):
ctx = get_current_tower_context()
batch_mean = tf.reduce_mean(inputs, axis=red_axis)
batch_mean_square = tf.reduce_mean(tf.square(inputs), axis=red_axis)
TF_version = get_tf_version_tuple()
if sync_statistics == 'nccl':
num_dev = ctx.total
if num_dev == 1:
logger.warn("BatchNorm(sync_statistics='nccl') is used with only one tower!")
else:
assert TF_version >= (1, 10), \
"Cross-GPU BatchNorm is only supported in TF>=1.10 ." \
"Upgrade TF or apply this patch manually: https://github.com/tensorflow/tensorflow/pull/20360"
if TF_version <= (1, 12):
try:
from tensorflow.contrib.nccl.python.ops.nccl_ops import _validate_and_load_nccl_so # deprecated
except Exception:
pass
else:
_validate_and_load_nccl_so()
from tensorflow.contrib.nccl.ops import gen_nccl_ops # deprecated
else:
from tensorflow.python.ops import gen_nccl_ops
shared_name = re.sub('tower[0-9]+/', '', tf.get_variable_scope().name)
batch_mean = gen_nccl_ops.nccl_all_reduce(
input=batch_mean,
reduction='sum',
num_devices=num_dev,
shared_name=shared_name + '_NCCL_mean') * (1.0 / num_dev)
batch_mean_square = gen_nccl_ops.nccl_all_reduce(
input=batch_mean_square,
reduction='sum',
num_devices=num_dev,
shared_name=shared_name + '_NCCL_mean_square') * (1.0 / num_dev)
elif sync_statistics == 'horovod':
# Require https://github.com/uber/horovod/pull/331
import horovod.tensorflow as hvd
if hvd.size() == 1:
logger.warn("BatchNorm(sync_statistics='horovod') is used with only one process!")
else:
import horovod
hvd_version = tuple(map(int, horovod.__version__.split('.')[:3]))
assert hvd_version >= (0, 13, 6), "sync_statistics=horovod needs horovod>=0.13.6 !"
batch_mean = hvd.allreduce(batch_mean, average=True)
batch_mean_square = hvd.allreduce(batch_mean_square, average=True)
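    # variance from the aggregated moments: Var[x] = E[x^2] - (E[x])^2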
batch_var = batch_mean_square - tf.square(batch_mean)
return batch_mean, batch_var
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
'decay': 'momentum',
'use_local_stat': 'training'
})
@disable_autograph()
def BatchNorm(inputs, axis=None, *, training=None, momentum=0.9, epsilon=1e-5,
center=True, scale=True,
beta_initializer=tf.zeros_initializer(),
gamma_initializer=tf.ones_initializer(),
virtual_batch_size=None,
data_format='channels_last',
ema_update='default',
sync_statistics=None):
"""
A more powerful version of `tf.layers.batch_normalization`. It differs from
the offical one in the following aspects:
1. Accepts an alternative ``data_format`` option when ``axis`` is None. For 2D input, this argument will be ignored.
2. Default value for ``momentum`` and ``epsilon`` is different.
3. Default value for ``training`` is automatically obtained from tensorpack's ``TowerContext``.
User-provided value can overwrite this behavior.
4. Support the ``ema_update`` option, which covers broader use cases than the standard EMA update.
5. Support the ``sync_statistics`` option, which implements "SyncBN" and is very useful in small-batch models.
6. Better support of the ``virtual_batch_size`` option that does not have the bugs in ``tf.layers``.
Args:
training (bool): if True, use per-batch statistics to normalize. Otherwise, use stored EMA
to normalize. By default, it is equal to `get_current_tower_context().is_training`.
This is not a good argument name, but it is what the Tensorflow layer uses.
virtual_batch_size (int): implement "Ghost BatchNorm" that normalizes
the data with a smaller batch size than the input. Only effective when training is True.
The value has to be a divisor of the actual batch size.
It does not use the buggy TensorFlow implementation which has the
problems of (1) wrong behavior at inference; (2) create variables with unnecessary size=1 dimensions.
Corresponding TF issue: https://github.com/tensorflow/tensorflow/issues/23050
ema_update (str): Only effective when ``training=True``. It has the following options:
* "default": same as "collection". Because this is the default behavior in TensorFlow.
* "skip": do not update EMA. This can be useful when you reuse a batch norm layer in several places
but do not want them to all update your EMA.
* "collection": Add EMA update ops to collection `tf.GraphKeys.UPDATE_OPS`.
The ops in the collection will be run automatically by the callback :class:`RunUpdateOps`, along with
your training iterations. This can waste compute if your training iterations do not always depend
on the BatchNorm layer.
* "internal": EMA is updated inside this layer itself by control dependencies.
In standard scenarios, it has similar speed to "collection". But it has some more benefits:
1. BatchNorm is used inside dynamic control flow.
The collection-based update does not support dynamic control flows.
2. BatchNorm layer is sometimes unused (e.g., in GANs you have two networks to train alternatively).
Putting all update ops into a single collection will waste a lot of compute.
3. Other part of the model relies on the "updated" EMA. The collection-based method does not update
EMA immediately.
4. It has less chance to cause TensorFlow bugs in a graph with complicated control flow.
Therefore this option is preferred over TensorFlow default.
Corresponding TF issue: https://github.com/tensorflow/tensorflow/issues/14699
sync_statistics (str or None): one of None, "nccl", or "horovod". It determines how to compute the
"per-batch statistics" when ``training==True``.
* None: it uses statistics of the input tensor to normalize during training.
This is the standard way BatchNorm was implemented in most frameworks.
* "nccl": this layer must be used under tensorpack's multi-GPU trainers.
It uses the aggregated statistics of the whole batch (across all GPUs) to normalize.
* "horovod": this layer must be used under tensorpack's :class:`HorovodTrainer`.
It uses the aggregated statistics of the whole batch (across all MPI ranks) to normalize.
Note that on a single machine this is found to be slower than the "nccl" implementation.
When not None, each GPU computes its own E[x] and E[x^2],
which are then averaged among all GPUs to compute global mean & variance.
Therefore each GPU needs to have the same batch size.
The synchronization is based on the current variable scope + the name of the layer
(`BatchNorm('name', input)`). Therefore, you need to make sure that:
1. The BatchNorm layer on different GPUs needs to have the same name, so that
statistics can be synchronized. If names do not match, this layer will hang.
2. A BatchNorm layer cannot be reused within one tower.
3. A BatchNorm layer needs to be executed for the same number of times by all GPUs.
If different GPUs execute one BatchNorm layer for different number of times
(e.g., if some GPUs do not execute it), this layer may hang.
This option is also known as "SyncBN" or "Cross-GPU BatchNorm" as mentioned in:
`MegDet: A Large Mini-Batch Object Detector <https://arxiv.org/abs/1711.07240>`_.
Corresponding TF issue: https://github.com/tensorflow/tensorflow/issues/18222.
When `sync_statistics` is enabled, `ema_update` is set to "internal" automatically.
This is to avoid running `UPDATE_OPS`, which requires synchronization.
Variable Names:
* ``beta``: the bias term. Will be zero-inited by default.
* ``gamma``: the scale term. Will be one-inited by default.
* ``mean/EMA``: the moving average of mean.
* ``variance/EMA``: the moving average of variance.
Note:
This layer is more flexible than the standard "BatchNorm" layer and provides more features:
1. No matter whether you're doing training or not, you can set the ``training`` argument
to use batch statistics or EMA statistics.
i.e., you can use batch statistics during inference, or use EMA statistics during training.
Using EMA statistics in training is useful when you load a pre-trained BN and
don't want to update it.
        2. As long as `training=True`, the `sync_statistics` and `ema_update` options will take effect.
"""
# parse training/ctx
ctx = get_current_tower_context()
if training is None:
training = ctx.is_training
training = bool(training)
if not training:
virtual_batch_size = None
# parse shapes
data_format = get_data_format(data_format, keras_mode=False)
shape = inputs.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4], ndims
if sync_statistics is not None:
sync_statistics = sync_statistics.lower()
assert sync_statistics in [None, 'nccl', 'horovod'], sync_statistics
assert ema_update in ["default", "collection", "internal", "skip"]
if ema_update == "default":
ema_update = "collection"
# Logic:
# 1. EMA update is possible only when we compute batch statistics (training=True)
# 2. We know that in training, non-main training tower does not need EMA
# update (unless you need, e.g., inference during training on all towers)
# We don't know about what to do in prediction context, so be conservative and do the update.
# 3. User can explicit disable update by "skip".
do_ema_update = training and \
(ctx.is_main_training_tower or not ctx.is_training) \
and (ema_update != "skip")
if axis is None:
if ndims == 2:
axis = 1
else:
axis = 1 if data_format == 'NCHW' else 3
assert axis in [1, 3], axis
num_chan = shape[axis]
freeze_bn_backward = not training and ctx.is_training
if freeze_bn_backward:
if ctx.is_main_training_tower: # only warn in first tower
log_once("Some BatchNorm layer uses moving_mean/moving_variance in training.", func='warn')
# Using moving_mean/moving_variance in training, which means we
# loaded a pre-trained BN and only fine-tuning the affine part.
do_sync_bn = (sync_statistics is not None) and training
if not do_sync_bn and not virtual_batch_size:
# Use the builtin layer for regular per-GPU BN.
# Use our own implementation for SyncBN and GhostBN
coll_bk = backup_collection([tf.GraphKeys.UPDATE_OPS])
with rename_get_variable(
{'moving_mean': 'mean/EMA',
'moving_variance': 'variance/EMA'}):
tf_args = dict(
axis=axis,
momentum=momentum, epsilon=epsilon,
center=center, scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
fused=(ndims == 4 and axis in [1, 3]),
_reuse=tf.get_variable_scope().reuse)
use_fp16 = inputs.dtype == tf.float16
if use_fp16:
# non-fused does not support fp16; fused does not support all layouts.
# we made our best guess here
tf_args['fused'] = True
layer = tf.layers.BatchNormalization(**tf_args)
xn = layer.apply(inputs, training=training, scope=tf.get_variable_scope())
# Add EMA variables to the correct collection
if ctx.is_main_training_tower:
for v in layer.non_trainable_variables:
if isinstance(v, tf.Variable):
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
if not do_ema_update:
restore_collection(coll_bk)
if do_ema_update and ema_update == "internal":
# Implement "internal" update.
restore_collection(coll_bk)
assert layer.updates
with tf.control_dependencies(layer.updates):
ret = tf.identity(xn, name='output')
else:
ret = tf.identity(xn, name='output')
vh = ret.variables = VariableHolder(
moving_mean=layer.moving_mean,
mean=layer.moving_mean, # for backward-compatibility
moving_variance=layer.moving_variance,
variance=layer.moving_variance) # for backward-compatibility
if scale:
vh.gamma = layer.gamma
if center:
vh.beta = layer.beta
else:
red_axis = [0] if ndims == 2 else ([0, 2, 3] if axis == 1 else [0, 1, 2])
beta, gamma, moving_mean, moving_var = get_bn_variables(
num_chan, scale, center, beta_initializer, gamma_initializer)
assert sync_statistics is None or virtual_batch_size is None, "Cannot use SyncBN and GhostBN together!"
new_shape = None # don't need to reshape unless ...
if sync_statistics is not None:
# sync bn
batch_mean, batch_var = get_sync_bn_mean_var(inputs, red_axis, sync_statistics)
batch_mean_vec = batch_mean
batch_var_vec = batch_var
if ndims == 4 and axis == 1:
new_shape = [1, num_chan, 1, 1]
batch_mean = tf.reshape(batch_mean, new_shape)
batch_var = tf.reshape(batch_var, new_shape)
else:
orig_shape = tf.shape(inputs)
inputs = tf.reshape(
inputs,
tf.concat([[-1, virtual_batch_size],
tf.shape(inputs)[1:]], axis=0))
# B/V, V, ...
red_axis = [x + 1 for x in red_axis]
new_shape = [1] * (ndims + 1)
new_shape[axis + 1] = num_chan
batch_mean, batch_var = tf.nn.moments(inputs, red_axis, keepdims=True)
# B/V, C
# vec for EMA update: use the first one only to mimic per-GPU BN
batch_mean_vec = tf.reshape(batch_mean[0], [num_chan])
batch_var_vec = tf.reshape(batch_var[0], [num_chan])
if new_shape is not None:
# Using fused_batch_norm(is_training=False) is actually slightly faster,
# but hopefully this call will be JITed in the future.
xn = tf.nn.batch_normalization(
inputs, batch_mean, batch_var,
tf.reshape(beta, new_shape),
tf.reshape(gamma, new_shape), epsilon)
else:
xn = tf.nn.batch_normalization(
inputs, batch_mean, batch_var,
beta, gamma, epsilon)
if virtual_batch_size is not None:
xn = tf.reshape(xn, orig_shape)
if do_ema_update:
ret = internal_update_bn_ema(
xn, batch_mean_vec, batch_var_vec, moving_mean, moving_var, momentum)
else:
ret = tf.identity(xn, name='output')
vh = ret.variables = VariableHolder(
moving_mean=moving_mean,
mean=moving_mean, # for backward-compatibility
moving_variance=moving_var,
variance=moving_var) # for backward-compatibility
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
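# A minimal usage sketch (not part of the library source): how BatchNorm is typically
# called inside a tower function. The tensor `x` and the scope names are hypothetical
# placeholders, and the imports assume the usual tensorpack module layout.
#
#   from tensorpack.models import BatchNorm
#   from tensorpack.tfutils.argscope import argscope
#
#   x = BatchNorm('bn0', x)   # plain per-GPU BN; EMA updated via UPDATE_OPS ("collection")
#
#   # Cross-GPU SyncBN for every BatchNorm under this scope. As documented above,
#   # `ema_update` then switches to "internal" automatically.
#   with argscope(BatchNorm, sync_statistics='nccl'):
#       x = BatchNorm('bn1', x)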
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
'decay': 'momentum'
})
def BatchRenorm(x, rmax, dmax, *, momentum=0.9, epsilon=1e-5,
center=True, scale=True, gamma_initializer=None,
data_format='channels_last'):
"""
Batch Renormalization layer, as described in the paper:
`Batch Renormalization: Towards Reducing Minibatch Dependence in Batch-Normalized Models
<https://arxiv.org/abs/1702.03275>`_.
This implementation is a wrapper around `tf.layers.batch_normalization`.
Args:
x (tf.Tensor): a NHWC or NC tensor.
rmax, dmax (tf.Tensor): a scalar tensor, the maximum allowed corrections.
decay (float): decay rate of moving average.
epsilon (float): epsilon to avoid divide-by-zero.
use_scale, use_bias (bool): whether to use the extra affine transformation or not.
Returns:
        tf.Tensor: a tensor named ``output`` with the same shape as x.
Variable Names:
* ``beta``: the bias term.
* ``gamma``: the scale term. Input will be transformed by ``x * gamma + beta``.
* ``moving_mean, renorm_mean, renorm_mean_weight``: See TF documentation.
* ``moving_variance, renorm_stddev, renorm_stddev_weight``: See TF documentation.
"""
shape = x.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4]
if ndims == 2:
data_format = 'channels_first'
ctx = get_current_tower_context()
coll_bk = backup_collection([tf.GraphKeys.UPDATE_OPS])
layer = tf.layers.BatchNormalization(
axis=1 if data_format == 'channels_first' else 3,
momentum=momentum, epsilon=epsilon,
center=center, scale=scale,
renorm=True,
renorm_clipping={
'rmin': 1.0 / rmax,
'rmax': rmax,
'dmax': dmax},
renorm_momentum=0.99,
gamma_initializer=gamma_initializer,
fused=False,
_reuse=tf.get_variable_scope().reuse)
xn = layer.apply(x, training=ctx.is_training, scope=tf.get_variable_scope())
if ctx.is_main_training_tower:
for v in layer.non_trainable_variables:
if isinstance(v, tf.Variable):
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
else:
# only run UPDATE_OPS in the first tower
restore_collection(coll_bk)
if ndims == 2:
xn = tf.squeeze(xn, [1, 2])
ret = tf.identity(xn, name='output')
# TODO not sure whether to add moving_mean/moving_var to VH now
vh = ret.variables = VariableHolder()
if scale:
vh.gamma = layer.gamma
if center:
vh.beta = layer.beta
return ret
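# A minimal usage sketch (not part of the library source): `rmax`/`dmax` are usually
# relaxed from (1, 0) to their final values as training progresses. The schedule and
# the tensors `x`/`global_step` below are hypothetical placeholders.
#
#   rmax = tf.train.piecewise_constant(global_step, [5000], [1.0, 3.0])
#   dmax = tf.train.piecewise_constant(global_step, [5000], [0.0, 5.0])
#   x = BatchRenorm('brn', x, rmax, dmax)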
| 21,444 | 44.530786 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/conv2d.py | # -*- coding: utf-8 -*-
# File: conv2d.py
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..tfutils.common import get_tf_version_tuple
from ..utils.argtools import get_data_format, shape2d, shape4d, log_once
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args, rename_get_variable
__all__ = ['Conv2D', 'Deconv2D', 'Conv2DTranspose']
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Conv2D(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding='same',
data_format='channels_last',
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
split=1):
"""
Similar to `tf.layers.Conv2D`, but with some differences:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'.
3. Support 'split' argument to do group convolution.
Variable Names:
* ``W``: weights
* ``b``: bias
"""
if kernel_initializer is None:
if get_tf_version_tuple() <= (1, 12):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0) # deprecated
else:
kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')
dilation_rate = shape2d(dilation_rate)
if split == 1 and dilation_rate == [1, 1]:
# tf.layers.Conv2D has bugs with dilations (https://github.com/tensorflow/tensorflow/issues/26797)
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv2D(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
_reuse=tf.get_variable_scope().reuse)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
else:
# group conv implementation
data_format = get_data_format(data_format, keras_mode=False)
in_shape = inputs.get_shape().as_list()
channel_axis = 3 if data_format == 'NHWC' else 1
in_channel = in_shape[channel_axis]
assert in_channel is not None, "[Conv2D] Input cannot have unknown channel!"
assert in_channel % split == 0
assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \
"Not supported by group conv or dilated conv!"
out_channel = filters
assert out_channel % split == 0
assert dilation_rate == [1, 1] or get_tf_version_tuple() >= (1, 5), 'TF>=1.5 required for dilated conv.'
kernel_shape = shape2d(kernel_size)
        filter_shape = kernel_shape + [in_channel // split, out_channel]
stride = shape4d(strides, data_format=data_format)
kwargs = {"data_format": data_format}
if get_tf_version_tuple() >= (1, 5):
kwargs['dilations'] = shape4d(dilation_rate, data_format=data_format)
        # match the input dtype (e.g. tf.float16), since the default dtype of variables is tf.float32
inputs_dtype = inputs.dtype
W = tf.get_variable(
'W', filter_shape, dtype=inputs_dtype, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable('b', [out_channel], dtype=inputs_dtype, initializer=bias_initializer)
if split == 1:
conv = tf.nn.conv2d(inputs, W, stride, padding.upper(), **kwargs)
else:
conv = None
if get_tf_version_tuple() >= (1, 13):
try:
conv = tf.nn.conv2d(inputs, W, stride, padding.upper(), **kwargs)
except ValueError:
log_once("CUDNN group convolution support is only available with "
"https://github.com/tensorflow/tensorflow/pull/25818 . "
"Will fall back to a loop-based slow implementation instead!", 'warn')
if conv is None:
inputs = tf.split(inputs, split, channel_axis)
kernels = tf.split(W, split, 3)
outputs = [tf.nn.conv2d(i, k, stride, padding.upper(), **kwargs)
for i, k in zip(inputs, kernels)]
conv = tf.concat(outputs, channel_axis)
ret = tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv
if activation is not None:
ret = activation(ret)
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
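# A minimal usage sketch (not part of the library source): the positional arguments
# after the scope name map to (filters, kernel_size) through the decorator above.
# `x` is a hypothetical NHWC tensor whose channel count is divisible by `split`.
#
#   x = Conv2D('conv1', x, 64, 3)                    # ordinary 3x3 conv with 64 filters
#   x = Conv2D('conv2', x, 64, 3, split=4)           # group convolution with 4 groups
#   x = Conv2D('conv3', x, 64, 3, dilation_rate=2)   # dilated conv (uses the own implementation path)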
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['filters', 'kernel_size', 'strides'],
name_mapping={
'out_channel': 'filters',
'kernel_shape': 'kernel_size',
'stride': 'strides',
})
def Conv2DTranspose(
inputs,
filters,
kernel_size,
strides=(1, 1),
padding='same',
data_format='channels_last',
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None):
"""
A wrapper around `tf.layers.Conv2DTranspose`.
Some differences to maintain backward-compatibility:
1. Default kernel initializer is variance_scaling_initializer(2.0).
2. Default padding is 'same'
Variable Names:
* ``W``: weights
* ``b``: bias
"""
if kernel_initializer is None:
if get_tf_version_tuple() <= (1, 12):
kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0) # deprecated
else:
kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')
if get_tf_version_tuple() <= (1, 12):
with rename_get_variable({'kernel': 'W', 'bias': 'b'}):
layer = tf.layers.Conv2DTranspose(
filters,
kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
_reuse=tf.get_variable_scope().reuse)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=layer.kernel)
if use_bias:
ret.variables.b = layer.bias
else:
# Our own implementation, to avoid Keras bugs. https://github.com/tensorflow/tensorflow/issues/25946
assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \
"Unsupported arguments due to Keras bug in TensorFlow 1.13"
data_format = get_data_format(data_format, keras_mode=False)
shape_dyn = tf.shape(inputs)
shape_sta = inputs.shape.as_list()
strides2d = shape2d(strides)
kernel_shape = shape2d(kernel_size)
assert padding.lower() in ['valid', 'same'], "Padding {} is not supported!".format(padding)
if padding.lower() == 'valid':
shape_res2d = [max(kernel_shape[0] - strides2d[0], 0),
max(kernel_shape[1] - strides2d[1], 0)]
else:
shape_res2d = shape2d(0)
if data_format == 'NCHW':
channels_in = shape_sta[1]
out_shape_dyn = tf.stack(
[shape_dyn[0], filters,
shape_dyn[2] * strides2d[0] + shape_res2d[0],
shape_dyn[3] * strides2d[1] + shape_res2d[1]])
out_shape3_sta = [filters,
None if shape_sta[2] is None else shape_sta[2] * strides2d[0] + shape_res2d[0],
None if shape_sta[3] is None else shape_sta[3] * strides2d[1] + shape_res2d[1]]
else:
channels_in = shape_sta[-1]
out_shape_dyn = tf.stack(
[shape_dyn[0],
shape_dyn[1] * strides2d[0] + shape_res2d[0],
shape_dyn[2] * strides2d[1] + shape_res2d[1],
filters])
out_shape3_sta = [None if shape_sta[1] is None else shape_sta[1] * strides2d[0] + shape_res2d[0],
None if shape_sta[2] is None else shape_sta[2] * strides2d[1] + shape_res2d[1],
filters]
inputs_dtype = inputs.dtype
W = tf.get_variable('W', kernel_shape + [filters, channels_in],
dtype=inputs_dtype, initializer=kernel_initializer)
if use_bias:
b = tf.get_variable('b', [filters], dtype=inputs_dtype, initializer=bias_initializer)
conv = tf.nn.conv2d_transpose(
inputs, W, out_shape_dyn,
shape4d(strides, data_format=data_format),
padding=padding.upper(),
data_format=data_format)
conv.set_shape(tf.TensorShape([shape_sta[0]] + out_shape3_sta))
ret = tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv
if activation is not None:
ret = activation(ret)
ret = tf.identity(ret, name='output')
ret.variables = VariableHolder(W=W)
if use_bias:
ret.variables.b = b
return ret
Deconv2D = Conv2DTranspose
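# A minimal usage sketch (not part of the library source): a stride-2 transposed
# convolution that doubles the spatial size of a hypothetical NHWC tensor `x`.
# `Deconv2D` above is only a backward-compatible alias for the same layer.
#
#   x = Conv2DTranspose('up1', x, 32, 3, strides=2)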
| 10,577 | 38.470149 | 112 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/tflayer.py | # -*- coding: utf-8 -*-
# File: tflayer.py
import functools
import six
import tensorflow as tf
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.varreplace import custom_getter_scope
from ..utils.argtools import get_data_format
__all__ = []
def map_common_tfargs(kwargs):
df = kwargs.pop('data_format', None)
if df is not None:
df = get_data_format(df, keras_mode=True)
kwargs['data_format'] = df
old_nl = kwargs.pop('nl', None)
if old_nl is not None:
kwargs['activation'] = lambda x, name=None: old_nl(x, name=name)
if 'W_init' in kwargs:
kwargs['kernel_initializer'] = kwargs.pop('W_init')
if 'b_init' in kwargs:
kwargs['bias_initializer'] = kwargs.pop('b_init')
return kwargs
def convert_to_tflayer_args(args_names, name_mapping):
"""
After applying this decorator:
1. data_format becomes tf.layers style
2. nl becomes activation
3. initializers are renamed
4. positional args are transformed to corresponding kwargs, according to args_names
5. kwargs are mapped to tf.layers names if needed, by name_mapping
"""
def decorator(func):
@functools.wraps(func)
def decorated_func(inputs, *args, **kwargs):
kwargs = map_common_tfargs(kwargs)
posarg_dic = {}
assert len(args) <= len(args_names), \
"Please use kwargs instead of positional args to call this model, " \
"except for the following arguments: {}".format(', '.join(args_names))
for pos_arg, name in zip(args, args_names):
posarg_dic[name] = pos_arg
ret = {}
for name, arg in six.iteritems(kwargs):
newname = name_mapping.get(name, None)
if newname is not None:
assert newname not in kwargs, \
"Argument {} and {} conflicts!".format(name, newname)
else:
newname = name
ret[newname] = arg
ret.update(posarg_dic) # Let pos arg overwrite kw arg, for argscope to work
return func(inputs, **ret)
return decorated_func
return decorator
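# An illustrative note (not part of the library source): with this decorator, a layer
# such as Conv2D accepts either the legacy tensorpack argument names or the tf.layers
# names. The two calls below are equivalent; the tensor `x` is a hypothetical placeholder.
#
#   Conv2D('c1', x, out_channel=64, kernel_shape=3, stride=1, nl=tf.nn.relu)
#   Conv2D('c1', x, filters=64, kernel_size=3, strides=1, activation=tf.nn.relu)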
def rename_get_variable(mapping):
"""
Args:
mapping(dict): an old -> new mapping for variable basename. e.g. {'kernel': 'W'}
Returns:
A context where the variables are renamed.
"""
def custom_getter(getter, name, *args, **kwargs):
splits = name.split('/')
basename = splits[-1]
if basename in mapping:
basename = mapping[basename]
splits[-1] = basename
name = '/'.join(splits)
return getter(name, *args, **kwargs)
return custom_getter_scope(custom_getter)
def rename_tflayer_get_variable():
"""
Rename all :func:`tf.get_variable` with rules that transforms tflayer style to tensorpack style.
Returns:
A context where the variables are renamed.
Example:
.. code-block:: python
with rename_tflayer_get_variable():
x = tf.layer.conv2d(input, 3, 3, name='conv0')
# variables will be named 'conv0/W', 'conv0/b'
"""
mapping = {
'kernel': 'W',
'bias': 'b',
'moving_mean': 'mean/EMA',
'moving_variance': 'variance/EMA',
}
return rename_get_variable(mapping)
def monkeypatch_tf_layers():
if get_tf_version_tuple() < (1, 4):
if not hasattr(tf.layers, 'Dense'):
from tensorflow.python.layers.core import Dense
tf.layers.Dense = Dense
from tensorflow.python.layers.normalization import BatchNormalization
tf.layers.BatchNormalization = BatchNormalization
from tensorflow.python.layers.convolutional import Conv2DTranspose, Conv2D
tf.layers.Conv2DTranspose = Conv2DTranspose
tf.layers.Conv2D = Conv2D
from tensorflow.python.layers.pooling import MaxPooling2D, AveragePooling2D
tf.layers.MaxPooling2D = MaxPooling2D
tf.layers.AveragePooling2D = AveragePooling2D
monkeypatch_tf_layers()
| 4,159 | 29.814815 | 100 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/utils.py | # -*- coding: utf-8 -*-
# File: utils.py
import six
class VariableHolder(object):
""" A proxy to access variables defined in a layer. """
def __init__(self, **kwargs):
"""
Args:
kwargs: {name:variable}
"""
self._vars = {}
for k, v in six.iteritems(kwargs):
self._add_variable(k, v)
def _add_variable(self, name, var):
assert name not in self._vars
self._vars[name] = var
def __setattr__(self, name, var):
if not name.startswith('_'):
self._add_variable(name, var)
else:
# private attributes
super(VariableHolder, self).__setattr__(name, var)
def __getattr__(self, name):
return self._vars[name]
def all(self):
"""
Returns:
list of all variables
"""
return list(six.itervalues(self._vars))
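# A minimal usage sketch (not part of the library source): layers attach a
# VariableHolder to their output tensor so the created variables can be accessed
# by name. The layer call and the tensor `x` are hypothetical placeholders.
#
#   out = Conv2D('conv1', x, 64, 3)
#   weights = out.variables.W        # the kernel variable
#   bias = out.variables.b           # the bias variable
#   all_vars = out.variables.all()   # list of all variables created by this layer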
try:
# When BN is used as an activation, keras layers try to autograph.convert it
# This leads to massive warnings so we disable it.
from tensorflow.python.autograph.impl.api import do_not_convert as disable_autograph
except ImportError:
def disable_autograph():
return lambda x: x
| 1,212 | 24.808511 | 88 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/_old_batch_norm.py | # -*- coding: utf-8 -*-
# File: _old_batch_norm.py
import tensorflow as tf
from tensorflow.contrib.framework import add_model_variable
from tensorflow.python.training import moving_averages
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.tower import get_current_tower_context
from ..utils import logger
from ..utils.argtools import get_data_format
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args
"""
Old Custom BN Implementation, Kept Here For Future Reference
"""
def get_bn_variables(n_out, use_scale, use_bias, gamma_init):
if use_bias:
beta = tf.get_variable('beta', [n_out], initializer=tf.constant_initializer())
else:
beta = tf.zeros([n_out], name='beta')
if use_scale:
gamma = tf.get_variable('gamma', [n_out], initializer=gamma_init)
else:
gamma = tf.ones([n_out], name='gamma')
# x * gamma + beta
moving_mean = tf.get_variable('mean/EMA', [n_out],
initializer=tf.constant_initializer(), trainable=False)
moving_var = tf.get_variable('variance/EMA', [n_out],
initializer=tf.constant_initializer(1.0), trainable=False)
return beta, gamma, moving_mean, moving_var
def update_bn_ema(xn, batch_mean, batch_var,
moving_mean, moving_var, decay, internal_update):
update_op1 = moving_averages.assign_moving_average(
moving_mean, batch_mean, decay, zero_debias=False,
name='mean_ema_op')
update_op2 = moving_averages.assign_moving_average(
moving_var, batch_var, decay, zero_debias=False,
name='var_ema_op')
if internal_update:
with tf.control_dependencies([update_op1, update_op2]):
return tf.identity(xn, name='output')
else:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op1)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op2)
return tf.identity(xn, name='output')
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
'decay': 'momentum',
'use_local_stat': 'training'
})
def BatchNorm(inputs, training=None, momentum=0.9, epsilon=1e-5,
center=True, scale=True,
gamma_initializer=tf.ones_initializer(),
data_format='channels_last',
internal_update=False):
"""
Mostly equivalent to `tf.layers.batch_normalization`, but difference in
the following:
1. Accepts `data_format` rather than `axis`. For 2D input, this argument will be ignored.
2. Default value for `momentum` and `epsilon` is different.
3. Default value for `training` is automatically obtained from `TowerContext`.
4. Support the `internal_update` option.
Args:
internal_update (bool): if False, add EMA update ops to
`tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer
by control dependencies.
Variable Names:
* ``beta``: the bias term. Will be zero-inited by default.
* ``gamma``: the scale term. Will be one-inited by default. Input will be transformed by ``x * gamma + beta``.
* ``mean/EMA``: the moving average of mean.
* ``variance/EMA``: the moving average of variance.
Note:
1. About multi-GPU training: moving averages across GPUs are not aggregated.
Batch statistics are computed independently. This is consistent with most frameworks.
2. Combinations of ``training`` and ``ctx.is_training``:
* ``training == ctx.is_training``: standard BN, EMA are
maintained during training and used during inference. This is
the default.
* ``training and not ctx.is_training``: still use batch statistics in inference.
* ``not training and ctx.is_training``: use EMA to normalize in
training. This is useful when you load a pre-trained BN and
don't want to fine tune the EMA. EMA will not be updated in
this case.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = inputs.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4]
if ndims == 2:
data_format = 'NHWC'
if data_format == 'NCHW':
n_out = shape[1]
else:
n_out = shape[-1] # channel
assert n_out is not None, "Input to BatchNorm cannot have unknown channels!"
beta, gamma, moving_mean, moving_var = get_bn_variables(n_out, scale, center, gamma_initializer)
ctx = get_current_tower_context()
use_local_stat = training
if use_local_stat is None:
use_local_stat = ctx.is_training
use_local_stat = bool(use_local_stat)
if use_local_stat:
if ndims == 2:
inputs = tf.reshape(inputs, [-1, 1, 1, n_out]) # fused_bn only takes 4D input
# fused_bn has error using NCHW? (see #190)
xn, batch_mean, batch_var = tf.nn.fused_batch_norm(
inputs, gamma, beta, epsilon=epsilon,
is_training=True, data_format=data_format)
if ndims == 2:
xn = tf.squeeze(xn, [1, 2])
else:
if ctx.is_training:
assert get_tf_version_tuple() >= (1, 4), \
"Fine tuning a BatchNorm model with fixed statistics is only " \
"supported after https://github.com/tensorflow/tensorflow/pull/12580 "
if ctx.is_main_training_tower: # only warn in first tower
logger.warn("[BatchNorm] Using moving_mean/moving_variance in training.")
# Using moving_mean/moving_variance in training, which means we
# loaded a pre-trained BN and only fine-tuning the affine part.
xn, _, _ = tf.nn.fused_batch_norm(
inputs, gamma, beta,
mean=moving_mean, variance=moving_var, epsilon=epsilon,
data_format=data_format, is_training=False)
else:
if ndims == 4:
xn, _, _ = tf.nn.fused_batch_norm(
inputs, gamma, beta,
mean=moving_mean, variance=moving_var, epsilon=epsilon,
data_format=data_format, is_training=False)
else:
xn = tf.nn.batch_normalization(
inputs, moving_mean, moving_var, beta, gamma, epsilon)
# maintain EMA only on one GPU is OK, even in replicated mode.
# because training time doesn't use EMA
if ctx.is_main_training_tower:
add_model_variable(moving_mean)
add_model_variable(moving_var)
if ctx.is_main_training_tower and use_local_stat:
ret = update_bn_ema(xn, batch_mean, batch_var, moving_mean, moving_var, momentum, internal_update)
else:
ret = tf.identity(xn, name='output')
vh = ret.variables = VariableHolder(mean=moving_mean, variance=moving_var)
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
| 7,082 | 40.664706 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/layer_norm.py | # -*- coding: utf-8 -*-
# File: layer_norm.py
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..utils.argtools import get_data_format
from ..utils.develop import log_deprecated
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args
__all__ = ['LayerNorm', 'InstanceNorm']
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
})
def LayerNorm(
x, epsilon=1e-5, *,
center=True, scale=True,
gamma_initializer=tf.ones_initializer(),
data_format='channels_last'):
"""
Layer Normalization layer, as described in the paper:
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
Args:
x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format.
epsilon (float): epsilon to avoid divide-by-zero.
center, scale (bool): whether to use the extra affine transformation or not.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = x.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4]
mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True)
if data_format == 'NCHW':
chan = shape[1]
new_shape = [1, chan, 1, 1]
else:
chan = shape[-1]
new_shape = [1, 1, 1, chan]
if ndims == 2:
new_shape = [1, chan]
if center:
beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
else:
beta = tf.zeros([1] * ndims, name='beta')
if scale:
gamma = tf.get_variable('gamma', [chan], initializer=gamma_initializer)
gamma = tf.reshape(gamma, new_shape)
else:
gamma = tf.ones([1] * ndims, name='gamma')
ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
vh = ret.variables = VariableHolder()
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'gamma_init': 'gamma_initializer',
})
def InstanceNorm(x, epsilon=1e-5, *, center=True, scale=True,
gamma_initializer=tf.ones_initializer(),
data_format='channels_last', use_affine=None):
"""
Instance Normalization, as in the paper:
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_.
Args:
x (tf.Tensor): a 4D tensor.
epsilon (float): avoid divide-by-zero
center, scale (bool): whether to use the extra affine transformation or not.
use_affine: deprecated. Don't use.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = x.get_shape().as_list()
assert len(shape) == 4, "Input of InstanceNorm has to be 4D!"
if use_affine is not None:
log_deprecated("InstanceNorm(use_affine=)", "Use center= or scale= instead!", "2020-06-01")
center = scale = use_affine
if data_format == 'NHWC':
axis = [1, 2]
ch = shape[3]
new_shape = [1, 1, 1, ch]
else:
axis = [2, 3]
ch = shape[1]
new_shape = [1, ch, 1, 1]
assert ch is not None, "Input of InstanceNorm require known channel!"
mean, var = tf.nn.moments(x, axis, keep_dims=True)
if center:
beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
else:
beta = tf.zeros([1, 1, 1, 1], name='beta', dtype=x.dtype)
if scale:
gamma = tf.get_variable('gamma', [ch], initializer=gamma_initializer)
gamma = tf.reshape(gamma, new_shape)
else:
gamma = tf.ones([1, 1, 1, 1], name='gamma', dtype=x.dtype)
ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
vh = ret.variables = VariableHolder()
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
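# A minimal usage sketch (not part of the library source): both layers normalize a
# hypothetical NHWC tensor `x` without moving averages, so they behave identically
# in training and inference.
#
#   x = LayerNorm('ln', x)         # normalizes over C, H, W for each sample
#   x = InstanceNorm('inorm', x)   # normalizes over H, W for each sample and channel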
| 4,188 | 30.734848 | 99 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/pool.py | # -*- coding: utf-8 -*-
# File: pool.py
import numpy as np
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..utils.argtools import get_data_format, shape2d
from .common import layer_register
from .shape_utils import StaticDynamicShape
from .tflayer import convert_to_tflayer_args
__all__ = ['MaxPooling', 'FixedUnPooling', 'AvgPooling', 'GlobalAvgPooling']
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['pool_size', 'strides'],
name_mapping={'shape': 'pool_size', 'stride': 'strides'})
def MaxPooling(
inputs,
pool_size,
strides=None,
padding='valid',
data_format='channels_last'):
"""
    Same as `tf.layers.MaxPooling2D`. By default, `strides` is equal to `pool_size`.
"""
if strides is None:
strides = pool_size
layer = tf.layers.MaxPooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name='output')
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['pool_size', 'strides'],
name_mapping={'shape': 'pool_size', 'stride': 'strides'})
def AvgPooling(
inputs,
pool_size,
strides=None,
padding='valid',
data_format='channels_last'):
"""
    Same as `tf.layers.AveragePooling2D`. By default, `strides` is equal to `pool_size`.
"""
if strides is None:
strides = pool_size
layer = tf.layers.AveragePooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name='output')
@layer_register(log_shape=True)
def GlobalAvgPooling(x, data_format='channels_last'):
"""
Global average pooling as in the paper `Network In Network
<http://arxiv.org/abs/1312.4400>`_.
Args:
x (tf.Tensor): a 4D tensor.
Returns:
tf.Tensor: a NC tensor named ``output``.
"""
assert x.shape.ndims == 4
data_format = get_data_format(data_format)
axis = [1, 2] if data_format == 'channels_last' else [2, 3]
return tf.reduce_mean(x, axis, name='output')
def UnPooling2x2ZeroFilled(x):
# https://github.com/tensorflow/tensorflow/issues/2169
out = tf.concat([x, tf.zeros_like(x)], 3)
out = tf.concat([out, tf.zeros_like(out)], 2)
sh = x.get_shape().as_list()
if None not in sh[1:]:
out_size = [-1, sh[1] * 2, sh[2] * 2, sh[3]]
return tf.reshape(out, out_size)
else:
shv = tf.shape(x)
ret = tf.reshape(out, tf.stack([-1, shv[1] * 2, shv[2] * 2, sh[3]]))
return ret
@layer_register(log_shape=True)
def FixedUnPooling(x, shape, unpool_mat=None, data_format='channels_last'):
"""
    Unpool the input by computing a Kronecker product with a fixed matrix.
Args:
x (tf.Tensor): a 4D image tensor
shape: int or (h, w) tuple
unpool_mat: a tf.Tensor or np.ndarray 2D matrix with size=shape.
            If None, a matrix with 1 at the top-left corner will be used.
Returns:
tf.Tensor: a 4D image tensor.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = shape2d(shape)
output_shape = StaticDynamicShape(x)
output_shape.apply(1 if data_format == 'NHWC' else 2, lambda x: x * shape[0])
output_shape.apply(2 if data_format == 'NHWC' else 3, lambda x: x * shape[1])
# a faster implementation for this special case
if shape[0] == 2 and shape[1] == 2 and unpool_mat is None and data_format == 'NHWC':
ret = UnPooling2x2ZeroFilled(x)
else:
# check unpool_mat
if unpool_mat is None:
mat = np.zeros(shape, dtype='float32')
mat[0][0] = 1
unpool_mat = tf.constant(mat, name='unpool_mat')
elif isinstance(unpool_mat, np.ndarray):
unpool_mat = tf.constant(unpool_mat, name='unpool_mat')
assert unpool_mat.shape.as_list() == list(shape)
if data_format == 'NHWC':
x = tf.transpose(x, [0, 3, 1, 2])
# perform a tensor-matrix kronecker product
x = tf.expand_dims(x, -1) # bchwx1
mat = tf.expand_dims(unpool_mat, 0) # 1xshxsw
ret = tf.tensordot(x, mat, axes=1) # bxcxhxwxshxsw
if data_format == 'NHWC':
ret = tf.transpose(ret, [0, 2, 4, 3, 5, 1])
else:
ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5])
shape3_dyn = [output_shape.get_dynamic(k) for k in range(1, 4)]
ret = tf.reshape(ret, tf.stack([-1] + shape3_dyn))
ret.set_shape(tf.TensorShape(output_shape.get_static()))
return ret
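# A minimal usage sketch (not part of the library source): 2x unpooling of a
# hypothetical NHWC tensor `x`. With the default `unpool_mat`, each input value goes
# to the top-left corner of its 2x2 output block and the remaining entries are zero.
#
#   y = FixedUnPooling('unpool', x, 2)   # (N, H, W, C) -> (N, 2H, 2W, C)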
| 4,686 | 32.719424 | 100 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/regularize.py | # -*- coding: utf-8 -*-
# File: regularize.py
import re
import tensorflow as tf
from ..compat import tfv1
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.tower import get_current_tower_context
from ..utils import logger
from ..utils.argtools import graph_memoized
from .common import layer_register
__all__ = ['regularize_cost', 'regularize_cost_from_collection',
'l2_regularizer', 'l1_regularizer', 'Dropout']
@graph_memoized
def _log_once(msg):
logger.info(msg)
if get_tf_version_tuple() <= (1, 12):
l2_regularizer = tf.contrib.layers.l2_regularizer # deprecated
l1_regularizer = tf.contrib.layers.l1_regularizer # deprecated
else:
# oh these little dirty details
l2_regularizer = lambda x: tf.keras.regularizers.l2(x * 0.5) # noqa
l1_regularizer = tf.keras.regularizers.l1
def regularize_cost(regex, func, name='regularize_cost'):
"""
Apply a regularizer on trainable variables matching the regex, and print
the matched variables (only print once in multi-tower training).
In replicated mode, it will only regularize variables within the current tower.
If called under a TowerContext with `is_training==False`, this function returns a zero constant tensor.
Args:
regex (str): a regex to match variable names, e.g. "conv.*/W"
func: the regularization function, which takes a tensor and returns a scalar tensor.
E.g., ``tf.nn.l2_loss, tf.contrib.layers.l1_regularizer(0.001)``.
Returns:
tf.Tensor: a scalar, the total regularization cost.
Example:
.. code-block:: python
cost = cost + regularize_cost("fc.*/W", l2_regularizer(1e-5))
"""
assert len(regex)
ctx = get_current_tower_context()
if not ctx.is_training:
# Currently cannot build the wd_cost correctly at inference,
        # because the vs_name used in inference can be '', therefore the
# variable filter will fail
return tf.constant(0, dtype=tf.float32, name='empty_' + name)
# If vars are shared, regularize all of them
# If vars are replicated, only regularize those in the current tower
if ctx.has_own_variables:
params = ctx.get_collection_in_tower(tfv1.GraphKeys.TRAINABLE_VARIABLES)
else:
params = tfv1.trainable_variables()
names = []
with tfv1.name_scope(name + '_internals'):
costs = []
for p in params:
para_name = p.op.name
if re.search(regex, para_name):
regloss = func(p)
assert regloss.dtype.is_floating, regloss
# Some variables may not be fp32, but it should
# be fine to assume regularization in fp32
if regloss.dtype != tf.float32:
regloss = tf.cast(regloss, tf.float32)
costs.append(regloss)
names.append(p.name)
if not costs:
return tf.constant(0, dtype=tf.float32, name='empty_' + name)
# remove tower prefix from names, and print
if len(ctx.vs_name):
prefix = ctx.vs_name + '/'
prefixlen = len(prefix)
def f(name):
if name.startswith(prefix):
return name[prefixlen:]
return name
names = list(map(f, names))
logger.info("regularize_cost() found {} variables to regularize.".format(len(names)))
_log_once("The following tensors will be regularized: {}".format(', '.join(names)))
return tf.add_n(costs, name=name)
def regularize_cost_from_collection(name='regularize_cost'):
"""
Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.
If in replicated mode, will only regularize variables created within the current tower.
Args:
name (str): the name of the returned tensor
Returns:
tf.Tensor: a scalar, the total regularization cost.
"""
ctx = get_current_tower_context()
if not ctx.is_training:
# TODO Currently cannot build the wd_cost correctly at inference,
        # because the vs_name used in inference can be '', therefore the
# variable filter will fail
return tf.constant(0, dtype=tf.float32, name='empty_' + name)
# NOTE: this collection doesn't always grow with towers.
# It only grows with actual variable creation, but not get_variable call.
if ctx.has_own_variables: # be careful of the first tower (name='')
losses = ctx.get_collection_in_tower(tfv1.GraphKeys.REGULARIZATION_LOSSES)
else:
losses = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES)
if len(losses) > 0:
logger.info("regularize_cost_from_collection() found {} regularizers "
"in REGULARIZATION_LOSSES collection.".format(len(losses)))
def maploss(l):
assert l.dtype.is_floating, l
if l.dtype != tf.float32:
l = tf.cast(l, tf.float32)
return l
losses = [maploss(l) for l in losses]
reg_loss = tf.add_n(losses, name=name)
return reg_loss
else:
return tf.constant(0, dtype=tf.float32, name='empty_' + name)
@layer_register(use_scope=None)
def Dropout(x, *args, **kwargs):
"""
Same as `tf.layers.dropout`.
However, for historical reasons, the first positional argument is
interpreted as keep_prob rather than drop_prob.
Explicitly use `rate=` keyword arguments to ensure things are consistent.
"""
if 'is_training' in kwargs:
kwargs['training'] = kwargs.pop('is_training')
if len(args) > 0:
if args[0] != 0.5:
logger.warn(
"The first positional argument to tensorpack.Dropout is the probability to keep, rather than to drop. "
"This is different from the rate argument in tf.layers.Dropout due to historical reasons. "
"To mimic tf.layers.Dropout, explicitly use keyword argument 'rate' instead")
rate = 1 - args[0]
elif 'keep_prob' in kwargs:
assert 'rate' not in kwargs, "Cannot set both keep_prob and rate!"
rate = 1 - kwargs.pop('keep_prob')
elif 'rate' in kwargs:
rate = kwargs.pop('rate')
else:
rate = 0.5
if kwargs.get('training', None) is None:
kwargs['training'] = get_current_tower_context().is_training
if get_tf_version_tuple() <= (1, 12):
return tf.layers.dropout(x, rate=rate, **kwargs)
else:
return tf.nn.dropout(x, rate=rate if kwargs['training'] else 0.)
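# A minimal usage sketch (not part of the library source): prefer the explicit `rate=`
# keyword to avoid the historical keep_prob/rate ambiguity described in the docstring.
# The tensor `x` is a hypothetical placeholder.
#
#   x = Dropout(x, rate=0.5)   # drops 50% of the units when training
#   x = Dropout(x, 0.3)        # legacy positional form: *keeps* 30%, i.e. rate=0.7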
| 6,528 | 36.096591 | 119 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/format.py | # -*- coding: utf-8 -*-
# File: format.py
import numpy as np
import os
import six
from ..utils import logger
from ..utils.argtools import log_once
from ..utils.serialize import loads
from ..utils.develop import create_dummy_class # noqa
from ..utils.loadcaffe import get_caffe_pb
from ..utils.timer import timed_operation
from ..utils.utils import get_tqdm
from .base import DataFlowReentrantGuard, RNGDataFlow
from .common import MapData
__all__ = ['HDF5Data', 'LMDBData', 'LMDBDataDecoder',
'CaffeLMDB', 'SVMLightData']
"""
Adapters for different data formats.
"""
class HDF5Data(RNGDataFlow):
"""
Zip data from different paths in an HDF5 file.
Warning:
The current implementation will load all data into memory. (TODO)
"""
# TODO
def __init__(self, filename, data_paths, shuffle=True):
"""
Args:
filename (str): h5 data file.
data_paths (list): list of h5 paths to zipped.
For example `['images', 'labels']`.
shuffle (bool): shuffle all data.
"""
self.f = h5py.File(filename, 'r')
logger.info("Loading {} to memory...".format(filename))
self.dps = [self.f[k].value for k in data_paths]
lens = [len(k) for k in self.dps]
assert all(k == lens[0] for k in lens)
self._size = lens[0]
self.shuffle = shuffle
def __len__(self):
return self._size
def __iter__(self):
idxs = list(range(self._size))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
yield [dp[k] for dp in self.dps]
class LMDBData(RNGDataFlow):
"""
Read a LMDB database and produce (k,v) raw bytes pairs.
The raw bytes are usually not what you're interested in.
You might want to use
:class:`LMDBDataDecoder` or apply a
mapper function after :class:`LMDBData`.
"""
def __init__(self, lmdb_path, shuffle=True, keys=None):
"""
Args:
lmdb_path (str): a directory or a file.
shuffle (bool): shuffle the keys or not.
keys (list[str] or str): list of str as the keys, used only when shuffle is True.
It can also be a format string e.g. ``{:0>8d}`` which will be
formatted with the indices from 0 to *total_size - 1*.
If not given, it will then look in the database for ``__keys__`` which
:func:`LMDBSerializer.save` used to store the list of keys.
If still not found, it will iterate over the database to find
all the keys.
"""
self._lmdb_path = lmdb_path
self._shuffle = shuffle
self._open_lmdb()
self._size = self._txn.stat()['entries']
self._set_keys(keys)
logger.info("Found {} entries in {}".format(self._size, self._lmdb_path))
# Clean them up after finding the list of keys, since we don't want to fork them
self._close_lmdb()
def _set_keys(self, keys=None):
def find_keys(txn, size):
logger.warn("Traversing the database to find keys is slow. Your should specify the keys.")
keys = []
with timed_operation("Loading LMDB keys ...", log_start=True), \
get_tqdm(total=size) as pbar:
for k in self._txn.cursor():
assert k[0] != b'__keys__'
keys.append(k[0])
pbar.update()
return keys
self.keys = self._txn.get(b'__keys__')
if self.keys is not None:
self.keys = loads(self.keys)
self._size -= 1 # delete this item
if self._shuffle: # keys are necessary when shuffle is True
if keys is None:
if self.keys is None:
self.keys = find_keys(self._txn, self._size)
else:
# check if key-format like '{:0>8d}' was given
if isinstance(keys, six.string_types):
                    self.keys = [keys.format(x) for x in np.arange(self._size)]
else:
self.keys = keys
def _open_lmdb(self):
self._lmdb = lmdb.open(self._lmdb_path,
subdir=os.path.isdir(self._lmdb_path),
readonly=True, lock=False, readahead=True,
map_size=1099511627776 * 2, max_readers=100)
self._txn = self._lmdb.begin()
def _close_lmdb(self):
self._lmdb.close()
del self._lmdb
del self._txn
def reset_state(self):
self._guard = DataFlowReentrantGuard()
super(LMDBData, self).reset_state()
self._open_lmdb() # open the LMDB in the worker process
def __len__(self):
return self._size
def __iter__(self):
with self._guard:
if not self._shuffle:
c = self._txn.cursor()
for k, v in c:
if k != b'__keys__':
yield [k, v]
else:
self.rng.shuffle(self.keys)
for k in self.keys:
v = self._txn.get(k)
yield [k, v]
class LMDBDataDecoder(MapData):
""" Read a LMDB database with a custom decoder and produce decoded outputs."""
def __init__(self, lmdb_data, decoder):
"""
Args:
lmdb_data: a :class:`LMDBData` instance.
decoder (k,v -> dp | None): a function taking k, v and returning a datapoint,
or return None to discard.
"""
def f(dp):
return decoder(dp[0], dp[1])
super(LMDBDataDecoder, self).__init__(lmdb_data, f)
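# A minimal usage sketch (not part of the library source): read raw (key, value) bytes
# from an LMDB database and decode them with a custom function. The path and the
# decoder body are hypothetical placeholders.
#
#   ds = LMDBData('/path/to/data.lmdb', shuffle=False)
#   ds = LMDBDataDecoder(ds, lambda k, v: [loads(v)])   # e.g. deserialize each value
#   ds.reset_state()
#   for dp in ds:
#       pass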
def CaffeLMDB(lmdb_path, shuffle=True, keys=None):
"""
Read a Caffe-format LMDB file where each value contains a ``caffe.Datum`` protobuf.
Produces datapoints of the format: [HWC image, label].
Note that Caffe LMDB format is not efficient: it stores serialized raw
arrays rather than JPEG images.
Args:
lmdb_path, shuffle, keys: same as :class:`LMDBData`.
Example:
.. code-block:: python
ds = CaffeLMDB("/tmp/validation", keys='{:0>8d}')
"""
cpb = get_caffe_pb()
lmdb_data = LMDBData(lmdb_path, shuffle, keys)
def decoder(k, v):
try:
datum = cpb.Datum()
datum.ParseFromString(v)
img = np.fromstring(datum.data, dtype=np.uint8)
img = img.reshape(datum.channels, datum.height, datum.width)
except Exception:
log_once("Cannot read key {}".format(k), 'warn')
return None
return [img.transpose(1, 2, 0), datum.label]
logger.warn("Caffe LMDB format doesn't store jpeg-compressed images, \
it's not recommended due to its inferior performance.")
return LMDBDataDecoder(lmdb_data, decoder)
class SVMLightData(RNGDataFlow):
""" Read X,y from an SVMlight file, and produce [X_i, y_i] pairs. """
def __init__(self, filename, shuffle=True):
"""
Args:
filename (str): input file
shuffle (bool): shuffle the data
"""
import sklearn.datasets # noqa
self.X, self.y = sklearn.datasets.load_svmlight_file(filename)
self.X = np.asarray(self.X.todense())
self.shuffle = shuffle
def __len__(self):
return len(self.y)
def __iter__(self):
idxs = np.arange(self.__len__())
if self.shuffle:
self.rng.shuffle(idxs)
for id in idxs:
yield [self.X[id, :], self.y[id]]
try:
import h5py
except ImportError:
HDF5Data = create_dummy_class('HDF5Data', 'h5py') # noqa
try:
import lmdb
except ImportError:
for klass in ['LMDBData', 'LMDBDataDecoder', 'CaffeLMDB']:
globals()[klass] = create_dummy_class(klass, 'lmdb')
| 7,910 | 31.690083 | 102 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/parallel.py | # -*- coding: utf-8 -*-
# File: parallel.py
import atexit
import pickle
import errno
import traceback
import itertools
import multiprocessing as mp
import os
import sys
import uuid
import weakref
from contextlib import contextmanager
import zmq
from six.moves import queue, range
from ..utils import logger
from ..utils.concurrency import (
StoppableThread, enable_death_signal, ensure_proc_terminate, start_proc_mask_signal)
from ..utils.serialize import dumps_once as dumps, loads_once as loads
from .base import DataFlow, DataFlowReentrantGuard, DataFlowTerminated, ProxyDataFlow
__all__ = ['PrefetchData', 'MultiProcessPrefetchData',
'MultiProcessRunner', 'MultiProcessRunnerZMQ', 'MultiThreadRunner',
'PrefetchDataZMQ', 'MultiThreadPrefetchData']
# from https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/__init__.py
class _ExceptionWrapper:
MAGIC = b"EXC_MAGIC"
"""Wraps an exception plus traceback to communicate across threads"""
def __init__(self, exc_info):
# It is important that we don't store exc_info, see
# NOTE [ Python Traceback Reference Cycle Problem ]
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
def pack(self):
return self.MAGIC + pickle.dumps(self)
@staticmethod
def unpack(dp):
if isinstance(dp, bytes) and dp.startswith(_ExceptionWrapper.MAGIC):
return pickle.loads(dp[len(_ExceptionWrapper.MAGIC):])
def _repeat_iter(get_itr):
while True:
yield from get_itr()
def _bind_guard(sock, name):
try:
sock.bind(name)
except zmq.ZMQError:
logger.error(
"ZMQError in socket.bind('{}'). Perhaps you're \
using pipes on a non-local file system. See documentation of MultiProcessRunnerZMQ \
for more information.".format(name))
raise
def _get_pipe_name(name):
if sys.platform.startswith('linux'):
# linux supports abstract sockets: http://api.zeromq.org/4-1:zmq-ipc
pipename = "ipc://@{}-pipe-{}".format(name, str(uuid.uuid1())[:8])
pipedir = os.environ.get('TENSORPACK_PIPEDIR', None)
if pipedir is not None:
logger.warn("TENSORPACK_PIPEDIR is not used on Linux any more! Abstract sockets will be used.")
else:
pipedir = os.environ.get('TENSORPACK_PIPEDIR', None)
if pipedir is not None:
logger.info("ZMQ uses TENSORPACK_PIPEDIR={}".format(pipedir))
else:
pipedir = '.'
assert os.path.isdir(pipedir), pipedir
filename = '{}/{}-pipe-{}'.format(pipedir.rstrip('/'), name, str(uuid.uuid1())[:6])
assert not os.path.exists(filename), "Pipe {} exists! You may be unlucky.".format(filename)
pipename = "ipc://{}".format(filename)
return pipename
def del_weakref(x):
o = x()
if o is not None:
o.__del__()
@contextmanager
def _zmq_catch_error(name):
try:
yield
except zmq.ContextTerminated:
logger.info("[{}] Context terminated.".format(name))
raise DataFlowTerminated()
except zmq.ZMQError as e:
if e.errno == errno.ENOTSOCK: # socket closed
logger.info("[{}] Socket closed.".format(name))
raise DataFlowTerminated()
else:
raise
except Exception:
raise
class _MultiProcessZMQDataFlow(DataFlow):
def __init__(self):
assert os.name != 'nt', "ZMQ IPC doesn't support windows!"
self._reset_done = False
self._procs = []
def reset_state(self):
"""
All forked dataflows should only be reset **once and only once** in spawned processes.
Subclasses should call this method with super.
"""
assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
self._reset_done = True
# __del__ not guaranteed to get called at exit
atexit.register(del_weakref, weakref.ref(self))
def _start_processes(self):
start_proc_mask_signal(self._procs)
def __del__(self):
try:
if not self._reset_done:
return
if not self.context.closed:
self.socket.close(0)
self.context.destroy(0)
for x in self._procs:
x.terminate()
x.join(5)
print("{} successfully cleaned-up.".format(type(self).__name__))
except Exception:
pass
class MultiProcessRunner(ProxyDataFlow):
"""
Running a DataFlow in >=1 processes using Python multiprocessing utilities.
It will fork the process that calls :meth:`__init__`, collect datapoints from `ds` in each
process by a Python :class:`multiprocessing.Queue`.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that the process will be forked ``num_proc`` times.
There will be ``num_proc`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_proc=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_proc>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_proc`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
2. This has more serialization overhead than :class:`MultiProcessRunnerZMQ` when data is large.
3. You can nest like this: ``MultiProcessRunnerZMQ(MultiProcessRunner(df, num_proc=a), num_proc=b)``.
A total of ``a`` instances of ``df`` worker processes will be created.
4. Fork happens in `__init__`. `reset_state()` is a no-op.
DataFlow in the worker processes will be reset at the time of fork.
5. This DataFlow does support windows. However, Windows requires more strict picklability on processes,
which means that some code that's forkable on Linux may not be forkable on Windows. If that happens you'll
need to re-organize some part of code that's not forkable.
"""
class _Worker(mp.Process):
def __init__(self, ds, queue, idx):
super(MultiProcessRunner._Worker, self).__init__()
self.ds = ds
self.queue = queue
self.idx = idx
def run(self):
enable_death_signal(_warn=self.idx == 0)
# reset all ds so each process will produce different data
self.ds.reset_state()
while True:
for dp in self.ds:
self.queue.put(dp)
def __init__(self, ds, num_prefetch, num_proc):
"""
Args:
ds (DataFlow): input DataFlow.
num_prefetch (int): size of the queue to hold prefetched datapoints.
Required.
num_proc (int): number of processes to use. Required.
"""
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#the-spawn-and-forkserver-start-methods
if os.name == 'nt':
logger.warn("MultiProcessRunner does support Windows. \
However, Windows requires more strict picklability on processes, which may \
lead to failures in some of the code.")
super(MultiProcessRunner, self).__init__(ds)
try:
self._size = len(ds)
except NotImplementedError:
self._size = -1
assert num_proc > 0, num_proc
assert num_prefetch > 0, num_prefetch
self.num_proc = num_proc
self.num_prefetch = num_prefetch
if num_proc > 1:
logger.info("[MultiProcessRunner] Will fork a dataflow more than one times. "
"This assumes the datapoints are i.i.d.")
self.queue = mp.Queue(self.num_prefetch)
self.procs = [MultiProcessRunner._Worker(self.ds, self.queue, idx)
for idx in range(self.num_proc)]
ensure_proc_terminate(self.procs)
self._reset_done = False
def __iter__(self):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
dp = self.queue.get()
yield dp
def reset_state(self):
assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
self._reset_done = True
start_proc_mask_signal(self.procs)
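# A minimal usage sketch (not part of the library source): wrap an expensive dataflow
# so that 4 worker processes keep a queue of 256 datapoints filled. `df` is a
# hypothetical DataFlow whose samples are i.i.d. (see the notes above).
#
#   df = MultiProcessRunner(df, num_prefetch=256, num_proc=4)
#   df.reset_state()
#   for dp in df:
#       pass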
class MultiProcessRunnerZMQ(_MultiProcessZMQDataFlow):
"""
Run a DataFlow in >=1 processes, with ZeroMQ for communication.
It will fork the calling process of :meth:`reset_state()`,
and collect datapoints from the given dataflow in each process by ZeroMQ IPC pipe.
This is typically faster than :class:`MultiProcessRunner`.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that the process will be forked ``num_proc`` times.
There will be ``num_proc`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_proc=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_proc>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_proc`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
2. `reset_state()` of the given dataflow will be called **once and only once** in the worker processes.
3. The fork of processes happened in this dataflow's `reset_state()` method.
Please note that forking a TensorFlow GPU session may be unsafe.
If you're managing this dataflow on your own,
it's better to fork before creating the session.
4. (Fork-safety) After the fork has happened, this dataflow becomes not fork-safe.
i.e., if you fork an already reset instance of this dataflow,
it won't be usable in the forked process. Therefore, do not nest two `MultiProcessRunnerZMQ`.
5. (Thread-safety) ZMQ is not thread safe. Therefore, do not call :meth:`get_data` of the same dataflow in
more than 1 threads.
6. This dataflow does not support windows. Use `MultiProcessRunner` which works on windows.
7. (For Mac only) A UNIX named pipe will be created in the current directory.
However, certain non-local filesystem such as NFS/GlusterFS/AFS doesn't always support pipes.
You can change the directory by ``export TENSORPACK_PIPEDIR=/other/dir``.
In particular, you can use somewhere under '/tmp' which is usually local.
Note that some non-local FS may appear to support pipes and code
may appear to run but crash with bizarre error.
Also note that ZMQ limits the maximum length of pipe path.
If you hit the limit, you can set the directory to a softlink
which points to a local directory.
"""
class _Worker(mp.Process):
def __init__(self, ds, conn_name, hwm, idx):
super(MultiProcessRunnerZMQ._Worker, self).__init__()
self.ds = ds
self.conn_name = conn_name
self.hwm = hwm
self.idx = idx
def run(self):
enable_death_signal(_warn=self.idx == 0)
self.ds.reset_state()
itr = _repeat_iter(lambda: self.ds)
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.set_hwm(self.hwm)
socket.connect(self.conn_name)
try:
while True:
try:
dp = next(itr)
socket.send(dumps(dp), copy=False)
except Exception:
dp = _ExceptionWrapper(sys.exc_info()).pack()
socket.send(dumps(dp), copy=False)
raise
# sigint could still propagate here, e.g. when nested
except KeyboardInterrupt:
pass
finally:
socket.close(0)
context.destroy(0)
def __init__(self, ds, num_proc=1, hwm=50):
"""
Args:
ds (DataFlow): input DataFlow.
num_proc (int): number of processes to use.
hwm (int): the zmq "high-water mark" (queue size) for both sender and receiver.
"""
super(MultiProcessRunnerZMQ, self).__init__()
self.ds = ds
self.num_proc = num_proc
self._hwm = hwm
if num_proc > 1:
logger.info("[MultiProcessRunnerZMQ] Will fork a dataflow more than one times. "
"This assumes the datapoints are i.i.d.")
try:
self._size = ds.__len__()
except NotImplementedError:
self._size = -1
def _recv(self):
ret = loads(self.socket.recv(copy=False))
exc = _ExceptionWrapper.unpack(ret)
if exc is not None:
logger.error("Exception '{}' in worker:".format(str(exc.exc_type)))
raise exc.exc_type(exc.exc_msg)
return ret
def __len__(self):
return self.ds.__len__()
def __iter__(self):
with self._guard, _zmq_catch_error('MultiProcessRunnerZMQ'):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
yield self._recv()
def reset_state(self):
super(MultiProcessRunnerZMQ, self).reset_state()
self._guard = DataFlowReentrantGuard()
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PULL)
self.socket.set_hwm(self._hwm)
pipename = _get_pipe_name('dataflow')
_bind_guard(self.socket, pipename)
self._procs = [MultiProcessRunnerZMQ._Worker(self.ds, pipename, self._hwm, idx)
for idx in range(self.num_proc)]
self._start_processes()
class MultiThreadRunner(DataFlow):
"""
Create multiple dataflow instances and run them each in one thread.
Collect outputs from them with a queue.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that each thread will create a dataflow iterator.
There will be ``num_thread`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_thread=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_thread>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_thread`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
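    Example:
        A minimal usage sketch (``MyDataFlow`` is an illustrative placeholder for your own DataFlow class):
        .. code-block:: python
            df = MultiThreadRunner(lambda: MyDataFlow(), num_prefetch=100, num_thread=4)
            df.reset_state()
            for dp in df:
                pass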
"""
class _Worker(StoppableThread):
def __init__(self, get_df, queue):
super(MultiThreadRunner._Worker, self).__init__()
self.df = get_df()
assert isinstance(self.df, DataFlow), self.df
self.queue = queue
self.daemon = True
def run(self):
self.df.reset_state()
try:
while True:
for dp in self.df:
if self.stopped():
return
self.queue_put_stoppable(self.queue, dp)
except Exception:
if self.stopped():
pass # skip duplicated error messages
else:
raise
finally:
self.stop()
def __init__(self, get_df, num_prefetch, num_thread):
"""
Args:
get_df ( -> DataFlow): a callable which returns a DataFlow.
Each thread will call this function to get the DataFlow to use.
Therefore do not return the same DataFlow object for each call,
unless your dataflow is stateless.
num_prefetch (int): size of the queue
num_thread (int): number of threads
"""
assert num_thread > 0, num_thread
assert num_prefetch > 0, num_prefetch
self.num_thread = num_thread
self.queue = queue.Queue(maxsize=num_prefetch)
self.threads = [
MultiThreadRunner._Worker(get_df, self.queue)
for _ in range(num_thread)]
try:
self._size = self.__len__()
except NotImplementedError:
self._size = -1
def reset_state(self):
for th in self.threads:
th.df.reset_state()
th.start()
def __len__(self):
return self.threads[0].df.__len__()
def __iter__(self):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
yield self.queue.get()
def __del__(self):
for p in self.threads:
if p.is_alive():
p.stop()
p.join()
class PlasmaPutData(ProxyDataFlow):
"""
Put each data point to plasma shared memory object store, and yield the object id instead.
Experimental.
"""
def __init__(self, ds, socket="/tmp/plasma"):
self._socket = socket
super(PlasmaPutData, self).__init__(ds)
def reset_state(self):
super(PlasmaPutData, self).reset_state()
self.client = plasma.connect(self._socket, "", 0)
def __iter__(self):
for dp in self.ds:
oid = self.client.put(dp)
yield [oid.binary()]
class PlasmaGetData(ProxyDataFlow):
"""
Take plasma object id from a DataFlow, and retrieve it from plasma shared
memory object store.
Experimental.
"""
def __init__(self, ds, socket="/tmp/plasma"):
self._socket = socket
super(PlasmaGetData, self).__init__(ds)
def reset_state(self):
super(PlasmaGetData, self).reset_state()
self.client = plasma.connect(self._socket, "", 0)
def __iter__(self):
for dp in self.ds:
oid = plasma.ObjectID(dp[0])
dp = self.client.get(oid)
yield dp
plasma = None
# This plasma code is only experimental
# try:
# import pyarrow.plasma as plasma
# except ImportError:
# from ..utils.develop import create_dummy_class
# PlasmaPutData = create_dummy_class('PlasmaPutData', 'pyarrow') # noqa
# PlasmaGetData = create_dummy_class('PlasmaGetData', 'pyarrow') # noqa
# The old inappropriate names:
PrefetchData = MultiProcessRunner
MultiProcessPrefetchData = MultiProcessRunner
PrefetchDataZMQ = MultiProcessRunnerZMQ
MultiThreadPrefetchData = MultiThreadRunner
if __name__ == '__main__':
import time
from .raw import DataFromGenerator
from .common import FixedSizeData
x = DataFromGenerator(itertools.count())
x = FixedSizeData(x, 100)
x = MultiProcessRunnerZMQ(x, 2)
x.reset_state()
for idx, dp in enumerate(x):
print(dp)
time.sleep(0.1)
| 21,575 | 38.588991 | 123 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/dataset/ilsvrc.py | # -*- coding: utf-8 -*-
# File: ilsvrc.py
import numpy as np
import os
import tarfile
import tqdm
from ...utils import logger
from ...utils.fs import download, get_dataset_path, mkdir_p
from ...utils.loadcaffe import get_caffe_pb
from ...utils.timer import timed_operation
from ..base import RNGDataFlow
__all__ = ['ILSVRCMeta', 'ILSVRC12', 'ILSVRC12Files']
CAFFE_ILSVRC12_URL = ("http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz", 17858008)
class ILSVRCMeta(object):
"""
Provide methods to access metadata for ILSVRC dataset.
"""
def __init__(self, dir=None):
if dir is None:
dir = get_dataset_path('ilsvrc_metadata')
self.dir = os.path.expanduser(dir)
mkdir_p(self.dir)
f = os.path.join(self.dir, 'synsets.txt')
if not os.path.isfile(f):
self._download_caffe_meta()
self.caffepb = None
def get_synset_words_1000(self):
"""
Returns:
dict: {cls_number: cls_name}
"""
fname = os.path.join(self.dir, 'synset_words.txt')
assert os.path.isfile(fname), fname
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def get_synset_1000(self):
"""
Returns:
dict: {cls_number: synset_id}
"""
fname = os.path.join(self.dir, 'synsets.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def _download_caffe_meta(self):
fpath = download(CAFFE_ILSVRC12_URL[0], self.dir, expect_size=CAFFE_ILSVRC12_URL[1])
tarfile.open(fpath, 'r:gz').extractall(self.dir)
def get_image_list(self, name, dir_structure='original'):
"""
Args:
name (str): 'train' or 'val' or 'test'
dir_structure (str): same as in :meth:`ILSVRC12.__init__()`.
Returns:
list: list of (image filename, label)
"""
assert name in ['train', 'val', 'test']
assert dir_structure in ['original', 'train']
add_label_to_fname = (name != 'train' and dir_structure != 'original')
if add_label_to_fname:
synset = self.get_synset_1000()
fname = os.path.join(self.dir, name + '.txt')
assert os.path.isfile(fname), fname
with open(fname) as f:
ret = []
for line in f.readlines():
name, cls = line.strip().split()
cls = int(cls)
if add_label_to_fname:
name = os.path.join(synset[cls], name)
ret.append((name.strip(), cls))
assert len(ret), fname
return ret
def get_per_pixel_mean(self, size=None):
"""
Args:
size (tuple): image size in (h, w). Defaults to (256, 256).
Returns:
np.ndarray: per-pixel mean of shape (h, w, 3 (BGR)) in range [0, 255].
"""
if self.caffepb is None:
self.caffepb = get_caffe_pb()
obj = self.caffepb.BlobProto()
mean_file = os.path.join(self.dir, 'imagenet_mean.binaryproto')
with open(mean_file, 'rb') as f:
obj.ParseFromString(f.read())
arr = np.array(obj.data).reshape((3, 256, 256)).astype('float32')
arr = np.transpose(arr, [1, 2, 0])
if size is not None:
arr = cv2.resize(arr, size[::-1])
return arr
@staticmethod
def guess_dir_structure(dir):
"""
Return the directory structure of "dir".
Args:
dir(str): something like '/path/to/imagenet/val'
Returns:
either 'train' or 'original'
"""
subdir = os.listdir(dir)[0]
# find a subdir starting with 'n'
if subdir.startswith('n') and \
os.path.isdir(os.path.join(dir, subdir)):
dir_structure = 'train'
else:
dir_structure = 'original'
logger.info(
"[ILSVRC12] Assuming directory {} has '{}' structure.".format(
dir, dir_structure))
return dir_structure
class ILSVRC12Files(RNGDataFlow):
"""
Same as :class:`ILSVRC12`, but produces filenames of the images instead of nparrays.
This could be useful when ``cv2.imread`` is a bottleneck and you want to
decode it in smarter ways (e.g. in parallel).
"""
def __init__(self, dir, name, meta_dir=None,
shuffle=None, dir_structure=None):
"""
Same as in :class:`ILSVRC12`.
"""
assert name in ['train', 'test', 'val'], name
dir = os.path.expanduser(dir)
assert os.path.isdir(dir), dir
self.full_dir = os.path.join(dir, name)
self.name = name
assert os.path.isdir(self.full_dir), self.full_dir
assert meta_dir is None or os.path.isdir(meta_dir), meta_dir
if shuffle is None:
shuffle = name == 'train'
self.shuffle = shuffle
if name == 'train':
dir_structure = 'train'
if dir_structure is None:
dir_structure = ILSVRCMeta.guess_dir_structure(self.full_dir)
meta = ILSVRCMeta(meta_dir)
self.imglist = meta.get_image_list(name, dir_structure)
for fname, _ in self.imglist[:10]:
fname = os.path.join(self.full_dir, fname)
assert os.path.isfile(fname), fname
def __len__(self):
return len(self.imglist)
def __iter__(self):
idxs = np.arange(len(self.imglist))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
fname, label = self.imglist[k]
fname = os.path.join(self.full_dir, fname)
yield [fname, label]
class ILSVRC12(ILSVRC12Files):
"""
Produces uint8 ILSVRC12 images of shape [h, w, 3(BGR)], and a label between [0, 999].
The label map follows the synsets.txt file in http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz.
"""
def __init__(self, dir, name, meta_dir=None,
shuffle=None, dir_structure=None):
"""
Args:
dir (str): A directory containing a subdir named ``name``,
containing the images in a structure described below.
name (str): One of 'train' or 'val' or 'test'.
shuffle (bool): shuffle the dataset.
Defaults to True if name=='train'.
dir_structure (str): One of 'original' or 'train'.
The directory structure for the 'val' directory.
                'original' means the original decompressed directory, which only has a flat list of image files (as below).
If set to 'train', it expects the same two-level directory structure similar to 'dir/train/'.
By default, it tries to automatically detect the structure.
You probably do not need to care about this option because 'original' is what people usually have.
Example:
When `dir_structure=='original'`, `dir` should have the following structure:
.. code-block:: none
dir/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
ILSVRC2012_val_00000001.JPEG
...
test/
ILSVRC2012_test_00000001.JPEG
...
With the downloaded ILSVRC12_img_*.tar, you can use the following
command to build the above structure:
.. code-block:: none
mkdir val && tar xvf ILSVRC12_img_val.tar -C val
mkdir test && tar xvf ILSVRC12_img_test.tar -C test
mkdir train && tar xvf ILSVRC12_img_train.tar -C train && cd train
find -type f -name '*.tar' | parallel -P 10 'echo {} && mkdir -p {/.} && tar xf {} -C {/.}'
When `dir_structure=='train'`, `dir` should have the following structure:
.. code-block:: none
dir/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
n01440764/
ILSVRC2012_val_00000293.JPEG
...
...
test/
ILSVRC2012_test_00000001.JPEG
...
"""
super(ILSVRC12, self).__init__(
dir, name, meta_dir, shuffle, dir_structure)
"""
There are some CMYK / png images, but cv2 seems robust to them.
https://github.com/tensorflow/models/blob/c0cd713f59cfe44fa049b3120c417cc4079c17e3/research/inception/inception/data/build_imagenet_data.py#L264-L300
"""
def __iter__(self):
for fname, label in super(ILSVRC12, self).__iter__():
im = cv2.imread(fname, cv2.IMREAD_COLOR)
assert im is not None, fname
yield [im, label]
@staticmethod
def get_training_bbox(bbox_dir, imglist):
import xml.etree.ElementTree as ET
ret = []
def parse_bbox(fname):
root = ET.parse(fname).getroot()
size = root.find('size').getchildren()
size = map(int, [size[0].text, size[1].text])
box = root.find('object').find('bndbox').getchildren()
box = map(lambda x: float(x.text), box)
return np.asarray(box, dtype='float32')
with timed_operation('Loading Bounding Boxes ...'):
cnt = 0
for k in tqdm.trange(len(imglist)):
fname = imglist[k][0]
fname = fname[:-4] + 'xml'
fname = os.path.join(bbox_dir, fname)
try:
ret.append(parse_bbox(fname))
cnt += 1
except Exception:
ret.append(None)
logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
return ret
try:
import cv2
except ImportError:
from ...utils.develop import create_dummy_class
ILSVRC12 = create_dummy_class('ILSVRC12', 'cv2') # noqa
if __name__ == '__main__':
meta = ILSVRCMeta()
# print(meta.get_synset_words_1000())
ds = ILSVRC12('/home/wyx/data/fake_ilsvrc/', 'train', shuffle=False)
ds.reset_state()
for _ in ds:
from IPython import embed
embed()
break
| 10,381 | 32.81759 | 153 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/crop.py | # -*- coding: utf-8 -*-
# File: crop.py
import numpy as np
import cv2
from ...utils.argtools import shape2d
from ...utils.develop import log_deprecated
from .base import ImageAugmentor, ImagePlaceholder
from .transform import CropTransform, TransformList, ResizeTransform, PhotometricTransform
from .misc import ResizeShortestEdge
__all__ = ['RandomCrop', 'CenterCrop', 'RandomCropRandomShape',
'GoogleNetRandomCropAndResize', 'RandomCutout']
class RandomCrop(ImageAugmentor):
""" Randomly crop the image into a smaller one """
def __init__(self, crop_shape):
"""
Args:
crop_shape: (h, w), int or a tuple of int
"""
crop_shape = shape2d(crop_shape)
crop_shape = (int(crop_shape[0]), int(crop_shape[1]))
super(RandomCrop, self).__init__()
self._init(locals())
def get_transform(self, img):
orig_shape = img.shape
assert orig_shape[0] >= self.crop_shape[0] \
and orig_shape[1] >= self.crop_shape[1], orig_shape
diffh = orig_shape[0] - self.crop_shape[0]
h0 = self.rng.randint(diffh + 1)
diffw = orig_shape[1] - self.crop_shape[1]
w0 = self.rng.randint(diffw + 1)
return CropTransform(h0, w0, self.crop_shape[0], self.crop_shape[1])
class CenterCrop(ImageAugmentor):
""" Crop the image at the center"""
def __init__(self, crop_shape):
"""
Args:
crop_shape: (h, w) tuple or a int
"""
crop_shape = shape2d(crop_shape)
self._init(locals())
def get_transform(self, img):
orig_shape = img.shape
assert orig_shape[0] >= self.crop_shape[0] \
and orig_shape[1] >= self.crop_shape[1], orig_shape
h0 = int((orig_shape[0] - self.crop_shape[0]) * 0.5)
w0 = int((orig_shape[1] - self.crop_shape[1]) * 0.5)
return CropTransform(h0, w0, self.crop_shape[0], self.crop_shape[1])
class RandomCropRandomShape(ImageAugmentor):
""" Random crop with a random shape"""
def __init__(self, wmin, hmin,
wmax=None, hmax=None,
max_aspect_ratio=None):
"""
Randomly crop a box of shape (h, w), sampled from [min, max] (both inclusive).
If max is None, will use the input image shape.
Args:
wmin, hmin, wmax, hmax: range to sample shape.
max_aspect_ratio (float): this argument has no effect and is deprecated.
"""
super(RandomCropRandomShape, self).__init__()
if max_aspect_ratio is not None:
log_deprecated("RandomCropRandomShape(max_aspect_ratio)", "It is never implemented!", "2020-06-06")
self._init(locals())
def get_transform(self, img):
hmax = self.hmax or img.shape[0]
wmax = self.wmax or img.shape[1]
h = self.rng.randint(self.hmin, hmax + 1)
w = self.rng.randint(self.wmin, wmax + 1)
diffh = img.shape[0] - h
diffw = img.shape[1] - w
assert diffh >= 0 and diffw >= 0, str(diffh) + ", " + str(diffw)
y0 = 0 if diffh == 0 else self.rng.randint(diffh)
x0 = 0 if diffw == 0 else self.rng.randint(diffw)
return CropTransform(y0, x0, h, w)
class GoogleNetRandomCropAndResize(ImageAugmentor):
"""
The random crop and resize augmentation proposed in
Sec. 6 of "Going Deeper with Convolutions" by Google.
This implementation follows the details in ``fb.resnet.torch``.
    It attempts to crop a random rectangle covering 8%~100% of the original image area,
    and keeps the aspect ratio between 3/4 and 4/3. Then it resizes this crop to the target shape.
    If such a crop cannot be found within 10 attempts, it falls back to ResizeShortestEdge + CenterCrop.
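    Example:
        A sketch of typical use in a training pipeline (assuming ``AugmentImageComponent`` from
        ``tensorpack.dataflow`` and an existing DataFlow ``df`` whose first component is an image):
        .. code-block:: python
            augmentors = [GoogleNetRandomCropAndResize(target_shape=224)]
            df = AugmentImageComponent(df, augmentors)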
"""
def __init__(self, crop_area_fraction=(0.08, 1.),
aspect_ratio_range=(0.75, 1.333),
target_shape=224, interp=cv2.INTER_LINEAR):
"""
Args:
crop_area_fraction (tuple(float)): Defaults to crop 8%-100% area.
aspect_ratio_range (tuple(float)): Defaults to make aspect ratio in 3/4-4/3.
target_shape (int): Defaults to 224, the standard ImageNet image shape.
"""
super(GoogleNetRandomCropAndResize, self).__init__()
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
area = h * w
for _ in range(10):
targetArea = self.rng.uniform(*self.crop_area_fraction) * area
aspectR = self.rng.uniform(*self.aspect_ratio_range)
ww = int(np.sqrt(targetArea * aspectR) + 0.5)
hh = int(np.sqrt(targetArea / aspectR) + 0.5)
if self.rng.uniform() < 0.5:
ww, hh = hh, ww
if hh <= h and ww <= w:
x1 = self.rng.randint(0, w - ww + 1)
y1 = self.rng.randint(0, h - hh + 1)
return TransformList([
CropTransform(y1, x1, hh, ww),
ResizeTransform(hh, ww, self.target_shape, self.target_shape, interp=self.interp)
])
resize = ResizeShortestEdge(self.target_shape, interp=self.interp).get_transform(img)
out_shape = (resize.new_h, resize.new_w)
crop = CenterCrop(self.target_shape).get_transform(ImagePlaceholder(shape=out_shape))
return TransformList([resize, crop])
class RandomCutout(ImageAugmentor):
"""
The cutout augmentation, as described in https://arxiv.org/abs/1708.04552
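    Example:
        A sketch with illustrative values (cut one random 16x16 patch and fill it with 0;
        ``img`` is any HWC image array):
        .. code-block:: python
            aug = RandomCutout(16, 16, fill=0.)
            img = aug.augment(img)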
"""
def __init__(self, h_range, w_range, fill=0.):
"""
Args:
h_range (int or tuple): the height of rectangle to cut.
If a tuple, will randomly sample from this range [low, high)
w_range (int or tuple): similar to above
fill (float): the fill value
"""
super(RandomCutout, self).__init__()
self._init(locals())
def _get_cutout_shape(self):
if isinstance(self.h_range, int):
h = self.h_range
else:
h = self.rng.randint(self.h_range)
if isinstance(self.w_range, int):
w = self.w_range
else:
w = self.rng.randint(self.w_range)
return h, w
@staticmethod
def _cutout(img, y0, x0, h, w, fill):
img[y0:y0 + h, x0:x0 + w] = fill
return img
def get_transform(self, img):
h, w = self._get_cutout_shape()
x0 = self.rng.randint(0, img.shape[1] + 1 - w)
y0 = self.rng.randint(0, img.shape[0] + 1 - h)
return PhotometricTransform(
lambda img: RandomCutout._cutout(img, y0, x0, h, w, self.fill),
"cutout")
| 6,674 | 36.711864 | 111 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/imgproc.py | # -*- coding: utf-8 -*-
# File: imgproc.py
import numpy as np
import cv2
from ...utils.develop import log_deprecated
from .base import PhotometricAugmentor
__all__ = ['Hue', 'Brightness', 'BrightnessScale', 'Contrast', 'MeanVarianceNormalize',
'GaussianBlur', 'Gamma', 'Clip', 'Saturation', 'Lighting', 'MinMaxNormalize']
class Hue(PhotometricAugmentor):
""" Randomly change color hue.
"""
def __init__(self, range=(0, 180), rgb=True):
"""
Args:
range(list or tuple): range from which the applied hue offset is selected
(maximum range can be [-90,90] for both uint8 and float32)
rgb (bool): whether input is RGB or BGR.
"""
super(Hue, self).__init__()
rgb = bool(rgb)
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, hue):
m = cv2.COLOR_BGR2HSV if not self.rgb else cv2.COLOR_RGB2HSV
hsv = cv2.cvtColor(img, m)
# https://docs.opencv.org/3.2.0/de/d25/imgproc_color_conversions.html#color_convert_rgb_hsv
if hsv.dtype.itemsize == 1:
# OpenCV uses 0-179 for 8-bit images
hsv[..., 0] = (hsv[..., 0] + hue) % 180
else:
# OpenCV uses 0-360 for floating point images
hsv[..., 0] = (hsv[..., 0] + 2 * hue) % 360
m = cv2.COLOR_HSV2BGR if not self.rgb else cv2.COLOR_HSV2RGB
img = cv2.cvtColor(hsv, m)
return img
class Brightness(PhotometricAugmentor):
"""
Adjust brightness by adding a random number.
"""
def __init__(self, delta, clip=True):
"""
Args:
delta (float): Randomly add a value within [-delta,delta]
clip (bool): clip results to [0,255] even when data type is not uint8.
"""
super(Brightness, self).__init__()
assert delta > 0
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(-self.delta, self.delta)
def _augment(self, img, v):
old_dtype = img.dtype
img = img.astype('float32')
img += v
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class BrightnessScale(PhotometricAugmentor):
"""
Adjust brightness by scaling by a random factor.
"""
def __init__(self, range, clip=True):
"""
Args:
range (tuple): Randomly scale the image by a factor in (range[0], range[1])
clip (bool): clip results to [0,255] even when data type is not uint8.
"""
super(BrightnessScale, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, v):
old_dtype = img.dtype
img = img.astype('float32')
img *= v
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class Contrast(PhotometricAugmentor):
"""
Apply ``x = (x - mean) * contrast_factor + mean`` to each channel.
"""
def __init__(self, factor_range, rgb=None, clip=True):
"""
Args:
factor_range (list or tuple): an interval to randomly sample the `contrast_factor`.
rgb (bool or None): if None, use the mean per-channel.
clip (bool): clip to [0, 255] even when data type is not uint8.
"""
super(Contrast, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.factor_range)
def _augment(self, img, r):
old_dtype = img.dtype
if img.ndim == 3:
if self.rgb is not None:
m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
grey = cv2.cvtColor(img.astype('float32'), m)
mean = np.mean(grey)
else:
mean = np.mean(img, axis=(0, 1), keepdims=True)
else:
mean = np.mean(img)
img = img * r + mean * (1 - r)
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class MeanVarianceNormalize(PhotometricAugmentor):
"""
    Linearly scales the image to have zero mean and unit variance:
``x = (x - mean) / adjusted_stddev``
where ``adjusted_stddev = max(stddev, 1.0/sqrt(num_pixels * channels))``
This augmentor always returns float32 images.
"""
def __init__(self, all_channel=True):
"""
Args:
all_channel (bool): if True, normalize all channels together. else separately.
"""
self._init(locals())
def _augment(self, img, _):
img = img.astype('float32')
if self.all_channel:
mean = np.mean(img)
std = np.std(img)
else:
mean = np.mean(img, axis=(0, 1), keepdims=True)
std = np.std(img, axis=(0, 1), keepdims=True)
std = np.maximum(std, 1.0 / np.sqrt(np.prod(img.shape)))
img = (img - mean) / std
return img
class GaussianBlur(PhotometricAugmentor):
""" Gaussian blur the image with random window size"""
def __init__(self, size_range=(0, 3), sigma_range=(0, 0), symmetric=True, max_size=None):
"""
Args:
size_range (tuple[int]): Gaussian window size would be 2 * size +
1, where size is randomly sampled from this [low, high) range.
sigma_range (tuple[float]): min,max of the sigma value. 0 means
opencv's default.
symmetric (bool): whether to use the same size & sigma for x and y.
max_size (int): deprecated
"""
super(GaussianBlur, self).__init__()
if not isinstance(size_range, (list, tuple)):
size_range = (0, size_range)
assert isinstance(sigma_range, (list, tuple)), sigma_range
if max_size is not None:
log_deprecated("GaussianBlur(max_size=)", "Use size_range= instead!", "2020-09-01")
size_range = (0, max_size)
self._init(locals())
def _get_augment_params(self, _):
size_xy = self.rng.randint(self.size_range[0], self.size_range[1], size=(2,)) * 2 + 1
sigma_xy = self._rand_range(*self.sigma_range, size=(2,))
if self.symmetric:
size_xy[1] = size_xy[0]
sigma_xy[1] = sigma_xy[0]
return tuple(size_xy), tuple(sigma_xy)
def _augment(self, img, prm):
size, sigma = prm
return np.reshape(cv2.GaussianBlur(img, size, sigmaX=sigma[0], sigmaY=sigma[1],
borderType=cv2.BORDER_REPLICATE), img.shape)
class Gamma(PhotometricAugmentor):
""" Randomly adjust gamma """
def __init__(self, range=(-0.5, 0.5)):
"""
Args:
range(list or tuple): gamma range
"""
super(Gamma, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, gamma):
old_dtype = img.dtype
lut = ((np.arange(256, dtype='float32') / 255) ** (1. / (1. + gamma)) * 255).astype('uint8')
img = np.clip(img, 0, 255).astype('uint8')
ret = cv2.LUT(img, lut).astype(old_dtype)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
return ret
class Clip(PhotometricAugmentor):
""" Clip the pixel values """
def __init__(self, min=0, max=255):
"""
Args:
min, max: the clip range
"""
self._init(locals())
def _augment(self, img, _):
return np.clip(img, self.min, self.max)
class Saturation(PhotometricAugmentor):
""" Randomly adjust saturation.
Follows the implementation in `fb.resnet.torch
<https://github.com/facebook/fb.resnet.torch/blob/master/datasets/transforms.lua#L218>`__.
"""
def __init__(self, alpha=0.4, rgb=True, clip=True):
"""
Args:
alpha(float): maximum saturation change.
rgb (bool): whether input is RGB or BGR.
clip (bool): clip results to [0,255] even when data type is not uint8.
"""
super().__init__()
rgb = bool(rgb)
assert alpha < 1
self._init(locals())
def _get_augment_params(self, _):
return 1 + self._rand_range(-self.alpha, self.alpha)
def _augment(self, img, v):
old_dtype = img.dtype
m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
grey = cv2.cvtColor(img, m)
ret = img * v + (grey * (1 - v))[:, :, np.newaxis]
if self.clip or old_dtype == np.uint8:
ret = np.clip(ret, 0, 255)
return ret.astype(old_dtype)
class Lighting(PhotometricAugmentor):
""" Lighting noise, as in the paper
`ImageNet Classification with Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_.
The implementation follows `fb.resnet.torch
<https://github.com/facebook/fb.resnet.torch/blob/master/datasets/transforms.lua#L184>`__.
"""
def __init__(self, std, eigval, eigvec, clip=True):
"""
Args:
std (float): maximum standard deviation
eigval: a vector of (3,). The eigenvalues of 3 channels.
eigvec: a 3x3 matrix. Each column is one eigen vector.
clip (bool): clip results to [0,255] even when data type is not uint8.
"""
super(Lighting, self).__init__()
eigval = np.asarray(eigval, dtype="float32")
eigvec = np.asarray(eigvec, dtype="float32")
assert eigval.shape == (3,)
assert eigvec.shape == (3, 3)
self._init(locals())
def _get_augment_params(self, img):
assert img.shape[2] == 3
return (self.rng.randn(3) * self.std).astype("float32")
def _augment(self, img, v):
old_dtype = img.dtype
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3,))
img = np.add(img, inc)
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class MinMaxNormalize(PhotometricAugmentor):
"""
Linearly scales the image to the range [min, max].
This augmentor always returns float32 images.
"""
def __init__(self, min=0, max=255, all_channel=True):
"""
Args:
max (float): The new maximum value
min (float): The new minimum value
all_channel (bool): if True, normalize all channels together. else separately.
"""
self._init(locals())
def _augment(self, img, _):
img = img.astype('float32')
if self.all_channel:
minimum = np.min(img)
maximum = np.max(img)
else:
minimum = np.min(img, axis=(0, 1), keepdims=True)
maximum = np.max(img, axis=(0, 1), keepdims=True)
img = (self.max - self.min) * (img - minimum) / (maximum - minimum) + self.min
return img
| 11,285 | 32.993976 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/gradproc.py | # -*- coding: utf-8 -*-
# File: gradproc.py
import inspect
import re
from abc import ABCMeta, abstractmethod
import six
import tensorflow as tf
from ..compat import tfv1
from ..utils import logger
from .summary import add_moving_summary
from .symbolic_functions import print_stat, rms
__all__ = ['GradientProcessor',
'FilterNoneGrad', 'GlobalNormClip', 'MapGradient', 'SummaryGradient',
'PrintGradient', 'CheckGradient', 'ScaleGradient']
@six.add_metaclass(ABCMeta)
class GradientProcessor(object):
"""
Base class for all gradient processors.
Gradient processors can be applied to optimizers by
:func:`optimizer.apply_grad_processors`.
Subclass should override the ``_process()`` method.
"""
_name_scope = None
def process(self, grads):
"""
Process the symbolic gradients.
Args:
grads (list): list of (grad, var).
Returns:
list: processed gradients, with the same type as input.
"""
# reuse the old name_scope, if process() is called multiple times
if self._name_scope is None:
with tfv1.name_scope(type(self).__name__) as scope:
self._name_scope = scope
return self._process(grads)
else:
with tfv1.name_scope(self._name_scope):
return self._process(grads)
@abstractmethod
def _process(self, grads):
pass
class FilterNoneGrad(GradientProcessor):
"""
Skip the update and print a warning (instead of crashing),
when the gradient of certain variable is None.
"""
def __init__(self, verbose=True):
"""
Args:
verbose (bool): whether to print warning about None gradients.
"""
super(FilterNoneGrad, self).__init__()
self._verbose = verbose
def _process(self, grads):
g = []
to_print = []
for grad, var in grads:
if grad is None:
to_print.append(var.op.name)
else:
g.append((grad, var))
if self._verbose and len(to_print):
message = ', '.join(to_print)
logger.warn("No gradient w.r.t {} trainable variables: {}".format(len(to_print), message))
return g
class GlobalNormClip(GradientProcessor):
""" Clip by global norm.
The global norm is the sum of norm for **all** gradients.
See :func:`tf.clip_by_global_norm` for more information.
"""
def __init__(self, global_norm):
"""
Args:
global_norm(float): the threshold to clip with.
"""
super(GlobalNormClip, self).__init__()
self._norm = float(global_norm)
def _process(self, grads):
g = [k[0] for k in grads]
v = [k[1] for k in grads]
g, _ = tf.clip_by_global_norm(g, self._norm, name='clip_by_global_norm')
return list(zip(g, v))
class MapGradient(GradientProcessor):
"""
Apply a function on all gradient if the name matches regex.
Keep the other gradients unchanged.
It can be used for gradient clipping, etc.
"""
def __init__(self, func, regex='.*'):
"""
Args:
func: a user-supplied function which takes one or two arguments.
The argument(s) can be either a `grad` tensor, or `grad` and `var`.
The function should return the new gradient to be used.
                If it returns None, the gradient is discarded (hence no update to the variable will happen).
regex (str): used to match variables. Defaults to match all variables.
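        Example:
            A sketch of gradient clipping (the clipping range here is an arbitrary illustration):
            .. code-block:: python
                from tensorpack.tfutils import optimizer, gradproc
                opt = optimizer.apply_grad_processors(
                    opt, [gradproc.MapGradient(lambda grad: tf.clip_by_value(grad, -0.1, 0.1))])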
"""
args = inspect.getfullargspec(func).args
arg_num = len(args) - inspect.ismethod(func)
assert arg_num in [1, 2], \
"The function must take 1 or 2 arguments! ({})".format(args)
if arg_num == 1:
self.func = lambda grad, var: func(grad)
else:
self.func = func
if not regex.endswith('$'):
regex = regex + '$'
self.regex = regex
super(MapGradient, self).__init__()
def _process(self, grads):
ret = []
matched = False
for grad, var in grads:
if re.match(self.regex, var.op.name):
matched = True
grad = self.func(grad, var)
if grad is not None:
ret.append((grad, var))
else:
ret.append((grad, var))
if not matched:
logger.warn("[MapGradient] No match was found for regex {}.".format(self.regex))
return ret
# TODO has dependency problems: sess.run may not depend on grad
# maybe group maintain op and grad ?
class SummaryGradient(MapGradient):
"""
    For each gradient tensor, summarize its histogram and add it to the moving
    summaries.
"""
# avoid duplicate summaries from towers
# TODO this is global. not good.
_summaried_gradient = set()
def __init__(self, regex='.*', collections=None):
"""
Args:
regex(str): same as in :class:`MapGradient`.
collections (list[str]): list of collection names
"""
super(SummaryGradient, self).__init__(self._mapper, regex)
self._coll = collections
def _mapper(self, grad, var):
name = var.op.name
if re.match('tower[0-9]+/', name):
# replicated training, var may come from different towers
return grad
if name not in SummaryGradient._summaried_gradient:
SummaryGradient._summaried_gradient.add(name)
tfv1.summary.histogram(name + '-grad', grad, collections=self._coll)
add_moving_summary(rms(grad, name=name + '/rms'))
return grad
class PrintGradient(MapGradient):
"""
Print the gradients every step with :func:`symbolic_functions.print_stat`.
"""
_printed = set()
# TODO this is global. not good.
def __init__(self, regex='.*'):
"""
Args:
regex(str): same as in :class:`MapGradient`.
"""
super(PrintGradient, self).__init__(self._mapper, regex)
def _mapper(self, grad, var):
name = var.op.name
if name not in PrintGradient._printed:
PrintGradient._printed.add(name)
grad = print_stat(grad, message=name + '-grad')
return grad
class CheckGradient(MapGradient):
"""
Run :func:`tf.check_numerics` for each gradient.
"""
def __init__(self):
super(CheckGradient, self).__init__(self._mapper)
def _mapper(self, grad, var):
# this was very slow.... see #3649
# op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
grad = tf.check_numerics(grad, 'CheckGradient/' + var.op.name)
return grad
class ScaleGradient(MapGradient):
"""
Scale certain gradient by a multiplier.
"""
def __init__(self, multipliers, verbose=True):
"""
Args:
multipliers (tuple or list): tuple of (regex, float), or list of such tuples.
verbose (bool): whether to print logs or not
Example:
Use double learning rate for all the bias (as in caffe), and freeze layer0:
.. code-block:: python
from tensorpack.tfutils import optimizer, gradproc
opt = optimizer.apply_grad_processors(
opt, [gradproc.ScaleGradient(
[('.*/b', 2.), ('layer0/.*', 0.)]
)])
"""
if not isinstance(multipliers, list):
multipliers = [multipliers]
self.multipliers = multipliers
assert verbose in [True, False], verbose
self._verbose = verbose
super(ScaleGradient, self).__init__(self._mapper)
def _mapper(self, grad, var):
varname = var.op.name
for regex, val in self.multipliers:
# always match against the whole name
if not regex.endswith('$'):
regex = regex + '$'
if re.match(regex, varname):
if self._verbose:
logger.info("Gradient of '{}' is multipled by {}".format(varname, val))
if val != 0: # skip zero to speed up
return grad * val
else:
return None
return grad
| 8,395 | 30.683019 | 107 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/argtools.py | # -*- coding: utf-8 -*-
# File: argtools.py
import inspect
import functools
from . import logger
__all__ = ['map_arg', 'memoized', 'memoized_method', 'graph_memoized', 'shape2d', 'shape4d',
'memoized_ignoreargs', 'log_once']
def map_arg(**maps):
"""
Apply a mapping on certain argument before calling the original function.
Args:
maps (dict): {argument_name: map_func}
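    Example:
        A sketch with illustrative names (coerce the ``shape`` argument to a tuple before each call):
        .. code-block:: python
            @map_arg(shape=tuple)
            def describe(shape):
                return "shape is {}".format(shape)   # ``shape`` is always a tuple here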
"""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# getcallargs was deprecated since 3.5
sig = inspect.signature(func)
argmap = sig.bind_partial(*args, **kwargs).arguments
for k, map_func in maps.items():
if k in argmap:
argmap[k] = map_func(argmap[k])
return func(**argmap)
return wrapper
return deco
memoized = functools.lru_cache(maxsize=None)
""" Alias to :func:`functools.lru_cache`
WARNING: memoization will keep keys and values alive!
"""
def graph_memoized(func):
"""
Like memoized, but keep one cache per default graph.
"""
# TODO it keeps the graph alive
from ..compat import tfv1
GRAPH_ARG_NAME = '__IMPOSSIBLE_NAME_FOR_YOU__'
@memoized
def func_with_graph_arg(*args, **kwargs):
kwargs.pop(GRAPH_ARG_NAME)
return func(*args, **kwargs)
@functools.wraps(func)
def wrapper(*args, **kwargs):
assert GRAPH_ARG_NAME not in kwargs, "No Way!!"
graph = tfv1.get_default_graph()
kwargs[GRAPH_ARG_NAME] = graph
return func_with_graph_arg(*args, **kwargs)
return wrapper
_MEMOIZED_NOARGS = {}
def memoized_ignoreargs(func):
"""
A decorator. It performs memoization ignoring the arguments used to call
the function.
"""
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper
def shape2d(a):
"""
Ensure a 2D shape.
Args:
a: a int or tuple/list of length 2
Returns:
list: of length 2. if ``a`` is a int, return ``[a, a]``.
"""
if type(a) == int:
return [a, a]
if isinstance(a, (list, tuple)):
assert len(a) == 2
return list(a)
raise RuntimeError("Illegal shape: {}".format(a))
def get_data_format(data_format, keras_mode=True):
if keras_mode:
dic = {'NCHW': 'channels_first', 'NHWC': 'channels_last'}
else:
dic = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
ret = dic.get(data_format, data_format)
if ret not in dic.values():
raise ValueError("Unknown data_format: {}".format(data_format))
return ret
def shape4d(a, data_format='NHWC'):
"""
    Ensure a 4D shape, to use with 4D symbolic functions.
Args:
a: a int or tuple/list of length 2
Returns:
list: of length 4. if ``a`` is a int, return ``[1, a, a, 1]``
or ``[1, 1, a, a]`` depending on data_format.
"""
s2d = shape2d(a)
if get_data_format(data_format, False) == 'NHWC':
return [1] + s2d + [1]
else:
return [1, 1] + s2d
@memoized
def log_once(message, func='info'):
"""
    Log a certain message only once. Calling this function more than once with
    the same message will result in a no-op.
Args:
message(str): message to log
func(str): the name of the logger method. e.g. "info", "warn", "error".
"""
getattr(logger, func)(message)
def call_only_once(func):
"""
Decorate a method or property of a class, so that this method can only
be called once for every instance.
    Calling it more than once will result in an exception.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
# cannot use hasattr here, because hasattr tries to getattr, which
# fails if func is a property
assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'):
cache = self._CALL_ONLY_ONCE_CACHE = set()
else:
cache = self._CALL_ONLY_ONCE_CACHE
cls = type(self)
# cannot use ismethod(), because decorated method becomes a function
is_method = inspect.isfunction(getattr(cls, func.__name__))
assert func not in cache, \
"{} {}.{} can only be called once per object!".format(
'Method' if is_method else 'Property',
cls.__name__, func.__name__)
cache.add(func)
return func(*args, **kwargs)
return wrapper
def memoized_method(func):
"""
A decorator that performs memoization on methods. It stores the cache on the object instance itself.
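    Example:
        A sketch with illustrative names (``expensive_load`` is a hypothetical helper; it runs only
        once per instance, and later calls return the cached result):
        .. code-block:: python
            class Dataset:
                @memoized_method
                def metadata(self):
                    return expensive_load()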
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
assert func.__name__ in dir(self), "memoized_method can only be used on method!"
if not hasattr(self, '_MEMOIZED_CACHE'):
cache = self._MEMOIZED_CACHE = {}
else:
cache = self._MEMOIZED_CACHE
key = (func, ) + args[1:] + tuple(kwargs)
ret = cache.get(key, None)
if ret is not None:
return ret
value = func(*args, **kwargs)
cache[key] = value
return value
return wrapper
if __name__ == '__main__':
class A():
def __init__(self):
self._p = 0
@call_only_once
def f(self, x):
print(x)
@property
def p(self):
return self._p
@p.setter
@call_only_once
def p(self, val):
self._p = val
a = A()
a.f(1)
b = A()
b.f(2)
b.f(1)
print(b.p)
print(b.p)
b.p = 2
print(b.p)
b.p = 3
print(b.p)
| 5,918 | 24.734783 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/fs.py | # -*- coding: utf-8 -*-
# File: fs.py
import errno
import os
import tqdm
from six.moves import urllib
from . import logger
from .utils import execute_only_once
__all__ = ['mkdir_p', 'download', 'recursive_walk', 'get_dataset_path', 'normpath']
def mkdir_p(dirname):
""" Like "mkdir -p", make a dir recursively, but do nothing if the dir exists
Args:
dirname(str):
"""
assert dirname is not None
if dirname == '' or os.path.isdir(dirname):
return
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
def download(url, dir, filename=None, expect_size=None):
"""
Download URL to a directory.
Will figure out the filename automatically from URL, if not given.
"""
mkdir_p(dir)
if filename is None:
filename = url.split('/')[-1]
fpath = os.path.join(dir, filename)
if os.path.isfile(fpath):
if expect_size is not None and os.stat(fpath).st_size == expect_size:
logger.info("File {} exists! Skip download.".format(filename))
return fpath
else:
logger.warn("File {} exists. Will overwrite with a new download!".format(filename))
def hook(t):
last_b = [0]
def inner(b, bsize, tsize=None):
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
try:
with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t))
statinfo = os.stat(fpath)
size = statinfo.st_size
except IOError:
logger.error("Failed to download {}".format(url))
raise
assert size > 0, "Downloaded an empty file from {}!".format(url)
if expect_size is not None and size != expect_size:
logger.error("File downloaded from {} does not match the expected size!".format(url))
logger.error("You may have downloaded a broken file, or the upstream may have modified the file.")
# TODO human-readable size
    logger.info('Successfully downloaded ' + filename + ". " + str(size) + ' bytes.')
return fpath
def recursive_walk(rootdir):
"""
Yields:
str: All files in rootdir, recursively.
"""
for r, dirs, files in os.walk(rootdir):
for f in files:
yield os.path.join(r, f)
def get_dataset_path(*args):
"""
Get the path to some dataset under ``$TENSORPACK_DATASET``.
Args:
args: strings to be joined to form path.
Returns:
str: path to the dataset.
"""
d = os.environ.get('TENSORPACK_DATASET', None)
if d is None:
d = os.path.join(os.path.expanduser('~'), 'tensorpack_data')
if execute_only_once():
logger.warn("Env var $TENSORPACK_DATASET not set, using {} for datasets.".format(d))
if not os.path.isdir(d):
mkdir_p(d)
logger.info("Created the directory {}.".format(d))
assert os.path.isdir(d), d
return os.path.join(d, *args)
def normpath(path):
"""
    Normalizes a path to a folder, taking into consideration remote storage (e.g. cloud storage)
    referenced by '://' at the beginning of the path.
Args:
        path: path to be normalized.
Returns:
str: normalized path.
"""
return path if '://' in path else os.path.normpath(path)
if __name__ == '__main__':
download('http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz', '.')
| 3,592 | 27.744 | 106 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/loadcaffe.py | # -*- coding: utf-8 -*-
# File: loadcaffe.py
import numpy as np
import os
import sys
from . import logger
from .concurrency import subproc_call
from .fs import download, get_dataset_path
from .utils import change_env
__all__ = ['load_caffe', 'get_caffe_pb']
CAFFE_PROTO_URL = "https://github.com/BVLC/caffe/raw/master/src/caffe/proto/caffe.proto"
class CaffeLayerProcessor(object):
def __init__(self, net):
self.net = net
self.layer_names = net._layer_names
self.param_dict = {}
self.processors = {
'Convolution': self.proc_conv,
'InnerProduct': self.proc_fc,
'BatchNorm': self.proc_bn,
'Scale': self.proc_scale
}
def process(self):
for idx, layer in enumerate(self.net.layers):
param = layer.blobs
name = self.layer_names[idx]
if layer.type in self.processors:
logger.info("Processing layer {} of type {}".format(
name, layer.type))
dic = self.processors[layer.type](idx, name, param)
self.param_dict.update(dic)
elif len(layer.blobs) != 0:
logger.warn(
"{} layer contains parameters but is not supported!".format(layer.type))
return self.param_dict
def proc_conv(self, idx, name, param):
assert len(param) <= 2
assert param[0].data.ndim == 4
# caffe: ch_out, ch_in, h, w
W = param[0].data.transpose(2, 3, 1, 0)
if len(param) == 1:
return {name + '/W': W}
else:
return {name + '/W': W,
name + '/b': param[1].data}
def proc_fc(self, idx, name, param):
# TODO caffe has an 'transpose' option for fc/W
assert len(param) == 2
prev_layer_name = self.net.bottom_names[name][0]
prev_layer_output = self.net.blobs[prev_layer_name].data
if prev_layer_output.ndim == 4:
logger.info("FC layer {} takes spatial data.".format(name))
W = param[0].data
# original: outx(CxHxW)
W = W.reshape((-1,) + prev_layer_output.shape[1:]).transpose(2, 3, 1, 0)
# become: (HxWxC)xout
else:
W = param[0].data.transpose()
return {name + '/W': W,
name + '/b': param[1].data}
def proc_bn(self, idx, name, param):
scale_factor = param[2].data[0]
return {name + '/mean/EMA': param[0].data / scale_factor,
name + '/variance/EMA': param[1].data / scale_factor}
def proc_scale(self, idx, name, param):
bottom_name = self.net.bottom_names[name][0]
# find the bn layer before this scaling
for i, layer in enumerate(self.net.layers):
if layer.type == 'BatchNorm':
name2 = self.layer_names[i]
bottom_name2 = self.net.bottom_names[name2][0]
if bottom_name2 == bottom_name:
# scaling and BN share the same bottom, should merge
logger.info("Merge {} and {} into one BatchNorm layer".format(
name, name2))
return {name2 + '/beta': param[1].data,
name2 + '/gamma': param[0].data}
# assume this scaling layer is part of some BN
logger.error("Could not find a BN layer corresponding to this Scale layer!")
raise ValueError()
def load_caffe(model_desc, model_file):
"""
Load a caffe model. You must be able to ``import caffe`` to use this
function.
Args:
model_desc (str): path to caffe model description file (.prototxt).
model_file (str): path to caffe model parameter file (.caffemodel).
Returns:
dict: the parameters.
"""
with change_env('GLOG_minloglevel', '2'):
import caffe
caffe.set_mode_cpu()
net = caffe.Net(model_desc, model_file, caffe.TEST)
param_dict = CaffeLayerProcessor(net).process()
logger.info("Model loaded from caffe. Params: " +
", ".join(sorted(param_dict.keys())))
return param_dict
def get_caffe_pb():
"""
Get caffe protobuf.
Returns:
The imported caffe protobuf module.
"""
dir = get_dataset_path('caffe')
caffe_pb_file = os.path.join(dir, 'caffe_pb2.py')
if not os.path.isfile(caffe_pb_file):
download(CAFFE_PROTO_URL, dir)
assert os.path.isfile(os.path.join(dir, 'caffe.proto'))
cmd = "protoc --version"
version, ret = subproc_call(cmd, timeout=3)
if ret != 0:
sys.exit(1)
try:
version = version.decode('utf-8')
version = float('.'.join(version.split(' ')[1].split('.')[:2]))
assert version >= 2.7, "Require protoc>=2.7 for Python3"
except Exception:
logger.exception("protoc --version gives: " + str(version))
raise
cmd = 'cd {} && protoc caffe.proto --python_out .'.format(dir)
ret = os.system(cmd)
assert ret == 0, \
"Command `{}` failed!".format(cmd)
assert os.path.isfile(caffe_pb_file), caffe_pb_file
import imp
return imp.load_source('caffepb', caffe_pb_file)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model', help='.prototxt file')
parser.add_argument('weights', help='.caffemodel file')
parser.add_argument('output', help='output npz file')
args = parser.parse_args()
ret = load_caffe(args.model, args.weights)
if args.output.endswith('.npz'):
np.savez_compressed(args.output, **ret)
elif args.output.endswith('.npy'):
logger.warn("Please use npz format instead!")
np.save(args.output, ret)
else:
raise ValueError("Unknown format {}".format(args.output))
| 5,887 | 34.257485 | 92 | py |
SyNet | SyNet-master/tensorpack/tensorpack/contrib/keras.py | # -*- coding: utf-8 -*-
# File: keras.py
from contextlib import contextmanager
import six
import tensorflow as tf
from tensorflow import keras
from ..callbacks import Callback, CallbackToHook, InferenceRunner, InferenceRunnerBase, ScalarStats
from ..models.regularize import regularize_cost_from_collection
from ..tfutils.collection import backup_collection, restore_collection
from ..tfutils.common import get_op_tensor_name
from ..tfutils.scope_utils import cached_name_scope
from ..tfutils.summary import add_moving_summary
from ..tfutils.tower import get_current_tower_context
from ..train import SimpleTrainer, SyncMultiGPUTrainerParameterServer, Trainer
from ..train.interface import apply_default_prefetch
from ..train.trainers import DistributedTrainerBase
from ..utils import logger
from ..utils.gpu import get_nr_gpu
__all__ = ['KerasPhaseCallback', 'setup_keras_trainer', 'KerasModel']
TOTAL_LOSS_NAME = 'total_loss'
def _check_name(tensor, name):
tensorname = get_op_tensor_name(tensor.name)[0]
assert tensorname.split('/')[-1] == name, \
"{} does not match {}, you may have name conflict somewhere!".format(tensor.name, name)
class KerasModelCaller(object):
"""
    A Keras model doesn't support variable scope reuse.
    This is a wrapper around a Keras model to mimic reuse.
"""
def __init__(self, get_model):
self.get_model = get_model
self.cached_model = None
def __call__(self, *input_tensors):
"""
Args:
input_tensors ([tf.Tensor])
Returns:
output tensors of this tower, evaluated with the input tensors.
"""
reuse = tf.get_variable_scope().reuse
old_trainable_names = {x.name for x in tf.trainable_variables()}
trainable_backup = backup_collection([tf.GraphKeys.TRAINABLE_VARIABLES])
update_ops_backup = backup_collection([tf.GraphKeys.UPDATE_OPS])
def post_process_model(model):
added_trainable_names = {x.name for x in tf.trainable_variables()}
restore_collection(trainable_backup)
for v in model.weights:
# In Keras, the collection is not respected and could contain non-trainable vars.
# We put M.weights into the collection instead.
if v.name not in old_trainable_names and v.name in added_trainable_names:
tf.add_to_collection(tf.GraphKeys.TRAINABLE_VARIABLES, v)
new_trainable_names = {x.name for x in tf.trainable_variables()}
for n in added_trainable_names:
if n not in new_trainable_names:
logger.warn("Keras created trainable variable '{}' which is actually not trainable. "
"This was automatically corrected.".format(n))
# Keras models might not use this collection at all (in some versions).
# This is a BC-breaking change of tf.keras: https://github.com/tensorflow/tensorflow/issues/19643
restore_collection(update_ops_backup)
for op in model.updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, op)
if self.cached_model is None:
assert not reuse
# starting from some versions, tf.keras starts to prepend name scope to variable names ..
@contextmanager
def clear_tower0_name_scope():
ns = tf.get_default_graph().get_name_scope()
if ns == 'tower0':
with tf.name_scope('/'):
yield
else:
yield
with clear_tower0_name_scope():
model = self.cached_model = self.get_model(*input_tensors)
assert isinstance(model, keras.Model), \
"Your get_model function should return a `tf.keras.Model`!"
outputs = model.outputs
elif reuse:
# use the cached Keras model to mimic reuse
# NOTE: ctx.is_training won't be useful inside model,
# because inference will always use the cached Keras model
model = self.cached_model
outputs = model.call(*input_tensors)
else:
# create new Keras model if not reuse
model = self.get_model(*input_tensors)
outputs = model.outputs
post_process_model(model)
if isinstance(outputs, list) and len(outputs) == 1:
return outputs[0]
return outputs
class KerasPhaseCallback(Callback):
"""
    Keras needs an extra input if learning_phase is used by the model.
This callback will be used:
1. By the trainer with isTrain=True
2. By InferenceRunner with isTrain=False, in the form of hooks
If you use :class:`KerasModel` or :func:`setup_keras_trainer`,
this callback will be automatically added when needed.
"""
def __init__(self, isTrain):
assert isinstance(isTrain, bool), isTrain
self._isTrain = isTrain
self._learning_phase = keras.backend.learning_phase()
def _setup_graph(self):
logger.info("Using Keras learning phase {} in the graph!".format(
self._learning_phase.name))
cbs = self.trainer._callbacks.cbs
for cb in cbs:
# XXX HACK
if isinstance(cb, InferenceRunnerBase):
h = CallbackToHook(KerasPhaseCallback(False))
cb.register_hook(h)
def _before_run(self, ctx):
return tf.train.SessionRunArgs(
fetches=[], feed_dict={self._learning_phase: int(self._isTrain)})
def setup_keras_trainer(
trainer, get_model,
input_signature, target_signature,
input, optimizer, loss, metrics):
"""
Args:
trainer (SingleCostTrainer):
get_model (input1, input2, ... -> tf.keras.Model):
A function which takes tensors, builds and returns a Keras model.
It will be part of the tower function.
input (InputSource):
optimizer (tf.train.Optimizer):
loss, metrics: list of strings
"""
assert isinstance(optimizer, tf.train.Optimizer), optimizer
assert isinstance(loss, list), loss
assert len(loss) >= 1, "No loss was given!"
assert isinstance(metrics, list), metrics
model_caller = KerasModelCaller(get_model)
nr_inputs = len(input_signature)
def get_cost(*inputs):
ctx = get_current_tower_context()
input_tensors = list(inputs[:nr_inputs])
target_tensors = list(inputs[nr_inputs:])
# TODO mapping between target tensors & output tensors
outputs = model_caller(*input_tensors)
if isinstance(outputs, tf.Tensor):
outputs = [outputs]
assert len(outputs) == len(target_tensors), \
"len({}) != len({})".format(str(outputs), str(target_tensors))
assert len(outputs) == len(loss), \
"len({}) != len({})".format(str(outputs), str(loss))
loss_tensors = []
for idx, loss_name in enumerate(loss):
with cached_name_scope('keras_loss', top_level=False):
loss_fn = keras.losses.get(loss_name)
curr_loss = loss_fn(target_tensors[idx], outputs[idx])
curr_loss = tf.reduce_mean(curr_loss, name=loss_name)
_check_name(curr_loss, loss_name)
loss_tensors.append(curr_loss)
loss_reg = regularize_cost_from_collection()
if loss_reg is not None:
total_loss = tf.add_n(loss_tensors + [loss_reg], name=TOTAL_LOSS_NAME)
add_moving_summary(loss_reg, total_loss, *loss_tensors)
else:
total_loss = tf.add_n(loss_tensors, name=TOTAL_LOSS_NAME)
add_moving_summary(total_loss, *loss_tensors)
if metrics and (ctx.is_main_training_tower or not ctx.is_training):
# for list: one metric for each output
metric_tensors = []
for oid, metric_name in enumerate(metrics):
output_tensor = outputs[oid]
target_tensor = target_tensors[oid] # TODO may not have the same mapping?
with cached_name_scope('keras_metric', top_level=False):
metric_fn = keras.metrics.get(metric_name)
metric_tensor = metric_fn(target_tensor, output_tensor)
metric_tensor = tf.reduce_mean(metric_tensor, name=metric_name)
_check_name(metric_tensor, metric_name)
# check name conflict here
metric_tensors.append(metric_tensor)
add_moving_summary(*metric_tensors)
return total_loss
trainer.setup_graph(
input_signature + target_signature,
input,
get_cost,
lambda: optimizer)
if isinstance(keras.backend.learning_phase(), tf.Tensor) and len(keras.backend.learning_phase().consumers()) > 0:
# check if learning_phase is used in this model
trainer.register_callback(KerasPhaseCallback(True))
class KerasModel(object):
def __init__(self, get_model, input_signature=None, target_signature=None,
input=None, trainer=None):
"""
Args:
get_model (input1, input2, ... -> keras.Model):
A function which takes tensors, builds and returns a Keras model.
It will be part of the tower function.
input_signature ([tf.TensorSpec]): required. The signature for inputs.
target_signature ([tf.TensorSpec]): required. The signature for the targets tensors.
input (InputSource | DataFlow): the InputSource or DataFlow where the input data comes from.
trainer (Trainer): the default will check the number of available GPUs and use them all.
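        Example:
            A schematic sketch; the model, shapes, loss/metric names and the dataflow ``df`` below
            are illustrative assumptions, not part of this module:
            .. code-block:: python
                def get_model(image):
                    inp = tf.keras.layers.Input(tensor=image)
                    x = tf.keras.layers.Flatten()(inp)
                    logits = tf.keras.layers.Dense(10)(x)
                    return tf.keras.Model(inputs=inp, outputs=logits)
                M = KerasModel(
                    get_model,
                    input_signature=[tf.TensorSpec([None, 28, 28], tf.float32, 'image')],
                    target_signature=[tf.TensorSpec([None], tf.int64, 'label')],
                    input=df)
                M.compile(optimizer=tf.train.AdamOptimizer(1e-3),
                          loss='sparse_categorical_crossentropy',
                          metrics='sparse_categorical_accuracy')
                M.fit(steps_per_epoch=100, max_epoch=10)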
"""
self.get_model = get_model
assert callable(get_model), get_model
self.input_signature = input_signature
self.target_signature = target_signature
if trainer is None:
nr_gpu = get_nr_gpu()
if nr_gpu <= 1:
trainer = SimpleTrainer()
else:
# the default multi-gpu trainer
trainer = SyncMultiGPUTrainerParameterServer(nr_gpu)
assert isinstance(trainer, Trainer), trainer
assert not isinstance(trainer, DistributedTrainerBase)
assert input is not None, "Argument 'input' is required!"
self.input = apply_default_prefetch(input, trainer)
self.trainer = trainer
def compile(self, optimizer, loss, metrics=None):
"""
Args:
optimizer (tf.train.Optimizer):
loss, metrics: string or list of strings
"""
if isinstance(loss, six.string_types):
loss = [loss]
if metrics is None:
metrics = []
if isinstance(metrics, six.string_types):
metrics = [metrics]
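        # scalar names monitored by the validation callback that fit() may add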
self._stats_to_inference = loss + metrics + [TOTAL_LOSS_NAME]
setup_keras_trainer(
self.trainer, get_model=self.get_model,
input_signature=self.input_signature,
target_signature=self.target_signature,
input=self.input,
optimizer=optimizer,
loss=loss,
metrics=metrics)
def fit(self, validation_data=None, **kwargs):
"""
Args:
validation_data (DataFlow or InputSource): to be used for inference.
The inference callback is added as the first in the callback list.
If you need to use it in a different order, please write it in the callback list manually.
kwargs: same arguments as :meth:`Trainer.train_with_defaults`.
"""
callbacks = kwargs.pop('callbacks', [])
if validation_data is not None:
# There is no way to guess where users want this callback. So we have to choose one.
# MinSaver may need results from this callback,
# so we put this callback at first.
callbacks.insert(0, InferenceRunner(
validation_data, ScalarStats(self._stats_to_inference)))
self.trainer.train_with_defaults(callbacks=callbacks, **kwargs)
| 12,196 | 40.345763 | 117 | py |
pytorch-playground | pytorch-playground-master/setup.py | from setuptools import setup, find_packages
with open("requirements.txt") as requirements_file:
REQUIREMENTS = requirements_file.readlines()
setup(
name="pytorch-playground",
version="1.0.0",
author='Aaron Chen',
author_email='aaron.xichen@gmail.com',
packages=find_packages(),
entry_points = {
'console_scripts': [
'quantize=quantize:main',
]
},
install_requires=REQUIREMENTS,
)
| 447 | 21.4 | 51 | py |
pytorch-playground | pytorch-playground-master/quantize.py | import argparse
from utee import misc, quant, selector
import torch
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from collections import OrderedDict
def main():
parser = argparse.ArgumentParser(description='PyTorch SVHN Example')
parser.add_argument('--type', default='cifar10', help='|'.join(selector.known_models))
parser.add_argument('--quant_method', default='linear', help='linear|minmax|log|tanh')
parser.add_argument('--batch_size', type=int, default=100, help='input batch size for training (default: 64)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=8, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--model_root', default='~/.torch/models/', help='folder to save the model')
    parser.add_argument('--data_root', default='/data/public_dataset/pytorch/', help='folder to save the dataset')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--input_size', type=int, default=224, help='input size of image')
parser.add_argument('--n_sample', type=int, default=20, help='number of samples to infer the scaling factor')
parser.add_argument('--param_bits', type=int, default=8, help='bit-width for parameters')
parser.add_argument('--bn_bits', type=int, default=32, help='bit-width for running mean and std')
parser.add_argument('--fwd_bits', type=int, default=8, help='bit-width for layer output')
parser.add_argument('--overflow_rate', type=float, default=0.0, help='overflow rate')
args = parser.parse_args()
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
misc.ensure_dir(args.logdir)
args.model_root = misc.expand_user(args.model_root)
args.data_root = misc.expand_user(args.data_root)
args.input_size = 299 if 'inception' in args.type else args.input_size
assert args.quant_method in ['linear', 'minmax', 'log', 'tanh']
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
assert torch.cuda.is_available(), 'no cuda'
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# load model and dataset fetcher
model_raw, ds_fetcher, is_imagenet = selector.select(args.type, model_root=args.model_root)
args.ngpu = args.ngpu if is_imagenet else 1
# quantize parameters
if args.param_bits < 32:
state_dict = model_raw.state_dict()
state_dict_quant = OrderedDict()
sf_dict = OrderedDict()
for k, v in state_dict.items():
if 'running' in k:
                if args.bn_bits >= 32:
print("Ignoring {}".format(k))
state_dict_quant[k] = v
continue
else:
bits = args.bn_bits
else:
bits = args.param_bits
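            # dispatch on the chosen quantizer: 'linear' derives a fixed-point scale factor from the tensor's value range; the others map values through minmax/log/tanh transforms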
if args.quant_method == 'linear':
sf = bits - 1. - quant.compute_integral_part(v, overflow_rate=args.overflow_rate)
v_quant = quant.linear_quantize(v, sf, bits=bits)
elif args.quant_method == 'log':
v_quant = quant.log_minmax_quantize(v, bits=bits)
elif args.quant_method == 'minmax':
v_quant = quant.min_max_quantize(v, bits=bits)
else:
v_quant = quant.tanh_quantize(v, bits=bits)
state_dict_quant[k] = v_quant
print(k, bits)
model_raw.load_state_dict(state_dict_quant)
# quantize forward activation
if args.fwd_bits < 32:
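        # wrap layers so their outputs are quantized; the short evaluation below runs n_sample batches to calibrate the activation scale factors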
model_raw = quant.duplicate_model_with_quant(model_raw, bits=args.fwd_bits, overflow_rate=args.overflow_rate,
counter=args.n_sample, type=args.quant_method)
print(model_raw)
val_ds_tmp = ds_fetcher(10, data_root=args.data_root, train=False, input_size=args.input_size)
misc.eval_model(model_raw, val_ds_tmp, ngpu=1, n_sample=args.n_sample, is_imagenet=is_imagenet)
# eval model
val_ds = ds_fetcher(args.batch_size, data_root=args.data_root, train=False, input_size=args.input_size)
acc1, acc5 = misc.eval_model(model_raw, val_ds, ngpu=args.ngpu, is_imagenet=is_imagenet)
# print sf
print(model_raw)
res_str = "type={}, quant_method={}, param_bits={}, bn_bits={}, fwd_bits={}, overflow_rate={}, acc1={:.4f}, acc5={:.4f}".format(
args.type, args.quant_method, args.param_bits, args.bn_bits, args.fwd_bits, args.overflow_rate, acc1, acc5)
print(res_str)
with open('acc1_acc5.txt', 'a') as f:
f.write(res_str + '\n')
if __name__ == '__main__':
main()
| 4,928 | 48.29 | 132 | py |
pytorch-playground | pytorch-playground-master/svhn/model.py | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import os
from collections import OrderedDict
from utee import misc
print = misc.logger.info
model_urls = {
'svhn': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/svhn-f564f3d8.pth',
}
class SVHN(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(SVHN, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
print(self.features)
print(self.classifier)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
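    # cfg entries are conv output widths; 'M' inserts a 2x2 max-pool, and a tuple (channels, padding) overrides the default padding of 1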
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU(), nn.Dropout(0.3)]
else:
layers += [conv2d, nn.ReLU(), nn.Dropout(0.3)]
in_channels = out_channels
return nn.Sequential(*layers)
def svhn(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = SVHN(layers, n_channel=8*n_channel, num_classes=10)
if pretrained is not None:
m = model_zoo.load_url(model_urls['svhn'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
| 2,056 | 33.864407 | 122 | py |
pytorch-playground | pytorch-playground-master/svhn/dataset.py | import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
def get(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'svhn-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building SVHN data loader with {} workers".format(num_workers))
def target_transform(target):
return int(target) - 1
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.SVHN(
root=data_root, split='train', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]),
target_transform=target_transform,
),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.SVHN(
root=data_root, split='test', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]),
target_transform=target_transform
),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
| 1,565 | 34.590909 | 93 | py |
pytorch-playground | pytorch-playground-master/svhn/train.py | import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
from IPython import embed
parser = argparse.ArgumentParser(description='PyTorch SVHN Example')
parser.add_argument('--channel', type=int, default=32, help='first conv channel (default: 32)')
parser.add_argument('--wd', type=float, default=0.001, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=2, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=100, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--data_root', default='/tmp/public_dataset/pytorch/', help='folder to save the dataset')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader and model
train_loader, test_loader = dataset.get(batch_size=args.batch_size, data_root=args.data_root, num_workers=1)
model = model.svhn(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
model.cuda()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output.data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
loss.data[0], acc, optimizer.param_groups[0]['lr']))
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda().long().squeeze()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss / len(test_loader) # average over number of mini-batch
acc = 100. * correct / len(test_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset), acc))
if acc > best_acc:
new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
best_acc = acc
old_file = new_file
except Exception as e:
import traceback
traceback.print_exc()
finally:
print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| 5,590 | 43.023622 | 125 | py |
pytorch-playground | pytorch-playground-master/stl10/model.py | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import os
from utee import misc
from collections import OrderedDict
print = misc.logger.info
model_urls = {
'stl10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/stl10-866321e9.pth',
}
class SVHN(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(SVHN, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
print(self.features)
print(self.classifier)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
else:
layers += [conv2d, nn.ReLU()]
in_channels = out_channels
return nn.Sequential(*layers)
def stl10(n_channel, pretrained=None):
cfg = [
n_channel, 'M',
2*n_channel, 'M',
4*n_channel, 'M',
4*n_channel, 'M',
(8*n_channel, 0), (8*n_channel, 0), 'M'
]
layers = make_layers(cfg, batch_norm=True)
model = SVHN(layers, n_channel=8*n_channel, num_classes=10)
if pretrained is not None:
m = model_zoo.load_url(model_urls['stl10'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
| 2,071 | 30.876923 | 89 | py |
pytorch-playground | pytorch-playground-master/stl10/dataset.py | import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from IPython import embed
import os
def get(batch_size, data_root='/mnt/local0/public_dataset/pytorch/', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'stl10-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building STL10 data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.STL10(
root=data_root, split='train', download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(96),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.STL10(
root=data_root, split='test', download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
if __name__ == '__main__':
train_ds, test_ds = get(200, num_workers=1)
for data, target in train_ds:
print("~~")
| 1,678 | 36.311111 | 101 | py |
pytorch-playground | pytorch-playground-master/stl10/train.py | import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
from IPython import embed
parser = argparse.ArgumentParser(description='PyTorch STL10 Example')
parser.add_argument('--channel', type=int, default=32, help='first conv channel (default: 32)')
parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=2, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=20, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader and model
train_loader, test_loader = dataset.get(batch_size=args.batch_size, num_workers=1)
model = model.stl10(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
model.cuda()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
# ready to go
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output.data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
loss.data[0], acc, optimizer.param_groups[0]['lr']))
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda().long().squeeze()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss / len(test_loader) # average over number of mini-batch
acc = 100. * correct / len(test_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset), acc))
if acc > best_acc:
new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
best_acc = acc
old_file = new_file
except Exception as e:
import traceback
traceback.print_exc()
finally:
print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| 5,473 | 42.102362 | 124 | py |
pytorch-playground | pytorch-playground-master/imagenet/inception.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utee import misc
from collections import OrderedDict
__all__ = ['Inception3', 'inception_v3']
model_urls = {
'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth',
}
def inception_v3(pretrained=False, model_root=None, **kwargs):
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
model = Inception3(**kwargs)
misc.load_state_dict(model, model_urls['inception_v3_google'], model_root)
return model
return Inception3(**kwargs)
class Inception3(nn.Module):
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
super(Inception3, self).__init__()
self.aux_logits = aux_logits
self.transform_input = transform_input
self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.Mixed_5b = InceptionA(192, pool_features=32)
self.Mixed_5c = InceptionA(256, pool_features=64)
self.Mixed_5d = InceptionA(288, pool_features=64)
self.Mixed_6a = InceptionB(288)
self.Mixed_6b = InceptionC(768, channels_7x7=128)
self.Mixed_6c = InceptionC(768, channels_7x7=160)
self.Mixed_6d = InceptionC(768, channels_7x7=160)
self.Mixed_6e = InceptionC(768, channels_7x7=192)
if aux_logits:
self.AuxLogits = InceptionAux(768, num_classes)
self.Mixed_7a = InceptionD(768)
self.Mixed_7b = InceptionE(1280)
self.Mixed_7c = InceptionE(2048)
self.group1 = nn.Sequential(
OrderedDict([
('fc', nn.Linear(2048, num_classes))
])
)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
X = stats.truncnorm(-2, 2, scale=stddev)
values = torch.Tensor(X.rvs(m.weight.data.numel()))
m.weight.data.copy_(values.reshape(m.weight.shape))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, x):
if self.transform_input:
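            # re-normalize each channel from (mean=0.5, std=0.5) inputs to the ImageNet statistics expected by the pretrained weights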
x = x.clone()
            x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x)
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
x = self.Mixed_5d(x)
# 35 x 35 x 288
x = self.Mixed_6a(x)
# 17 x 17 x 768
x = self.Mixed_6b(x)
# 17 x 17 x 768
x = self.Mixed_6c(x)
# 17 x 17 x 768
x = self.Mixed_6d(x)
# 17 x 17 x 768
x = self.Mixed_6e(x)
# 17 x 17 x 768
if self.training and self.aux_logits:
aux = self.AuxLogits(x)
# 17 x 17 x 768
x = self.Mixed_7a(x)
# 8 x 8 x 1280
x = self.Mixed_7b(x)
# 8 x 8 x 2048
x = self.Mixed_7c(x)
# 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8)
# 1 x 1 x 2048
x = F.dropout(x, training=self.training)
# 1 x 1 x 2048
x = x.view(x.size(0), -1)
# 2048
x = self.group1(x)
# 1000 (num_classes)
if self.training and self.aux_logits:
return x, aux
return x
class InceptionA(nn.Module):
def __init__(self, in_channels, pool_features):
super(InceptionA, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)
self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionB(nn.Module):
def __init__(self, in_channels):
super(InceptionB, self).__init__()
self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)
self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3(x)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionC(nn.Module):
def __init__(self, in_channels, channels_7x7):
super(InceptionC, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)
c7 = channels_7x7
self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionD(nn.Module):
def __init__(self, in_channels):
super(InceptionD, self).__init__()
self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)
self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return torch.cat(outputs, 1)
class InceptionE(nn.Module):
def __init__(self, in_channels):
super(InceptionE, self).__init__()
self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)
self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))
self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = [
self.branch3x3_2a(branch3x3),
self.branch3x3_2b(branch3x3),
]
branch3x3 = torch.cat(branch3x3, 1)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = [
self.branch3x3dbl_3a(branch3x3dbl),
self.branch3x3dbl_3b(branch3x3dbl),
]
branch3x3dbl = torch.cat(branch3x3dbl, 1)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
class InceptionAux(nn.Module):
def __init__(self, in_channels, num_classes):
super(InceptionAux, self).__init__()
self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
self.conv1 = BasicConv2d(128, 768, kernel_size=5)
self.conv1.stddev = 0.01
fc = nn.Linear(768, num_classes)
fc.stddev = 0.001
self.group1 = nn.Sequential(
OrderedDict([
('fc', fc)
])
)
def forward(self, x):
# 17 x 17 x 768
x = F.avg_pool2d(x, kernel_size=5, stride=3)
# 5 x 5 x 768
x = self.conv0(x)
# 5 x 5 x 128
x = self.conv1(x)
# 1 x 1 x 768
x = x.view(x.size(0), -1)
# 768
x = self.group1(x)
# 1000
return x
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.group1 = nn.Sequential(
OrderedDict([
('conv', nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)),
('bn', nn.BatchNorm2d(out_channels, eps=0.001))
])
)
def forward(self, x):
x = self.group1(x)
return F.relu(x, inplace=True)
| 11,908 | 34.549254 | 98 | py |
pytorch-playground | pytorch-playground-master/imagenet/resnet.py | import torch.nn as nn
import math
from utee import misc
from collections import OrderedDict
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
# "3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
m = OrderedDict()
m['conv1'] = conv3x3(inplanes, planes, stride)
m['bn1'] = nn.BatchNorm2d(planes)
m['relu1'] = nn.ReLU(inplace=True)
m['conv2'] = conv3x3(planes, planes)
m['bn2'] = nn.BatchNorm2d(planes)
self.group1 = nn.Sequential(m)
self.relu= nn.Sequential(nn.ReLU(inplace=True))
self.downsample = downsample
def forward(self, x):
if self.downsample is not None:
residual = self.downsample(x)
else:
residual = x
out = self.group1(x) + residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
m = OrderedDict()
m['conv1'] = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
m['bn1'] = nn.BatchNorm2d(planes)
m['relu1'] = nn.ReLU(inplace=True)
m['conv2'] = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
m['bn2'] = nn.BatchNorm2d(planes)
m['relu2'] = nn.ReLU(inplace=True)
m['conv3'] = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
m['bn3'] = nn.BatchNorm2d(planes * 4)
self.group1 = nn.Sequential(m)
self.relu= nn.Sequential(nn.ReLU(inplace=True))
self.downsample = downsample
def forward(self, x):
if self.downsample is not None:
residual = self.downsample(x)
else:
residual = x
out = self.group1(x) + residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
m = OrderedDict()
m['conv1'] = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
m['bn1'] = nn.BatchNorm2d(64)
m['relu1'] = nn.ReLU(inplace=True)
m['maxpool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.group1= nn.Sequential(m)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.Sequential(nn.AvgPool2d(7))
self.group2 = nn.Sequential(
OrderedDict([
('fc', nn.Linear(512 * block.expansion, num_classes))
])
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
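        # project the shortcut with a strided 1x1 conv + BN whenever the spatial size or channel count changes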
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.group1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.group2(x)
return x
def resnet18(pretrained=False, model_root=None, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet18'], model_root)
return model
def resnet34(pretrained=False, model_root=None, **kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet34'], model_root)
return model
def resnet50(pretrained=False, model_root=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet50'], model_root)
return model
def resnet101(pretrained=False, model_root=None, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet101'], model_root)
return model
def resnet152(pretrained=False, model_root=None, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['resnet152'], model_root)
return model
| 5,916 | 32.055866 | 109 | py |
pytorch-playground | pytorch-playground-master/imagenet/squeezenet.py | import math
import torch
import torch.nn as nn
from utee import misc
from collections import OrderedDict
__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1']
model_urls = {
'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',
'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',
}
class Fire(nn.Module):
def __init__(self, inplanes, squeeze_planes,
expand1x1_planes, expand3x3_planes):
super(Fire, self).__init__()
self.inplanes = inplanes
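        # squeeze with 1x1 convs, then expand through parallel 1x1 and 3x3 branches that are concatenated in forward()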
self.group1 = nn.Sequential(
OrderedDict([
('squeeze', nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)),
('squeeze_activation', nn.ReLU(inplace=True))
])
)
self.group2 = nn.Sequential(
OrderedDict([
('expand1x1', nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)),
('expand1x1_activation', nn.ReLU(inplace=True))
])
)
self.group3 = nn.Sequential(
OrderedDict([
('expand3x3', nn.Conv2d(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)),
('expand3x3_activation', nn.ReLU(inplace=True))
])
)
def forward(self, x):
x = self.group1(x)
return torch.cat([self.group2(x),self.group3(x)], 1)
class SqueezeNet(nn.Module):
def __init__(self, version=1.0, num_classes=1000):
super(SqueezeNet, self).__init__()
if version not in [1.0, 1.1]:
raise ValueError("Unsupported SqueezeNet version {version}:"
"1.0 or 1.1 expected".format(version=version))
self.num_classes = num_classes
if version == 1.0:
self.features = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(96, 16, 64, 64),
Fire(128, 16, 64, 64),
Fire(128, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 32, 128, 128),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(512, 64, 256, 256),
)
else:
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(64, 16, 64, 64),
Fire(128, 16, 64, 64),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(128, 32, 128, 128),
Fire(256, 32, 128, 128),
nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
Fire(256, 48, 192, 192),
Fire(384, 48, 192, 192),
Fire(384, 64, 256, 256),
Fire(512, 64, 256, 256),
)
        # Final convolution is initialized differently from the rest
final_conv = nn.Conv2d(512, num_classes, kernel_size=1)
self.classifier = nn.Sequential(
nn.Dropout(p=0.5),
final_conv,
nn.ReLU(inplace=True),
nn.AvgPool2d(13)
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
gain = 2.0
if m is final_conv:
m.weight.data.normal_(0, 0.01)
else:
fan_in = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
u = math.sqrt(3.0 * gain / fan_in)
m.weight.data.uniform_(-u, u)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x.view(x.size(0), self.num_classes)
def squeezenet1_0(pretrained=False, model_root=None, **kwargs):
r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
accuracy with 50x fewer parameters and <0.5MB model size"
<https://arxiv.org/abs/1602.07360>`_ paper.
"""
model = SqueezeNet(version=1.0, **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['squeezenet1_0'], model_root)
return model
def squeezenet1_1(pretrained=False, model_root=None, **kwargs):
r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
"""
model = SqueezeNet(version=1.1, **kwargs)
if pretrained:
misc.load_state_dict(model, model_urls['squeezenet1_1'], model_root)
return model
| 5,022 | 35.398551 | 101 | py |
pytorch-playground | pytorch-playground-master/imagenet/vgg.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
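    # each numeric cfg entry adds a 3x3 conv (with optional BN) of that width; 'M' adds a 2x2 max-pool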
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg11(pretrained=False, model_root=None, **kwargs):
"""VGG 11-layer model (configuration "A")"""
model = VGG(make_layers(cfg['A']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg11'], model_root))
return model
def vgg11_bn(**kwargs):
"""VGG 11-layer model (configuration "A") with batch normalization"""
kwargs.pop('model_root', None)
return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
def vgg13(pretrained=False, model_root=None, **kwargs):
"""VGG 13-layer model (configuration "B")"""
model = VGG(make_layers(cfg['B']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg13'], model_root))
return model
def vgg13_bn(**kwargs):
"""VGG 13-layer model (configuration "B") with batch normalization"""
kwargs.pop('model_root', None)
return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
def vgg16(pretrained=False, model_root=None, **kwargs):
"""VGG 16-layer model (configuration "D")"""
model = VGG(make_layers(cfg['D']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg16'], model_root))
return model
def vgg16_bn(**kwargs):
"""VGG 16-layer model (configuration "D") with batch normalization"""
kwargs.pop('model_root', None)
return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
def vgg19(pretrained=False, model_root=None, **kwargs):
"""VGG 19-layer model (configuration "E")"""
model = VGG(make_layers(cfg['E']), **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['vgg19'], model_root))
return model
def vgg19_bn(**kwargs):
"""VGG 19-layer model (configuration 'E') with batch normalization"""
kwargs.pop('model_root', None)
return VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
| 4,505 | 32.132353 | 113 | py |
pytorch-playground | pytorch-playground-master/imagenet/dataset.py | from utee import misc
import os
import os.path
import numpy as np
import joblib
def get(batch_size, data_root='/tmp/public_dataset/pytorch', train=False, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'imagenet-data'))
print("Building IMAGENET data loader, 50000 for train, 50000 for test")
ds = []
assert train is not True, 'train not supported yet'
if train:
ds.append(IMAGENET(data_root, batch_size, True, **kwargs))
if val:
ds.append(IMAGENET(data_root, batch_size, False, **kwargs))
ds = ds[0] if len(ds) == 1 else ds
return ds
class IMAGENET(object):
def __init__(self, root, batch_size, train=False, input_size=224, **kwargs):
self.mean = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 1, 3)
self.std = np.array([0.229, 0.224, 0.225]).reshape(1, 1, 1, 3)
self.train = train
if train:
pkl_file = os.path.join(root, 'train{}.pkl'.format(input_size))
else:
pkl_file = os.path.join(root, 'val{}.pkl'.format(input_size))
self.data_dict = joblib.load(pkl_file)
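        # the pickle holds preprocessed images under 'data' and labels under 'target'; __next__ slices batches directly out of these arrays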
self.batch_size = batch_size
self.idx = 0
@property
def n_batch(self):
return int(np.ceil(self.n_sample* 1.0 / self.batch_size))
@property
def n_sample(self):
return len(self.data_dict['data'])
def __len__(self):
return self.n_batch
def __iter__(self):
return self
def __next__(self):
if self.idx >= self.n_batch:
self.idx = 0
raise StopIteration
else:
img = self.data_dict['data'][self.idx*self.batch_size:(self.idx+1)*self.batch_size].astype('float32')
target = self.data_dict['target'][self.idx*self.batch_size:(self.idx+1)*self.batch_size]
self.idx += 1
return img, target
if __name__ == '__main__':
train_ds, val_ds = get(200)
| 1,927 | 29.603175 | 113 | py |
pytorch-playground | pytorch-playground-master/imagenet/alexnet.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['AlexNet', 'alexnet']
model_urls = {
'alexnet': 'https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth',
}
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
return x
def alexnet(pretrained=False, model_root=None, **kwargs):
model = AlexNet(**kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['alexnet'], model_root))
return model
| 1,637 | 29.333333 | 84 | py |
pytorch-playground | pytorch-playground-master/mnist/model.py | import torch.nn as nn
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
from utee import misc
print = misc.logger.info
model_urls = {
'mnist': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/mnist-b07bb66b.pth'
}
class MLP(nn.Module):
def __init__(self, input_dims, n_hiddens, n_class):
super(MLP, self).__init__()
assert isinstance(input_dims, int), 'Please provide int for input_dims'
self.input_dims = input_dims
current_dims = input_dims
layers = OrderedDict()
if isinstance(n_hiddens, int):
n_hiddens = [n_hiddens]
else:
n_hiddens = list(n_hiddens)
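        # one fc -> relu -> dropout block per hidden width, followed by a single linear output layer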
for i, n_hidden in enumerate(n_hiddens):
layers['fc{}'.format(i+1)] = nn.Linear(current_dims, n_hidden)
layers['relu{}'.format(i+1)] = nn.ReLU()
layers['drop{}'.format(i+1)] = nn.Dropout(0.2)
current_dims = n_hidden
layers['out'] = nn.Linear(current_dims, n_class)
self.model= nn.Sequential(layers)
print(self.model)
def forward(self, input):
input = input.view(input.size(0), -1)
assert input.size(1) == self.input_dims
return self.model.forward(input)
def mnist(input_dims=784, n_hiddens=[256, 256], n_class=10, pretrained=None):
model = MLP(input_dims, n_hiddens, n_class)
if pretrained is not None:
m = model_zoo.load_url(model_urls['mnist'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
| 1,660 | 34.340426 | 85 | py |
pytorch-playground | pytorch-playground-master/mnist/dataset.py | from torch.utils.data import DataLoader
import torch
from torchvision import datasets, transforms
import os
def get(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'mnist-data'))
kwargs.pop('input_size', None)
num_workers = kwargs.setdefault('num_workers', 1)
print("Building MNIST data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=data_root, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(root=data_root, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
| 1,398 | 41.393939 | 93 | py |
pytorch-playground | pytorch-playground-master/mnist/train.py | import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--wd', type=float, default=0.0001, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=40, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=1, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=100, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--data_root', default='/tmp/public_dataset/pytorch/', help='folder to save the dataset')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader
train_loader, test_loader = dataset.get(batch_size=args.batch_size, data_root=args.data_root, num_workers=1)
# model
model = model.mnist(input_dims=784, n_hiddens=[256, 256], n_class=10)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
model.cuda()
# optimizer
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wd, momentum=0.9)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
# ready to go
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output.data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
loss.data, acc, optimizer.param_groups[0]['lr']))
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target).data
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss / len(test_loader) # average over number of mini-batch
acc = 100. * correct / len(test_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset), acc))
if acc > best_acc:
new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
best_acc = acc
old_file = new_file
except Exception as e:
import traceback
traceback.print_exc()
finally:
print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| 5,502 | 41.992188 | 125 | py |
pytorch-playground | pytorch-playground-master/cifar/model.py | import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from IPython import embed
from collections import OrderedDict
from utee import misc
print = misc.logger.info
model_urls = {
'cifar10': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar10-d875770b.pth',
'cifar100': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/cifar100-3a55a987.pth',
}
class CIFAR(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(CIFAR, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
print(self.features)
print(self.classifier)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU()]
else:
layers += [conv2d, nn.ReLU()]
in_channels = out_channels
return nn.Sequential(*layers)
def cifar10(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=10)
if pretrained is not None:
m = model_zoo.load_url(model_urls['cifar10'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
def cifar100(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = CIFAR(layers, n_channel=8*n_channel, num_classes=100)
if pretrained is not None:
m = model_zoo.load_url(model_urls['cifar100'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
if __name__ == '__main__':
model = cifar10(128, pretrained='log/cifar10/best-135.pth')
embed()
| 2,809 | 36.972973 | 122 | py |
pytorch-playground | pytorch-playground-master/cifar/dataset.py | import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
def get10(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar10-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building CIFAR-10 data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
root=data_root, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(
root=data_root, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
def get100(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
data_root = os.path.expanduser(os.path.join(data_root, 'cifar100-data'))
num_workers = kwargs.setdefault('num_workers', 1)
kwargs.pop('input_size', None)
print("Building CIFAR-100 data loader with {} workers".format(num_workers))
ds = []
if train:
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root=data_root, train=True, download=True,
transform=transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=True, **kwargs)
ds.append(train_loader)
if val:
test_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(
root=data_root, train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])),
batch_size=batch_size, shuffle=False, **kwargs)
ds.append(test_loader)
ds = ds[0] if len(ds) == 1 else ds
return ds
| 2,937 | 40.380282 | 96 | py |
pytorch-playground | pytorch-playground-master/cifar/train.py | import argparse
import os
import time
from utee import misc
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import dataset
import model
from IPython import embed
parser = argparse.ArgumentParser(description='PyTorch CIFAR-X Example')
parser.add_argument('--type', default='cifar10', help='cifar10|cifar100')
parser.add_argument('--channel', type=int, default=128, help='first conv channel (default: 128)')
parser.add_argument('--wd', type=float, default=0.00, help='weight decay')
parser.add_argument('--batch_size', type=int, default=200, help='input batch size for training (default: 200)')
parser.add_argument('--epochs', type=int, default=150, help='number of epochs to train (default: 150)')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate (default: 1e-3)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=2, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 117)')
parser.add_argument('--log_interval', type=int, default=100, help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=5, help='how many epochs to wait before another test')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--decreasing_lr', default='80,120', help='decreasing strategy')
args = parser.parse_args()
args.logdir = os.path.join(os.path.dirname(__file__), args.logdir)
misc.logger.init(args.logdir, 'train_log')
print = misc.logger.info
# select gpu
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
# logger
misc.ensure_dir(args.logdir)
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# data loader and model
assert args.type in ['cifar10', 'cifar100'], args.type
if args.type == 'cifar10':
train_loader, test_loader = dataset.get10(batch_size=args.batch_size, num_workers=1)
model = model.cifar10(n_channel=args.channel)
else:
train_loader, test_loader = dataset.get100(batch_size=args.batch_size, num_workers=1)
model = model.cifar100(n_channel=args.channel)
model = torch.nn.DataParallel(model, device_ids= range(args.ngpu))
if args.cuda:
model.cuda()
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
# ready to go
for epoch in range(args.epochs):
model.train()
if epoch in decreasing_lr:
optimizer.param_groups[0]['lr'] *= 0.1
for batch_idx, (data, target) in enumerate(train_loader):
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0 and batch_idx > 0:
pred = output.data.max(1)[1] # get the index of the max log-probability
correct = pred.cpu().eq(indx_target).sum()
acc = correct * 1.0 / len(data)
print('Train Epoch: {} [{}/{}] Loss: {:.6f} Acc: {:.4f} lr: {:.2e}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
loss.data[0], acc, optimizer.param_groups[0]['lr']))
elapse_time = time.time() - t_begin
speed_epoch = elapse_time / (epoch + 1)
speed_batch = speed_epoch / len(train_loader)
eta = speed_epoch * args.epochs - elapse_time
print("Elapsed {:.2f}s, {:.2f} s/epoch, {:.2f} s/batch, ets {:.2f}s".format(
elapse_time, speed_epoch, speed_batch, eta))
misc.model_snapshot(model, os.path.join(args.logdir, 'latest.pth'))
if epoch % args.test_interval == 0:
model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
indx_target = target.clone()
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
test_loss += F.cross_entropy(output, target).data[0]
pred = output.data.max(1)[1] # get the index of the max log-probability
correct += pred.cpu().eq(indx_target).sum()
test_loss = test_loss / len(test_loader) # average over number of mini-batch
acc = 100. * correct / len(test_loader.dataset)
print('\tTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset), acc))
if acc > best_acc:
new_file = os.path.join(args.logdir, 'best-{}.pth'.format(epoch))
misc.model_snapshot(model, new_file, old_file=old_file, verbose=True)
best_acc = acc
old_file = new_file
except Exception as e:
import traceback
traceback.print_exc()
finally:
print("Total Elapse: {:.2f}, Best Result: {:.3f}%".format(time.time()-t_begin, best_acc))
| 5,777 | 42.119403 | 125 | py |
pytorch-playground | pytorch-playground-master/utee/quant.py | from torch.autograd import Variable
import torch
from torch import nn
from collections import OrderedDict
import math
from IPython import embed
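# Estimate how many bits the integer part needs: take the activation magnitude at the
# `overflow_rate` quantile (from the top) and return ceil(log2) of it, so that fraction
# of values is allowed to saturate.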
def compute_integral_part(input, overflow_rate):
abs_value = input.abs().view(-1)
sorted_value = abs_value.sort(dim=0, descending=True)[0]
split_idx = int(overflow_rate * len(sorted_value))
v = sorted_value[split_idx]
if isinstance(v, Variable):
v = float(v.data.cpu())
sf = math.ceil(math.log2(v+1e-12))
return sf
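# Signed fixed-point quantization: round onto a grid with step 2^-sf and clamp to the
# representable range of a `bits`-bit signed integer.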
def linear_quantize(input, sf, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input) - 1
delta = math.pow(2.0, -sf)
bound = math.pow(2.0, bits-1)
min_val = - bound
max_val = bound - 1
rounded = torch.floor(input / delta + 0.5)
clipped_value = torch.clamp(rounded, min_val, max_val) * delta
return clipped_value
def log_minmax_quantize(input, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input), 0.0, 0.0
s = torch.sign(input)
input0 = torch.log(torch.abs(input) + 1e-20)
v = min_max_quantize(input0, bits-1)
v = torch.exp(v) * s
return v
def log_linear_quantize(input, sf, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input), 0.0, 0.0
s = torch.sign(input)
input0 = torch.log(torch.abs(input) + 1e-20)
v = linear_quantize(input0, sf, bits-1)
v = torch.exp(v) * s
return v
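# Uniform quantization over the tensor's own [min, max] range with 2^bits - 1 levels.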
def min_max_quantize(input, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input) - 1
min_val, max_val = input.min(), input.max()
if isinstance(min_val, Variable):
max_val = float(max_val.data.cpu().numpy()[0])
min_val = float(min_val.data.cpu().numpy()[0])
input_rescale = (input - min_val) / (max_val - min_val)
n = math.pow(2.0, bits) - 1
v = torch.floor(input_rescale * n + 0.5) / n
v = v * (max_val - min_val) + min_val
return v
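# Squash values into (-1, 1) with tanh, quantize uniformly, then map back with arctanh.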
def tanh_quantize(input, bits):
assert bits >= 1, bits
if bits == 1:
return torch.sign(input)
input = torch.tanh(input) # [-1, 1]
input_rescale = (input + 1.0) / 2 #[0, 1]
n = math.pow(2.0, bits) - 1
v = torch.floor(input_rescale * n + 0.5) / n
v = 2 * v - 1 # [-1, 1]
v = 0.5 * torch.log((1 + v) / (1 - v)) # arctanh
return v
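# Activation quantization layer: during the first `counter` forward passes it only
# calibrates the scale factor `sf` from observed activations, afterwards it quantizes.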
class LinearQuant(nn.Module):
def __init__(self, name, bits, sf=None, overflow_rate=0.0, counter=10):
super(LinearQuant, self).__init__()
self.name = name
self._counter = counter
self.bits = bits
self.sf = sf
self.overflow_rate = overflow_rate
@property
def counter(self):
return self._counter
def forward(self, input):
if self._counter > 0:
self._counter -= 1
sf_new = self.bits - 1 - compute_integral_part(input, self.overflow_rate)
self.sf = min(self.sf, sf_new) if self.sf is not None else sf_new
return input
else:
output = linear_quantize(input, self.sf, self.bits)
return output
def __repr__(self):
return '{}(sf={}, bits={}, overflow_rate={:.3f}, counter={})'.format(
self.__class__.__name__, self.sf, self.bits, self.overflow_rate, self.counter)
class LogQuant(nn.Module):
def __init__(self, name, bits, sf=None, overflow_rate=0.0, counter=10):
super(LogQuant, self).__init__()
self.name = name
self._counter = counter
self.bits = bits
self.sf = sf
self.overflow_rate = overflow_rate
@property
def counter(self):
return self._counter
def forward(self, input):
if self._counter > 0:
self._counter -= 1
log_abs_input = torch.log(torch.abs(input))
sf_new = self.bits - 1 - compute_integral_part(log_abs_input, self.overflow_rate)
self.sf = min(self.sf, sf_new) if self.sf is not None else sf_new
return input
else:
output = log_linear_quantize(input, self.sf, self.bits)
return output
def __repr__(self):
return '{}(sf={}, bits={}, overflow_rate={:.3f}, counter={})'.format(
self.__class__.__name__, self.sf, self.bits, self.overflow_rate, self.counter)
class NormalQuant(nn.Module):
def __init__(self, name, bits, quant_func):
super(NormalQuant, self).__init__()
self.name = name
self.bits = bits
        self.quant_func = quant_func
        self._counter = 0  # kept so the `counter` property below does not raise
@property
def counter(self):
return self._counter
def forward(self, input):
output = self.quant_func(input, self.bits)
return output
def __repr__(self):
return '{}(bits={})'.format(self.__class__.__name__, self.bits)
def duplicate_model_with_quant(model, bits, overflow_rate=0.0, counter=10, type='linear'):
"""assume that original model has at least a nn.Sequential"""
assert type in ['linear', 'minmax', 'log', 'tanh']
if isinstance(model, nn.Sequential):
l = OrderedDict()
for k, v in model._modules.items():
if isinstance(v, (nn.Conv2d, nn.Linear, nn.BatchNorm1d, nn.BatchNorm2d, nn.AvgPool2d)):
l[k] = v
if type == 'linear':
quant_layer = LinearQuant('{}_quant'.format(k), bits=bits, overflow_rate=overflow_rate, counter=counter)
elif type == 'log':
# quant_layer = LogQuant('{}_quant'.format(k), bits=bits, overflow_rate=overflow_rate, counter=counter)
quant_layer = NormalQuant('{}_quant'.format(k), bits=bits, quant_func=log_minmax_quantize)
elif type == 'minmax':
quant_layer = NormalQuant('{}_quant'.format(k), bits=bits, quant_func=min_max_quantize)
else:
quant_layer = NormalQuant('{}_quant'.format(k), bits=bits, quant_func=tanh_quantize)
l['{}_{}_quant'.format(k, type)] = quant_layer
else:
l[k] = duplicate_model_with_quant(v, bits, overflow_rate, counter, type)
m = nn.Sequential(l)
return m
else:
for k, v in model._modules.items():
model._modules[k] = duplicate_model_with_quant(v, bits, overflow_rate, counter, type)
return model
| 6,302 | 32.705882 | 124 | py |
pytorch-playground | pytorch-playground-master/utee/misc.py | import cv2
import os
import shutil
import pickle as pkl
import time
import numpy as np
import hashlib
from IPython import embed
class Logger(object):
def __init__(self):
self._logger = None
def init(self, logdir, name='log'):
if self._logger is None:
import logging
if not os.path.exists(logdir):
os.makedirs(logdir)
log_file = os.path.join(logdir, name)
if os.path.exists(log_file):
os.remove(log_file)
self._logger = logging.getLogger()
self._logger.setLevel('INFO')
fh = logging.FileHandler(log_file)
ch = logging.StreamHandler()
self._logger.addHandler(fh)
self._logger.addHandler(ch)
def info(self, str_info):
self.init('/tmp', 'tmp.log')
self._logger.info(str_info)
logger = Logger()
print = logger.info
def ensure_dir(path, erase=False):
if os.path.exists(path) and erase:
print("Removing old folder {}".format(path))
shutil.rmtree(path)
if not os.path.exists(path):
print("Creating folder {}".format(path))
os.makedirs(path)
def load_pickle(path):
begin_st = time.time()
with open(path, 'rb') as f:
print("Loading pickle object from {}".format(path))
v = pkl.load(f)
print("=> Done ({:.4f} s)".format(time.time() - begin_st))
return v
def dump_pickle(obj, path):
with open(path, 'wb') as f:
print("Dumping pickle object to {}".format(path))
pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)
def auto_select_gpu(mem_bound=500, utility_bound=0, gpus=(0, 1, 2, 3, 4, 5, 6, 7), num_gpu=1, selected_gpus=None):
import sys
import os
import subprocess
import re
import time
import numpy as np
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
sys.exit(0)
if selected_gpus is None:
mem_trace = []
utility_trace = []
for i in range(5): # sample 5 times
info = subprocess.check_output('nvidia-smi', shell=True).decode('utf-8')
            mem = [int(s[:-5]) for s in re.compile(r'\d+MiB\s/').findall(info)]
            utility = [int(re.compile(r'\d+').findall(s)[0]) for s in re.compile(r'\d+%\s+Default').findall(info)]
mem_trace.append(mem)
utility_trace.append(utility)
time.sleep(0.1)
mem = np.mean(mem_trace, axis=0)
utility = np.mean(utility_trace, axis=0)
assert(len(mem) == len(utility))
nGPU = len(utility)
ideal_gpus = [i for i in range(nGPU) if mem[i] <= mem_bound and utility[i] <= utility_bound and i in gpus]
if len(ideal_gpus) < num_gpu:
print("No sufficient resource, available: {}, require {} gpu".format(ideal_gpus, num_gpu))
sys.exit(0)
else:
selected_gpus = list(map(str, ideal_gpus[:num_gpu]))
else:
selected_gpus = selected_gpus.split(',')
print("Setting GPU: {}".format(selected_gpus))
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(selected_gpus)
return selected_gpus
def expand_user(path):
return os.path.abspath(os.path.expanduser(path))
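# Save a CPU copy of the (possibly DataParallel-wrapped) model's state dict to
# `new_file`, removing the previous snapshot `old_file` if it exists.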
def model_snapshot(model, new_file, old_file=None, verbose=False):
from collections import OrderedDict
import torch
if isinstance(model, torch.nn.DataParallel):
model = model.module
if old_file and os.path.exists(expand_user(old_file)):
if verbose:
print("Removing old model {}".format(expand_user(old_file)))
os.remove(expand_user(old_file))
if verbose:
print("Saving model to {}".format(expand_user(new_file)))
state_dict = OrderedDict()
for k, v in model.state_dict().items():
if v.is_cuda:
v = v.cpu()
state_dict[k] = v
torch.save(state_dict, expand_user(new_file))
def load_lmdb(lmdb_file, n_records=None):
import lmdb
import numpy as np
lmdb_file = expand_user(lmdb_file)
if os.path.exists(lmdb_file):
data = []
env = lmdb.open(lmdb_file, readonly=True, max_readers=512)
with env.begin() as txn:
cursor = txn.cursor()
begin_st = time.time()
print("Loading lmdb file {} into memory".format(lmdb_file))
for key, value in cursor:
_, target, _ = key.decode('ascii').split(':')
target = int(target)
img = cv2.imdecode(np.fromstring(value, np.uint8), cv2.IMREAD_COLOR)
data.append((img, target))
if n_records is not None and len(data) >= n_records:
break
env.close()
print("=> Done ({:.4f} s)".format(time.time() - begin_st))
return data
else:
print("Not found lmdb file".format(lmdb_file))
def str2img(str_b):
return cv2.imdecode(np.fromstring(str_b, np.uint8), cv2.IMREAD_COLOR)
def img2str(img):
return cv2.imencode('.jpg', img)[1].tostring()
def md5(s):
m = hashlib.md5()
m.update(s)
return m.hexdigest()
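# Evaluate top-1/top-5 accuracy of `model` over the (data, target) batches in `ds`;
# ImageNet models are wrapped so raw [0, 255] inputs get normalized inside forward().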
def eval_model(model, ds, n_sample=None, ngpu=1, is_imagenet=False):
import tqdm
import torch
from torch import nn
from torch.autograd import Variable
class ModelWrapper(nn.Module):
def __init__(self, model):
super(ModelWrapper, self).__init__()
self.model = model
self.mean = [0.485, 0.456, 0.406]
self.std = [0.229, 0.224, 0.225]
def forward(self, input):
input.data.div_(255.)
input.data[:, 0, :, :].sub_(self.mean[0]).div_(self.std[0])
input.data[:, 1, :, :].sub_(self.mean[1]).div_(self.std[1])
input.data[:, 2, :, :].sub_(self.mean[2]).div_(self.std[2])
return self.model(input)
correct1, correct5 = 0, 0
n_passed = 0
if is_imagenet:
model = ModelWrapper(model)
model = model.eval()
model = torch.nn.DataParallel(model, device_ids=range(ngpu)).cuda()
n_sample = len(ds) if n_sample is None else n_sample
for idx, (data, target) in enumerate(tqdm.tqdm(ds, total=n_sample)):
n_passed += len(data)
data = Variable(torch.FloatTensor(data)).cuda()
indx_target = torch.LongTensor(target)
output = model(data)
bs = output.size(0)
idx_pred = output.data.sort(1, descending=True)[1]
idx_gt1 = indx_target.expand(1, bs).transpose_(0, 1)
idx_gt5 = idx_gt1.expand(bs, 5)
correct1 += idx_pred[:, :1].cpu().eq(idx_gt1).sum()
correct5 += idx_pred[:, :5].cpu().eq(idx_gt5).sum()
if idx >= n_sample - 1:
break
acc1 = correct1 * 1.0 / n_passed
acc5 = correct5 * 1.0 / n_passed
return acc1, acc5
def load_state_dict(model, model_urls, model_root):
from torch.utils import model_zoo
from torch import nn
import re
from collections import OrderedDict
own_state_old = model.state_dict()
own_state = OrderedDict() # remove all 'group' string
for k, v in own_state_old.items():
        k = re.sub(r'group\d+\.', '', k)
own_state[k] = v
state_dict = model_zoo.load_url(model_urls, model_root)
for name, param in state_dict.items():
if name not in own_state:
print(own_state.keys())
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if isinstance(param, nn.Parameter):
# backwards compatibility for serialized parameters
param = param.data
own_state[name].copy_(param)
missing = set(own_state.keys()) - set(state_dict.keys())
no_use = set(state_dict.keys()) - set(own_state.keys())
if len(no_use) > 0:
raise KeyError('some keys are not used: "{}"'.format(no_use))
| 7,772 | 32.943231 | 114 | py |
pytorch-playground | pytorch-playground-master/utee/selector.py | from utee import misc
import os
from imagenet import dataset
print = misc.logger.info
from IPython import embed
known_models = [
'mnist', 'svhn', # 28x28
'cifar10', 'cifar100', # 32x32
'stl10', # 96x96
'alexnet', # 224x224
'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn', # 224x224
'resnet18', 'resnet34', 'resnet50', 'resnet101','resnet152', # 224x224
'squeezenet_v0', 'squeezenet_v1', #224x224
'inception_v3', # 299x299
]
def mnist(cuda=True, model_root=None):
print("Building and initializing mnist parameters")
from mnist import model, dataset
m = model.mnist(pretrained=os.path.join(model_root, 'mnist.pth'))
if cuda:
m = m.cuda()
return m, dataset.get, False
def svhn(cuda=True, model_root=None):
print("Building and initializing svhn parameters")
from svhn import model, dataset
m = model.svhn(32, pretrained=os.path.join(model_root, 'svhn.pth'))
if cuda:
m = m.cuda()
return m, dataset.get, False
def cifar10(cuda=True, model_root=None):
print("Building and initializing cifar10 parameters")
from cifar import model, dataset
m = model.cifar10(128, pretrained=os.path.join(model_root, 'cifar10.pth'))
if cuda:
m = m.cuda()
return m, dataset.get10, False
def cifar100(cuda=True, model_root=None):
print("Building and initializing cifar100 parameters")
from cifar import model, dataset
m = model.cifar100(128, pretrained=os.path.join(model_root, 'cifar100.pth'))
if cuda:
m = m.cuda()
return m, dataset.get100, False
def stl10(cuda=True, model_root=None):
print("Building and initializing stl10 parameters")
from stl10 import model, dataset
m = model.stl10(32, pretrained=os.path.join(model_root, 'stl10.pth'))
if cuda:
m = m.cuda()
return m, dataset.get, False
def alexnet(cuda=True, model_root=None):
print("Building and initializing alexnet parameters")
from imagenet import alexnet as alx
m = alx.alexnet(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def vgg16(cuda=True, model_root=None):
print("Building and initializing vgg16 parameters")
from imagenet import vgg
m = vgg.vgg16(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def vgg16_bn(cuda=True, model_root=None):
print("Building vgg16_bn parameters")
from imagenet import vgg
m = vgg.vgg16_bn(model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def vgg19(cuda=True, model_root=None):
print("Building and initializing vgg19 parameters")
from imagenet import vgg
m = vgg.vgg19(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def vgg19_bn(cuda=True, model_root=None):
print("Building vgg19_bn parameters")
from imagenet import vgg
m = vgg.vgg19_bn(model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def inception_v3(cuda=True, model_root=None):
print("Building and initializing inception_v3 parameters")
from imagenet import inception
m = inception.inception_v3(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet18(cuda=True, model_root=None):
print("Building and initializing resnet-18 parameters")
from imagenet import resnet
m = resnet.resnet18(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet34(cuda=True, model_root=None):
print("Building and initializing resnet-34 parameters")
from imagenet import resnet
m = resnet.resnet34(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet50(cuda=True, model_root=None):
print("Building and initializing resnet-50 parameters")
from imagenet import resnet
m = resnet.resnet50(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet101(cuda=True, model_root=None):
print("Building and initializing resnet-101 parameters")
from imagenet import resnet
m = resnet.resnet101(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def resnet152(cuda=True, model_root=None):
print("Building and initializing resnet-152 parameters")
from imagenet import resnet
m = resnet.resnet152(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def squeezenet_v0(cuda=True, model_root=None):
print("Building and initializing squeezenet_v0 parameters")
from imagenet import squeezenet
m = squeezenet.squeezenet1_0(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def squeezenet_v1(cuda=True, model_root=None):
print("Building and initializing squeezenet_v1 parameters")
from imagenet import squeezenet
m = squeezenet.squeezenet1_1(True, model_root)
if cuda:
m = m.cuda()
return m, dataset.get, True
def select(model_name, **kwargs):
assert model_name in known_models, model_name
kwargs.setdefault('model_root', os.path.expanduser('~/.torch/models'))
return eval('{}'.format(model_name))(**kwargs)
if __name__ == '__main__':
m1 = alexnet()
embed()
| 5,245 | 29.5 | 80 | py |
pytorch-playground | pytorch-playground-master/script/convert.py | import os
import numpy as np
import tqdm
from utee import misc
import argparse
import cv2
import joblib
parser = argparse.ArgumentParser(description='Extract the ILSVRC2012 val dataset')
parser.add_argument('--in_file', default='val224_compressed.pkl', help='input file path')
parser.add_argument('--out_root', default='/data/public_dataset/pytorch/imagenet-data/', help='output file path')
args = parser.parse_args()
d = misc.load_pickle(args.in_file)
assert len(d['data']) == 50000, len(d['data'])
assert len(d['target']) == 50000, len(d['target'])
data299 = []
for img, target in tqdm.tqdm(zip(d['data'], d['target']), total=50000):
img224 = misc.str2img(img)
img299 = cv2.resize(img224, (299, 299))
data299.append(img299)
data_dict299 = dict(
data = np.array(data299).transpose(0, 3, 1, 2),
target = d['target']
)
if not os.path.exists(args.out_root):
os.makedirs(args.out_root)
joblib.dump(data_dict299, os.path.join(args.out_root, 'val299.pkl'))
data299.clear()
data_dict299.clear()
data224 = []
for img, target in tqdm.tqdm(zip(d['data'], d['target']), total=50000):
img224 = misc.str2img(img)
data224.append(img224)
data_dict224 = dict(
data = np.array(data224).transpose(0, 3, 1, 2),
target = d['target']
)
joblib.dump(data_dict224, os.path.join(args.out_root, 'val224.pkl'))
| 1,337 | 25.76 | 113 | py |
checklist | checklist-master/checklist/text_generation.py | from transformers import AutoTokenizer, AutoModelForMaskedLM
import collections
import itertools
import numpy as np
import re
from transformers import GPT2Config
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from tqdm.auto import tqdm
import torch
import torch.nn.functional as F
from pattern.en import wordnet, pluralize
import requests
import json
def all_synsets(word, pos=None):
map = {
'NOUN': wordnet.NOUN,
'VERB': wordnet.VERB,
'ADJ': wordnet.ADJECTIVE,
'ADV': wordnet.ADVERB
}
if pos is None:
pos_list = [wordnet.VERB, wordnet.ADJECTIVE, wordnet.NOUN, wordnet.ADVERB]
else:
pos_list = [map[pos]]
ret = []
for pos in pos_list:
ret.extend(wordnet.synsets(word, pos=pos))
return ret
def clean_senses(synsets):
return [x for x in set(synsets) if '_' not in x]
def all_possible_synonyms(word, pos=None):
ret = []
for syn in all_synsets(word, pos=pos):
# if syn.synonyms[0] != word:
# continue
ret.extend(syn.senses)
return clean_senses(ret)
def all_possible_antonyms(word, pos=None):
ret = []
for syn in all_synsets(word, pos=pos):
if not syn.antonym:
continue
for s in syn.antonym:
ret.extend(s.senses)
return clean_senses(ret)
def all_possible_hypernyms(word, pos=None, depth=None):
ret = []
for syn in all_synsets(word, pos=pos):
ret.extend([y for x in syn.hypernyms(recursive=True, depth=depth) for y in x.senses])
return clean_senses(ret)
def all_possible_hyponyms(word, pos=None, depth=None):
ret = []
for syn in all_synsets(word, pos=pos):
ret.extend([y for x in syn.hyponyms(recursive=True, depth=depth) for y in x.senses])
return clean_senses(ret)
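# Find common WordNet ancestors between pairs of the given words' synsets and return
# the senses of those ancestors' hyponyms down to `depth` levels.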
def all_possible_related(words, pos=None, depth=1):
all_syns = [y for word in words for y in all_synsets(word, pos=pos)]
# all_syns = [all_synsets(x, pos=pos) for x in words]
# all_syns = [x[0] for x in all_syns if x]
# return all_syns
# print(all_syns)
all_ancestors = [wordnet.ancestor(s1, s2) for s1, s2 in itertools.combinations(all_syns, 2)]
all_ancestors = [x for x in all_ancestors if x]
# print(all_ancestors)
mapz = {x.lexname: x for x in all_ancestors}
all_ancestors = list(mapz.values())
all_descendents = [y for x in all_ancestors for y in x.hyponyms(recursive=True, depth=depth)]
ret = [y for x in all_descendents for y in x.senses]
return clean_senses(ret)
class TextGenerator(object):
def __init__(self, url=None, model_name='roberta-base', prefix_sentence='', allow_word_pieces=False, **kwargs):
self.url = url
if url is None:
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
# self.model = BertForMaskedLM.from_pretrained('bert-base-cased')
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.model = AutoModelForMaskedLM.from_pretrained(model_name)
self.model.to(self.device)
self.model.eval()
self.prefix_sentence = prefix_sentence
self.prefix_len = len(self.tokenizer.encode(prefix_sentence, add_special_tokens=False))
self.allow_word_pieces = allow_word_pieces
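        # Detect the marker this tokenizer uses for a leading space (e.g. 'Ġ' in
        # byte-level BPE vocabularies) by tokenizing ' a' and stripping the 'a'.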
tmp = self.tokenizer.tokenize(' a')[0].split('a')
assert len(tmp) == 2
assert tmp[1] == ''
self.space_prefix = tmp[0]
if not self.allow_word_pieces:
self.with_space = torch.tensor(np.array(list(set([i for x, i in self.tokenizer.get_vocab().items() if x.startswith(self.space_prefix)]))), device=self.device);
self.with_space_set = set(self.with_space.cpu().numpy())
self.special_chars = set([i for x, i in self.tokenizer.get_vocab().items() if not x.strip(self.space_prefix).isalnum()])
def unmask_multiple(self, texts, beam_size=500, candidates=None, metric='avg', **kwargs):
rets = []
for text in texts:
rets.append(self.unmask(text, beam_size, candidates))
scores = collections.defaultdict(lambda: 0.) if metric == 'avg' else collections.defaultdict(lambda: 999999999)
count = collections.defaultdict(lambda: 0.)
examples = {}
longest = max([len(x[0][0]) for x in rets])
rets = sorted(rets, key=lambda x:len(x[0][0]), reverse=True)
for r in rets:
for x in r:
tup = tuple(x[0])
if len(tup) != longest:
tups = [k for k in scores if tuple(k[:len(tup)]) == tup]
else:
tups = [tup]
for tup in tups:
count[tup] += 1
examples[tup] = x[1]
if metric == 'avg':
scores[tup] += x[-1]
elif metric == 'min':
scores[tup] = min(scores[tup], x[-1])
if metric == 'min':
for x in count:
# print(x, count[x])
if count[x] != len(texts):
scores[x] = -999999
else:
for x in scores:
scores[x] = scores[x] / len(texts)
scores = sorted(scores.items(), key=lambda x:x[1], reverse=True)
return [(list(x[0]), examples[x[0]], x[1]) for x in scores]
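    # Beam search over the mask tokens: fill each masked position left to right,
    # keeping the `beam_size` highest-scoring partial fills; `candidates`, if given,
    # restricts the vocabulary considered at every position.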
def unmask(self, text_with_mask, beam_size=10, candidates=None):
if self.url is not None:
params = {'text': text_with_mask, 'beam_size': beam_size, 'candidates': candidates}
r = requests.post(url='%s/unmask' % self.url, data={'params': json.dumps(params)})
r = [tuple(x) for x in json.loads(r.text)]
return r
tokenizer = self.tokenizer
model = self.model
encoded = np.array(tokenizer.encode(self.prefix_sentence + text_with_mask, add_special_tokens=True))
cands = []
if candidates is not None:
candidates = candidates + [self.space_prefix + x for x in candidates]
cands = tokenizer.convert_tokens_to_ids(candidates)
if self.allow_word_pieces:
cands_with_space = list(set(cands))
else:
cands_with_space = list(set(cands).intersection(self.with_space_set))
if not len(cands_with_space):
return []
input_ids = torch.tensor(encoded)
# toks = tokenizer.tokenize('[CLS] %s [SEP]' % string)
current_beam= [([], 0)]
masked = (input_ids == self.tokenizer.mask_token_id).numpy().nonzero()[0]
# print(masked)
while len(current_beam[0][0]) != masked.shape[0]:
current_beam = current_beam[:beam_size]
size = len(current_beam[0][0])
to_pred = []
new_beam = []
for i, current in enumerate(current_beam):
idxs = current[0]
c = encoded.copy()
c[masked[:len(idxs)]] = idxs
to_pred.append(c)
# print('ae')
# print('\n'.join([tokenizer.decode(x) for x in to_pred]))
# print()
to_pred = torch.tensor(to_pred, device=self.device).to(torch.int64)
with torch.no_grad():
outputs = model(to_pred)[0]
for i, current in enumerate(current_beam):
prev = int(to_pred[i][masked[size] - 1])
forbid = False
# allow tokens that don't start with space if previous is not alphanumeric
if not self.allow_word_pieces and prev not in self.special_chars:
forbid = True
# print('Forbid Prev, current', prev, tokenizer.decode(to_pred[i][masked[size] - 1:masked[size]+1]))
if candidates is not None:
cands_to_use = cands_with_space if forbid else cands
scores = [outputs[i, masked[size], j] for j in cands_to_use]
new = [(current[0] + [int(x[0])], float(x[1]) + current[1]) for x in zip(cands_to_use, scores)]
else:
if forbid:
v, top_preds = torch.topk(outputs[i, masked[size], self.with_space.to(torch.int64)], beam_size + 10)
top_preds = self.with_space[top_preds]
else:
v, top_preds = torch.topk(outputs[i, masked[size]], beam_size + 10)
new = [(current[0] + [int(x[0])], float(x[1]) + current[1]) for x in zip(top_preds, v)]
new_beam.extend(new)
current_beam = sorted(new_beam, key=lambda x:x[1], reverse=True)
ret = []
ret_text = []
cop = encoded.copy()
for idxs, score in current_beam:
# words = tokenizer.convert_ids_to_tokens(idxs)
words = [str(tokenizer.decode([i])).strip() for i in idxs]
cop[masked] = idxs
text = tokenizer.decode(cop[1 + self.prefix_len:-1])
ret.append((words, text, score / masked.shape[0]))
ret = sorted(ret, key=lambda x:x[2], reverse=True)
return ret
def fill_in_between(self, pieces, beam_size=10, candidates=None):
text = ''
for p in pieces[:-1]:
text += p
text += ' ' + self.tokenizer.mask_token
if p != '':
text += ' '
text += pieces[-1]
if pieces[-1] == '':
text = text.rstrip()
return self.unmask(text, beam_size=beam_size, candidates=candidates)
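    # Mask every occurrence of `word` in `text` and return substitute words whose
    # language-model score is within `threshold` of the original word's score.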
def replace_word(self, text, word, threshold=5, beam_size=100, candidates=None):
masked = re.sub(r'\b%s\b' % re.escape(word), self.tokenizer.mask_token, text)
if masked == text:
return []
if candidates is not None:
candidates = [word] + candidates
ret = self.unmask(masked, beam_size=beam_size, candidates=candidates)
non_word = [x for x in ret if np.all([y not in [self.tokenizer.unk_token, word] for y in x[0]])]
score = [x for x in ret if np.all([y in [word, self.tokenizer.unk_token] for y in x[0]])]
if not score:
score = 0
else:
score = score[0][-1]
escaped = re.escape(word)
# new_ret = [(x[0], x[1], score - x[2]) for x in non_word if score - x[2] < threshold]
try:
new_ret = [(x[0], re.sub(r'\b%s\b' % escaped, x[0][0], text), score - x[2]) for x in non_word if score - x[2] < threshold]
except:
new_ret = [(x[0], x[1], score - x[2]) for x in non_word if score - x[2] < threshold]
return new_ret
def more_general(self, texts, word, threshold=5, pos=None, **kwargs):
options = all_possible_hypernyms(word, pos=pos)
# print(options)
return self.filter_options(texts, word, options, threshold)
def more_specific(self, texts, word, threshold=5, depth=3, pos=None, **kwargs):
options = all_possible_hyponyms(word, depth=depth, pos=pos)
return self.filter_options(texts, word, options, threshold)
def related_words(self, texts, words, threshold=5, depth=3, pos=None, **kwargs):
if type(words) != list:
words = [words]
if len(words) == 1:
options = all_possible_hypernyms(words[0], pos=pos)
ancestors = [x[0][0] for x in self.filter_options(texts, words[0], options, threshold)]
# print(ancestors)
options = list(set([y for x in ancestors for y in all_possible_hyponyms(x, depth=depth)]))
else:
options = all_possible_related(words, depth=depth)
return self.filter_options(texts, words[0], options, threshold)
def antonyms(self, texts, word, threshold=5, pos=None, **kwargs):
options = all_possible_antonyms(word, pos=pos)
return self.filter_options(texts, word, options, threshold)
def synonyms(self, texts, word, threshold=5, pos=None, **kwargs):
options = all_possible_synonyms(word, pos=pos)
# print(options)
return self.filter_options(texts, word, options, threshold)
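    # Keep only the candidate substitutions for `word` that score within `threshold`
    # of the original word in every one of the given texts.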
def filter_options(self, texts, word, options, threshold=5):
if type(texts) != list:
texts = [texts]
options = options + [word]
in_all = set(options)
orig_ret = []
for text in texts:
masked = re.sub(r'\b%s\b' % re.escape(word), self.tokenizer.mask_token, text)
if masked == text:
continue
ret = self.unmask(masked, beam_size=100, candidates=options)
if not ret:
in_all = in_all.intersection(set())
continue
non_word = [x for x in ret if np.all([y not in [self.tokenizer.unk_token, word] for y in x[0]])]
score = [x for x in ret if np.all([y in [word, self.tokenizer.unk_token] for y in x[0]])]
if score:
score = score[0][-1]
# this will happen when the word is not in the vocabulary, in which case we don't look at the score
else:
score = 0
new_ret = [(x[0], x[1], score - x[2]) for x in non_word if score - x[2] < threshold]
# print(text)
# print(new_ret)
# print()
if text == texts[0]:
orig_ret = new_ret
in_all = in_all.intersection(set([x[0][0] for x in new_ret]))
return [x for x in orig_ret if x[0][0] in in_all]
def antonym(self, text, word, threshold=5, synonym=False):
options = all_possible_antonyms(word)
if synonym:
options = all_possible_synonyms(word)
if not options:
return []
options = options + [word]
        masked = re.sub(r'\b%s\b' % re.escape(word), self.tokenizer.mask_token, text)
if masked == text:
return []
ret = self.unmask(masked, beam_size=100000000, candidates=options)
non_word = [x for x in ret if np.all([y not in [self.tokenizer.unk_token, word] for y in x[0]])]
score = [x for x in ret if np.all([y in [word, self.tokenizer.unk_token] for y in x[0]])][0][-1]
new_ret = [(x[0], x[1], score - x[2]) for x in non_word if score - x[2] < threshold]
return new_ret
def try_all_antonyms(self, text, threshold=5, synonym=False):
if self.url is not None:
params = {'text': text }
r = requests.post(url='%s/tokenize' % self.url, data={'params': json.dumps(params)})
words = json.loads(r.text)
else:
words = self.tokenizer.tokenize(text)
new_ret = []
for word in words:
word = word.strip(self.space_prefix)
try:
if synonym:
ret = self.synonyms(text, word, threshold)
else:
ret = self.antonyms(text, word, threshold)
except:
print('Error', word)
print()
continue
new_ret.extend(ret)
return sorted(new_ret, key=lambda x:x[2])
| 15,163 | 44.951515 | 175 | py |
checklist | checklist-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import inspect  # used by linkcode_resolve below
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../checklist'))
autodoc_mock_imports = [
'spacy', 'spacy.cli', 'nltk', 'nltk.corpus', 'nltk.tree', 'pattern',
'numpy', 'np', 'spacy.syntax.nn_parser.array', '__reduce_cython__',
'numpy.dtype', 'spacy.syntax.nn_parser.array.__reduce_cython__', '_ARRAY_API',
'BertForMaskedLM', 'dill', 'munch', 'pattern.en', 'transformers', 'ipywidgets', 'tqdm',
'traitlets', 'torch', 'typing', 'spacy.attrs', 'spacy.lang.en', 'IPython', 'IPython.core.display',
'iso639'
]
# -- Project information -----------------------------------------------------
project = 'checklist'
copyright = '2020, Marco Tulio Ribeiro'
author = 'Marco Tulio Ribeiro'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
]
# The master toctree document.
master_doc = 'index'
autodoc_member_order = 'groupwise'
autoclass_content = 'both'
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', "static"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# make github links resolve
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
This code is from
https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L290
and https://github.com/Lasagne/Lasagne/pull/262
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
filename = info['module'].replace('.', '/')
return "https://github.com/marcotcr/checklist/blob/master/%s.py%s" % (filename, linespec)
| 4,326 | 30.355072 | 102 | py |
Semi-Online-KD | Semi-Online-KD-master/main.py | import argparse
import yaml
import os
import torch
from trainer import build_trainer
from utils.utils import save_code, save_opts
def main():
parser = argparse.ArgumentParser(description='KnowledgeDistillation')
parser.add_argument('--configs', '-c', dest='params', default='./configs/sokd.yaml')
parser.add_argument('--name', '-n', dest='name', default='debug')
parser.add_argument('--seed', '-s', type=int, default=8888)
parser.add_argument('--gpus', '-g', type=str, default='0')
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
with open(args.params) as f:
params = yaml.load(f, Loader=yaml.FullLoader)
params['name'] = args.name
params['seed'] = args.seed
params['device'] = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
trainer = build_trainer(**params)
save_opts(params, trainer.save_folder)
save_code(trainer.repo_path, f"{trainer.save_folder}/code", ['results', 'datasets'])
trainer.run()
trainer.logger.info(f"{trainer.experimental_name} done!")
if __name__ == '__main__':
main()
| 1,116 | 31.852941 | 88 | py |
Semi-Online-KD | Semi-Online-KD-master/trainer/vanilla.py | import torch.nn as nn
import torch
from tqdm import tqdm
from trainer.base_trainer import BaseTrainer
from models import model_dict
from utils.utils import count_parameters_in_MB, AverageMeter, accuracy, save_checkpoint
from dataset import get_dataloader
class Vanilla(BaseTrainer):
def __init__(self, params, experimental_name=''):
# Data
self.data_name = params.get('data_name')
self.data_path = params.get('data_path')
self.num_classes = params.get('num_classes', 100)
self.train_loader = None
self.test_loader = None
# Model
self.model_name = params.get('model_name')
self.model_depth = params.get('model_depth', '')
self.model_widen = params.get('model_widen', '')
self.model_checkpoint = params.get('model_checkpoint')
self.model = None
self.testing = params.get('evaluation', False)
# Base training settings
self.start_epoch = params.get('start_epoch', 1)
self.epochs = params.get('epochs', 200)
self.batch_size = params.get('batch_size', 128)
self.lr = params.get('lr', 0.1)
self.device = params.get('device', 'cuda')
self.milestones = params.get('milestones', [200])
self.optimizer = None
self.scheduler = None
self.criterion_ce = nn.CrossEntropyLoss()
# Log
self.best_top1 = 0
self.best_top5 = 0
self.best_epoch = 0
seed = params.get('seed', None)
experimental_name = f"{self.__class__.__name__}_{self.model_name}{self.model_depth}-{self.model_widen}_{self.data_name}_" \
f"{experimental_name}/{params.get('name', 'debug')}"
super().__init__(experimental_name, seed)
def run(self):
self.set_data()
self.set_model()
self.set_optimizer_scheduler()
self.train_model()
def train_model(self):
if self.model_checkpoint:
state_dict = torch.load(self.model_checkpoint)
self.model.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.scheduler.load_state_dict(state_dict['scheduler'])
self.best_top1 = state_dict['best_top1']
self.best_top5 = state_dict['best_top5']
self.best_epoch = state_dict['best_epoch']
self.start_epoch = state_dict['start_epoch']
self.logger.info("Load model's checkpoint done!")
if self.testing:
self.logger.info("Start testing model...")
top1, top5 = self.evaluation_vanilla(self.model)
self.logger.info(f"top1:{top1.avg:.2f}, top5:{top5.avg:.2f}")
else:
self.logger.info("Start training model...")
for epoch in tqdm(range(self.start_epoch, self.epochs + 1)):
self.logger.info(f'Epoch[{epoch}/{self.epochs}]')
self.train()
top1, top5 = self.evaluation(self.model)
self.writer.add_scalar('test/top1', top1.avg, epoch)
is_best = False
if top1.avg > self.best_top1:
self.best_top1 = top1.avg
self.best_top5 = top5.avg
self.best_epoch = epoch
is_best = True
state_dict = {'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'best_top1': self.best_top1,
'best_top5': self.best_top5,
'best_epoch': self.best_epoch,
'start_epoch': epoch
}
save_checkpoint(state_dict, is_best, f"{self.save_folder}/model")
self.logger.info(
f"Test=> lr:{self.optimizer.param_groups[0]['lr']}, "
f"top1:{top1.avg:.2f}, top5:{top5.avg:.2f} "
f"@Best:({self.best_top1}, {self.best_top5}, {self.best_epoch})")
self.scheduler.step()
def train(self):
self.model.train()
total_loss = AverageMeter()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
for batch_id, (data, targets) in enumerate(self.train_loader):
data = data.to(self.device)
targets = targets.to(self.device)
output = self.model(data)
loss = self.criterion_ce(output, targets)
top1, top5 = accuracy(output, targets, topk=(1, 5))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_loss.update(loss.item(), data.size(0))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
info_str = f"Train=> total_loss: {total_loss.avg}, " \
f"prec@1: {total_top1.avg}, prec@5: {total_top5.avg}"
self.logger.info(info_str)
@torch.no_grad()
def evaluation_vanilla(self, model):
model.eval()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
for batch_id, (data, targets) in enumerate(self.test_loader):
data = data.to(self.device)
targets = targets.to(self.device)
output_S = model(data)
top1, top5 = accuracy(output_S, targets, topk=(1, 5))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
return total_top1, total_top5
@torch.no_grad()
def evaluation(self, model):
model.eval()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
for batch_id, (data, targets) in enumerate(self.test_loader):
data = data.to(self.device)
targets = targets.to(self.device)
output_S = model(data)
top1, top5 = accuracy(output_S, targets, topk=(1, 5))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
return total_top1, total_top5
def set_data(self):
self.train_loader, self.test_loader = get_dataloader(self.data_name, self.data_path, self.batch_size)
def set_model(self):
if self.data_name.startswith('CIFAR'):
if self.model_name == 'wideresnet':
self.model = model_dict[f"wrn_{self.model_depth}_{self.model_widen}"](num_classes=self.num_classes)
else:
assert False, f'Not considering {self.model_name}'
if torch.cuda.device_count() > 1:
self.model = torch.nn.DataParallel(self.model)
self.model = self.model.to(self.device)
else:
assert False, f"Not considering {self.data_name}"
def set_optimizer_scheduler(self):
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr, momentum=0.9, weight_decay=5e-4)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, self.milestones)
| 7,150 | 41.820359 | 131 | py |
Semi-Online-KD | Semi-Online-KD-master/trainer/sokd.py | import torch
from trainer.vanilla import Vanilla
from utils.utils import accuracy, AverageMeter, save_checkpoint
from kd_losses import SoftTarget
from models import model_dict
class SemiOnlineKnowledgeDistillation(Vanilla):
def __init__(self, params):
# Model
self.teacher_name = params.get('teacher_name')
self.teacher_depth = params.get('teacher_depth', '')
self.teacher_widen = params.get('teacher_widen', '')
self.teacher_checkpoint = params.get('teacher_checkpoint')
self.teacher = None
# Coefficient
self.lambda_kd = params.get('lambda_kd', 1)
self.lambda_ce = params.get('lambda_ce', 1)
self.auxiliary_lambda_kd_t = params.get('auxiliary_lambda_kd_t', 1)
self.auxiliary_lambda_kd_s = params.get('auxiliary_lambda_kd_s', 1)
self.auxiliary_lambda_ce = params.get('auxiliary_lambda_ce', 1)
self.lr_auxiliary = params.get('lr_auxiliary', 0.05)
self.distillation_name = params.get('distillation_name', 'soft_target')
self.criterion_kd = SoftTarget(T=4)
self.auxiliary_index = -3
self.best_top1_A = 0
experimental_name = f"Teacher-{self.teacher_name}{self.teacher_depth}-{self.teacher_widen}"
super().__init__(params, experimental_name)
def run(self):
self.set_data()
self.set_model()
self.load_teacher()
self.set_optimizer_scheduler()
self.train_model()
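    # Build the teacher, load its checkpoint if given, and freeze every parameter
    # except the auxiliary branch, which is trained jointly with the student.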
def load_teacher(self):
if self.teacher_name == 'wideresnet':
self.teacher = model_dict[f"wrn_{self.teacher_depth}_{self.teacher_widen}"](
num_classes=self.num_classes)
else:
assert False, f'Not considering {self.teacher_name}'
if torch.cuda.device_count() > 1:
self.teacher = torch.nn.DataParallel(self.teacher)
self.teacher = self.teacher.to(self.device)
if self.teacher_checkpoint:
state = torch.load(self.teacher_checkpoint)['model']
teacher_state_dict = self.teacher.state_dict()
loaded_state = {k: v for k, v in state.items() if k in teacher_state_dict}
teacher_state_dict.update(loaded_state)
self.teacher.load_state_dict(teacher_state_dict)
self.logger.info("Load teacher's checkpoint done!")
else:
self.logger.info("No teacher's checkpoint!")
top1, _ = self.evaluation_vanilla(self.teacher)
self.logger.info(f'Teacher ACC: {top1.avg}')
for k, v in self.teacher.named_parameters():
if 'auxiliary' not in k:
v.requires_grad = False
def train(self):
self.model.train()
self.teacher.train()
# log of student
total_loss = AverageMeter()
total_loss_ce = AverageMeter()
total_loss_kd = AverageMeter()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
# log of auxiliary
total_loss_A = AverageMeter()
total_loss_ce_A = AverageMeter()
total_loss_kd_T_A = AverageMeter()
total_loss_kd_S_A = AverageMeter()
total_top1_A = AverageMeter()
total_top5_A = AverageMeter()
for batch_id, (data, targets) in enumerate(self.train_loader):
data = data.to(self.device)
targets = targets.to(self.device)
feature_S, output_S = self.model(data, is_feat=True)
feature_T, output_T = self.teacher(data, is_feat=True)
feature_A, output_A = self.teacher.auxiliary_forward(feature_T[self.auxiliary_index].detach())
# loss of auxiliary
loss_kd_T_A, loss_kd_S_A, loss_kd = self.calculate_kd(self.distillation_name, feature_S, feature_A,
feature_T, output_S, output_A, output_T)
loss_ce_A = self.criterion_ce(output_A, targets) * self.auxiliary_lambda_ce
loss_A = loss_ce_A + loss_kd_T_A + loss_kd_S_A
# loss of student
loss_ce = self.criterion_ce(output_S, targets) * self.lambda_ce
loss = loss_ce + loss_kd
loss_total = loss_A + loss
# accuracy
top1, top5 = accuracy(output_S, targets, topk=(1, 5))
top1_A, top5_A = accuracy(output_A, targets, topk=(1, 5))
# update parameter of student
self.optimizer.zero_grad()
loss_total.backward()
self.optimizer.step()
# update log of student
total_loss.update(loss.item(), data.size(0))
total_loss_ce.update(loss_ce.item(), data.size(0))
total_loss_kd.update(loss_kd.item(), data.size(0))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
# update log of auxiliary
total_loss_A.update(loss_A.item(), data.size(0))
total_loss_ce_A.update(loss_ce_A.item(), data.size(0))
total_loss_kd_T_A.update(loss_kd_T_A.item(), data.size(0))
total_loss_kd_S_A.update(loss_kd_S_A.item(), data.size(0))
total_top1_A.update(top1_A.item(), data.size(0))
total_top5_A.update(top5_A.item(), data.size(0))
info_str = f"Train (Branch)=> loss_ce: {total_loss_ce_A.avg:.4f}, loss_kd_T_A: {total_loss_kd_T_A.avg:.4f}," \
f"loss_kd_S_A: {total_loss_kd_S_A.avg:.4f}, prec@1: {total_top1_A.avg:.2f}, prec@5: {total_top5_A.avg:.2f}"
self.logger.info(info_str)
info_str = f"Train (Student)=> loss_ce: {total_loss_ce.avg:.4f}, loss_kd: {total_loss_kd.avg:.4f}, " \
f"prec@1: {total_top1.avg:.2f}, prec@5: {total_top5.avg:.2f}"
self.logger.info(info_str)
return total_top1, total_top5
@torch.no_grad()
def evaluation(self, model):
model.eval()
self.teacher.eval()
total_top1 = AverageMeter()
total_top5 = AverageMeter()
total_top1_t = AverageMeter()
total_top5_t = AverageMeter()
for batch_id, (data, targets) in enumerate(self.test_loader):
data = data.to(self.device)
targets = targets.to(self.device)
output_S = model(data)
feature_T, output_T = self.teacher(data, is_feat=True)
_, output_A = self.teacher.auxiliary_forward(feature_T[self.auxiliary_index].detach())
top1, top5 = accuracy(output_S, targets, topk=(1, 5))
total_top1.update(top1.item(), data.size(0))
total_top5.update(top5.item(), data.size(0))
top1_t, top5_t = accuracy(output_A, targets, topk=(1, 5))
total_top1_t.update(top1_t.item(), data.size(0))
total_top5_t.update(top5_t.item(), data.size(0))
if total_top1_t.avg > self.best_top1_A:
self.best_top1_A = total_top1_t.avg
state_dict = {'model': self.teacher.state_dict()}
save_checkpoint(state_dict, True, f"{self.save_folder}/teacher")
self.logger.info(
f"Test (branch)=> lr:{self.optimizer.param_groups[1]['lr']}, "
f"top1_A:{total_top1_t.avg:.2f}, top5_A:{total_top5_t.avg:.2f}, @Best: {self.best_top1_A}")
return total_top1, total_top5
def set_optimizer_scheduler(self):
self.optimizer = torch.optim.SGD([{'params': self.model.parameters()},
{'params': self.teacher.parameters(), 'lr': self.lr_auxiliary}],
lr=self.lr, momentum=0.9, weight_decay=5e-4)
self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, self.milestones)
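    # Three soft-target KD terms: the auxiliary branch is distilled from both the
    # teacher and the student, while the student is distilled from the auxiliary branch.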
def calculate_kd(self, name, feature_S, feature_A, feature_T, output_S, output_A, output_T):
if name == 'soft_target':
loss_kd_T_A = self.criterion_kd(output_A, output_T.detach()) * self.auxiliary_lambda_kd_t
loss_kd_S_A = self.criterion_kd(output_A, output_S.detach()) * self.auxiliary_lambda_kd_s
loss_S = self.criterion_kd(output_S, output_A.detach()) * self.lambda_kd
else:
assert NotImplementedError, f"No considering {name}"
return loss_kd_T_A, loss_kd_S_A, loss_S
| 8,214 | 46.212644 | 126 | py |
Semi-Online-KD | Semi-Online-KD-master/dataset/__init__.py | from torchvision import transforms
from torchvision import datasets
import torch
def get_dataset(data_name, data_path):
"""
Get dataset according to data name and data path.
"""
transform_train, transform_test = data_transform(data_name)
if data_name.lower() == 'cifar100':
train_dataset = datasets.CIFAR100(data_path, train=True, download=True, transform=transform_train)
test_dataset = datasets.CIFAR100(data_path, train=False, download=True, transform=transform_test)
else:
        raise NotImplementedError(f'Dataset {data_name} is not supported')
return train_dataset, test_dataset
def get_dataloader(data_name, data_path, batch_size):
train_dataset, test_dataset = get_dataset(data_name, data_path)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=2)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=2)
return train_loader, test_loader
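# Example usage (a minimal sketch; the './data' download path and the batch size are assumptions):
def _example_cifar100_loaders():
    train_loader, test_loader = get_dataloader('cifar100', './data', batch_size=128)
    images, targets = next(iter(train_loader))
    # images: torch.Size([128, 3, 32, 32]), targets: torch.Size([128]) with labels in [0, 99]
    return images.shape, targets.shape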
def data_transform(data_name):
transform_train, transform_test = None, None
if data_name.lower().startswith('cifar'):
transform_train = transforms.Compose([
transforms.Pad(4, padding_mode='reflect'),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),
])
transform_test = transforms.Compose([
transforms.CenterCrop(32),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),
])
else:
        raise NotImplementedError(f'Dataset {data_name} is not supported')
return transform_train, transform_test
| 1,750 | 38.795455 | 113 | py |
Semi-Online-KD | Semi-Online-KD-master/models/wrn.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy
__all__ = ['wrn']
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
def forward(self, x):
if not self.equalInOut:
x = self.relu1(self.bn1(x))
else:
out = self.relu1(self.bn1(x))
out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
layers = []
for i in range(nb_layers):
layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
class WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0):
super(WideResNet, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
n = (depth - 4) // 6
block = BasicBlock
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
# 2nd block
self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
# 3rd block
self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
# global average pooling and classifier
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
self.auxiliary_block = nn.Sequential(
deepcopy(self.block3)
)
self.auxiliary_bn1 = deepcopy(self.bn1)
self.auxiliary_fc = deepcopy(self.fc)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def get_feat_modules(self):
feat_m = nn.ModuleList([])
feat_m.append(self.conv1)
feat_m.append(self.block1)
feat_m.append(self.block2)
feat_m.append(self.block3)
return feat_m
def get_bn_before_relu(self):
bn1 = self.block2.layer[0].bn1
bn2 = self.block3.layer[0].bn1
bn3 = self.bn1
return [bn1, bn2, bn3]
def forward(self, x, is_feat=False, preact=False):
out = self.conv1(x)
f0 = out
out = self.block1(out)
f1 = out
out = self.block2(out)
f2 = out
out = self.block3(out)
f3 = out
out = self.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
f4 = out
out = self.fc(out)
if is_feat:
if preact:
f1 = self.block2.layer[0].bn1(f1)
f2 = self.block3.layer[0].bn1(f2)
f3 = self.bn1(f3)
return [f0, f1, f2, f3, f4], out
else:
return out
def auxiliary_forward(self, feat):
out = self.auxiliary_block(feat)
f0 = out
out = self.relu(self.auxiliary_bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
f1 = out
out = self.auxiliary_fc(out)
return [f0, f1], out
def wrn(**kwargs):
"""
    Constructs a Wide Residual Network.
"""
model = WideResNet(**kwargs)
return model
def wrn_40_2(**kwargs):
model = WideResNet(depth=40, widen_factor=2, **kwargs)
return model
def wrn_40_1(**kwargs):
model = WideResNet(depth=40, widen_factor=1, **kwargs)
return model
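# Example usage (illustrative): with is_feat=True the network also returns its intermediate
# feature maps, and auxiliary_forward consumes the block-2 feature map (index 2 below); the
# index actually used during training is configured by the trainer, so it is an assumption here.
def _example_wrn_forward():
    net = wrn_40_2(num_classes=100)
    x = torch.randn(2, 3, 32, 32)
    feats, logits = net(x, is_feat=True)  # feats: [f0, f1, f2, f3, f4], logits: (2, 100)
    aux_feats, aux_logits = net.auxiliary_forward(feats[2])  # aux_logits: (2, 100)
    return logits.shape, aux_logits.shape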
| 5,436 | 33.411392 | 116 | py |
Semi-Online-KD | Semi-Online-KD-master/kd_losses/st.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftTarget(nn.Module):
'''
Distilling the Knowledge in a Neural Network
https://arxiv.org/pdf/1503.02531.pdf
'''
def __init__(self, T):
super(SoftTarget, self).__init__()
self.T = T
def forward(self, out_s, out_t):
loss = F.kl_div(F.log_softmax(out_s/self.T, dim=1),
F.softmax(out_t/self.T, dim=1),
reduction='batchmean') * self.T * self.T
return loss | 563 | 23.521739 | 53 | py |
Semi-Online-KD | Semi-Online-KD-master/utils/utils.py | import logging
import colorlog
import os
import time
import shutil
import torch
import random
import numpy as np
from shutil import copyfile
def create_logger():
"""
Setup the logging environment
"""
log = logging.getLogger() # root logger
log.setLevel(logging.DEBUG)
format_str = '%(asctime)s - %(levelname)-8s - %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
if os.isatty(2):
cformat = '%(log_color)s' + format_str
colors = {'DEBUG': 'reset',
'INFO': 'reset',
'WARNING': 'bold_yellow',
'ERROR': 'bold_red',
'CRITICAL': 'bold_red'}
formatter = colorlog.ColoredFormatter(cformat, date_format,
log_colors=colors)
else:
formatter = logging.Formatter(format_str, date_format)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
return logging.getLogger(__name__)
class TimeRecorder(object):
"""
    Record training time.
"""
def __init__(self, start_epoch, epochs, logger):
self.total_time = 0.
self.remaining_time = 0.
self.epochs = epochs
self.start_epoch = start_epoch
self.logger = logger
self.start_time = time.time()
def update(self):
now_time = time.time()
elapsed_time = now_time - self.start_time
self.start_time = now_time
self.total_time += elapsed_time
self.remaining_time = elapsed_time * (self.epochs - self.start_epoch)
self.start_epoch += 1
self.logger.info(f'Cost time=>{self.format_time(self.total_time)}')
self.logger.info(f'Remaining time=>{self.format_time(self.remaining_time)}')
@staticmethod
def format_time(time):
h = time // 3600
m = (time % 3600) // 60
s = (time % 3600) % 60
return f'{h}h{m}m{s:.2f}s'
def output_process(output_path):
if os.path.exists(output_path):
print("{} file exist!".format(output_path))
action = input("Select Action: d (delete) / q (quit):").lower().strip()
act = action
if act == 'd':
shutil.rmtree(output_path)
else:
raise OSError("Directory {} exits!".format(output_path))
if not os.path.exists(output_path):
os.makedirs(output_path)
def save_code(src, dst, exclude=[]):
"""
    Save experimental code.
"""
for f in os.listdir(src):
# Do not save experimental results
if f in exclude:
continue
src_file = os.path.join(src, f)
file_split = f.split(".")
if len(file_split) >= 2:
if not os.path.isdir(dst):
os.makedirs(dst)
dst_file = os.path.join(dst, f)
try:
shutil.copyfile(src=src_file, dst=dst_file)
except:
print("Copy file error! src: {}, dst: {}".format(src_file, dst_file))
elif os.path.isdir(src_file):
deeper_dst = os.path.join(dst, f)
save_code(src_file, deeper_dst)
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
# res.append(correct_k)
return res
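# Example usage (illustrative): top-1 / top-5 accuracy (as percentages) on a random batch.
def _example_accuracy():
    logits = torch.randn(8, 100)
    targets = torch.randint(0, 100, (8,))
    top1, top5 = accuracy(logits, targets, topk=(1, 5))
    return top1.item(), top5.item()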
class AverageMeter(object):
"""
Keeps track of most recent, average, sum, and count of a metric.
"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_opts(opts, save_path='.'):
with open(f"{save_path}/opts.txt", 'w') as f:
for k, v in opts.items():
f.write(str(k) + ": " + str(v) + '\n')
def save_checkpoint(state_dict, is_best, folder_name='.'):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
checkpoint_name = f"{folder_name}/checkpoint.pth.tar"
torch.save(state_dict, checkpoint_name)
if is_best:
model_name = f"{folder_name}/best_model.pth.tar"
copyfile(checkpoint_name, model_name)
def fix_random(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
return True
def count_parameters_in_MB(model):
return sum(np.prod(v.size()) for name, v in model.named_parameters()) / 1e6
| 4,987 | 27.340909 | 85 | py |
Simplified_DMC | Simplified_DMC-master/location_dmc.py | import argparse
import os
import torch
from torch.utils.data import DataLoader
from torch import optim
import numpy as np
from data.MUSIC_dataset import MUSIC_Dataset, MUSIC_AV_Classify
from model.base_model import resnet18
from model.dmc_model import DMC_NET
from sklearn import cluster, metrics
import numpy as np
from sklearn.preprocessing import normalize
from torch import nn
import torch.nn.functional as F
import pickle
def batch_organize(audio_data, posi_img_data, nega_img_data, posi_label, nega_label):
batch_audio_data = torch.zeros(audio_data.shape[0] * 2, audio_data.shape[1], audio_data.shape[2],
audio_data.shape[3])
batch_image_data = torch.zeros(posi_img_data.shape[0] * 2, posi_img_data.shape[1], posi_img_data.shape[2],
posi_img_data.shape[3])
batch_labels = torch.zeros(audio_data.shape[0] * 2)
class_labels = torch.zeros(audio_data.shape[0] * 2)
for i in range(audio_data.shape[0]):
batch_audio_data[i * 2, :] = audio_data[i, :]
batch_audio_data[i * 2 + 1, :] = audio_data[i, :]
batch_image_data[i * 2, :] = posi_img_data[i, :]
batch_image_data[i * 2 + 1, :] = nega_img_data[i, :]
batch_labels[i * 2] = 1
batch_labels[i * 2 + 1] = 0
class_labels[i * 2] = posi_label[i]
class_labels[i * 2 + 1] = nega_label[i]
return batch_audio_data, batch_image_data, batch_labels, class_labels
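# Illustrative example: batch_organize interleaves matched and mismatched pairs, producing a
# 2N batch laid out as [pos_0, neg_0, pos_1, neg_1, ...] with av labels [1, 0, 1, 0, ...];
# the spectrogram and image sizes below are assumptions.
def _example_batch_organize():
    audio = torch.randn(2, 1, 257, 200)
    pos_img = torch.randn(2, 3, 224, 224)
    neg_img = torch.randn(2, 3, 224, 224)
    pos_lbl, neg_lbl = torch.tensor([3, 5]), torch.tensor([7, 1])
    a, v, av_labels, cls_labels = batch_organize(audio, pos_img, neg_img, pos_lbl, neg_lbl)
    # a: (4, 1, 257, 200), v: (4, 3, 224, 224), av_labels: [1., 0., 1., 0.], cls_labels: [3., 7., 5., 1.]
    return av_labels, cls_labels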
def eva_metric2(predict, gt, pair_num=2):
num = int(predict.shape[0]/pair_num)
correct = 0
for i in range(num):
pos = predict[pair_num*i]
flag = True
for j in range(pair_num-1):
neg = predict[pair_num*i+j+1]
if pos >= neg:
flag = False
if flag == True:
correct += 1
return correct / num
class ContrastiveLoss(nn.Module):
"""
Contrastive loss
    Takes a distance (or embedding-difference) vector per pair and a target label == 1 if the audio-visual pair is matched and label == 0 otherwise
"""
def __init__(self, margin=5.):
super(ContrastiveLoss, self).__init__()
self.margin = margin
self.eps = 1e-9
def forward(self, output, target, size_average=True):
distances = output.pow(2).sum(1) # squared distances
losses = 0.5 * (target.float() * distances +
(1 + -1 * target).float() * F.relu(self.margin - (distances + self.eps).sqrt()).pow(2))
return losses.mean() if size_average else losses.sum()
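# Illustrative behaviour: matched pairs (target == 1) are pulled towards zero distance, while
# mismatched pairs (target == 0) are only penalised while their distance is still inside the margin.
def _example_contrastive_loss():
    criterion = ContrastiveLoss(margin=5.)
    av_distances = torch.tensor([[0.5], [6.0]])  # per-sample audio-visual distances
    av_labels = torch.tensor([1., 0.])           # first pair matched, second mismatched
    loss = criterion(av_distances, av_labels)
    # matched term: 0.5 * 0.5**2 = 0.125; mismatched term: 0 because 6.0 exceeds the margin
    return loss  # tensor(0.0625)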
def location_model_train(model, data_loader, optimizer, criterion):
model.train()
accs = 0
count = 0
losses = 0
for i, data in enumerate(data_loader, 0):
if i % 200 == 0:
print('location batch:%d' % i)
audio_data, posi_img_data, nega_img_data, posi_label, nega_label, _, _ = data
audio_data, image_data, av_labels, class_labels = batch_organize(audio_data, posi_img_data, nega_img_data, posi_label, nega_label)
audio_data, image_data, av_labels = audio_data.type(torch.FloatTensor).cuda(), \
image_data.type(torch.FloatTensor).cuda(), \
av_labels.type(torch.FloatTensor).cuda()
optimizer.zero_grad()
av_outputs, _, _ = model(image_data, audio_data)
loss = criterion(av_outputs, av_labels)
loss.backward()
optimizer.step()
losses += loss.detach().cpu().numpy()
# acc = eva_metric2(av_outputs.detach().cpu().numpy(), av_labels.cpu().numpy())
# accs += acc
count += 1
print('location loss is %.3f ' % (losses / count))
return accs / count
def location_model_eva(model, data_loader):
model.eval()
accs = 0
num = len(data_loader.dataset)
count = 0
results = {}
with torch.no_grad():
for i, data in enumerate(data_loader, 0):
audio_data, posi_img_data, nega_img_data, posi_label, nega_label, img_path, _ = data
audio_data, image_data, av_labels, class_labels = batch_organize(audio_data, posi_img_data, nega_img_data,
posi_label, nega_label)
audio_data, image_data = audio_data.type(torch.FloatTensor).cuda(), image_data.type(torch.FloatTensor).cuda()
av_outputs, av_maps, av_dists = model(image_data, audio_data)
obj_localization = av_maps.detach().cpu().numpy()
obj_localization = obj_localization[::2]
av_dists = av_dists[::2]
# accs += eva_metric2(av_outputs.detach().cpu().numpy(), av_labels.numpy())
count += 1
_, idx = torch.sort(av_dists, dim=1)
idx = idx[:, 1].detach().cpu().numpy()
for k in range(len(img_path)):
results[img_path[k][:-4]] = obj_localization[k]
pickle.dump(results, open('dmc.pkl', 'wb'))
return accs / count
def main():
parser = argparse.ArgumentParser(description='AID_PRETRAIN')
parser.add_argument('--data_list_dir', type=str,
default='./data/data_indicator/music/solo')
parser.add_argument('--data_dir', type=str, default='/home/ruiq/Music/solo')
parser.add_argument('--mode', type=str, default='train', help='train/val/test')
parser.add_argument('--json_file', type=str,default='./data/MUSIC_label/MUSIC_solo_videos.json')
parser.add_argument('--use_pretrain', type=int, default=0, help='whether to init from ckpt')
parser.add_argument('--ckpt_file', type=str, default='location_net_009_0.665.pth', help='pretrained model name')
parser.add_argument('--enable_img_augmentation', type=int, default=1, help='whether to augment input image')
parser.add_argument('--enable_audio_augmentation', type=int, default=1, help='whether to augment input audio')
parser.add_argument('--batch_size', type=int, default=32, help='training batch size')
parser.add_argument('--learning_rate', type=float, default=1e-4, help='training batch size')
parser.add_argument('--epoch', type=int, default=100, help='training epoch')
parser.add_argument('--gpu_ids', type=str, default='[0,1,2,3]', help='USING GPU IDS e.g.\'[0,4]\'')
parser.add_argument('--num_threads', type=int, default=4, help='number of threads')
parser.add_argument('--seed', type=int, default=10)
parser.add_argument('--evaluate', type=int, default=0, help='only evaluate or not')
parser.add_argument('--v_cluster', type=int, default=2, help='number of visual cluster')
parser.add_argument('--a_cluster', type=int, default=1, help='number of audio cluster')
args = parser.parse_args()
train_list_file = os.path.join(args.data_list_dir, 'solo_training_1.txt')
val_list_file = os.path.join(args.data_list_dir, 'solo_validation.txt')
test_list_file = os.path.join(args.data_list_dir, 'solo_testing.txt')
train_dataset = MUSIC_Dataset(args.data_dir, train_list_file, args)
val_dataset = MUSIC_Dataset(args.data_dir, val_list_file, args)
test_dataset = MUSIC_Dataset(args.data_dir, test_list_file, args)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_threads)
val_dataloader = DataLoader(dataset=val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_threads)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_threads)
# net setup
visual_backbone = resnet18(modal='vision',pretrained=False)
audio_backbone = resnet18(modal='audio')
av_model = DMC_NET(visual_net=visual_backbone, audio_net=audio_backbone, v_cluster_num=args.v_cluster, a_cluster_num=args.a_cluster)
if args.use_pretrain:
PATH = args.ckpt_file
state = torch.load(PATH)
av_model.load_state_dict(state, strict=False)
av_model_cuda = av_model.cuda()
loss_func = ContrastiveLoss()
optimizer = optim.Adam(params=av_model_cuda.parameters(), lr=args.learning_rate, betas=(0.9, 0.999),
weight_decay=0.0001)
if args.evaluate:
eva_location_acc = location_model_eva(av_model_cuda, test_dataloader)
return
for e in range(0, args.epoch):
print('Epoch is %03d' % e)
train_location_acc = location_model_train(av_model_cuda, train_dataloader, optimizer, loss_func)
eva_location_acc = location_model_eva(av_model_cuda, test_dataloader)
print('train acc is %.3f, val acc is %.3f' % (train_location_acc, eva_location_acc))
if e % 3 == 0:
PATH = 'ckpt/dmc/dmc_stage_one_%03d_%.3f.pth' % (e, eva_location_acc)
torch.save(av_model_cuda.state_dict(), PATH)
if __name__ == '__main__':
main()
| 8,957 | 41.254717 | 138 | py |
Simplified_DMC | Simplified_DMC-master/data/MUSIC_dataset.py | import numpy as np
import librosa
from PIL import Image, ImageEnhance
import pickle
import random
import os
import torchvision.transforms as transforms
import json
import torch
def augment_image(image):
if(random.random() < 0.5):
image = image.transpose(Image.FLIP_LEFT_RIGHT)
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
return image
class MUSIC_Dataset(object):
def __init__(self, data_root, data_list_file, opt):
# self.root = root
# root = '/mnt/scratch/hudi/MUSIC/solo'
self.opt = opt
self.audio_root = os.path.join(data_root, 'audio_frames')
self.video_root = os.path.join(data_root, 'video_frames')
with open(data_list_file,'r') as fid:
pairs = [line.strip().split(' ') for line in fid.readlines()]
self.sample_label= self._parse_csv(self.opt.json_file)
self.audio_list = []
self.video_list = []
self.label_list = []
for each in pairs:
audio = each[0]
video = each[1]
assert audio[:-5] == video[:-4]
audio_path = os.path.join(self.audio_root, audio[:-5])
video_path = os.path.join(self.video_root, video[:-4])
audio_samples= os.listdir(audio_path)
for item in range(len(audio_samples)):
audio_segment = audio_samples[item]
video_segment = os.path.join(video_path, 'frame_'+audio_segment[:3])
if os.path.exists(video_segment):
self.audio_list.append(os.path.join(audio_path, audio_segment))
self.video_list.append(os.path.join(video_path, video_segment))
if self.opt.mode == 'val' or self.opt.mode == 'test':
img_transform_list = [transforms.Resize((224,224)), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
else:
img_transform_list = [transforms.Resize((256, 256)), transforms.RandomCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
self.img_transform = transforms.Compose(img_transform_list)
#self.audio_transform = audio_transform
def __len__(self):
return len(self.audio_list)
def _parse_csv(self, json_file):
f = open(json_file, encoding='utf-8')
content = f.read()
ins_indicator = json.loads(content)
ins_indicator = ins_indicator['videos']
ins_list = [*ins_indicator]
sample_label = {}
pickle.dump(ins_list, open('keylist.pkl', 'wb'))
for i in range(len(ins_list)):
current_list = ins_indicator[ins_list[i]]
for j in range(len(current_list)):
sample_label[current_list[j]] = i
return sample_label
def __getitem__(self, index):
# positive
cur_audio_segment = self.audio_list[index]
posi_video_segment = self.video_list[index]
if self.opt.mode == 'train':
posi_video_segment_img = random.choice(os.listdir(posi_video_segment))
else:
posi_video_segment_img = os.listdir(posi_video_segment)[0]
# load data
with open(cur_audio_segment, 'rb') as fid:
cur_audio_data = pickle.load(fid)
cur_audio_data = np.expand_dims(cur_audio_data, 0)
posi_img_path = os.path.join(posi_video_segment, posi_video_segment_img)
posi_img = Image.open(posi_img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
posi_img = augment_image(posi_img)
posi_img = self.img_transform(posi_img)
posi_label = self.sample_label[posi_video_segment[-28:-17]]
# TODO: here may need normalization
# negative
while(1):
nega_video_segment = random.choice(self.video_list)
if nega_video_segment[-28:-17] != posi_video_segment[-28:-17]:
break
nega_video_segment_img = random.choice(os.listdir(nega_video_segment))
nega_img_path = os.path.join(nega_video_segment, nega_video_segment_img)
nega_img = Image.open(nega_img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
nega_img = augment_image(nega_img)
nega_img = self.img_transform(nega_img)
nega_label = self.sample_label[nega_video_segment[-28:-17]]
if self.opt.mode == 'train':
return cur_audio_data, posi_img, nega_img, posi_label, nega_label, posi_video_segment, cur_audio_segment
return cur_audio_data, posi_img, nega_img, posi_label, nega_label, posi_img_path, cur_audio_segment
class MUSIC_Dataset_(object):
def __init__(self, data_root, data_list_file, opt):
# self.root = root
# root = '/mnt/scratch/hudi/MUSIC/solo'
self.opt = opt
if self.opt.mode == 'train':
self.audio_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/train/audio'
self.video_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/train/video'
else:
self.audio_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/test/audio'
self.video_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/test/video'
self.box_root = '/home/yuxi/ruiq/AudioVisual/multiple-sound-source-localization/synthesize/test/box'
self.audio_list = os.listdir(self.audio_root)
self.video_list = os.listdir(self.video_root)
self.box_list = os.listdir(self.box_root)
self.audio_list.sort()
self.video_list.sort()
self.box_list.sort()
assert len(self.audio_list) == len(self.video_list)
if self.opt.mode == 'val' or self.opt.mode == 'test':
img_transform_list = [transforms.Resize((224,224)), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
else:
img_transform_list = [transforms.Resize((256, 256)), transforms.RandomCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
self.img_transform = transforms.Compose(img_transform_list)
def __len__(self):
return len(self.audio_list)
def __getitem__(self, index):
# positive
cur_audio_segment = self.audio_list[index]
posi_video_segment = self.video_list[index]
if self.opt.mode == 'val':
box_segment = self.box_list[index]
# load data
with open(os.path.join(self.audio_root, cur_audio_segment), 'rb') as fid:
cur_audio_data = pickle.load(fid)
cur_audio_data = np.expand_dims(cur_audio_data, 0)
posi_img_path = os.path.join(self.video_root, posi_video_segment)
posi_img = Image.open(posi_img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
posi_img = augment_image(posi_img)
posi_img = self.img_transform(posi_img)
while(1):
nega_video_segment = random.choice(self.video_list)
if nega_video_segment != posi_video_segment:
break
nega_img_path = os.path.join(self.video_root, nega_video_segment)
nega_img = Image.open(nega_img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
nega_img = augment_image(nega_img)
nega_img = self.img_transform(nega_img)
if self.opt.mode == 'val':
box = np.load(os.path.join(self.box_root, box_segment))
return cur_audio_data, posi_img, nega_img, torch.tensor(0), torch.tensor(0), torch.tensor(0), box
return cur_audio_data, posi_img, nega_img, torch.tensor(0), torch.tensor(0), torch.tensor(0), torch.tensor(0)
class MUSIC_AV_Classify(object):
def __init__(self, video_dirs, aud_dirs, label, opt):
self.opt = opt
self.video_dirs = video_dirs
self.aud_dirs = aud_dirs
self.label = label
if self.opt.mode == 'val' or self.opt.mode == 'test':
img_transform_list = [transforms.Resize((224,224)), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
else:
img_transform_list = [transforms.Resize((256, 256)), transforms.RandomCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))]
self.img_transform = transforms.Compose(img_transform_list)
def __len__(self):
return len(self.video_dirs)
def __getitem__(self, index):
video_segment_img = random.choice(os.listdir(self.video_dirs[index]))
img_path = os.path.join(self.video_dirs[index], video_segment_img)
img = Image.open(img_path)
if(self.opt.enable_img_augmentation and self.opt.mode == 'train'):
img = augment_image(img)
img_data = self.img_transform(img)
with open(self.aud_dirs[index], 'rb') as fid:
cur_audio_data = pickle.load(fid)
audio_data = np.expand_dims(cur_audio_data, 0)
if self.opt.mode == 'val' or self.opt.mode == 'test':
return audio_data, img_data
else:
return audio_data, img_data, self.label[index] | 9,784 | 42.29646 | 117 | py |
Simplified_DMC | Simplified_DMC-master/data/base_sampler.py | import torch
from torch.utils.data.sampler import Sampler
class BaseSampler(Sampler):
    def __init__(self, data_source=None):
        super(BaseSampler, self).__init__(data_source)
        self.data_source = data_source
    def __len__(self):
        raise NotImplementedError
    def __iter__(self):
        raise NotImplementedError
| 203 | 17.545455 | 44 | py |
Simplified_DMC | Simplified_DMC-master/model/base_model.py | import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, modal, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.modal = modal
self.groups = groups
self.base_width = width_per_group
self.conv1_a = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
if self.modal == 'audio':
x = self.conv1_a(x)
else:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, modal, **kwargs):
model = ResNet(block, layers, modal, **kwargs)
if pretrained:
print('load pretrained res-18')
model.load_state_dict(torch.load('../resnet18-5c106cde.pth'), strict=False)
return model
def resnet18(pretrained=False, progress=True, modal='vision',**kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, modal, **kwargs)
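# Example usage (illustrative): both backbones return spatial feature maps rather than logits,
# because _forward_impl stops after layer4 (the avgpool/fc layers are defined but unused here);
# the audio spectrogram size below is an assumption.
def _example_backbones():
    vision_net = resnet18(modal='vision')
    audio_net = resnet18(modal='audio')
    img_feat = vision_net(torch.randn(2, 3, 224, 224))  # -> (2, 512, 14, 14), layer4 keeps stride 1
    aud_feat = audio_net(torch.randn(2, 1, 257, 200))   # single-channel spectrogram input
    return img_feat.shape, aud_feat.shape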
| 9,147 | 38.261803 | 106 | py |
Simplified_DMC | Simplified_DMC-master/model/audio_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Unet(nn.Module):
def __init__(self, fc_dim=64, num_downs=5, ngf=64, use_dropout=False):
super(Unet, self).__init__()
# construct unet structure
unet_block = UnetBlock(
ngf * 8, ngf * 8, input_nc=None,
submodule=None, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetBlock(
ngf * 8, ngf * 8, input_nc=None,
submodule=unet_block, use_dropout=use_dropout)
unet_block = UnetBlock(
ngf * 4, ngf * 8, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
ngf * 2, ngf * 4, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
ngf, ngf * 2, input_nc=None,
submodule=unet_block)
unet_block = UnetBlock(
fc_dim, ngf, input_nc=1,
submodule=unet_block, outermost=True)
self.bn0 = nn.BatchNorm2d(1)
self.unet_block = unet_block
def forward(self, x):
x = self.bn0(x)
x = self.unet_block(x)
return x
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetBlock(nn.Module):
def __init__(self, outer_nc, inner_input_nc, input_nc=None,
submodule=None, outermost=False, innermost=False,
use_dropout=False, inner_output_nc=None, noskip=False):
super(UnetBlock, self).__init__()
self.outermost = outermost
self.noskip = noskip
use_bias = False
if input_nc is None:
input_nc = outer_nc
if innermost:
inner_output_nc = inner_input_nc
elif inner_output_nc is None:
inner_output_nc = 2 * inner_input_nc
downrelu = nn.LeakyReLU(0.2, True)
downnorm = nn.BatchNorm2d(inner_input_nc)
uprelu = nn.ReLU(True)
upnorm = nn.BatchNorm2d(outer_nc)
upsample = nn.Upsample(
scale_factor=2, mode='bilinear', align_corners=True)
if outermost:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3, padding=1)
down = [downconv]
up = [uprelu, upsample, upconv]
model = down + [submodule] + up
elif innermost:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upsample, upconv, upnorm]
model = down + up
else:
downconv = nn.Conv2d(
input_nc, inner_input_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
upconv = nn.Conv2d(
inner_output_nc, outer_nc, kernel_size=3,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upsample, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost or self.noskip:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
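# Example usage (illustrative): the U-Net maps a 1-channel spectrogram to a fc_dim-channel map
# of the same spatial size; the 256x256 input below is an assumption (the spatial size must be
# divisible by 2**num_downs).
def _example_unet():
    net = Unet(fc_dim=64, num_downs=5)
    spec = torch.randn(2, 1, 256, 256)
    feat = net(spec)  # -> (2, 64, 256, 256)
    return feat.shape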
| 3,744 | 33.675926 | 74 | py |
Simplified_DMC | Simplified_DMC-master/model/vision_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Resnet(nn.Module):
def __init__(self, original_resnet):
super(Resnet, self).__init__()
self.features = nn.Sequential(
*list(original_resnet.children())[:-1])
# for param in self.features.parameters():
# param.requires_grad = False
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), x.size(1))
return x
class ResnetFC(nn.Module):
def __init__(self, original_resnet, fc_dim=64,
pool_type='maxpool', conv_size=3):
super(ResnetFC, self).__init__()
self.pool_type = pool_type
self.features = nn.Sequential(
*list(original_resnet.children())[:-2])
self.fc = nn.Conv2d(
512, fc_dim, kernel_size=conv_size, padding=conv_size//2)
def forward(self, x, pool=True):
x = self.features(x)
x = self.fc(x)
if not pool:
return x
if self.pool_type == 'avgpool':
x = F.adaptive_avg_pool2d(x, 1)
elif self.pool_type == 'maxpool':
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), x.size(1))
return x
def forward_multiframe(self, x, pool=True):
(B, C, T, H, W) = x.size()
x = x.permute(0, 2, 1, 3, 4).contiguous()
x = x.view(B*T, C, H, W)
x = self.features(x)
x = self.fc(x)
(_, C, H, W) = x.size()
x = x.view(B, T, C, H, W)
x = x.permute(0, 2, 1, 3, 4)
if not pool:
return x
if self.pool_type == 'avgpool':
x = F.adaptive_avg_pool3d(x, 1)
elif self.pool_type == 'maxpool':
x = F.adaptive_max_pool3d(x, 1)
x = x.view(B, C)
return x
class ResnetDilated(nn.Module):
def __init__(self, orig_resnet, fc_dim=64, pool_type='maxpool',
dilate_scale=16, conv_size=3):
super(ResnetDilated, self).__init__()
from functools import partial
self.pool_type = pool_type
if dilate_scale == 8:
orig_resnet.layer3.apply(
partial(self._nostride_dilate, dilate=2))
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=4))
elif dilate_scale == 16:
orig_resnet.layer4.apply(
partial(self._nostride_dilate, dilate=2))
self.features = nn.Sequential(
*list(orig_resnet.children())[:-2])
self.fc = nn.Conv2d(
512, fc_dim, kernel_size=conv_size, padding=conv_size//2)
def _nostride_dilate(self, m, dilate):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
# the convolution with stride
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate//2, dilate//2)
m.padding = (dilate//2, dilate//2)
            # other convolutions
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
def forward(self, x, pool=True):
x = self.features(x)
x = self.fc(x)
if not pool:
return x
if self.pool_type == 'avgpool':
x = F.adaptive_avg_pool2d(x, 1)
elif self.pool_type == 'maxpool':
x = F.adaptive_max_pool2d(x, 1)
x = x.view(x.size(0), x.size(1))
return x
def forward_multiframe(self, x, pool=True):
(B, C, T, H, W) = x.size()
x = x.permute(0, 2, 1, 3, 4).contiguous()
x = x.view(B*T, C, H, W)
x = self.features(x)
x = self.fc(x)
(_, C, H, W) = x.size()
x = x.view(B, T, C, H, W)
x = x.permute(0, 2, 1, 3, 4)
if not pool:
return x
if self.pool_type == 'avgpool':
x = F.adaptive_avg_pool3d(x, 1)
elif self.pool_type == 'maxpool':
x = F.adaptive_max_pool3d(x, 1)
x = x.view(B, C)
return x
| 4,152 | 27.445205 | 69 | py |
Simplified_DMC | Simplified_DMC-master/model/base_model_v1.py | import torch
import torch.nn as nn
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, modal, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.modal = modal
self.groups = groups
self.base_width = width_per_group
self.conv1_a = nn.Conv2d(1, self.inplanes, kernel_size=3, stride=2, padding=3,
bias=False)
self.conv1_v = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
if self.modal == 'audio':
x = self.conv1_a(x)
else:
x = self.conv1_v(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, modal, **kwargs):
model = ResNet(block, layers, modal, **kwargs)
return model
def resnet18(pretrained=False, progress=True, modal='vision',**kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, modal, **kwargs)
| 9,008 | 38.169565 | 106 | py |
Simplified_DMC | Simplified_DMC-master/model/dmc_model.py | import torch
import torch.nn as nn
import random
class Cluster_layer(nn.Module):
def __init__(self, input_dim = 512, num_cluster=2, iters=4, beta=-30, **kwargs):
super(Cluster_layer, self).__init__()
self.input_dim = input_dim
self.num_cluster = num_cluster
self.iters = iters
self.beta = beta
self.epsilon = torch.tensor(1e-10).type(torch.FloatTensor)#.cuda()
def forward(self, u_vecs):
(batch_size, input_num, feature_dim) = u_vecs.size()
        ini_interval = int(input_num / self.num_cluster)
o = torch.unsqueeze(u_vecs[:, 0, :], dim=1)
count = 1
while(self.num_cluster-count > 0):
current_o = torch.unsqueeze(u_vecs[:, ini_interval*count, :], dim=1) #ini_interval*count
o = torch.cat([o, current_o], dim=1)
count += 1
for i in range(self.iters):
nx = torch.sum(o**2, dim=2, keepdim=True)
ny = torch.sum(u_vecs**2, dim=2, keepdim=True)
qq = nx - 2 * torch.bmm(o, u_vecs.permute(0,2,1)) + ny.permute(0,2,1)
b = torch.sqrt(torch.max(qq, self.epsilon))
c = nn.functional.softmax(self.beta*b, dim=1) # assignments [None, output_num_capsule, input_num_capsule]
o = torch.bmm(c, u_vecs) # cluster centers [None, num_cluster, dim_cluster]
weights = torch.sum(c, dim=2, keepdim=True)
o = o / weights
return o, c
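# Example usage (illustrative): the layer refines cluster centers with a soft k-means style
# update over the input feature vectors and returns the centers together with the soft
# assignments; the 7x7=49 flattened feature map below is an assumption.
def _example_cluster_layer():
    clustering = Cluster_layer(input_dim=512, num_cluster=2)
    feats = torch.randn(4, 49, 512)
    centers, assign = clustering(feats)
    # centers: (4, 2, 512); assign: (4, 2, 49), each column sums to 1 across the clusters
    return centers.shape, assign.shape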
class DMC_NET(nn.Module):
def __init__(self, visual_net, audio_net, v_cluster_num = 4, a_cluster_num = 2):
super(DMC_NET, self).__init__()
# backbone net
self.visual_net = visual_net
self.audio_net = audio_net
self.pooling = nn.AdaptiveAvgPool2d((1, 1))
# visual ops
self.fc_v_1 = nn.Linear(512, 512)
self.fc_v_2 = nn.Linear(128, 128)
# audio ops
self.pooling_a = nn.AdaptiveMaxPool2d((1, 1))
self.fc_a_1 = nn.Linear(512, 512)
self.fc_a_2 = nn.Linear(128, 128)
self.relu = nn.ReLU(inplace=True)
# fusion ops
self.fc_av = nn.Linear(1, 2)
self.v_clustering = Cluster_layer(num_cluster=v_cluster_num)
self.a_clustering = Cluster_layer(num_cluster=a_cluster_num)
self.epsilon = torch.tensor(1e-10).type(torch.FloatTensor)#.cuda()
def forward(self, v_input, a_input):
# visual pathway
v_fea = self.visual_net(v_input)
(B, C, H, W) = v_fea.size()
v_fea = v_fea.view(B, C, H*W)
v_fea = v_fea.permute(0,2,1)
v_fea = self.fc_v_1(v_fea)
v_centers, v_assign = self.v_clustering(v_fea)
# audio pathway
a_fea = self.audio_net(a_input)
(B, C, H, W) = a_fea.size()
a_fea = a_fea.view(B, C, H*W)
a_fea = a_fea.permute(0,2,1)
a_fea = self.fc_a_1(a_fea)
a_centers, a_assign = self.a_clustering(a_fea)
v_centers_ = torch.sum(v_centers ** 2, dim=2, keepdim=True)
a_centers_ = torch.sum(a_centers ** 2, dim=2, keepdim=True)
distance_ = torch.sqrt(torch.max(v_centers_ - 2 * torch.bmm(v_centers, a_centers.permute(0, 2, 1)) + a_centers_.permute(0, 2, 1), self.epsilon))
distance = torch.min(distance_, dim=1)
distance = distance.values
return distance, v_assign, distance_ | 3,338 | 35.293478 | 152 | py |
synfeal | synfeal-main/utils.py | import numpy as np
import os
import cv2
import torch
import math
import yaml
from sklearn.metrics import mean_squared_error
from torchsummary import summary
from yaml.loader import SafeLoader
from colorama import Fore
from scipy.spatial.transform import Rotation as R
from models.loss_functions import BetaLoss, DynamicLoss
from models.posenet import PoseNetGoogleNet, PoseNetResNet
from models.poselstm import PoseLSTM
from models.hourglass import HourglassBatch
from synfeal_collection.src.pypcd_no_ros import PointCloud
def write_pcd(filename, msg, mode='binary'):
pc = PointCloud.from_msg(msg)
pc.save_pcd(filename, compression=mode)
def read_pcd(filename):
if not os.path.isfile(filename):
raise Exception("[read_pcd] File does not exist.")
pc = PointCloud.from_path(filename)
return pc
def write_transformation(filename, transformation):
np.savetxt(filename, transformation, delimiter=',',fmt='%.5f')
def write_img(filename, img):
cv2.imwrite(filename, img)
def matrixToRodrigues(matrix):
rods, _ = cv2.Rodrigues(matrix[0:3, 0:3])
rods = rods.transpose()
rodrigues = rods[0]
return rodrigues
def matrixToQuaternion(matrix):
rot_matrix = matrix[0:3, 0:3]
r = R.from_matrix(rot_matrix)
return r.as_quat()
def matrixToXYZ(matrix):
return matrix[0:3,3]
def rodriguesToMatrix(r):
    rod = np.array(r, dtype=float)
matrix = cv2.Rodrigues(rod)
return matrix[0]
def quaternionToMatrix(quat):
return R.from_quat(quat).as_matrix()
def poseToMatrix(pose):
matrix = np.zeros((4,4))
rot_mat = quaternionToMatrix(pose[3:])
trans = pose[:3]
matrix[0:3,0:3] = rot_mat
matrix[0:3,3] = trans
matrix[3,3] = 1
return matrix
def write_intrinsic(filename, data):
matrix = np.zeros((3,3))
matrix[0,0] = data[0]
matrix[0,1] = data[1]
matrix[0,2] = data[2]
matrix[1,0] = data[3]
matrix[1,1] = data[4]
matrix[1,2] = data[5]
matrix[2,0] = data[6]
matrix[2,1] = data[7]
matrix[2,2] = data[8]
np.savetxt(filename, matrix, delimiter=',',fmt='%.5f')
def rotationAndpositionToMatrix44(rotation, position):
matrix44 = np.empty(shape=(4,4))
matrix44[:3,:3] = rotation
matrix44[:3,3] = position
matrix44[3,:3] = 0
matrix44[3,3] = 1
return matrix44
def matrix44_to_pose(matrix44):
quaternion = matrixToQuaternion(matrix44)
quaternion = normalize_quat(quaternion)
xyz = matrixToXYZ(matrix44)
pose = np.append(xyz, quaternion)
return pose
def compute_position_error(pred, targ):
pred = pred[:3]
targ = targ[:3]
return mean_squared_error(pred, targ, squared=False) # RMSE
def compute_rotation_error(pred, targ):
## second way: using rodrigues (like ATOM) --> better because angle ranges from 0 to pi (whereas with quaterions ranges from 0 to 2pi)
## https://github.com/lardemua/atom/blob/284b7943e467e53a3258de6f673cf852b07654cb/atom_evaluation/scripts/camera_to_camera_evalutation.py#L290
pred_matrix = poseToMatrix(pred)
targ_matrix = poseToMatrix(targ)
delta = np.dot(np.linalg.inv(pred_matrix), targ_matrix)
deltaR = matrixToRodrigues(delta[0:3, 0:3])
return np.linalg.norm(deltaR)
def normalize_quat(x, p=2, dim=1):
"""
Divides a tensor along a certain dim by the Lp norm
:param x:
:param p: Lp norm
:param dim: Dimension to normalize along
:return:
"""
if torch.is_tensor(x):
# x.shape = (N,4)
xn = x.norm(p=p, dim=dim) # computes the norm: 1xN
x = x / xn.unsqueeze(dim=dim)
else: # numpy
xn = np.linalg.norm(x)
x = x/xn
return x
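# Example usage (illustrative): works on a batch of torch quaternions with shape (N, 4) or on a
# single numpy quaternion.
def _example_normalize_quat():
    q_batch = normalize_quat(torch.tensor([[0., 0., 0., 2.], [1., 1., 1., 1.]]))
    q_single = normalize_quat(np.array([0., 0., 0., 2.]))
    # every row of q_batch and the numpy vector now have unit L2 norm
    return q_batch, q_single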
def summarizeModel(model, input_example):
model.cuda()
summary(model, input_size=input_example.shape)
model.cpu()
def resumeTraining(folder_name):
model_name = [f for f in os.listdir(folder_name) if f.endswith('.pth')][0] # get first in the list of files that have extension .pth
file_name = f'{folder_name}/config.yaml'
with open(file_name) as f:
config = yaml.load(f, Loader=SafeLoader)
model = eval(config['init_model'])
model.load_state_dict(torch.load(f'{folder_name}/{model_name}'))
start_epoch = config['epoch']
train_losses = config['train_losses']
test_losses = config['test_losses']
print(f'{Fore.BLUE} Resuming training of model from epoch: {start_epoch} {Fore.RESET}')
return start_epoch, train_losses, test_losses, model
def process_pose(pose):
quat_unit = normalize_quat(pose[:,3:])
return torch.cat((pose[:,:3], quat_unit), dim=1)
def projectToCamera(intrinsic_matrix, distortion, width, height, pts):
"""
Projects a list of points to the camera defined transform, intrinsics and distortion
:param intrinsic_matrix: 3x3 intrinsic camera matrix
:param distortion: should be as follows: (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]])
:param width: the image width
:param height: the image height
:param pts: a list of point coordinates (in the camera frame) with the following format: np array 4xn or 3xn
:return: a list of pixel coordinates with the same length as pts
"""
_, n_pts = pts.shape
# Project the 3D points in the camera's frame to image pixels
# From https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
    pixs = np.zeros((2, n_pts), dtype=float)
k1, k2, p1, p2, k3 = distortion
# fx, _, cx, _, fy, cy, _, _, _ = intrinsic_matrix
# print('intrinsic=\n' + str(intrinsic_matrix))
fx = intrinsic_matrix[0, 0]
fy = intrinsic_matrix[1, 1]
cx = intrinsic_matrix[0, 2]
cy = intrinsic_matrix[1, 2]
x = pts[0, :]
y = pts[1, :]
z = pts[2, :]
dists = np.linalg.norm(pts[0:3, :], axis=0) # compute distances from point to camera
xl = np.divide(x, z) # compute homogeneous coordinates
yl = np.divide(y, z) # compute homogeneous coordinates
r2 = xl ** 2 + yl ** 2 # r square (used multiple times bellow)
xll = xl * (1 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3) + 2 * p1 * xl * yl + p2 * (r2 + 2 * xl ** 2)
yll = yl * (1 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3) + p1 * (r2 + 2 * yl ** 2) + 2 * p2 * xl * yl
pixs[0, :] = fx * xll + cx
pixs[1, :] = fy * yll + cy
# Compute mask of valid projections
valid_z = z > 0
valid_xpix = np.logical_and(pixs[0, :] >= 0, pixs[0, :] < width)
valid_ypix = np.logical_and(pixs[1, :] >= 0, pixs[1, :] < height)
valid_pixs = np.logical_and(valid_z, np.logical_and(valid_xpix, valid_ypix))
return pixs, valid_pixs, dists
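# Example usage (illustrative): project one 3D point expressed in the camera frame using an
# assumed 640x480 pinhole intrinsic matrix and zero distortion.
def _example_project_to_camera():
    intrinsics = np.array([[500., 0., 320.],
                           [0., 500., 240.],
                           [0., 0., 1.]])
    distortion = (0., 0., 0., 0., 0.)
    pts = np.array([[0.1], [0.0], [1.0]])  # 3xN points in the camera frame
    pixs, valid_pixs, dists = projectToCamera(intrinsics, distortion, 640, 480, pts)
    # pixs[:, 0] == (370., 240.), valid_pixs[0] == True, dists[0] == sqrt(0.1**2 + 1.0**2)
    return pixs, valid_pixs, dists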
def synthesize_pose(pose1, pose2):
"""
synthesize pose between pose1 and pose2
pose1: 4x4
pose2: 4x4
"""
pos1 = pose1[:3,3]
rot1 = pose1[:3,:3]
pos2 = pose2[:3,3]
rot2 = pose2[:3,:3]
# rot3x3 to euler angles
rot1_euler = R.from_matrix(rot1).as_euler('xyz', degrees=False)
rot2_euler = R.from_matrix(rot2).as_euler('xyz', degrees=False)
pos3 = (pos1 + pos2) / 2
rot3_euler = (rot1_euler + rot2_euler) / 2
rot3 = R.from_euler('xyz', rot3_euler, degrees=False).as_matrix()
pose3 = np.zeros(shape=(4,4))
pose3[:3,:3] = rot3
pose3[:3,3] = pos3
pose3[-1,-1] = 1
return pose3
def applyNoise(matrix44, pos_error, rot_error):
xyz = matrixToXYZ(matrix44)
euler = R.from_quat(matrixToQuaternion(matrix44)).as_euler('xyz', 'degrees')
# adapted from ATOM
v = np.random.uniform(-1.0, 1.0, 3)
v = v / np.linalg.norm(v)
new_xyz = xyz + v * (pos_error*math.sqrt(3))
v = np.random.choice([-1.0, 1.0], 3) * (rot_error/math.sqrt(3))
new_euler = euler + v
rotation_angles = R.from_euler('xyz', new_euler, degrees=True).as_matrix()
new_matrix44 = rotationAndpositionToMatrix44(rotation=rotation_angles, position=new_xyz)
return new_matrix44 | 7,993 | 29.51145 | 146 | py |
synfeal | synfeal-main/dataset.py | import cv2
import torch.utils.data as data
import numpy as np
import torch
import os
import yaml
from PIL import Image
from yaml.loader import SafeLoader
from utils import read_pcd, matrixToXYZ, matrixToQuaternion, normalize_quat
# pytorch datasets: https://pytorch.org/tutorials/beginner/basics/data_tutorial.html
class Dataset(data.Dataset):
def __init__(self, path_seq, rgb_transform = None, depth_transform = None, inputs = None):
self.root = f'{os.environ.get("SYNFEAL_DATASET")}/datasets/localbot'
self.seq = path_seq
self.path_seq = f'{self.root}/{path_seq}'
self.rgb_transform = rgb_transform
self.depth_transform = depth_transform
if inputs == None:
self.inputs = ['point_cloud', 'depth_image', 'rgb_image']
else:
self.inputs = inputs
config = self.getConfig()
if 'statistics' in config:
self.depth_mean = config['statistics']['D']['mean']
self.depth_std = config['statistics']['D']['std']
def __getitem__(self, index):
output = []
if 'point_cloud' in self.inputs:
# load point cloud
pc_raw = read_pcd(f'{self.path_seq}/frame-{index:05d}.pcd')
point_set = np.vstack([pc_raw.pc_data['x'], pc_raw.pc_data['y'], pc_raw.pc_data['z']]).T # stays NX3
point_set = torch.from_numpy(point_set.astype(np.float32))
output.append(point_set)
if 'depth_image' in self.inputs:
# load depth image
depth_image = cv2.imread(f'{self.path_seq}/frame-{index:05d}.depth.png', cv2.IMREAD_UNCHANGED)
depth_image = depth_image.astype(np.float32) / 1000.0 # to meters
depth_image = Image.fromarray(depth_image)
if self.depth_transform!=None:
depth_image = self.depth_transform(depth_image)
output.append(depth_image)
if 'rgb_image' in self.inputs:
# TODO: change this to the correct dataset
rgb_image = Image.open(f'{self.path_seq}/frame-{index:05d}.rgb.png')
if self.rgb_transform != None:
rgb_image = self.rgb_transform(rgb_image)
output.append(rgb_image)
# load pose
matrix = np.loadtxt(f'{self.path_seq}/frame-{index:05d}.pose.txt', delimiter=',')
quaternion = matrixToQuaternion(matrix)
quaternion = normalize_quat(quaternion)
xyz = matrixToXYZ(matrix)
pose = np.append(xyz, quaternion)
pose = torch.from_numpy(pose.astype(np.float32))
output.append(pose)
return tuple(output)
def __len__(self):
return sum(f.endswith('pose.txt') for f in os.listdir(self.path_seq))
def getConfig(self):
with open(f'{self.path_seq}/config.yaml') as f:
config = yaml.load(f, Loader=SafeLoader)
return config
def setConfig(self, config):
with open(f'{self.path_seq}/config.yaml', 'w') as f:
yaml.dump(config, f)
# config_stats = Dataset('seq5',depth_transform=None ,rgb_transform=None, inputs=['depth_image']).getConfig()['statistics']
# rgb_mean = [config_stats['R']['mean'], config_stats['G']['mean'], config_stats['B']['mean']]
# rgb_std = [config_stats['R']['std'], config_stats['G']['std'], config_stats['B']['std']]
# depth_mean = config_stats['D']['mean']
# depth_std = config_stats['D']['std']
# print(depth_mean)
# depth_transform_train = transforms.Compose([
# transforms.Resize(300),
# transforms.CenterCrop(299),
# transforms.ToTensor(),
# transforms.Normalize(mean=(depth_mean,), std=(depth_std,))
# ])
# rgb_transform_train = transforms.Compose([
# transforms.Resize(300),
# transforms.RandomCrop(299),
# transforms.ToTensor(),
# transforms.Normalize(rgb_mean, rgb_std)
# ])
# rgb_transform_test = transforms.Compose([
# transforms.Resize(300),
# transforms.CenterCrop(299),
# transforms.ToTensor(),
# transforms.Normalize(rgb_mean, rgb_std)
# ])
# dataset = Dataset('seq6',depth_transform=depth_transform_train ,rgb_transform=rgb_transform_train, inputs=['depth_image', 'rgb_image'])
# for i in range(100,110):
# print(f'depth size: {dataset[i][0].shape}')
# print(f'rgb size: {dataset[i][1].shape}')
# print(f'depth mean: {np.mean(dataset[i][0].numpy())}')
# print(f'rgb mean: {np.mean(dataset[i][1].numpy())}')
| 4,547 | 34.53125 | 137 | py |
synfeal | synfeal-main/models/pointnet.py | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
# this is a regularization to avoid overfitting! It adds another term to the cost function to penalize the complexity of the models.
def feature_transform_regularizer(trans):
d = trans.size()[1]
batchsize = trans.size()[0]
I = torch.eye(d)[None, :, :]
if trans.is_cuda:
I = I.cuda()
loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2,1)) - I, dim=(1,2)))
return loss
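# Hedged usage sketch: the regularizer is added to the task loss with a small weight.
# The 0.001 factor follows the original pointnet.pytorch training script and is an assumption here;
# model, points, target and criterion are placeholders for your own training loop:
# pred, trans, trans_feat = model(points)
# loss = criterion(pred, target)
# if trans_feat is not None:
#     loss = loss + 0.001 * feature_transform_regularizer(trans_feat)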
class STN3d(nn.Module): # spatial transformer network 3d, paper: https://arxiv.org/pdf/1506.02025v3.pdf
def __init__(self):
super(STN3d, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1) # conv1d because we are sliding the filter over 1 dimensional.
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = torch.nn.Conv1d(k, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k*k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1,self.k*self.k).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetfeat(nn.Module):
def __init__(self, feature_transform = False):
super(PointNetfeat, self).__init__()
#self.stn = STN3d()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
def forward(self, x):
n_pts = x.size()[2] # input is (batch_size, number_of_features, number_of_points)
#trans = self.stn(x)
#x = x.transpose(2, 1) # this swaps number of feature with number of points --> (batch_size, number_of_points, number_of_features)
#x = torch.bmm(x, trans) # batch matrix-matrix product --> x.shape = (32, 2500, 3), trans.shape = (32, 3, 3) --> output = (32, 2500, 3)
#x = x.transpose(2, 1) # now x.shape = (32, 3, 2500)
x = F.relu(self.bn1(self.conv1(x))) # x.shape = (32, 64, 2500)
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2,1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2,1)
else:
trans_feat = None
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x)) #x.shape (32, 1024, 2500)
x = torch.max(x, 2, keepdim=True)[0] # MAX POOLING
x = x.view(-1, 1024) # flattening
trans = 0
return x, trans, trans_feat
class PointNet(nn.Module):
def __init__(self, feature_transform=False):
super(PointNet, self).__init__()
self.feature_transform = feature_transform
self.feat = PointNetfeat(feature_transform=feature_transform)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3_trans = nn.Linear(256, 3)
self.fc3_rot = nn.Linear(256, 4)
self.dropout = nn.Dropout(p=0.3)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.relu = nn.ReLU()
def forward(self, x):
x, trans, trans_feat = self.feat(x) # the output x is the global feature (1024x1)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.dropout(self.fc2(x))))
x_trans = self.fc3_trans(x) # Joint Learning!
x_rot = self.fc3_rot(x) # Joint Learning!
x_pose = torch.cat((x_trans, x_rot), dim=1)
return x_pose, trans, trans_feat # softmax removed!
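# Hedged usage sketch (input is (batch, 3, n_points); use eval() or a batch > 1 because of BatchNorm1d):
# model = PointNet(feature_transform=False)
# model.eval()
# pose, trans, trans_feat = model(torch.rand(4, 3, 2500))
# print(pose.shape)  # torch.Size([4, 7]) -> xyz + quaternion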
| 5,796 | 34.564417 | 143 | py |
synfeal | synfeal-main/models/pointnet_classification.py | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
class STN3d(nn.Module):
def __init__(self):
super(STN3d, self).__init__()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.array([1,0,0,0,1,0,0,0,1]).astype(np.float32))).view(1,9).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = torch.nn.Conv1d(k, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k*k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
batchsize = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(1,self.k*self.k).repeat(batchsize,1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetfeat(nn.Module):
def __init__(self, global_feat = True, feature_transform = False):
super(PointNetfeat, self).__init__()
self.stn = STN3d()
self.conv1 = torch.nn.Conv1d(3, 64, 1)
self.conv2 = torch.nn.Conv1d(64, 128, 1)
self.conv3 = torch.nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.global_feat = global_feat
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
def forward(self, x):
n_pts = x.size()[2]
trans = self.stn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans)
x = x.transpose(2, 1)
x = F.relu(self.bn1(self.conv1(x)))
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2,1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2,1)
else:
trans_feat = None
pointfeat = x
x = F.relu(self.bn2(self.conv2(x)))
x = self.bn3(self.conv3(x))
x = torch.max(x, 2, keepdim=True)[0]
x = x.view(-1, 1024)
if self.global_feat:
return x, trans, trans_feat
else:
x = x.view(-1, 1024, 1).repeat(1, 1, n_pts)
return torch.cat([x, pointfeat], 1), trans, trans_feat
class PointNetCls(nn.Module):
def __init__(self, k=2, feature_transform=False):
super(PointNetCls, self).__init__()
self.feature_transform = feature_transform
self.feat = PointNetfeat(global_feat=True, feature_transform=feature_transform)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k)
self.dropout = nn.Dropout(p=0.3)
self.bn1 = nn.BatchNorm1d(512)
self.bn2 = nn.BatchNorm1d(256)
self.relu = nn.ReLU()
def forward(self, x):
x, trans, trans_feat = self.feat(x) # the output x is the global feature (1024x1)
x = F.relu(self.bn1(self.fc1(x)))
x = F.relu(self.bn2(self.dropout(self.fc2(x))))
x = self.fc3(x)
return F.log_softmax(x, dim=1), trans, trans_feat # this must change
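# Hedged usage sketch: per-class log-probabilities for k classes from (batch, 3, n_points) clouds:
# model = PointNetCls(k=16, feature_transform=True)
# model.eval()
# logp, trans, trans_feat = model(torch.rand(4, 3, 1024))
# print(logp.shape)  # torch.Size([4, 16])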
| 4,884 | 32.006757 | 128 | py |
synfeal | synfeal-main/models/loss_functions.py | import torch
from torch import nn
class BetaLoss(nn.Module):
def __init__(self, beta= 512):
super(BetaLoss, self).__init__()
self.beta = beta
#self.loss_fn = torch.nn.L1Loss() # PoseNet said that L1 was the best
self.loss_fn = torch.nn.MSELoss()
def forward(self, pred, targ):
"""
:param pred: N x 7
:param targ: N x 7
:return:
"""
# Translation loss
loss = self.loss_fn(pred[:, :3], targ[:, :3])
# Rotation loss
loss += self.beta * self.loss_fn(pred[:, 3:], targ[:, 3:]) ## see paper: https://arxiv.org/abs/1704.00390
return loss
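# Hedged usage sketch for both loss modules (the learning rate below is illustrative):
# criterion = BetaLoss(beta=512)  # fixed position/rotation weighting
# loss = criterion(pred, targ)    # pred, targ: N x 7 (xyz + unit quaternion)
# DynamicLoss (below) learns the weighting through sx/sq, so its parameters must be passed
# to the optimizer together with the model's:
# criterion = DynamicLoss()
# optimizer = torch.optim.Adam(list(model.parameters()) + list(criterion.parameters()), lr=1e-4)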
class DynamicLoss(nn.Module):
def __init__(self, sx=0.0, sq=-3.0):
super(DynamicLoss, self).__init__()
#self.loss_fn = torch.nn.L1Loss() # PoseNet said that L1 was the best
self.loss_fn = torch.nn.MSELoss()
self.sx = torch.nn.Parameter(torch.Tensor([sx]), requires_grad=True) # Parameter: When a Parameter is associated with a module as a model attribute, it gets added to the parameter list automatically and can be accessed using the 'parameters' iterator.
self.sq = torch.nn.Parameter(torch.Tensor([sq]), requires_grad=True)
def forward(self, pred, targ):
"""
:param pred: N x 7
:param targ: N x 7
:return:
"""
# Translation loss
loss = torch.exp(-self.sx) * self.loss_fn(pred[:, :3], targ[:, :3]) + self.sx
# Rotation loss
loss += torch.exp(-self.sq) * self.loss_fn(pred[:, 3:], targ[:, 3:]) + self.sq ## see paper: https://arxiv.org/abs/1704.00390
return loss | 1,656 | 39.414634 | 261 | py |
synfeal | synfeal-main/models/poselstm.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from torchvision import transforms, models
# based on: https://github.com/hazirbas/poselstm-pytorch
# paper: https://openaccess.thecvf.com/content_ICCV_2017/papers/Walch_Image-Based_Localization_Using_ICCV_2017_paper.pdf
class PoseLSTM(nn.Module):
def __init__(self, hidden_size = 128, pretrained = True, aux_logits=True):
super(PoseLSTM, self).__init__()
self.hidden_size = hidden_size
self.aux_logits = aux_logits
if pretrained:
base_model = models.inception_v3(weights='Inception_V3_Weights.DEFAULT')
else:
base_model = models.inception_v3()
base_model.aux_logits = True
self.Conv2d_1a_3x3 = base_model.Conv2d_1a_3x3
self.Conv2d_2a_3x3 = base_model.Conv2d_2a_3x3
self.Conv2d_2b_3x3 = base_model.Conv2d_2b_3x3
self.Conv2d_3b_1x1 = base_model.Conv2d_3b_1x1
self.Conv2d_4a_3x3 = base_model.Conv2d_4a_3x3
self.Mixed_5b = base_model.Mixed_5b
self.Mixed_5c = base_model.Mixed_5c
self.Mixed_5d = base_model.Mixed_5d
self.Mixed_6a = base_model.Mixed_6a
self.Mixed_6b = base_model.Mixed_6b
self.Mixed_6c = base_model.Mixed_6c
self.Mixed_6d = base_model.Mixed_6d
self.Mixed_6e = base_model.Mixed_6e
self.Mixed_7a = base_model.Mixed_7a
self.Mixed_7b = base_model.Mixed_7b
self.Mixed_7c = base_model.Mixed_7c
if aux_logits:
self.aux1 = InceptionAux(288, stride=7, hidden_size = self.hidden_size)
self.aux2 = InceptionAux(768, stride=3, hidden_size = self.hidden_size)
self.lstm_regression = LstmRegression(dropout_rate=0.5, hidden_size=self.hidden_size)
def forward(self, x, verbose=False): # this is where we pass the input into the module
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x) # mixed is the inception module!!
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
x = self.Mixed_5d(x)
# 35 x 35 x 288
if self.aux_logits and self.training:
pose_aux1 = self.aux1(x)
x = self.Mixed_6a(x)
# 17 x 17 x 768
x = self.Mixed_6b(x)
# 17 x 17 x 768
x = self.Mixed_6c(x)
# 17 x 17 x 768
x = self.Mixed_6d(x)
# 17 x 17 x 768
x = self.Mixed_6e(x)
# 17 x 17 x 768
if self.aux_logits and self.training:
pose_aux2 = self.aux2(x)
x = self.Mixed_7a(x)
# 8 x 8 x 1280
x = self.Mixed_7b(x)
# 8 x 8 x 2048
x = self.Mixed_7c(x)
# 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8)
# 1 x 1 x 2048
x = x.view(x.size(0), -1)
# 2048
pose = self.lstm_regression(x)
if self.aux_logits and self.training:
return pose_aux1, pose_aux2, pose
else:
return pose
class InceptionAux(nn.Module):
def __init__(self, in_channels, stride, hidden_size):
super(InceptionAux, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=128, kernel_size=(1,1))
self.fc = nn.Linear(3200, 2048)
self.relu = nn.ReLU()
self.pool = nn.AvgPool2d(kernel_size=5, stride=stride)
self.lstm_regression = LstmRegression(dropout_rate=0.7, hidden_size=hidden_size)
def forward(self, x):
x = self.pool(x)
x = self.relu(self.conv(x))
x = x.reshape(x.shape[0], -1)
x = self.relu(self.fc(x))
pose = self.lstm_regression(x)
return pose
class LstmRegression(nn.Module):
def __init__(self, dropout_rate, hidden_size):
super(LstmRegression, self).__init__()
#TODO: try hidden_size = 32
self.hidden_size = hidden_size
self.lstm_lr = nn.LSTM(input_size=64, hidden_size = hidden_size, bidirectional = True, batch_first = True)
self.lstm_ud = nn.LSTM(input_size=32, hidden_size = hidden_size, bidirectional = True, batch_first = True)
self.pos = nn.Linear(hidden_size*4, 3, bias=True)
self.ori = nn.Linear(hidden_size*4, 4, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
def forward(self,x):
# x is of shape (N,1,2048)
x = x.view(x.size(0),32, 64)
_, (hidden_state_lr, _) = self.lstm_lr(x.permute(0,1,2)) # to run row by row
_, (hidden_state_ud, _) = self.lstm_ud(x.permute(0,2,1)) # to run col by col
# hidden_state_lr.shape = [2, batch_size, hidden_size]
lstm_vector = torch.cat((hidden_state_lr[0,:,:],
hidden_state_lr[1,:,:],
hidden_state_ud[0,:,:],
hidden_state_ud[1,:,:]), 1)
lstm_vector = self.dropout(lstm_vector)
pos = self.pos(lstm_vector)
ori = self.ori(lstm_vector)
pose = torch.cat((pos, ori), dim=1)
return pose
# if __name__ == "__main__":
# model = PoseLSTM()
# print(model(torch.rand(10,3,299,299))[0].shape) | 6,396 | 33.766304 | 120 | py |
synfeal | synfeal-main/models/posenet.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from torchvision import transforms, models
#https://github.com/youngguncho/PoseNet-Pytorch/blob/6c583a345a20ba17f67b76e54a26cf78e2811604/posenet_simple.py#L119
#https://pytorch.org/hub/pytorch_vision_inception_v3/
class PoseNetGoogleNet(nn.Module):
def __init__(self, pretrained,dropout_rate=0.0, aux_logits=True):
super(PoseNetGoogleNet, self).__init__()
self.dropout_rate = dropout_rate
self.aux_logits = aux_logits
if pretrained:
base_model = models.inception_v3(weights='Inception_V3_Weights.DEFAULT')
else:
base_model = models.inception_v3()
base_model.aux_logits = True
self.Conv2d_1a_3x3 = base_model.Conv2d_1a_3x3
self.Conv2d_2a_3x3 = base_model.Conv2d_2a_3x3
self.Conv2d_2b_3x3 = base_model.Conv2d_2b_3x3
self.Conv2d_3b_1x1 = base_model.Conv2d_3b_1x1
self.Conv2d_4a_3x3 = base_model.Conv2d_4a_3x3
self.Mixed_5b = base_model.Mixed_5b
self.Mixed_5c = base_model.Mixed_5c
self.Mixed_5d = base_model.Mixed_5d
self.Mixed_6a = base_model.Mixed_6a
self.Mixed_6b = base_model.Mixed_6b
self.Mixed_6c = base_model.Mixed_6c
self.Mixed_6d = base_model.Mixed_6d
self.Mixed_6e = base_model.Mixed_6e
self.Mixed_7a = base_model.Mixed_7a
self.Mixed_7b = base_model.Mixed_7b
self.Mixed_7c = base_model.Mixed_7c
if aux_logits:
self.aux1 = InceptionAux1(288, dropout_rate)
self.aux2 = InceptionAux2(768, dropout_rate)
# Out 2
self.pos = nn.Linear(2048, 3, bias=True)
self.ori = nn.Linear(2048, 4, bias=True)
def forward(self, x, verbose=False): # this is where we pass the input into the module
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x) # mixed is the inception module!!
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
x = self.Mixed_5d(x)
# 35 x 35 x 288
if self.aux_logits and self.training:
pose_aux1 = self.aux1(x)
x = self.Mixed_6a(x)
# 17 x 17 x 768
x = self.Mixed_6b(x)
# 17 x 17 x 768
x = self.Mixed_6c(x)
# 17 x 17 x 768
x = self.Mixed_6d(x)
# 17 x 17 x 768
x = self.Mixed_6e(x)
# 17 x 17 x 768
if self.aux_logits and self.training:
pose_aux2 = self.aux2(x)
x = self.Mixed_7a(x)
# 8 x 8 x 1280
x = self.Mixed_7b(x)
# 8 x 8 x 2048
x = self.Mixed_7c(x)
# 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8)
# 1 x 1 x 2048
x = F.dropout(x, p=self.dropout_rate, training=self.training)
# 1 x 1 x 2048
x = x.view(x.size(0), -1)
# 2048
pos = self.pos(x)
ori = self.ori(x)
pose = torch.cat((pos, ori), dim=1)
if self.aux_logits and self.training:
return pose_aux1, pose_aux2, pose
else:
return pose
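# Hedged training sketch: with aux_logits=True and model.train(), forward() returns three poses.
# The 0.3 auxiliary weight mirrors the GoogLeNet/PoseNet convention and is an assumption here;
# criterion and target stand for your loss function and ground-truth pose:
# model = PoseNetGoogleNet(pretrained=False, aux_logits=True)
# model.train()
# aux1, aux2, pose = model(torch.rand(2, 3, 299, 299))
# loss = criterion(pose, target) + 0.3 * (criterion(aux1, target) + criterion(aux2, target))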
class InceptionAux1(nn.Module):
def __init__(self, in_channels, dropout_rate):
super(InceptionAux1, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=128, kernel_size=(1,1))
self.fc = nn.Linear(3200, 2048)
self.pos_aux1 = nn.Linear(in_features=2048, out_features=3)
self.ori_aux1 = nn.Linear(in_features=2048, out_features=4)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_rate)
self.pool = nn.AvgPool2d(kernel_size=5, stride=7)
def forward(self, x):
x = self.pool(x)
x = self.relu(self.conv(x))
x = x.reshape(x.shape[0], -1)
x = self.relu(self.fc(x))
x = self.dropout(x)
pos = self.pos_aux1(x)
ori = self.ori_aux1(x)
pose = torch.cat((pos, ori), dim=1)
return pose
class InceptionAux2(nn.Module):
def __init__(self, in_channels, dropout_rate):
super(InceptionAux2, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=128, kernel_size=(1,1))
self.fc = nn.Linear(3200, 2048)
self.pos_aux2 = nn.Linear(in_features=2048, out_features=3)
self.ori_aux2 = nn.Linear(in_features=2048, out_features=4)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=dropout_rate)
self.pool = nn.AvgPool2d(kernel_size=5, stride=3)
def forward(self, x):
x = self.pool(x)
x = self.relu(self.conv(x))
x = x.reshape(x.shape[0], -1)
x = self.relu(self.fc(x))
x = self.dropout(x)
pos = self.pos_aux2(x)
ori = self.ori_aux2(x)
pose = torch.cat((pos, ori), dim=1)
return pose
class PoseNetResNet(nn.Module): #https://github.com/youngguncho/PoseNet-Pytorch/blob/master/model.py
def __init__(self, pretrained, dropout_rate=0.0, aux_logits=False):
super(PoseNetResNet, self).__init__()
base_model = models.resnet34(pretrained=pretrained)
feat_in = base_model.fc.in_features
self.aux_logits = aux_logits
self.dropout_rate = dropout_rate
self.base_model = nn.Sequential(*list(base_model.children())[:-1])
self.fc_last = nn.Linear(feat_in, 2048, bias=True)
self.fc_position = nn.Linear(2048, 3, bias=True)
self.fc_rotation = nn.Linear(2048, 4, bias=True)
init_modules = [self.fc_last, self.fc_position, self.fc_rotation]
        # init modules according to kaiming normal
# https://pytorch.org/docs/stable/nn.init.html
for module in init_modules:
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
def forward(self, x):
x = self.base_model(x)
x = x.view(x.size(0), -1)
x_fully = self.fc_last(x)
x = F.relu(x_fully)
if self.dropout_rate > 0:
x = F.dropout(x, p=self.dropout_rate, training=self.training)
position = self.fc_position(x)
rotation = self.fc_rotation(x)
x_pose = torch.cat((position, rotation), dim=1)
return x_pose
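# Hedged usage sketch (224x224 is the standard ResNet input size; the backbone's adaptive
# pooling usually tolerates other resolutions as well):
# model = PoseNetResNet(pretrained=False, dropout_rate=0.5)
# model.eval()
# out = model(torch.rand(2, 3, 224, 224))
# print(out.shape)  # torch.Size([2, 7])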
| 7,521 | 34.314554 | 116 | py |
synfeal | synfeal-main/models/depthnet.py | import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import numpy as np
import torch.nn.functional as F
class CNNDepth(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepth, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.conv1(x))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.conv2(x))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.conv3(x))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.conv4(x))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.conv5(x))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.fc1(x))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.fc2(x))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.fc3(x))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
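# Hedged usage sketch: fc1 expects 25088 = 512*7*7 features, which corresponds to a single-channel
# 224x224 depth map after the five stride-2 convolutions:
# model = CNNDepth()
# pose = model(torch.rand(2, 1, 224, 224))
# print(pose.shape)  # torch.Size([2, 7])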
class CNNDepthLow(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthLow, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2)
self.fc1 = nn.Linear(18432, 4096)
self.fc2 = nn.Linear(4096, 1024)
#self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(1024, 3)
self.fc_out_rotation = nn.Linear(1024, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.conv1(x))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.conv2(x))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.conv3(x))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.conv4(x))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.conv5(x))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.fc1(x))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.fc2(x))
if verbose: print('fc2 shape ' + str(x.shape))
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthDropout(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthDropout, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
self.dropout1 = nn.Dropout(p=0.5)
self.dropout2 = nn.Dropout(p=0.3)
self.dropout3 = nn.Dropout(p=0.2)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
        x = F.relu(self.dropout3(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout1(self.fc1(x)))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.dropout2(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatch(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatch, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout = nn.Dropout(p=0.4)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchK3(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchK3, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout = nn.Dropout(p=0.4)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=True): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchLeaky(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchLeaky, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
        self.lrelu = nn.LeakyReLU(0.1)
        self.dropout = nn.Dropout(p=0.4)  # forward() uses self.dropout but it was never defined; p=0.4 mirrors CNNDepthBatch (assumed value)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = self.lrelu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = self.lrelu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = self.lrelu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = self.lrelu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = self.lrelu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = self.lrelu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = self.lrelu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = self.lrelu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchLow(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchLow, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(32)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(256)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
#self.bn8 = nn.BatchNorm1d(512)
#self.lrelu = nn.LeakyReLU(0.2)
self.dropout = nn.Dropout(p=0.5)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
#self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(1024, 3)
self.fc_out_rotation = nn.Linear(1024, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=True): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = self.lrelu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
# x = self.lrelu(self.bn8(self.fc3(x)))
# if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchLowL2RegLeaky(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchLowL2RegLeaky, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=3, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=3, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=3, padding=1)
#self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=3, padding=1)
#self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=3, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
#self.bn4 = nn.BatchNorm2d(256)
#self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.lrelu = nn.LeakyReLU(0.2)
#self.dropout = nn.Dropout(p=0.5)
self.fc1 = nn.Linear(20736, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = self.lrelu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = self.lrelu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = self.lrelu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
# x = self.lrelu(self.bn4(self.conv4(x)))
# if verbose: print('layer4 shape ' + str(x.shape))
# x = self.lrelu(self.bn5(self.conv5(x)))
# if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = self.lrelu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = self.lrelu(self.bn6(self.fc1(x)))
if verbose: print('fc1 shape ' + str(x.shape))
x = self.lrelu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = self.lrelu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchLowL2Reg2(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchLowL2Reg2, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=1)
#self.conv6 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=5, stride=2, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
#self.bn6 = nn.BatchNorm2d(1024)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
#self.lrelu = nn.LeakyReLU(0.2)
#self.dropout = nn.Dropout(p=0.5)
self.fc1 = nn.Linear(18432, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
# x = self.lrelu(self.bn6(self.conv6(x)))
# if verbose: print('layer6 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = self.lrelu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = self.lrelu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.bn6(self.fc1(x)))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchDropout8(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchDropout8, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout = nn.Dropout(p=0.8)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchDropoutVar(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchDropoutVar, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout1 = nn.Dropout(p=0.5)
self.dropout2 = nn.Dropout(p=0.3)
self.dropout3 = nn.Dropout(p=0.2)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout1(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.dropout2(self.bn7(self.fc2(x))))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.dropout3(self.bn8(self.fc3(x))))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
class CNNDepthBatchDropout8Cont(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchDropout8Cont, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=2, padding=2)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=2, padding=2)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=2)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=5, stride=2, padding=2)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout1 = nn.Dropout(p=0.8)
self.dropout2 = nn.Dropout(p=0.5)
self.dropout3 = nn.Dropout(p=0.3)
self.fc1 = nn.Linear(25088, 4096)
self.fc2 = nn.Linear(4096, 1024)
#self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(1024, 3)
self.fc_out_rotation = nn.Linear(1024, 4)
    # instead of treating the relu as modules, we can treat them as functions. We can access them via torch functional
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout1(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.dropout2(self.bn7(self.fc2(x))))
if verbose: print('fc2 shape ' + str(x.shape))
#x = F.relu(self.dropout3(self.bn8(self.fc3(x))))
#if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose
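# Usage sketch (illustrative, assuming 224x224 single-channel depth input, which matches
# fc1's 25088 = 512 * 7 * 7 flattened size after the five stride-2 convolutions above):
#   model = CNNDepthBatchDropout8Cont()
#   pose = model(torch.randn(8, 1, 224, 224))  # (8, 7): 3 translation + 4 rotation values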
class CNNDepthBatchDropout8Kernel7(nn.Module): #https://towardsdatascience.com/covolutional-neural-network-cb0883dd6529
def __init__(self):
super(CNNDepthBatchDropout8Kernel7, self).__init__() # call the init constructor of the nn.Module. This way, we are only adding attributes.
self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=7, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=7, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=5, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=1)
# Batch norm should be before relu
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(256)
self.bn4 = nn.BatchNorm2d(512)
self.bn5 = nn.BatchNorm2d(512)
self.bn6 = nn.BatchNorm1d(4096)
self.bn7 = nn.BatchNorm1d(1024)
self.bn8 = nn.BatchNorm1d(512)
self.dropout = nn.Dropout(p=0.8)
self.fc1 = nn.Linear(18432, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 512)
self.fc_out_translation = nn.Linear(512, 3)
self.fc_out_rotation = nn.Linear(512, 4)
    # Instead of treating the ReLUs as modules, we can treat them as functions accessed via torch.nn.functional.
def forward(self, x, verbose=False): # this is where we pass the input into the module
if verbose: print('shape ' + str(x.shape))
x = F.relu(self.bn1(self.conv1(x)))
if verbose: print('layer1 shape ' + str(x.shape))
x = F.relu(self.bn2(self.conv2(x)))
if verbose: print('layer2 shape ' + str(x.shape))
x = F.relu(self.bn3(self.conv3(x)))
if verbose: print('layer3 shape ' + str(x.shape))
x = F.relu(self.bn4(self.conv4(x)))
if verbose: print('layer4 shape ' + str(x.shape))
x = F.relu(self.bn5(self.conv5(x)))
if verbose: print('layer5 shape ' + str(x.shape))
x = x.view(x.size(0), -1)
if verbose: print('x shape ' + str(x.shape))
# x = F.dropout(x, p=0.5)
# x = F.relu(self.fc1(x))
# if verbose: print('fc1 shape ' + str(x.shape))
#
# x = F.relu(self.fc2(x))
# if verbose: print('fc2 shape ' + str(x.shape))
#
# x = F.relu(self.fc3(x))
# if verbose: print('fc3 shape ' + str(x.shape))
x = F.relu(self.dropout(self.bn6(self.fc1(x))))
if verbose: print('fc1 shape ' + str(x.shape))
x = F.relu(self.bn7(self.fc2(x)))
if verbose: print('fc2 shape ' + str(x.shape))
x = F.relu(self.bn8(self.fc3(x)))
if verbose: print('fc3 shape ' + str(x.shape))
x_translation = self.fc_out_translation(x)
if verbose: print('x_translation shape ' + str(x_translation.shape))
x_rotation = self.fc_out_rotation(x)
if verbose: print('x_rotation shape ' + str(x_rotation.shape))
x_pose = torch.cat((x_translation, x_rotation), dim=1)
return x_pose | 45,692 | 44.784569 | 152 | py |
synfeal | synfeal-main/models/hourglass.py |
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
from torchvision import transforms, models
# paper: https://arxiv.org/abs/1703.07971
# github: https://github.com/youngguncho/HourglassPose-Pytorch/blob/master/model.py
class HourglassBatch(nn.Module):
def __init__(self, pretrained, sum_mode=False, dropout_rate=0.5, aux_logits=False):
super(HourglassBatch, self).__init__()
self.sum_mode = sum_mode
self.dropout_rate = dropout_rate
self.aux_logits = aux_logits
if pretrained:
            base_model = models.resnet34(weights=models.ResNet34_Weights.DEFAULT)
else:
base_model = models.resnet34()
# encoding blocks!
self.init_block = nn.Sequential(*list(base_model.children())[:4])
self.res_block1 = base_model.layer1
self.res_block2 = base_model.layer2
self.res_block3 = base_model.layer3
self.res_block4 = base_model.layer4
# decoding blocks
if sum_mode:
self.deconv_block1 = nn.ConvTranspose2d(512, 256, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.deconv_block2 = nn.ConvTranspose2d(256, 128, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.deconv_block3 = nn.ConvTranspose2d(128, 64, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.conv_block = nn.Conv2d(64, 32, kernel_size=(
3, 3), stride=(1, 1), padding=(1, 1), bias=False)
else:
# concatenation with the encoder feature vectors
self.deconv_block1 = nn.ConvTranspose2d(512, 256, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.deconv_block2 = nn.ConvTranspose2d(512, 128, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.deconv_block3 = nn.ConvTranspose2d(256, 64, kernel_size=(
3, 3), stride=(2, 2), padding=(1, 1), bias=False, output_padding=1)
self.conv_block = nn.Conv2d(128, 32, kernel_size=(
3, 3), stride=(1, 1), padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(256)
self.bn2 = nn.BatchNorm2d(128)
self.bn3 = nn.BatchNorm2d(64)
self.bn4 = nn.BatchNorm2d(32)
self.bn5 = nn.BatchNorm1d(1024)
# Regressor
self.fc_dim_reduce = nn.Linear(56 * 56 * 32, 1024)
self.fc_trans = nn.Linear(1024, 3)
self.fc_rot = nn.Linear(1024, 4)
# Initialize Weights
init_modules = [self.deconv_block1, self.deconv_block2, self.deconv_block3, self.conv_block,
self.fc_dim_reduce, self.fc_trans, self.fc_rot]
for module in init_modules:
            if isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
def forward(self, x):
# conv
x = self.init_block(x)
x_res1 = self.res_block1(x)
x_res2 = self.res_block2(x_res1)
x_res3 = self.res_block3(x_res2)
x_res4 = self.res_block4(x_res3)
# Deconv
x_deconv1 = self.bn1(F.relu(self.deconv_block1(x_res4)))
if self.sum_mode:
x_deconv1 = x_res3 + x_deconv1
else:
x_deconv1 = torch.cat((x_res3, x_deconv1), dim=1)
x_deconv2 = self.bn2(F.relu(self.deconv_block2(x_deconv1)))
if self.sum_mode:
x_deconv2 = x_res2 + x_deconv2
else:
x_deconv2 = torch.cat((x_res2, x_deconv2), dim=1)
x_deconv3 = self.bn3(F.relu(self.deconv_block3(x_deconv2)))
if self.sum_mode:
x_deconv3 = x_res1 + x_deconv3
else:
x_deconv3 = torch.cat((x_res1, x_deconv3), dim=1)
x_conv = self.bn4(F.relu(self.conv_block(x_deconv3)))
x_linear = x_conv.view(x_conv.size(0), -1)
x_linear = self.bn5(F.relu(self.fc_dim_reduce(x_linear)))
x_linear = F.dropout(x_linear, p=self.dropout_rate,
training=self.training)
position = self.fc_trans(x_linear)
rotation = self.fc_rot(x_linear)
x_pose = torch.cat((position, rotation), dim=1)
return x_pose
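# Usage sketch (illustrative, assuming 224x224 RGB input, which matches the 32 * 56 * 56
# flattened size expected by fc_dim_reduce):
#   model = HourglassBatch(pretrained=False)
#   pose = model(torch.randn(2, 3, 224, 224))  # (2, 7): 3 translation + 4 rotation values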
| 4,639 | 36.723577 | 120 | py |
PT-M2 | PT-M2-main/evaluate.py | import argparse
import torch
import os
from utils import load_file, load_dir, write_to_csv
from metrics import PTM2
def main():
parser = argparse.ArgumentParser("PT-M2")
parser.add_argument("--source", type=str, default="source file path")
parser.add_argument("--reference", type=str, default="reference file path")
parser.add_argument("--hypothesis", type=str, default="hypothesis file path")
parser.add_argument("--output", type=str, default="output file path")
parser.add_argument("--base", choices=["m2", "sentm2", "errant", "senterrant"], default="m2", type=str)
parser.add_argument("--scorer", choices=["self", "bertscore", "bartscore"],
default="self", type=str, help="choose the plm scorer type")
parser.add_argument("--model_type", type=str, help="choose the plm type", default="bert-base-uncased")
parser.add_argument("--beta", default=0.5, type=float)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.device = device
print(args)
sources = load_file(args.source)
references = load_dir(args.reference)
m2_file = f"{args.reference}.m2"
metric = PTM2(args, corpus=None)
if args.base == "m2":
score = metric.compute_m2(m2_file=m2_file, hyp_file=args.hypothesis, sources=sources, references=references)
elif args.base == "sentm2":
score = metric.compute_sentm2(m2_file=m2_file, hyp_file=args.hypothesis, sources=sources, references=references)
elif args.base == "errant":
score = metric.compute_errant(m2_file=m2_file, hyp_file=args.hypothesis, sources=sources, references=references)
elif args.base == "senterrant":
score = metric.compute_senterrant(m2_file=m2_file, hyp_file=args.hypothesis, sources=sources, references=references)
print(f"base={args.base}, scorer={args.scorer}, model_type={args.model_type}, score={score:.4f}")
with open(args.output, "w", encoding="utf8") as fw:
fw.write(f"base={args.base}, scorer={args.scorer}, model_type={args.model_type}, score={score}")
if __name__ == "__main__":
main()
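# Example invocation (the paths below are placeholders):
#   python evaluate.py --source src.txt --reference ref_dir --hypothesis hyp.txt \
#       --output result.txt --base m2 --scorer bertscore --model_type bert-base-uncased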
| 2,143 | 43.666667 | 124 | py |
PT-M2 | PT-M2-main/utils.py | import os
import sys
import csv
import random
import numpy as np
import torch
sys.path.append("m2scorer")
def load_file(src_file):
sources = []
with open(src_file, "r", encoding="utf8") as fr:
for line in fr:
sources.append(line.strip("\n"))
return sources
def load_dir(ref_dir):
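    # Assumption: the reference directory holds one file per annotator whose name is a
    # three-character prefix followed by an integer id (e.g. 'ref0', 'ref1', ...);
    # f_n[3:] below extracts that id.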
references = {}
for f_n in os.listdir(ref_dir):
n = int(f_n[3:])
ref_file = os.path.join(ref_dir, f_n)
with open(ref_file, "r", encoding="utf8") as fr:
for i, line in enumerate(fr):
if i not in references:
references[i] = {}
references[i][n] = line.strip("\n")
references = [v for v in references.values()]
return references
def write_to_csv(f_n, datas):
with open(f_n, 'w', encoding='utf-8', newline='') as f:
write = csv.writer(f, delimiter="\t")
for data in datas:
write.writerow(data)
| 945 | 23.25641 | 59 | py |
PT-M2 | PT-M2-main/bart_score.py | # %%
import torch
import torch.nn as nn
import traceback
from transformers import BartTokenizer, BartForConditionalGeneration
from typing import List
import numpy as np
class BARTScorer:
def __init__(self, device='cuda:0', max_length=1024, checkpoint='facebook/bart-large-cnn'):
# Set up model
self.device = device
self.max_length = max_length
self.tokenizer = BartTokenizer.from_pretrained(checkpoint)
self.model = BartForConditionalGeneration.from_pretrained(checkpoint)
self.model.eval()
self.model.to(device)
# Set up loss
self.loss_fct = nn.NLLLoss(reduction='none', ignore_index=self.model.config.pad_token_id)
self.lsm = nn.LogSoftmax(dim=1)
def load(self, path=None):
""" Load model from paraphrase finetuning """
if path is None:
path = 'models/bart.pth'
self.model.load_state_dict(torch.load(path, map_location=self.device))
def score(self, srcs, tgts, batch_size=4):
""" Score a batch of examples """
score_list = []
for i in range(0, len(srcs), batch_size):
src_list = srcs[i: i + batch_size]
tgt_list = tgts[i: i + batch_size]
try:
with torch.no_grad():
encoded_src = self.tokenizer(
src_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
encoded_tgt = self.tokenizer(
tgt_list,
max_length=self.max_length,
truncation=True,
padding=True,
return_tensors='pt'
)
src_tokens = encoded_src['input_ids'].to(self.device)
src_mask = encoded_src['attention_mask'].to(self.device)
tgt_tokens = encoded_tgt['input_ids'].to(self.device)
tgt_mask = encoded_tgt['attention_mask']
tgt_len = tgt_mask.sum(dim=1).to(self.device)
output = self.model(
input_ids=src_tokens,
attention_mask=src_mask,
labels=tgt_tokens
)
logits = output.logits.view(-1, self.model.config.vocab_size)
loss = self.loss_fct(self.lsm(logits), tgt_tokens.view(-1))
loss = loss.view(tgt_tokens.shape[0], -1)
loss = loss.sum(dim=1) / tgt_len
curr_score_list = [-x.item() for x in loss]
score_list += curr_score_list
except RuntimeError:
traceback.print_exc()
print(f'source: {src_list}')
print(f'target: {tgt_list}')
exit(0)
return score_list
def multi_ref_score(self, srcs, tgts: List[List[str]], agg="mean", batch_size=4):
# Assert we have the same number of references
ref_nums = [len(x) for x in tgts]
if len(set(ref_nums)) > 1:
raise Exception("You have different number of references per test sample.")
ref_num = len(tgts[0])
score_matrix = []
for i in range(ref_num):
curr_tgts = [x[i] for x in tgts]
scores = self.score(srcs, curr_tgts, batch_size)
score_matrix.append(scores)
if agg == "mean":
score_list = np.mean(score_matrix, axis=0)
elif agg == "max":
score_list = np.max(score_matrix, axis=0)
else:
raise NotImplementedError
return list(score_list)
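    # Usage sketch (illustrative): keep the best of two references per sample.
    #   scorer = BARTScorer(device='cpu')
    #   scorer.multi_ref_score(['he go to school .'],
    #                          [['He goes to school.', 'He went to school.']], agg='max')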
def test(self, batch_size=3):
""" Test """
src_list = [
'This is a very good idea. Although simple, but very insightful.',
'Can I take a look?',
'Do not trust him, he is a liar.'
]
tgt_list = [
"That's stupid.",
"What's the problem?",
'He is trustworthy.'
]
print(self.score(src_list, tgt_list, batch_size)) | 4,219 | 36.678571 | 97 | py |
PT-M2 | PT-M2-main/bert_score/score.py | import os
import sys
import time
import pathlib
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import pandas as pd
from collections import defaultdict
from transformers import AutoTokenizer
from .utils import (
get_model,
get_tokenizer,
get_idf_dict,
bert_cos_score_idf,
get_bert_embedding,
lang2model,
model2layers,
get_hash,
cache_scibert,
sent_encode,
)
__all__ = ["score", "plot_example"]
def score(
cands,
refs,
model_type=None,
num_layers=None,
verbose=False,
idf=False,
device=None,
batch_size=64,
nthreads=4,
all_layers=False,
lang=None,
return_hash=False,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False
):
"""
BERTScore metric.
Args:
- :param: `cands` (list of str): candidate sentences
- :param: `refs` (list of str or list of list of str): reference sentences
- :param: `model_type` (str): bert specification, default using the suggested
                    model for the target language; has to specify at least one of
`model_type` or `lang`
- :param: `num_layers` (int): the layer of representation to use.
default using the number of layer tuned on WMT16 correlation data
- :param: `verbose` (bool): turn on intermediate status update
- :param: `idf` (bool or dict): use idf weighting, can also be a precomputed idf_dict
- :param: `device` (str): on which the contextual embedding model will be allocated on.
If this argument is None, the model lives on cuda:0 if cuda is available.
- :param: `nthreads` (int): number of threads
- :param: `batch_size` (int): bert score processing batch size
- :param: `lang` (str): language of the sentences; has to specify
at least one of `model_type` or `lang`. `lang` needs to be
specified when `rescale_with_baseline` is True.
- :param: `return_hash` (bool): return hash code of the setting
- :param: `rescale_with_baseline` (bool): rescale bertscore with pre-computed baseline
- :param: `baseline_path` (str): customized baseline file
- :param: `use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer
Return:
- :param: `(P, R, F)`: each is of shape (N); N = number of input
                    candidate reference pairs. If returning hashcode, the
                    output will be ((P, R, F), hashcode). If a candidate has
multiple references, the returned score of this candidate is
the *best* score among all references.
"""
assert len(cands) == len(refs), "Different number of candidates and references"
assert lang is not None or model_type is not None, "Either lang or model_type should be specified"
ref_group_boundaries = None
if not isinstance(refs[0], str):
ref_group_boundaries = []
ori_cands, ori_refs = cands, refs
cands, refs = [], []
count = 0
for cand, ref_group in zip(ori_cands, ori_refs):
cands += [cand] * len(ref_group)
refs += ref_group
ref_group_boundaries.append((count, count + len(ref_group)))
count += len(ref_group)
if rescale_with_baseline:
assert lang is not None, "Need to specify Language when rescaling with baseline"
if model_type is None:
lang = lang.lower()
model_type = lang2model[lang]
if num_layers is None:
num_layers = model2layers[model_type]
tokenizer = get_tokenizer(model_type, use_fast_tokenizer)
model = get_model(model_type, num_layers, all_layers)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
if not idf:
idf_dict = defaultdict(lambda: 1.0)
# set idf for [SEP] and [CLS] to 0
idf_dict[tokenizer.sep_token_id] = 0
idf_dict[tokenizer.cls_token_id] = 0
elif isinstance(idf, dict):
if verbose:
print("using predefined IDF dict...")
idf_dict = idf
else:
if verbose:
print("preparing IDF dict...")
start = time.perf_counter()
idf_dict = get_idf_dict(refs, tokenizer, nthreads=nthreads)
if verbose:
print("done in {:.2f} seconds".format(time.perf_counter() - start))
if verbose:
print("calculating scores...")
start = time.perf_counter()
all_preds = bert_cos_score_idf(
model,
refs,
cands,
tokenizer,
idf_dict,
verbose=verbose,
device=device,
batch_size=batch_size,
all_layers=all_layers,
).cpu()
if ref_group_boundaries is not None:
max_preds = []
for beg, end in ref_group_boundaries:
max_preds.append(all_preds[beg:end].max(dim=0)[0])
all_preds = torch.stack(max_preds, dim=0)
use_custom_baseline = baseline_path is not None
if rescale_with_baseline:
if baseline_path is None:
baseline_path = os.path.join(os.path.dirname(__file__), f"rescale_baseline/{lang}/{model_type}.tsv")
if os.path.isfile(baseline_path):
if not all_layers:
baselines = torch.from_numpy(pd.read_csv(baseline_path).iloc[num_layers].to_numpy())[1:].float()
else:
baselines = torch.from_numpy(pd.read_csv(baseline_path).to_numpy())[:, 1:].unsqueeze(1).float()
all_preds = (all_preds - baselines) / (1 - baselines)
else:
print(
f"Warning: Baseline not Found for {model_type} on {lang} at {baseline_path}", file=sys.stderr,
)
out = all_preds[..., 0], all_preds[..., 1], all_preds[..., 2] # P, R, F
if verbose:
time_diff = time.perf_counter() - start
print(f"done in {time_diff:.2f} seconds, {len(refs) / time_diff:.2f} sentences/sec")
if return_hash:
return tuple(
[
out,
get_hash(model_type, num_layers, idf, rescale_with_baseline,
use_custom_baseline=use_custom_baseline,
use_fast_tokenizer=use_fast_tokenizer),
]
)
return out
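# Usage sketch for the functional API (one candidate, several references; the best
# reference per candidate is kept):
#   P, R, F = score(['the cat sat on the mat'],
#                   [['a cat sat on the mat', 'the cat is on the mat']], lang='en')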
def plot_example(
candidate,
reference,
model_type=None,
num_layers=None,
lang=None,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False,
fname="",
):
"""
BERTScore metric.
Args:
- :param: `candidate` (str): a candidate sentence
- :param: `reference` (str): a reference sentence
- :param: `verbose` (bool): turn on intermediate status update
- :param: `model_type` (str): bert specification, default using the suggested
                    model for the target language; has to specify at least one of
`model_type` or `lang`
- :param: `num_layers` (int): the layer of representation to use
- :param: `lang` (str): language of the sentences; has to specify
at least one of `model_type` or `lang`. `lang` needs to be
specified when `rescale_with_baseline` is True.
- :param: `return_hash` (bool): return hash code of the setting
- :param: `rescale_with_baseline` (bool): rescale bertscore with pre-computed baseline
- :param: `use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer
- :param: `fname` (str): path to save the output plot
"""
assert isinstance(candidate, str)
assert isinstance(reference, str)
assert lang is not None or model_type is not None, "Either lang or model_type should be specified"
if rescale_with_baseline:
assert lang is not None, "Need to specify Language when rescaling with baseline"
if model_type is None:
lang = lang.lower()
model_type = lang2model[lang]
if num_layers is None:
num_layers = model2layers[model_type]
tokenizer = get_tokenizer(model_type, use_fast_tokenizer)
model = get_model(model_type, num_layers)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
idf_dict = defaultdict(lambda: 1.0)
# set idf for [SEP] and [CLS] to 0
idf_dict[tokenizer.sep_token_id] = 0
idf_dict[tokenizer.cls_token_id] = 0
hyp_embedding, masks, padded_idf = get_bert_embedding(
[candidate], model, tokenizer, idf_dict, device=device, all_layers=False
)
ref_embedding, masks, padded_idf = get_bert_embedding(
[reference], model, tokenizer, idf_dict, device=device, all_layers=False
)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
sim = sim.squeeze(0).cpu()
# remove [CLS] and [SEP] tokens
r_tokens = [tokenizer.decode([i]) for i in sent_encode(tokenizer, reference)][1:-1]
h_tokens = [tokenizer.decode([i]) for i in sent_encode(tokenizer, candidate)][1:-1]
sim = sim[1:-1, 1:-1]
if rescale_with_baseline:
if baseline_path is None:
baseline_path = os.path.join(os.path.dirname(__file__), f"rescale_baseline/{lang}/{model_type}.tsv")
if os.path.isfile(baseline_path):
baselines = torch.from_numpy(pd.read_csv(baseline_path).iloc[num_layers].to_numpy())[1:].float()
sim = (sim - baselines[2].item()) / (1 - baselines[2].item())
else:
print(
f"Warning: Baseline not Found for {model_type} on {lang} at {baseline_path}", file=sys.stderr,
)
fig, ax = plt.subplots(figsize=(len(r_tokens), len(h_tokens)))
im = ax.imshow(sim, cmap="Blues", vmin=0, vmax=1)
# We want to show all ticks...
ax.set_xticks(np.arange(len(r_tokens)))
ax.set_yticks(np.arange(len(h_tokens)))
# ... and label them with the respective list entries
ax.set_xticklabels(r_tokens, fontsize=10)
ax.set_yticklabels(h_tokens, fontsize=10)
ax.grid(False)
plt.xlabel("Reference (tokenized)", fontsize=14)
plt.ylabel("Candidate (tokenized)", fontsize=14)
title = "Similarity Matrix"
if rescale_with_baseline:
title += " (after Rescaling)"
plt.title(title, fontsize=14)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="2%", pad=0.2)
fig.colorbar(im, cax=cax)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(h_tokens)):
for j in range(len(r_tokens)):
text = ax.text(
j,
i,
"{:.3f}".format(sim[i, j].item()),
ha="center",
va="center",
color="k" if sim[i, j].item() < 0.5 else "w",
)
fig.tight_layout()
if fname != "":
plt.savefig(fname, dpi=100)
print("Saved figure to file: ", fname)
plt.show()
| 11,254 | 35.781046 | 112 | py |
PT-M2 | PT-M2-main/bert_score/scorer.py | import os
import sys
import time
import pathlib
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import pandas as pd
import warnings
from collections import defaultdict
from transformers import AutoTokenizer
from .utils import (
get_model,
get_tokenizer,
get_idf_dict,
bert_cos_score_idf,
get_bert_embedding,
lang2model,
model2layers,
get_hash,
cache_scibert,
sent_encode,
)
class BERTScorer:
"""
BERTScore Scorer Object.
"""
def __init__(
self,
model_type=None,
num_layers=None,
batch_size=64,
nthreads=4,
all_layers=False,
idf=False,
idf_sents=None,
device=None,
lang=None,
rescale_with_baseline=False,
baseline_path=None,
use_fast_tokenizer=False
):
"""
Args:
            - :param: `model_type` (str): contextual embedding model specification, default using the suggested
                          model for the target language; has to specify at least one of
`model_type` or `lang`
- :param: `num_layers` (int): the layer of representation to use.
default using the number of layer tuned on WMT16 correlation data
- :param: `verbose` (bool): turn on intermediate status update
            - :param: `idf` (bool): a boolean specifying whether to use idf weighting (this should be True even if `idf_sents` is given)
- :param: `idf_sents` (List of str): list of sentences used to compute the idf weights
- :param: `device` (str): on which the contextual embedding model will be allocated on.
If this argument is None, the model lives on cuda:0 if cuda is available.
- :param: `batch_size` (int): bert score processing batch size
- :param: `nthreads` (int): number of threads
- :param: `lang` (str): language of the sentences; has to specify
at least one of `model_type` or `lang`. `lang` needs to be
specified when `rescale_with_baseline` is True.
- :param: `return_hash` (bool): return hash code of the setting
- :param: `rescale_with_baseline` (bool): rescale bertscore with pre-computed baseline
- :param: `baseline_path` (str): customized baseline file
- :param: `use_fast_tokenizer` (bool): `use_fast` parameter passed to HF tokenizer
"""
assert lang is not None or model_type is not None, "Either lang or model_type should be specified"
if rescale_with_baseline:
assert lang is not None, "Need to specify Language when rescaling with baseline"
if device is None:
self.device = "cuda" if torch.cuda.is_available() else "cpu"
else:
self.device = device
self._lang = lang
self._rescale_with_baseline = rescale_with_baseline
self._idf = idf
self.batch_size = batch_size
self.nthreads = nthreads
self.all_layers = all_layers
if model_type is None:
lang = lang.lower()
self._model_type = lang2model[lang]
else:
self._model_type = model_type
if num_layers is None:
self._num_layers = model2layers[self.model_type]
else:
self._num_layers = num_layers
# Building model and tokenizer
self._use_fast_tokenizer = use_fast_tokenizer
self._tokenizer = get_tokenizer(self.model_type, self._use_fast_tokenizer)
self._model = get_model(self.model_type, self.num_layers, self.all_layers)
self._model.to(self.device)
self._idf_dict = None
if idf_sents is not None:
self.compute_idf(idf_sents)
self._baseline_vals = None
self.baseline_path = baseline_path
self.use_custom_baseline = self.baseline_path is not None
if self.baseline_path is None:
self.baseline_path = os.path.join(
os.path.dirname(__file__), f"rescale_baseline/{self.lang}/{self.model_type}.tsv"
)
@property
def lang(self):
return self._lang
@property
def idf(self):
return self._idf
@property
def model_type(self):
return self._model_type
@property
def num_layers(self):
return self._num_layers
@property
def rescale_with_baseline(self):
return self._rescale_with_baseline
@property
def baseline_vals(self):
if self._baseline_vals is None:
if os.path.isfile(self.baseline_path):
if not self.all_layers:
self._baseline_vals = torch.from_numpy(
pd.read_csv(self.baseline_path).iloc[self.num_layers].to_numpy()
)[1:].float()
else:
self._baseline_vals = (
torch.from_numpy(pd.read_csv(self.baseline_path).to_numpy())[:, 1:].unsqueeze(1).float()
)
else:
raise ValueError(f"Baseline not Found for {self.model_type} on {self.lang} at {self.baseline_path}")
return self._baseline_vals
@property
def use_fast_tokenizer(self):
return self._use_fast_tokenizer
@property
def hash(self):
return get_hash(
self.model_type, self.num_layers, self.idf, self.rescale_with_baseline, self.use_custom_baseline, self.use_fast_tokenizer
)
def compute_idf(self, sents):
"""
        Args:
            - :param: `sents` (list of str): sentences used to compute the idf weights
"""
if self._idf_dict is not None:
warnings.warn("Overwriting the previous importance weights.")
self._idf_dict = get_idf_dict(sents, self._tokenizer, nthreads=self.nthreads)
def score(self, cands, refs, verbose=False, batch_size=64, return_hash=False):
"""
Args:
- :param: `cands` (list of str): candidate sentences
- :param: `refs` (list of str or list of list of str): reference sentences
Return:
- :param: `(P, R, F)`: each is of shape (N); N = number of input
                    candidate reference pairs. If returning hashcode, the
                    output will be ((P, R, F), hashcode). If a candidate has
multiple references, the returned score of this candidate is
the *best* score among all references.
"""
ref_group_boundaries = None
if not isinstance(refs[0], str):
ref_group_boundaries = []
ori_cands, ori_refs = cands, refs
cands, refs = [], []
count = 0
for cand, ref_group in zip(ori_cands, ori_refs):
cands += [cand] * len(ref_group)
refs += ref_group
ref_group_boundaries.append((count, count + len(ref_group)))
count += len(ref_group)
if verbose:
print("calculating scores...")
start = time.perf_counter()
if self.idf:
assert self._idf_dict, "IDF weights are not computed"
idf_dict = self._idf_dict
else:
idf_dict = defaultdict(lambda: 1.0)
idf_dict[self._tokenizer.sep_token_id] = 0
idf_dict[self._tokenizer.cls_token_id] = 0
all_preds = bert_cos_score_idf(
self._model,
refs,
cands,
self._tokenizer,
idf_dict,
verbose=verbose,
device=self.device,
batch_size=batch_size,
all_layers=self.all_layers,
).cpu()
if ref_group_boundaries is not None:
max_preds = []
            for beg, end in ref_group_boundaries:
                max_preds.append(all_preds[beg:end].max(dim=0)[0])
all_preds = torch.stack(max_preds, dim=0)
if self.rescale_with_baseline:
all_preds = (all_preds - self.baseline_vals) / (1 - self.baseline_vals)
out = all_preds[..., 0], all_preds[..., 1], all_preds[..., 2] # P, R, F
if verbose:
time_diff = time.perf_counter() - start
print(f"done in {time_diff:.2f} seconds, {len(refs) / time_diff:.2f} sentences/sec")
if return_hash:
out = tuple([out, self.hash])
return out
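    # Usage sketch (illustrative): constructing the scorer once amortises model loading
    # across repeated calls.
    #   scorer = BERTScorer(lang='en', rescale_with_baseline=True)
    #   P, R, F = scorer.score(['the cat sat on the mat'], [['the cat is on the mat']])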
def plot_example(self, candidate, reference, fname=""):
"""
Args:
- :param: `candidate` (str): a candidate sentence
- :param: `reference` (str): a reference sentence
- :param: `fname` (str): path to save the output plot
"""
assert isinstance(candidate, str)
assert isinstance(reference, str)
idf_dict = defaultdict(lambda: 1.0)
idf_dict[self._tokenizer.sep_token_id] = 0
idf_dict[self._tokenizer.cls_token_id] = 0
hyp_embedding, masks, padded_idf = get_bert_embedding(
[candidate], self._model, self._tokenizer, idf_dict, device=self.device, all_layers=False,
)
ref_embedding, masks, padded_idf = get_bert_embedding(
[reference], self._model, self._tokenizer, idf_dict, device=self.device, all_layers=False,
)
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
sim = sim.squeeze(0).cpu()
r_tokens = [self._tokenizer.decode([i]) for i in sent_encode(self._tokenizer, reference)][1:-1]
h_tokens = [self._tokenizer.decode([i]) for i in sent_encode(self._tokenizer, candidate)][1:-1]
sim = sim[1:-1, 1:-1]
if self.rescale_with_baseline:
sim = (sim - self.baseline_vals[2].item()) / (1 - self.baseline_vals[2].item())
fig, ax = plt.subplots(figsize=(len(r_tokens), len(h_tokens)))
im = ax.imshow(sim, cmap="Blues", vmin=0, vmax=1)
# We want to show all ticks...
ax.set_xticks(np.arange(len(r_tokens)))
ax.set_yticks(np.arange(len(h_tokens)))
# ... and label them with the respective list entries
ax.set_xticklabels(r_tokens, fontsize=10)
ax.set_yticklabels(h_tokens, fontsize=10)
ax.grid(False)
plt.xlabel("Reference (tokenized)", fontsize=14)
plt.ylabel("Candidate (tokenized)", fontsize=14)
title = "Similarity Matrix"
if self.rescale_with_baseline:
title += " (after Rescaling)"
plt.title(title, fontsize=14)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="2%", pad=0.2)
fig.colorbar(im, cax=cax)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(h_tokens)):
for j in range(len(r_tokens)):
text = ax.text(
j,
i,
"{:.3f}".format(sim[i, j].item()),
ha="center",
va="center",
color="k" if sim[i, j].item() < 0.5 else "w",
)
fig.tight_layout()
if fname != "":
plt.savefig(fname, dpi=100)
print("Saved figure to file: ", fname)
plt.show()
def __repr__(self):
return f"{self.__class__.__name__}(hash={self.hash}, batch_size={self.batch_size}, nthreads={self.nthreads})"
def __str__(self):
return self.__repr__()
| 11,730 | 35.095385 | 133 | py |
PT-M2 | PT-M2-main/bert_score/utils.py | import sys
import os
import torch
from math import log
from itertools import chain
from collections import defaultdict, Counter
from multiprocessing import Pool
from functools import partial
from tqdm.auto import tqdm
from torch.nn.utils.rnn import pad_sequence
from distutils.version import LooseVersion
from transformers import BertConfig, XLNetConfig, XLMConfig, RobertaConfig
from transformers import AutoModel, GPT2Tokenizer, AutoTokenizer
from . import __version__
from transformers import __version__ as trans_version
__all__ = []
SCIBERT_URL_DICT = {
"scibert-scivocab-uncased": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scibert/pytorch_models/scibert_scivocab_uncased.tar", # recommend by the SciBERT authors
"scibert-scivocab-cased": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scibert/pytorch_models/scibert_scivocab_cased.tar",
"scibert-basevocab-uncased": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scibert/pytorch_models/scibert_basevocab_uncased.tar",
"scibert-basevocab-cased": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scibert/pytorch_models/scibert_basevocab_cased.tar",
}
lang2model = defaultdict(lambda: "bert-base-multilingual-cased")
lang2model.update(
{
"en": "roberta-large",
"zh": "bert-base-chinese",
"tr": "dbmdz/bert-base-turkish-cased",
"en-sci": "allenai/scibert_scivocab_uncased",
}
)
model2layers = {
"bert-base-uncased": 9, # 0.6925188074454226
"bert-large-uncased": 18, # 0.7210358126642836
"bert-base-cased-finetuned-mrpc": 9, # 0.6721947475618048
"bert-base-multilingual-cased": 9, # 0.6680687802637132
"bert-base-chinese": 8,
"roberta-base": 10, # 0.706288719158983
"roberta-large": 17, # 0.7385974720781534
"roberta-large-mnli": 19, # 0.7535618640417984
"roberta-base-openai-detector": 7, # 0.7048158349432633
"roberta-large-openai-detector": 15, # 0.7462770207355116
"xlnet-base-cased": 5, # 0.6630103662114238
"xlnet-large-cased": 7, # 0.6598800720297179
"xlm-mlm-en-2048": 6, # 0.651262570131464
"xlm-mlm-100-1280": 10, # 0.6475166424401905
# "scibert-scivocab-uncased": 8, # 0.6590354319927313
# "scibert-scivocab-cased": 9, # 0.6536375053937445
# "scibert-basevocab-uncased": 9, # 0.6748944832703548
# "scibert-basevocab-cased": 9, # 0.6524624150542374
'allenai/scibert_scivocab_uncased': 8, # 0.6590354393124127
'allenai/scibert_scivocab_cased': 9, # 0.6536374902465466
'nfliu/scibert_basevocab_uncased': 9, # 0.6748945076082333
"distilroberta-base": 5, # 0.6797558139322964
"distilbert-base-uncased": 5, # 0.6756659152782033
"distilbert-base-uncased-distilled-squad": 4, # 0.6718318036382493
"distilbert-base-multilingual-cased": 5, # 0.6178131050889238
"albert-base-v1": 10, # 0.654237567249745
"albert-large-v1": 17, # 0.6755890754323239
"albert-xlarge-v1": 16, # 0.7031844211905911
"albert-xxlarge-v1": 8, # 0.7508642218461096
"albert-base-v2": 9, # 0.6682455591837927
"albert-large-v2": 14, # 0.7008537594374035
"albert-xlarge-v2": 13, # 0.7317228357869254
"albert-xxlarge-v2": 8, # 0.7505160257184014
"xlm-roberta-base": 9, # 0.6506799445871697
"xlm-roberta-large": 17, # 0.6941551437476826
"google/electra-small-generator": 9, # 0.6659421842117754
"google/electra-small-discriminator": 11, # 0.6534639151385759
"google/electra-base-generator": 10, # 0.6730033453857188
"google/electra-base-discriminator": 9, # 0.7032089590812965
"google/electra-large-generator": 18, # 0.6813370013104459
"google/electra-large-discriminator": 14, # 0.6896675824733477
"google/bert_uncased_L-2_H-128_A-2": 1, # 0.5887998733228855
"google/bert_uncased_L-2_H-256_A-4": 1, # 0.6114863547661203
"google/bert_uncased_L-2_H-512_A-8": 1, # 0.6177345529192847
"google/bert_uncased_L-2_H-768_A-12": 2, # 0.6191261237956839
"google/bert_uncased_L-4_H-128_A-2": 3, # 0.6076202863798991
"google/bert_uncased_L-4_H-256_A-4": 3, # 0.6205239036810148
"google/bert_uncased_L-4_H-512_A-8": 3, # 0.6375351621856903
"google/bert_uncased_L-4_H-768_A-12": 3, # 0.6561849979644787
"google/bert_uncased_L-6_H-128_A-2": 5, # 0.6200458425360283
"google/bert_uncased_L-6_H-256_A-4": 5, # 0.6277501629539081
"google/bert_uncased_L-6_H-512_A-8": 5, # 0.641952305130849
"google/bert_uncased_L-6_H-768_A-12": 5, # 0.6762186226247106
"google/bert_uncased_L-8_H-128_A-2": 7, # 0.6186876506711779
"google/bert_uncased_L-8_H-256_A-4": 7, # 0.6447993208267708
"google/bert_uncased_L-8_H-512_A-8": 6, # 0.6489729408169956
"google/bert_uncased_L-8_H-768_A-12": 7, # 0.6705203359541737
"google/bert_uncased_L-10_H-128_A-2": 8, # 0.6126762064125278
"google/bert_uncased_L-10_H-256_A-4": 8, # 0.6376350032576573
"google/bert_uncased_L-10_H-512_A-8": 9, # 0.6579006292799915
"google/bert_uncased_L-10_H-768_A-12": 8, # 0.6861146692220176
"google/bert_uncased_L-12_H-128_A-2": 10, # 0.6184105693383591
"google/bert_uncased_L-12_H-256_A-4": 11, # 0.6374004994430261
"google/bert_uncased_L-12_H-512_A-8": 10, # 0.65880012149526
"google/bert_uncased_L-12_H-768_A-12": 9, # 0.675911357700092
"amazon/bort": 0, # 0.41927911053036643
"facebook/bart-base": 6, # 0.7122259132414092
"facebook/bart-large": 10, # 0.7448671872459683
"facebook/bart-large-cnn": 10, # 0.7393148105835096
"facebook/bart-large-mnli": 11, # 0.7531665445691358
"facebook/bart-large-xsum": 9, # 0.7496408866539556
"t5-small": 6, # 0.6813843919496912
"t5-base": 11, # 0.7096044814981418
"t5-large": 23, # 0.7244153820191929
"vinai/bertweet-base": 9, # 0.6529471006118857
"microsoft/deberta-base": 9, # 0.7088459455930344
"microsoft/deberta-base-mnli": 9, # 0.7395257063907247
"microsoft/deberta-large": 16, # 0.7511806792052013
"microsoft/deberta-large-mnli": 18, # 0.7736263649679905
"microsoft/deberta-xlarge": 18, # 0.7568670944373346
"microsoft/deberta-xlarge-mnli": 40, # 0.7780600929333213
"YituTech/conv-bert-base": 10, # 0.7058253551080789
"YituTech/conv-bert-small": 10, # 0.6544473011107349
"YituTech/conv-bert-medium-small": 9, # 0.6590097075123257
"microsoft/mpnet-base": 8, # 0.724976539498804
"squeezebert/squeezebert-uncased": 9, # 0.6543868703018726
"squeezebert/squeezebert-mnli": 9, # 0.6654799051284791
"squeezebert/squeezebert-mnli-headless": 9, # 0.6654799051284791
"tuner007/pegasus_paraphrase": 15, # 0.7188349436772694
"google/pegasus-large": 8, # 0.63960462272448
"google/pegasus-xsum": 11, # 0.6836878575233349
"sshleifer/tiny-mbart": 2, # 0.028246072231946733
"facebook/mbart-large-cc25": 12, # 0.6582922975802958
"facebook/mbart-large-50": 12, # 0.6464972230103133
"facebook/mbart-large-en-ro": 12, # 0.6791285137459857
"facebook/mbart-large-50-many-to-many-mmt": 12, # 0.6904136529270892
"facebook/mbart-large-50-one-to-many-mmt": 12, # 0.6847906439540236
"allenai/led-base-16384": 6, # 0.7122259170564179
"facebook/blenderbot_small-90M": 7, # 0.6489176335400088
"facebook/blenderbot-400M-distill": 2, # 0.5874774070540008
"microsoft/prophetnet-large-uncased": 4, # 0.586496184234925
"microsoft/prophetnet-large-uncased-cnndm": 7, # 0.6478379437729287
"SpanBERT/spanbert-base-cased": 8, # 0.6824006863686848
"SpanBERT/spanbert-large-cased": 17, # 0.705352690855603
"microsoft/xprophetnet-large-wiki100-cased": 7, # 0.5852499775879524
"ProsusAI/finbert": 10, # 0.6923213940752796
"Vamsi/T5_Paraphrase_Paws": 12, # 0.6941611753807352
"ramsrigouthamg/t5_paraphraser": 11, # 0.7200917597031539
"microsoft/deberta-v2-xlarge": 10, # 0.7393675784473045
"microsoft/deberta-v2-xlarge-mnli": 17, # 0.7620620803716714
"microsoft/deberta-v2-xxlarge": 21, # 0.7520547670281869
"microsoft/deberta-v2-xxlarge-mnli": 22, # 0.7742603457742682
"allenai/longformer-base-4096": 7, # 0.7089559593129316
"allenai/longformer-large-4096": 14, # 0.732408493548181
"allenai/longformer-large-4096-finetuned-triviaqa": 14, # 0.7365882744744722
"zhiheng-huang/bert-base-uncased-embedding-relative-key": 4, # 0.5995636595368777
"zhiheng-huang/bert-base-uncased-embedding-relative-key-query": 7, # 0.6303599452145718
"zhiheng-huang/bert-large-uncased-whole-word-masking-embedding-relative-key-query": 19, # 0.6896878492850327
'google/mt5-small': 8, # 0.6401166527273479
'google/mt5-base': 11, # 0.5663956536597241
'google/mt5-large': 19, # 0.6430931371732798
'google/mt5-xl': 24, # 0.6707200963021145
'google/bigbird-roberta-base': 10, # 0.6695606423502717
'google/bigbird-roberta-large': 14, # 0.6755874042374509
'google/bigbird-base-trivia-itc': 8, # 0.6930725491629892
'princeton-nlp/unsup-simcse-bert-base-uncased': 10, # 0.6703066531921142
'princeton-nlp/unsup-simcse-bert-large-uncased': 18, # 0.6958302800755326
'princeton-nlp/unsup-simcse-roberta-base': 8, # 0.6436615893535319
'princeton-nlp/unsup-simcse-roberta-large': 13, # 0.6812864385585965
'princeton-nlp/sup-simcse-bert-base-uncased': 10, # 0.7068074935240984
'princeton-nlp/sup-simcse-bert-large-uncased': 18, # 0.7111049471332378
'princeton-nlp/sup-simcse-roberta-base': 10, # 0.7253123806661946
'princeton-nlp/sup-simcse-roberta-large': 16, # 0.7497820277237173
'dbmdz/bert-base-turkish-cased': 10, # WMT18 seg en-tr 0.5522827687776142
'dbmdz/distilbert-base-turkish-cased': 4, # WMT18 seg en-tr 0.4742268041237113
'google/byt5-small': 1, # 0.5100025975052146
'google/byt5-base': 17, # 0.5810347173565313
'google/byt5-large': 30, # 0.6151895697554877
'microsoft/deberta-v3-xsmall': 10, # 0.6941803815412021
'microsoft/deberta-v3-small': 4, # 0.6651551203179679
'microsoft/deberta-v3-base': 9, # 0.7261586651018335
'microsoft/mdeberta-v3-base': 10, # 0.6778713684091584
'microsoft/deberta-v3-large': 12, # 0.6927693082293821
'khalidalt/DeBERTa-v3-large-mnli': 18, # 0.7428756686018376
}
def sent_encode(tokenizer, sent):
"Encoding as sentence based on the tokenizer"
sent = sent.strip()
if sent == "":
return tokenizer.build_inputs_with_special_tokens([])
elif isinstance(tokenizer, GPT2Tokenizer):
# for RoBERTa and GPT-2
if LooseVersion(trans_version) >= LooseVersion("4.0.0"):
return tokenizer.encode(
sent,
add_special_tokens=True,
add_prefix_space=True,
max_length=tokenizer.model_max_length,
truncation=True,
)
elif LooseVersion(trans_version) >= LooseVersion("3.0.0"):
return tokenizer.encode(
sent, add_special_tokens=True, add_prefix_space=True, max_length=tokenizer.max_len, truncation=True,
)
elif LooseVersion(trans_version) >= LooseVersion("2.0.0"):
return tokenizer.encode(sent, add_special_tokens=True, add_prefix_space=True, max_length=tokenizer.max_len)
else:
raise NotImplementedError(f"transformers version {trans_version} is not supported")
else:
if LooseVersion(trans_version) >= LooseVersion("4.0.0"):
return tokenizer.encode(
sent, add_special_tokens=True, max_length=tokenizer.model_max_length, truncation=True,
)
elif LooseVersion(trans_version) >= LooseVersion("3.0.0"):
return tokenizer.encode(sent, add_special_tokens=True, max_length=tokenizer.max_len, truncation=True)
elif LooseVersion(trans_version) >= LooseVersion("2.0.0"):
return tokenizer.encode(sent, add_special_tokens=True, max_length=tokenizer.max_len)
else:
raise NotImplementedError(f"transformers version {trans_version} is not supported")
def get_model(model_type, num_layers, all_layers=None):
if model_type.startswith("scibert"):
model = AutoModel.from_pretrained(cache_scibert(model_type))
elif "t5" in model_type:
from transformers import T5EncoderModel
model = T5EncoderModel.from_pretrained(model_type)
else:
model = AutoModel.from_pretrained(model_type)
model.eval()
if hasattr(model, "decoder") and hasattr(model, "encoder"):
model = model.encoder
# drop unused layers
if not all_layers:
if hasattr(model, "n_layers"): # xlm
assert (
0 <= num_layers <= model.n_layers
), f"Invalid num_layers: num_layers should be between 0 and {model.n_layers} for {model_type}"
model.n_layers = num_layers
elif hasattr(model, "layer"): # xlnet
assert (
0 <= num_layers <= len(model.layer)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.layer)} for {model_type}"
model.layer = torch.nn.ModuleList([layer for layer in model.layer[:num_layers]])
elif hasattr(model, "encoder"): # albert
if hasattr(model.encoder, "albert_layer_groups"):
assert (
0 <= num_layers <= model.encoder.config.num_hidden_layers
), f"Invalid num_layers: num_layers should be between 0 and {model.encoder.config.num_hidden_layers} for {model_type}"
model.encoder.config.num_hidden_layers = num_layers
elif hasattr(model.encoder, "block"): # t5
assert (
0 <= num_layers <= len(model.encoder.block)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.encoder.block)} for {model_type}"
model.encoder.block = torch.nn.ModuleList([layer for layer in model.encoder.block[:num_layers]])
else: # bert, roberta
assert (
0 <= num_layers <= len(model.encoder.layer)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.encoder.layer)} for {model_type}"
model.encoder.layer = torch.nn.ModuleList([layer for layer in model.encoder.layer[:num_layers]])
elif hasattr(model, "transformer"): # bert, roberta
assert (
0 <= num_layers <= len(model.transformer.layer)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.transformer.layer)} for {model_type}"
model.transformer.layer = torch.nn.ModuleList([layer for layer in model.transformer.layer[:num_layers]])
elif hasattr(model, "layers"): # bart
assert (
0 <= num_layers <= len(model.layers)
), f"Invalid num_layers: num_layers should be between 0 and {len(model.layers)} for {model_type}"
model.layers = torch.nn.ModuleList([layer for layer in model.layers[:num_layers]])
else:
raise ValueError("Not supported")
else:
if hasattr(model, "output_hidden_states"):
model.output_hidden_states = True
elif hasattr(model, "encoder"):
model.encoder.output_hidden_states = True
elif hasattr(model, "transformer"):
model.transformer.output_hidden_states = True
# else:
# raise ValueError(f"Not supported model architecture: {model_type}")
return model
def get_tokenizer(model_type, use_fast=False):
if model_type.startswith("scibert"):
model_type = cache_scibert(model_type)
if LooseVersion(trans_version) >= LooseVersion("4.0.0"):
tokenizer = AutoTokenizer.from_pretrained(model_type, use_fast=use_fast)
else:
assert not use_fast, "Fast tokenizer is not available for version < 4.0.0"
tokenizer = AutoTokenizer.from_pretrained(model_type)
return tokenizer
def padding(arr, pad_token, dtype=torch.long):
lens = torch.LongTensor([len(a) for a in arr])
max_len = lens.max().item()
padded = torch.ones(len(arr), max_len, dtype=dtype) * pad_token
mask = torch.zeros(len(arr), max_len, dtype=torch.long)
for i, a in enumerate(arr):
padded[i, : lens[i]] = torch.tensor(a, dtype=dtype)
mask[i, : lens[i]] = 1
return padded, lens, mask
def bert_encode(model, x, attention_mask, all_layers=False):
model.eval()
with torch.no_grad():
out = model(x, attention_mask=attention_mask, output_hidden_states=all_layers)
if all_layers:
emb = torch.stack(out[-1], dim=2)
else:
emb = out[0]
return emb
def process(a, tokenizer=None):
if tokenizer is not None:
a = sent_encode(tokenizer, a)
return set(a)
def get_idf_dict(arr, tokenizer, nthreads=4):
"""
Returns mapping from word piece index to its inverse document frequency.
Args:
- :param: `arr` (list of str) : sentences to process.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `nthreads` (int) : number of CPU threads to use
"""
idf_count = Counter()
num_docs = len(arr)
process_partial = partial(process, tokenizer=tokenizer)
with Pool(nthreads) as p:
idf_count.update(chain.from_iterable(p.map(process_partial, arr)))
idf_dict = defaultdict(lambda: log((num_docs + 1) / (1)))
idf_dict.update({idx: log((num_docs + 1) / (c + 1)) for (idx, c) in idf_count.items()})
return idf_dict
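# Note: word pieces never seen in `arr` fall back to the defaultdict value
# log((num_docs + 1) / 1), i.e. the largest possible idf weight.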
def collate_idf(arr, tokenizer, idf_dict, device="cuda:0"):
"""
    Helper function that pads a list of sentences to have the same length and
    loads idf scores for the words in the sentences.
    Args:
        - :param: `arr` (list of str): sentences to process.
        - :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
        - :param: `idf_dict` (dict): mapping a word piece index to its
                   inverse document frequency
        - :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
arr = [sent_encode(tokenizer, a) for a in arr]
idf_weights = [[idf_dict[i] for i in a] for a in arr]
pad_token = tokenizer.pad_token_id
padded, lens, mask = padding(arr, pad_token, dtype=torch.long)
padded_idf, _, _ = padding(idf_weights, 0, dtype=torch.float)
padded = padded.to(device=device)
mask = mask.to(device=device)
lens = lens.to(device=device)
return padded, padded_idf, lens, mask
def get_bert_embedding(all_sens, model, tokenizer, idf_dict, batch_size=-1, device="cuda:0", all_layers=False):
"""
Compute BERT embedding in batches.
Args:
- :param: `all_sens` (list of str) : sentences to encode.
- :param: `model` : a BERT model from `pytorch_pretrained_bert`.
- :param: `tokenizer` : a BERT tokenizer corresponds to `model`.
- :param: `idf_dict` (dict) : mapping a word piece index to its
inverse document frequency
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
padded_sens, padded_idf, lens, mask = collate_idf(all_sens, tokenizer, idf_dict, device=device)
if batch_size == -1:
batch_size = len(all_sens)
embeddings = []
with torch.no_grad():
for i in range(0, len(all_sens), batch_size):
batch_embedding = bert_encode(
model, padded_sens[i : i + batch_size], attention_mask=mask[i : i + batch_size], all_layers=all_layers,
)
embeddings.append(batch_embedding)
del batch_embedding
total_embedding = torch.cat(embeddings, dim=0)
return total_embedding, mask, padded_idf
def greedy_cos_idf(ref_embedding, ref_masks, ref_idf, hyp_embedding, hyp_masks, hyp_idf, all_layers=False):
"""
Compute greedy matching based on cosine similarity.
Args:
        - :param: `ref_embedding` (torch.Tensor):
                   embeddings of reference sentences, BxKxd,
                   B: batch size, K: longest length, d: bert dimension
        - :param: `ref_lens` (list of int): list of reference sentence length.
        - :param: `ref_masks` (torch.LongTensor): BxKxK, BERT attention mask for
                   reference sentences.
        - :param: `ref_idf` (torch.Tensor): BxK, idf score of each word
                   piece in the reference sentence
        - :param: `hyp_embedding` (torch.Tensor):
                   embeddings of candidate sentences, BxKxd,
                   B: batch size, K: longest length, d: bert dimension
        - :param: `hyp_lens` (list of int): list of candidate sentence length.
        - :param: `hyp_masks` (torch.LongTensor): BxKxK, BERT attention mask for
                   candidate sentences.
        - :param: `hyp_idf` (torch.Tensor): BxK, idf score of each word
                   piece in the candidate sentence
"""
ref_embedding.div_(torch.norm(ref_embedding, dim=-1).unsqueeze(-1))
hyp_embedding.div_(torch.norm(hyp_embedding, dim=-1).unsqueeze(-1))
if all_layers:
B, _, L, D = hyp_embedding.size()
hyp_embedding = hyp_embedding.transpose(1, 2).transpose(0, 1).contiguous().view(L * B, hyp_embedding.size(1), D)
ref_embedding = ref_embedding.transpose(1, 2).transpose(0, 1).contiguous().view(L * B, ref_embedding.size(1), D)
batch_size = ref_embedding.size(0)
sim = torch.bmm(hyp_embedding, ref_embedding.transpose(1, 2))
masks = torch.bmm(hyp_masks.unsqueeze(2).float(), ref_masks.unsqueeze(1).float())
if all_layers:
masks = masks.unsqueeze(0).expand(L, -1, -1, -1).contiguous().view_as(sim)
else:
masks = masks.expand(batch_size, -1, -1).contiguous().view_as(sim)
masks = masks.float().to(sim.device)
sim = sim * masks
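    # Greedy matching: each candidate token is aligned to its most similar reference
    # token (precision direction) and each reference token to its most similar candidate
    # token (recall direction); the alignments are then combined with idf weights below.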
word_precision = sim.max(dim=2)[0]
word_recall = sim.max(dim=1)[0]
hyp_idf.div_(hyp_idf.sum(dim=1, keepdim=True))
ref_idf.div_(ref_idf.sum(dim=1, keepdim=True))
precision_scale = hyp_idf.to(word_precision.device)
recall_scale = ref_idf.to(word_recall.device)
if all_layers:
precision_scale = precision_scale.unsqueeze(0).expand(L, B, -1).contiguous().view_as(word_precision)
recall_scale = recall_scale.unsqueeze(0).expand(L, B, -1).contiguous().view_as(word_recall)
P = (word_precision * precision_scale).sum(dim=1)
R = (word_recall * recall_scale).sum(dim=1)
F = 2 * P * R / (P + R)
hyp_zero_mask = hyp_masks.sum(dim=1).eq(2)
ref_zero_mask = ref_masks.sum(dim=1).eq(2)
if all_layers:
P = P.view(L, B)
R = R.view(L, B)
F = F.view(L, B)
if torch.any(hyp_zero_mask):
print(
"Warning: Empty candidate sentence detected; setting raw BERTscores to 0.", file=sys.stderr,
)
P = P.masked_fill(hyp_zero_mask, 0.0)
R = R.masked_fill(hyp_zero_mask, 0.0)
if torch.any(ref_zero_mask):
print("Warning: Empty reference sentence detected; setting raw BERTScores to 0.", file=sys.stderr)
P = P.masked_fill(ref_zero_mask, 0.0)
R = R.masked_fill(ref_zero_mask, 0.0)
F = F.masked_fill(torch.isnan(F), 0.0)
return P, R, F
def bert_cos_score_idf(
model, refs, hyps, tokenizer, idf_dict, verbose=False, batch_size=64, device="cuda:0", all_layers=False,
):
"""
Compute BERTScore.
Args:
- :param: `model` : a BERT model in `pytorch_pretrained_bert`
- :param: `refs` (list of str): reference sentences
- :param: `hyps` (list of str): candidate sentences
        - :param: `tokenizer` : a BERT tokenizer corresponds to `model`
- :param: `idf_dict` : a dictionary mapping a word piece index to its
inverse document frequency
- :param: `verbose` (bool): turn on intermediate status update
- :param: `batch_size` (int): bert score processing batch size
- :param: `device` (str): device to use, e.g. 'cpu' or 'cuda'
"""
preds = []
def dedup_and_sort(l):
return sorted(list(set(l)), key=lambda x: len(x.split(" ")), reverse=True)
sentences = dedup_and_sort(refs + hyps)
embs = []
iter_range = range(0, len(sentences), batch_size)
if verbose:
print("computing bert embedding.")
iter_range = tqdm(iter_range)
stats_dict = dict()
for batch_start in iter_range:
sen_batch = sentences[batch_start : batch_start + batch_size]
embs, masks, padded_idf = get_bert_embedding(
sen_batch, model, tokenizer, idf_dict, device=device, all_layers=all_layers
)
embs = embs.cpu()
masks = masks.cpu()
padded_idf = padded_idf.cpu()
for i, sen in enumerate(sen_batch):
sequence_len = masks[i].sum().item()
emb = embs[i, :sequence_len]
idf = padded_idf[i, :sequence_len]
stats_dict[sen] = (emb, idf)
def pad_batch_stats(sen_batch, stats_dict, device):
stats = [stats_dict[s] for s in sen_batch]
emb, idf = zip(*stats)
emb = [e.to(device) for e in emb]
idf = [i.to(device) for i in idf]
lens = [e.size(0) for e in emb]
emb_pad = pad_sequence(emb, batch_first=True, padding_value=2.0)
idf_pad = pad_sequence(idf, batch_first=True)
def length_to_mask(lens):
lens = torch.tensor(lens, dtype=torch.long)
max_len = max(lens)
base = torch.arange(max_len, dtype=torch.long).expand(len(lens), max_len)
return base < lens.unsqueeze(1)
pad_mask = length_to_mask(lens).to(device)
return emb_pad, pad_mask, idf_pad
device = next(model.parameters()).device
iter_range = range(0, len(refs), batch_size)
if verbose:
print("computing greedy matching.")
iter_range = tqdm(iter_range)
with torch.no_grad():
for batch_start in iter_range:
batch_refs = refs[batch_start : batch_start + batch_size]
batch_hyps = hyps[batch_start : batch_start + batch_size]
ref_stats = pad_batch_stats(batch_refs, stats_dict, device)
hyp_stats = pad_batch_stats(batch_hyps, stats_dict, device)
P, R, F1 = greedy_cos_idf(*ref_stats, *hyp_stats, all_layers)
preds.append(torch.stack((P, R, F1), dim=-1).cpu())
preds = torch.cat(preds, dim=1 if all_layers else 0)
return preds
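# Note: each distinct sentence is embedded only once (after dedup_and_sort) and the
# cached (embedding, idf) pairs are re-padded per batch before greedy matching.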
def get_hash(model, num_layers, idf, rescale_with_baseline, use_custom_baseline, use_fast_tokenizer):
msg = "{}_L{}{}_version={}(hug_trans={})".format(
model, num_layers, "_idf" if idf else "_no-idf", __version__, trans_version
)
if rescale_with_baseline:
if use_custom_baseline:
msg += "-custom-rescaled"
else:
msg += "-rescaled"
if use_fast_tokenizer:
msg += "_fast-tokenizer"
return msg
def cache_scibert(model_type, cache_folder="~/.cache/torch/transformers"):
if not model_type.startswith("scibert"):
return model_type
underscore_model_type = model_type.replace("-", "_")
cache_folder = os.path.abspath(os.path.expanduser(cache_folder))
filename = os.path.join(cache_folder, underscore_model_type)
# download SciBERT models
if not os.path.exists(filename):
cmd = f"mkdir -p {cache_folder}; cd {cache_folder};"
cmd += f"wget {SCIBERT_URL_DICT[model_type]}; tar -xvf {underscore_model_type}.tar;"
cmd += (
f"rm -f {underscore_model_type}.tar ; cd {underscore_model_type}; tar -zxvf weights.tar.gz; mv weights/* .;"
)
cmd += f"rm -f weights.tar.gz; rmdir weights; mv bert_config.json config.json;"
print(cmd)
print(f"downloading {model_type} model")
os.system(cmd)
# fix the missing files in scibert
json_file = os.path.join(filename, "special_tokens_map.json")
if not os.path.exists(json_file):
with open(json_file, "w") as f:
print(
'{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}',
file=f,
)
json_file = os.path.join(filename, "added_tokens.json")
if not os.path.exists(json_file):
with open(json_file, "w") as f:
print("{}", file=f)
if "uncased" in model_type:
json_file = os.path.join(filename, "tokenizer_config.json")
if not os.path.exists(json_file):
with open(json_file, "w") as f:
print('{"do_lower_case": true, "max_len": 512, "init_inputs": []}', file=f)
return filename
| 28,789 | 44.553797 | 173 | py |