text | id | metadata | __index_level_0__
---|---|---|---
stringlengths 3 to 11.2M | stringlengths 15 to 188 | dict | int64 0 to 275
import os
import cv2
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
class Lane:
def __init__(self, points=None, invalid_value=-2., metadata=None):
super(Lane, self).__init__()
self.curr_iter = 0
self.points = points
self.invalid_value = invalid_value
self.function = InterpolatedUnivariateSpline(
points[:, 1], points[:, 0], k=min(3, len(points) - 1))
self.min_y = points[:, 1].min() - 0.01
self.max_y = points[:, 1].max() + 0.01
self.metadata = metadata or {}
def __repr__(self):
return '[Lane]\n' + str(self.points) + '\n[/Lane]'
def __call__(self, lane_ys):
lane_xs = self.function(lane_ys)
lane_xs[(lane_ys < self.min_y) | (lane_ys > self.max_y
)] = self.invalid_value
return lane_xs
def to_array(self, sample_y_range, img_w, img_h):
self.sample_y = range(sample_y_range[0], sample_y_range[1],
sample_y_range[2])
sample_y = self.sample_y
ys = np.array(sample_y) / float(img_h)
xs = self(ys)
valid_mask = (xs >= 0) & (xs < 1)
lane_xs = xs[valid_mask] * img_w
lane_ys = ys[valid_mask] * img_h
lane = np.concatenate(
(lane_xs.reshape(-1, 1), lane_ys.reshape(-1, 1)), axis=1)
return lane
def __iter__(self):
return self
def __next__(self):
if self.curr_iter < len(self.points):
self.curr_iter += 1
return self.points[self.curr_iter - 1]
self.curr_iter = 0
raise StopIteration
COLORS = [
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255),
(128, 255, 0),
(255, 128, 0),
(128, 0, 255),
(255, 0, 128),
(0, 128, 255),
(0, 255, 128),
(128, 255, 255),
(255, 128, 255),
(255, 255, 128),
(60, 180, 0),
(180, 60, 0),
(0, 60, 180),
(0, 180, 60),
(60, 0, 180),
(180, 0, 60),
(255, 0, 0),
(0, 255, 0),
(0, 0, 255),
(255, 255, 0),
(255, 0, 255),
(0, 255, 255),
(128, 255, 0),
(255, 128, 0),
(128, 0, 255),
]
def imshow_lanes(img, lanes, show=False, out_file=None, width=4):
lanes_xys = []
for _, lane in enumerate(lanes):
xys = []
for x, y in lane:
if x <= 0 or y <= 0:
continue
x, y = int(x), int(y)
xys.append((x, y))
lanes_xys.append(xys)
lanes_xys.sort(key=lambda xys: xys[0][0] if len(xys) > 0 else 0)
for idx, xys in enumerate(lanes_xys):
for i in range(1, len(xys)):
cv2.line(img, xys[i - 1], xys[i], COLORS[idx], thickness=width)
if show:
cv2.imshow('view', img)
cv2.waitKey(0)
if out_file:
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
cv2.imwrite(out_file, img)
| PaddleDetection/ppdet/modeling/lane_utils.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/lane_utils.py",
"repo_id": "PaddleDetection",
"token_count": 1627
} | 79 |
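A minimal usage sketch for the Lane/imshow_lanes utilities above. The import path follows the file_path in the metadata; the lane points, image size, and output path are invented for illustration only.
import numpy as np
from ppdet.modeling.lane_utils import Lane, imshow_lanes
# Hypothetical lane given as (x, y) pixel points, sorted by y so that the
# spline fit in Lane.__init__ (x = f(y)) is well defined.
points = np.array([[320., 360.], [340., 420.], [365., 480.], [395., 540.]])
# Lane expects normalized coordinates when used together with to_array().
lane = Lane(points=points / np.array([640., 720.]))
# Resample the lane every 10 rows between y=360 and y=720 for an assumed 640x720 image.
lane_xy = lane.to_array(sample_y_range=(360, 720, 10), img_w=640, img_h=720)
print(lane_xy.shape)  # (num_valid_rows, 2) -> (x, y) pixel pairs
# Draw the resampled lane on a blank canvas and write it to disk.
canvas = np.zeros((720, 640, 3), dtype=np.uint8)
imshow_lanes(canvas, [lane_xy], show=False, out_file='output/lane_vis.jpg')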
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from itertools import cycle, islice
from collections import abc
import cv2
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
from ppdet.utils.logger import setup_logger
logger = setup_logger('ppdet.engine')
__all__ = ['Pose3DLoss']
@register
@serializable
class Pose3DLoss(nn.Layer):
def __init__(self, weight_3d=1.0, weight_2d=0.0, reduction='none'):
"""
KeyPointMSELoss layer
Args:
weight_3d (float): weight of 3d loss
weight_2d (float): weight of 2d loss
reduction (bool): whether use reduction to loss
"""
super(Pose3DLoss, self).__init__()
self.weight_3d = weight_3d
self.weight_2d = weight_2d
self.criterion_2dpose = nn.MSELoss(reduction=reduction)
self.criterion_3dpose = nn.L1Loss(reduction=reduction)
self.criterion_smoothl1 = nn.SmoothL1Loss(
reduction=reduction, delta=1.0)
self.criterion_vertices = nn.L1Loss()
def forward(self, pred3d, pred2d, inputs):
"""
mpjpe: mpjpe loss between 3d joints
keypoint_2d_loss: 2d joints loss computed by criterion_2dpose
"""
gt_3d_joints = inputs['joints_3d']
gt_2d_joints = inputs['joints_2d']
has_3d_joints = inputs['has_3d_joints']
has_2d_joints = inputs['has_2d_joints']
loss_3d = mpjpe_focal(pred3d, gt_3d_joints, has_3d_joints)
loss = self.weight_3d * loss_3d
epoch = inputs['epoch_id']
if self.weight_2d > 0:
weight = self.weight_2d * pow(0.1, (epoch // 8))
if epoch > 8:
weight = 0
loss_2d = keypoint_2d_loss(self.criterion_2dpose, pred2d,
gt_2d_joints, has_2d_joints)
loss += weight * loss_2d
return loss
def filter_3d_joints(pred, gt, has_3d_joints):
"""
filter 3d joints
"""
gt = gt[has_3d_joints == 1]
gt = gt[:, :, :3]
pred = pred[has_3d_joints == 1]
gt_pelvis = (gt[:, 2, :] + gt[:, 3, :]) / 2
gt = gt - gt_pelvis[:, None, :]
pred_pelvis = (pred[:, 2, :] + pred[:, 3, :]) / 2
pred = pred - pred_pelvis[:, None, :]
return pred, gt
def mpjpe(pred, gt, has_3d_joints):
"""
mPJPE loss
"""
pred, gt = filter_3d_joints(pred, gt, has_3d_joints)
error = paddle.sqrt((paddle.minimum((pred - gt), paddle.to_tensor(1.2))**2
).sum(axis=-1)).mean()
return error
def mpjpe_focal(pred, gt, has_3d_joints):
"""
mPJPE loss
"""
pred, gt = filter_3d_joints(pred, gt, has_3d_joints)
mse_error = ((pred - gt)**2).sum(axis=-1)
mpjpe_error = paddle.sqrt(mse_error)
mean = mpjpe_error.mean()
std = mpjpe_error.std()
atte = 2 * F.sigmoid(6 * (mpjpe_error - mean) / std)
mse_error *= atte
return mse_error.mean()
def mpjpe_mse(pred, gt, has_3d_joints, weight=1.):
"""
mPJPE loss
"""
pred, gt = filter_3d_joints(pred, gt, has_3d_joints)
error = (((pred - gt)**2).sum(axis=-1)).mean()
return error
def mpjpe_criterion(pred, gt, has_3d_joints, criterion_pose3d):
"""
mPJPE loss of self define criterion
"""
pred, gt = filter_3d_joints(pred, gt, has_3d_joints)
error = paddle.sqrt(criterion_pose3d(pred, gt)).mean()
return error
@register
@serializable
def weighted_mpjpe(pred, gt, has_3d_joints):
"""
Weighted_mPJPE
"""
pred, gt = filter_3d_joints(pred, gt, has_3d_joints)
weight = paddle.linalg.norm(pred, p=2, axis=-1)
weight = paddle.to_tensor(
[1.5, 1.3, 1.2, 1.2, 1.3, 1.5, 1.5, 1.3, 1.2, 1.2, 1.3, 1.5, 1., 1.])
error = (weight * paddle.linalg.norm(pred - gt, p=2, axis=-1)).mean()
return error
@register
@serializable
def normed_mpjpe(pred, gt, has_3d_joints):
"""
Normalized MPJPE (scale only), adapted from:
https://github.com/hrhodin/UnsupervisedGeometryAwareRepresentationLearning/blob/master/losses/poses.py
"""
assert pred.shape == gt.shape
pred, gt = filter_3d_joints(pred, gt, has_3d_joints)
norm_predicted = paddle.mean(
paddle.sum(pred**2, axis=-1, keepdim=True), axis=-2, keepdim=True)
norm_target = paddle.mean(
paddle.sum(gt * pred, axis=-1, keepdim=True), axis=-2, keepdim=True)
scale = norm_target / norm_predicted
# joints are already filtered and pelvis-centered above, so compute the
# per-joint error directly instead of re-filtering through mpjpe()
error = paddle.sqrt(((scale * pred - gt)**2).sum(axis=-1)).mean()
return error
@register
@serializable
def mpjpe_np(pred, gt, has_3d_joints):
"""
mPJPE_NP
"""
pred, gt = filter_3d_joints(pred, gt, has_3d_joints)
error = np.sqrt(((pred - gt)**2).sum(axis=-1)).mean()
return error
@register
@serializable
def mean_per_vertex_error(pred, gt, has_smpl):
"""
Compute mPVE
"""
pred = pred[has_smpl == 1]
gt = gt[has_smpl == 1]
with paddle.no_grad():
error = paddle.sqrt(((pred - gt)**2).sum(axis=-1)).mean()
return error
@register
@serializable
def keypoint_2d_loss(criterion_keypoints, pred_keypoints_2d, gt_keypoints_2d,
has_pose_2d):
"""
Compute 2D reprojection loss if 2D keypoint annotations are available.
The confidence (conf) is binary and indicates whether the keypoints exist or not.
"""
conf = gt_keypoints_2d[:, :, -1].unsqueeze(-1).clone()
loss = (conf * criterion_keypoints(
pred_keypoints_2d, gt_keypoints_2d[:, :, :-1] * 0.001)).mean()
return loss
@register
@serializable
def keypoint_3d_loss(criterion_keypoints, pred_keypoints_3d, gt_keypoints_3d,
has_pose_3d):
"""
Compute 3D keypoint loss if 3D keypoint annotations are available.
"""
conf = gt_keypoints_3d[:, :, -1].unsqueeze(-1).clone()
gt_keypoints_3d = gt_keypoints_3d[:, :, :-1].clone()
gt_keypoints_3d = gt_keypoints_3d[has_pose_3d == 1]
conf = conf[has_pose_3d == 1]
pred_keypoints_3d = pred_keypoints_3d[has_pose_3d == 1]
if len(gt_keypoints_3d) > 0:
gt_pelvis = (gt_keypoints_3d[:, 2, :] + gt_keypoints_3d[:, 3, :]) / 2
gt_keypoints_3d = gt_keypoints_3d - gt_pelvis[:, None, :]
pred_pelvis = (
pred_keypoints_3d[:, 2, :] + pred_keypoints_3d[:, 3, :]) / 2
pred_keypoints_3d = pred_keypoints_3d - pred_pelvis[:, None, :]
return (conf * criterion_keypoints(pred_keypoints_3d,
gt_keypoints_3d)).mean()
else:
return paddle.to_tensor([1.]).fill_(0.)
@register
@serializable
def vertices_loss(criterion_vertices, pred_vertices, gt_vertices, has_smpl):
"""
Compute per-vertex loss if vertex annotations are available.
"""
pred_vertices_with_shape = pred_vertices[has_smpl == 1]
gt_vertices_with_shape = gt_vertices[has_smpl == 1]
if len(gt_vertices_with_shape) > 0:
return criterion_vertices(pred_vertices_with_shape,
gt_vertices_with_shape)
else:
return paddle.to_tensor([1.]).fill_(0.)
@register
@serializable
def rectify_pose(pose):
pose = pose.copy()
R_mod = cv2.Rodrigues(np.array([np.pi, 0, 0]))[0]
R_root = cv2.Rodrigues(pose[:3])[0]
new_root = R_root.dot(R_mod)
pose[:3] = cv2.Rodrigues(new_root)[0].reshape(3)
return pose
| PaddleDetection/ppdet/modeling/losses/pose3d_loss.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/losses/pose3d_loss.py",
"repo_id": "PaddleDetection",
"token_count": 3750
} | 80 |
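A short, hedged sketch of the standalone loss functions above on random tensors. The module path is taken from the metadata; batch size, joint count, and all tensor values are made up, and the pelvis convention (joints 2 and 3) follows filter_3d_joints.
import paddle
from ppdet.modeling.losses.pose3d_loss import mpjpe, keypoint_2d_loss
paddle.seed(0)
batch, num_joints = 4, 14  # assumed sizes, matching the 14-joint weights used above
# 3D ground truth carries (x, y, z, conf); filter_3d_joints keeps only xyz
# for the samples whose has_3d flag equals 1.
gt_3d = paddle.rand([batch, num_joints, 4])
pred_3d = paddle.rand([batch, num_joints, 3])
has_3d = paddle.to_tensor([1, 1, 0, 1])
print(float(mpjpe(pred_3d, gt_3d, has_3d)))
# 2D ground truth carries (x, y, conf); the loss is weighted by the confidence column.
gt_2d = paddle.rand([batch, num_joints, 3])
pred_2d = paddle.rand([batch, num_joints, 2])
criterion = paddle.nn.MSELoss(reduction='none')
print(float(keypoint_2d_loss(criterion, pred_2d, gt_2d, None)))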
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import math
import paddle
import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn.initializer import Uniform
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
from ppdet.modeling.layers import ConvNormLayer
from ppdet.modeling.backbones.hardnet import ConvLayer, HarDBlock
from ..shape_spec import ShapeSpec
__all__ = ['CenterNetDLAFPN', 'CenterNetHarDNetFPN']
# SGE attention
class BasicConv(nn.Layer):
def __init__(self,
in_planes,
out_planes,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
relu=True,
bn=True,
bias_attr=False):
super(BasicConv, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2D(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias_attr=bias_attr)
self.bn = nn.BatchNorm2D(
out_planes,
epsilon=1e-5,
momentum=0.01,
weight_attr=False,
bias_attr=False) if bn else None
self.relu = nn.ReLU() if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class ChannelPool(nn.Layer):
def forward(self, x):
return paddle.concat(
(paddle.max(x, 1).unsqueeze(1), paddle.mean(x, 1).unsqueeze(1)),
axis=1)
class SpatialGate(nn.Layer):
def __init__(self):
super(SpatialGate, self).__init__()
kernel_size = 7
self.compress = ChannelPool()
self.spatial = BasicConv(
2,
1,
kernel_size,
stride=1,
padding=(kernel_size - 1) // 2,
relu=False)
def forward(self, x):
x_compress = self.compress(x)
x_out = self.spatial(x_compress)
scale = F.sigmoid(x_out) # broadcasting
return x * scale
def fill_up_weights(up):
weight = up.weight.numpy()
f = math.ceil(weight.shape[2] / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(weight.shape[2]):
for j in range(weight.shape[3]):
weight[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, weight.shape[0]):
weight[c, 0, :, :] = weight[0, 0, :, :]
up.weight.set_value(weight)
class IDAUp(nn.Layer):
def __init__(self, ch_ins, ch_out, up_strides, dcn_v2=True):
super(IDAUp, self).__init__()
for i in range(1, len(ch_ins)):
ch_in = ch_ins[i]
up_s = int(up_strides[i])
fan_in = ch_in * 3 * 3
stdv = 1. / math.sqrt(fan_in)
proj = nn.Sequential(
ConvNormLayer(
ch_in,
ch_out,
filter_size=3,
stride=1,
use_dcn=dcn_v2,
bias_on=dcn_v2,
norm_decay=None,
dcn_lr_scale=1.,
dcn_regularizer=None,
initializer=Uniform(-stdv, stdv)),
nn.ReLU())
node = nn.Sequential(
ConvNormLayer(
ch_out,
ch_out,
filter_size=3,
stride=1,
use_dcn=dcn_v2,
bias_on=dcn_v2,
norm_decay=None,
dcn_lr_scale=1.,
dcn_regularizer=None,
initializer=Uniform(-stdv, stdv)),
nn.ReLU())
kernel_size = up_s * 2
fan_in = ch_out * kernel_size * kernel_size
stdv = 1. / math.sqrt(fan_in)
up = nn.Conv2DTranspose(
ch_out,
ch_out,
kernel_size=up_s * 2,
stride=up_s,
padding=up_s // 2,
groups=ch_out,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=False)
fill_up_weights(up)
setattr(self, 'proj_' + str(i), proj)
setattr(self, 'up_' + str(i), up)
setattr(self, 'node_' + str(i), node)
def forward(self, inputs, start_level, end_level):
for i in range(start_level + 1, end_level):
upsample = getattr(self, 'up_' + str(i - start_level))
project = getattr(self, 'proj_' + str(i - start_level))
inputs[i] = project(inputs[i])
inputs[i] = upsample(inputs[i])
node = getattr(self, 'node_' + str(i - start_level))
inputs[i] = node(paddle.add(inputs[i], inputs[i - 1]))
return inputs
class DLAUp(nn.Layer):
def __init__(self, start_level, channels, scales, ch_in=None, dcn_v2=True):
super(DLAUp, self).__init__()
self.start_level = start_level
if ch_in is None:
ch_in = channels
self.channels = channels
channels = list(channels)
scales = np.array(scales, dtype=int)
for i in range(len(channels) - 1):
j = -i - 2
setattr(
self,
'ida_{}'.format(i),
IDAUp(
ch_in[j:],
channels[j],
scales[j:] // scales[j],
dcn_v2=dcn_v2))
scales[j + 1:] = scales[j]
ch_in[j + 1:] = [channels[j] for _ in channels[j + 1:]]
def forward(self, inputs):
out = [inputs[-1]] # start with 32
for i in range(len(inputs) - self.start_level - 1):
ida = getattr(self, 'ida_{}'.format(i))
outputs = ida(inputs, len(inputs) - i - 2, len(inputs))
out.insert(0, outputs[-1])
return out
@register
@serializable
class CenterNetDLAFPN(nn.Layer):
"""
Args:
in_channels (list): number of input feature channels from backbone.
[16, 32, 64, 128, 256, 512] by default, means the channels of DLA-34
down_ratio (int): the down ratio from images to heatmap, 4 by default
last_level (int): the last level of input feature fed into the upsampling block
out_channel (int): the channel of the output feature, 0 by default means
the channel of the input feature whose down ratio is `down_ratio`
first_level (int|None): the first level of input feature fed into the upsampling block.
if None, the first level stands for log2(down_ratio)
dcn_v2 (bool): whether to use DCNv2, True by default
with_sge (bool): whether use SGE attention, False by default
"""
def __init__(self,
in_channels,
down_ratio=4,
last_level=5,
out_channel=0,
first_level=None,
dcn_v2=True,
with_sge=False):
super(CenterNetDLAFPN, self).__init__()
self.first_level = int(np.log2(
down_ratio)) if first_level is None else first_level
assert self.first_level >= 0, "first level in CenterNetDLAFPN should be greater or equal to 0, but received {}".format(
self.first_level)
self.down_ratio = down_ratio
self.last_level = last_level
scales = [2**i for i in range(len(in_channels[self.first_level:]))]
self.dla_up = DLAUp(
self.first_level,
in_channels[self.first_level:],
scales,
dcn_v2=dcn_v2)
self.out_channel = out_channel
if out_channel == 0:
self.out_channel = in_channels[self.first_level]
self.ida_up = IDAUp(
in_channels[self.first_level:self.last_level],
self.out_channel,
[2**i for i in range(self.last_level - self.first_level)],
dcn_v2=dcn_v2)
self.with_sge = with_sge
if self.with_sge:
self.sge_attention = SpatialGate()
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape]}
def forward(self, body_feats):
inputs = [body_feats[i] for i in range(len(body_feats))]
dla_up_feats = self.dla_up(inputs)
ida_up_feats = []
for i in range(self.last_level - self.first_level):
ida_up_feats.append(dla_up_feats[i].clone())
self.ida_up(ida_up_feats, 0, len(ida_up_feats))
feat = ida_up_feats[-1]
if self.with_sge:
feat = self.sge_attention(feat)
if self.down_ratio != 4:
feat = F.interpolate(
feat,
scale_factor=self.down_ratio // 4,
mode="bilinear",
align_corners=True)
return feat
@property
def out_shape(self):
return [ShapeSpec(channels=self.out_channel, stride=self.down_ratio)]
class TransitionUp(nn.Layer):
def __init__(self, in_channels, out_channels):
super().__init__()
def forward(self, x, skip):
h, w = skip.shape[2], skip.shape[3]
out = F.interpolate(x, size=(h, w), mode="bilinear", align_corners=True)
out = paddle.concat([out, skip], 1)
return out
@register
@serializable
class CenterNetHarDNetFPN(nn.Layer):
"""
Args:
in_channels (list): number of input feature channels from backbone.
[96, 214, 458, 784] by default, means the channels of HarDNet85
num_layers (int): number of HarDNet layers, 85 by default
down_ratio (int): the down ratio from images to heatmap, 4 by default
first_level (int|None): the first level of input feature fed into the upsampling block.
if None, the first level stands for log2(down_ratio) - 1
last_level (int): the last level of input feature fed into the upsampling block
out_channel (int): the channel of the output feature, 0 by default means
the channel of the input feature whose down ratio is `down_ratio`
"""
def __init__(self,
in_channels,
num_layers=85,
down_ratio=4,
first_level=None,
last_level=4,
out_channel=0):
super(CenterNetHarDNetFPN, self).__init__()
self.first_level = int(np.log2(
down_ratio)) - 1 if first_level is None else first_level
assert self.first_level >= 0, "first level in CenterNetHarDNetFPN should be greater or equal to 0, but received {}".format(
self.first_level)
self.down_ratio = down_ratio
self.last_level = last_level
self.last_pool = nn.AvgPool2D(kernel_size=2, stride=2)
assert num_layers in [68, 85], "HarDNet-{} is not supported.".format(
num_layers)
if num_layers == 85:
self.last_proj = ConvLayer(784, 256, kernel_size=1)
self.last_blk = HarDBlock(768, 80, 1.7, 8)
self.skip_nodes = [1, 3, 8, 13]
self.SC = [32, 32, 0]
gr = [64, 48, 28]
layers = [8, 8, 4]
ch_list2 = [224 + self.SC[0], 160 + self.SC[1], 96 + self.SC[2]]
channels = [96, 214, 458, 784]
self.skip_lv = 3
elif num_layers == 68:
self.last_proj = ConvLayer(654, 192, kernel_size=1)
self.last_blk = HarDBlock(576, 72, 1.7, 8)
self.skip_nodes = [1, 3, 8, 11]
self.SC = [32, 32, 0]
gr = [48, 32, 20]
layers = [8, 8, 4]
ch_list2 = [224 + self.SC[0], 96 + self.SC[1], 64 + self.SC[2]]
channels = [64, 124, 328, 654]
self.skip_lv = 2
self.transUpBlocks = nn.LayerList([])
self.denseBlocksUp = nn.LayerList([])
self.conv1x1_up = nn.LayerList([])
self.avg9x9 = nn.AvgPool2D(kernel_size=(9, 9), stride=1, padding=(4, 4))
prev_ch = self.last_blk.get_out_ch()
for i in range(3):
skip_ch = channels[3 - i]
self.transUpBlocks.append(TransitionUp(prev_ch, prev_ch))
if i < self.skip_lv:
cur_ch = prev_ch + skip_ch
else:
cur_ch = prev_ch
self.conv1x1_up.append(
ConvLayer(
cur_ch, ch_list2[i], kernel_size=1))
cur_ch = ch_list2[i]
cur_ch -= self.SC[i]
cur_ch *= 3
blk = HarDBlock(cur_ch, gr[i], 1.7, layers[i])
self.denseBlocksUp.append(blk)
prev_ch = blk.get_out_ch()
prev_ch += self.SC[0] + self.SC[1] + self.SC[2]
self.out_channel = prev_ch
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape]}
def forward(self, body_feats):
x = body_feats[-1]
x_sc = []
x = self.last_proj(x)
x = self.last_pool(x)
x2 = self.avg9x9(x)
x3 = x / (x.sum((2, 3), keepdim=True) + 0.1)
x = paddle.concat([x, x2, x3], 1)
x = self.last_blk(x)
for i in range(3):
skip_x = body_feats[3 - i]
x_up = self.transUpBlocks[i](x, skip_x)
x_ch = self.conv1x1_up[i](x_up)
if self.SC[i] > 0:
end = x_ch.shape[1]
new_st = end - self.SC[i]
x_sc.append(x_ch[:, new_st:, :, :])
x_ch = x_ch[:, :new_st, :, :]
x2 = self.avg9x9(x_ch)
x3 = x_ch / (x_ch.sum((2, 3), keepdim=True) + 0.1)
x_new = paddle.concat([x_ch, x2, x3], 1)
x = self.denseBlocksUp[i](x_new)
scs = [x]
for i in range(3):
if self.SC[i] > 0:
scs.insert(
0,
F.interpolate(
x_sc[i],
size=(x.shape[2], x.shape[3]),
mode="bilinear",
align_corners=True))
neck_feat = paddle.concat(scs, 1)
return neck_feat
@property
def out_shape(self):
return [ShapeSpec(channels=self.out_channel, stride=self.down_ratio)]
| PaddleDetection/ppdet/modeling/necks/centernet_fpn.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/necks/centernet_fpn.py",
"repo_id": "PaddleDetection",
"token_count": 8048
} | 81 |
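A small self-contained check of fill_up_weights from the file above: it initializes a depthwise transposed convolution with a bilinear kernel, so away from the borders its output should be very close to bilinear interpolation. The layer hyper-parameters mirror IDAUp; the feature sizes are made up.
import paddle
import paddle.nn.functional as F
from ppdet.modeling.necks.centernet_fpn import fill_up_weights
ch, up_s = 3, 2
up = paddle.nn.Conv2DTranspose(
    ch, ch, kernel_size=up_s * 2, stride=up_s, padding=up_s // 2,
    groups=ch, bias_attr=False)
fill_up_weights(up)
x = paddle.rand([1, ch, 8, 8])
y_deconv = up(x)
y_interp = F.interpolate(x, scale_factor=up_s, mode='bilinear', align_corners=False)
# Interior pixels should nearly match; borders differ because the transposed
# convolution zero-pads while interpolate clamps to the edge.
print(float((y_deconv[:, :, 2:-2, 2:-2] - y_interp[:, :, 2:-2, 2:-2]).abs().max()))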
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is referenced from: https://github.com/open-mmlab/mmdetection
import paddle
from paddle import nn
from ppdet.core.workspace import register
__all__ = ['EmbeddingRPNHead']
@register
class EmbeddingRPNHead(nn.Layer):
__shared__ = ['proposal_embedding_dim']
def __init__(self, num_proposals, proposal_embedding_dim=256):
super(EmbeddingRPNHead, self).__init__()
self.num_proposals = num_proposals
self.proposal_embedding_dim = proposal_embedding_dim
self._init_layers()
self._init_weights()
def _init_layers(self):
self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
self.init_proposal_features = nn.Embedding(self.num_proposals,
self.proposal_embedding_dim)
def _init_weights(self):
init_bboxes = paddle.empty_like(self.init_proposal_bboxes.weight)
init_bboxes[:, :2] = 0.5
init_bboxes[:, 2:] = 1.0
self.init_proposal_bboxes.weight.set_value(init_bboxes)
@staticmethod
def bbox_cxcywh_to_xyxy(x):
cxcy, wh = paddle.split(x, 2, axis=-1)
return paddle.concat([cxcy - 0.5 * wh, cxcy + 0.5 * wh], axis=-1)
def forward(self, img_whwh):
proposal_bboxes = self.init_proposal_bboxes.weight.clone()
proposal_bboxes = self.bbox_cxcywh_to_xyxy(proposal_bboxes)
proposal_bboxes = proposal_bboxes.unsqueeze(0) * img_whwh.unsqueeze(1)
proposal_features = self.init_proposal_features.weight.clone()
proposal_features = proposal_features.unsqueeze(0).tile(
[img_whwh.shape[0], 1, 1])
return proposal_bboxes, proposal_features
| PaddleDetection/ppdet/modeling/proposal_generator/embedding_rpn_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/proposal_generator/embedding_rpn_head.py",
"repo_id": "PaddleDetection",
"token_count": 925
} | 82 |
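A hedged usage sketch of EmbeddingRPNHead above: the learnable proposal boxes are stored as normalized (cx, cy, w, h) and scaled to each image by img_whwh. The import path follows the metadata; the image sizes are invented.
import paddle
from ppdet.modeling.proposal_generator.embedding_rpn_head import EmbeddingRPNHead
head = EmbeddingRPNHead(num_proposals=100, proposal_embedding_dim=256)
# img_whwh holds (w, h, w, h) per image.
img_whwh = paddle.to_tensor([[800., 600., 800., 600.],
                             [640., 480., 640., 480.]])
boxes, feats = head(img_whwh)
print(boxes.shape)  # [2, 100, 4], initial proposals scaled to each image
print(feats.shape)  # [2, 100, 256], learnable proposal features
# The static helper converts (cx, cy, w, h) to (x1, y1, x2, y2).
print(EmbeddingRPNHead.bbox_cxcywh_to_xyxy(paddle.to_tensor([[0.5, 0.5, 1.0, 1.0]])))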
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn.functional as F
def align_weak_strong_shape(data_weak, data_strong):
max_shape_x = max(data_strong['image'].shape[2],
data_weak['image'].shape[2])
max_shape_y = max(data_strong['image'].shape[3],
data_weak['image'].shape[3])
scale_x_s = max_shape_x / data_strong['image'].shape[2]
scale_y_s = max_shape_y / data_strong['image'].shape[3]
scale_x_w = max_shape_x / data_weak['image'].shape[2]
scale_y_w = max_shape_y / data_weak['image'].shape[3]
target_size = [max_shape_x, max_shape_y]
if scale_x_s != 1 or scale_y_s != 1:
data_strong['image'] = F.interpolate(
data_strong['image'],
size=target_size,
mode='bilinear',
align_corners=False)
if 'gt_bbox' in data_strong:
gt_bboxes = data_strong['gt_bbox'].numpy()
for i in range(len(gt_bboxes)):
if len(gt_bboxes[i]) > 0:
gt_bboxes[i][:, 0::2] = gt_bboxes[i][:, 0::2] * scale_x_s
gt_bboxes[i][:, 1::2] = gt_bboxes[i][:, 1::2] * scale_y_s
data_strong['gt_bbox'] = paddle.to_tensor(gt_bboxes)
if scale_x_w != 1 or scale_y_w != 1:
data_weak['image'] = F.interpolate(
data_weak['image'],
size=target_size,
mode='bilinear',
align_corners=False)
if 'gt_bbox' in data_weak:
gt_bboxes = data_weak['gt_bbox'].numpy()
for i in range(len(gt_bboxes)):
if len(gt_bboxes[i]) > 0:
gt_bboxes[i][:, 0::2] = gt_bboxes[i][:, 0::2] * scale_x_w
gt_bboxes[i][:, 1::2] = gt_bboxes[i][:, 1::2] * scale_y_w
data_weak['gt_bbox'] = paddle.to_tensor(gt_bboxes)
return data_weak, data_strong
def QFLv2(pred_sigmoid,
teacher_sigmoid,
weight=None,
beta=2.0,
reduction='mean'):
pt = pred_sigmoid
zerolabel = paddle.zeros_like(pt)
loss = F.binary_cross_entropy(
pred_sigmoid, zerolabel, reduction='none') * pt.pow(beta)
pos = weight > 0
pt = teacher_sigmoid[pos] - pred_sigmoid[pos]
loss[pos] = F.binary_cross_entropy(
pred_sigmoid[pos], teacher_sigmoid[pos],
reduction='none') * pt.pow(beta)
valid = weight >= 0
if reduction == "mean":
loss = loss[valid].mean()
elif reduction == "sum":
loss = loss[valid].sum()
return loss
def filter_invalid(bbox, label=None, score=None, thr=0.0, min_size=0):
if score.numel() > 0:
soft_score = score.max(-1)
valid = soft_score >= thr
bbox = bbox[valid]
if label is not None:
label = label[valid]
score = score[valid]
if min_size is not None and bbox.shape[0] > 0:
bw = bbox[:, 2]
bh = bbox[:, 3]
valid = (bw > min_size) & (bh > min_size)
bbox = bbox[valid]
if label is not None:
label = label[valid]
score = score[valid]
return bbox, label, score
| PaddleDetection/ppdet/modeling/ssod/utils.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/ssod/utils.py",
"repo_id": "PaddleDetection",
"token_count": 1779
} | 83 |
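A minimal sketch of QFLv2 above on random scores, to make the weight semantics explicit: positions with weight > 0 are supervised toward the teacher scores, weight == 0 keeps the background term, and weight < 0 drops the position from the reduction. All shapes and values are made up.
import paddle
import paddle.nn.functional as F
from ppdet.modeling.ssod.utils import QFLv2
paddle.seed(0)
num_anchors, num_classes = 6, 3
pred = F.sigmoid(paddle.randn([num_anchors, num_classes]))
teacher = F.sigmoid(paddle.randn([num_anchors, num_classes]))
weight = paddle.to_tensor(
    [[1.], [1.], [0.], [0.], [0.], [-1.]]).expand([num_anchors, num_classes])
loss = QFLv2(pred, teacher, weight=weight, beta=2.0, reduction='mean')
print(float(loss))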
from paddle.utils.cpp_extension import CUDAExtension, setup
if __name__ == "__main__":
setup(
name='deformable_detr_ops',
ext_modules=CUDAExtension(
sources=['ms_deformable_attn_op.cc', 'ms_deformable_attn_op.cu']))
| PaddleDetection/ppdet/modeling/transformers/ext_op/setup_ms_deformable_attn_op.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/transformers/ext_op/setup_ms_deformable_attn_op.py",
"repo_id": "PaddleDetection",
"token_count": 114
} | 84 |
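A hedged usage note for the build script above: running it with the standard setuptools install command (python setup_ms_deformable_attn_op.py install, executed inside the ext_op directory with a CUDA toolchain available) compiles the sources and installs a module named deformable_detr_ops, the name set in the setup call. The exposed op symbol is assumed here to be ms_deformable_attn, matching the source file names; a guarded import keeps code usable when the op is not built:
try:
    from deformable_detr_ops import ms_deformable_attn  # compiled custom op (assumed symbol name)
    print('ms_deformable_attn custom op is available')
except ImportError:
    print('custom op not built; the pure-Paddle fallback will be used')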
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from ppdet.core.workspace import register
from ppdet.modeling import ops
from ppdet.modeling.losses.iou_loss import GIoULoss
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = [
'DistillYOLOv3Loss',
'KnowledgeDistillationKLDivLoss',
'DistillPPYOLOELoss',
'FGDFeatureLoss',
'CWDFeatureLoss',
'PKDFeatureLoss',
'MGDFeatureLoss',
]
def parameter_init(mode="kaiming", value=0.):
if mode == "kaiming":
weight_attr = paddle.nn.initializer.KaimingUniform()
elif mode == "constant":
weight_attr = paddle.nn.initializer.Constant(value=value)
else:
weight_attr = paddle.nn.initializer.KaimingUniform()
weight_init = ParamAttr(initializer=weight_attr)
return weight_init
def feature_norm(feat):
# Normalize the feature map so each channel has zero mean and unit variance.
assert len(feat.shape) == 4
N, C, H, W = feat.shape
feat = feat.transpose([1, 0, 2, 3]).reshape([C, -1])
mean = feat.mean(axis=-1, keepdim=True)
std = feat.std(axis=-1, keepdim=True)
feat = (feat - mean) / (std + 1e-6)
return feat.reshape([C, N, H, W]).transpose([1, 0, 2, 3])
@register
class DistillYOLOv3Loss(nn.Layer):
def __init__(self, weight=1000):
super(DistillYOLOv3Loss, self).__init__()
self.loss_weight = weight
def obj_weighted_reg(self, sx, sy, sw, sh, tx, ty, tw, th, tobj):
loss_x = ops.sigmoid_cross_entropy_with_logits(sx, F.sigmoid(tx))
loss_y = ops.sigmoid_cross_entropy_with_logits(sy, F.sigmoid(ty))
loss_w = paddle.abs(sw - tw)
loss_h = paddle.abs(sh - th)
loss = paddle.add_n([loss_x, loss_y, loss_w, loss_h])
weighted_loss = paddle.mean(loss * F.sigmoid(tobj))
return weighted_loss
def obj_weighted_cls(self, scls, tcls, tobj):
loss = ops.sigmoid_cross_entropy_with_logits(scls, F.sigmoid(tcls))
weighted_loss = paddle.mean(paddle.multiply(loss, F.sigmoid(tobj)))
return weighted_loss
def obj_loss(self, sobj, tobj):
obj_mask = paddle.cast(tobj > 0., dtype="float32")
obj_mask.stop_gradient = True
loss = paddle.mean(
ops.sigmoid_cross_entropy_with_logits(sobj, obj_mask))
return loss
def forward(self, teacher_model, student_model):
teacher_distill_pairs = teacher_model.yolo_head.loss.distill_pairs
student_distill_pairs = student_model.yolo_head.loss.distill_pairs
distill_reg_loss, distill_cls_loss, distill_obj_loss = [], [], []
for s_pair, t_pair in zip(student_distill_pairs, teacher_distill_pairs):
distill_reg_loss.append(
self.obj_weighted_reg(s_pair[0], s_pair[1], s_pair[2], s_pair[
3], t_pair[0], t_pair[1], t_pair[2], t_pair[3], t_pair[4]))
distill_cls_loss.append(
self.obj_weighted_cls(s_pair[5], t_pair[5], t_pair[4]))
distill_obj_loss.append(self.obj_loss(s_pair[4], t_pair[4]))
distill_reg_loss = paddle.add_n(distill_reg_loss)
distill_cls_loss = paddle.add_n(distill_cls_loss)
distill_obj_loss = paddle.add_n(distill_obj_loss)
loss = (distill_reg_loss + distill_cls_loss + distill_obj_loss
) * self.loss_weight
return loss
@register
class KnowledgeDistillationKLDivLoss(nn.Layer):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert reduction in ('none', 'mean', 'sum')
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def knowledge_distillation_kl_div_loss(self,
pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
soft_label (Tensor): Target logits with shape (N, n + 1).
T (int): Temperature for distillation.
detach_target (bool): Remove soft_label from automatic differentiation.
"""
assert pred.shape == soft_label.shape
target = F.softmax(soft_label / T, axis=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(
pred / T, axis=1), target, reduction='none').mean(1) * (T * T)
return kd_loss
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
soft_label (Tensor): Target logits with shape (N, n + 1).
weight (Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (reduction_override
if reduction_override else self.reduction)
loss_kd_out = self.knowledge_distillation_kl_div_loss(
pred, soft_label, T=self.T)
if weight is not None:
loss_kd_out = weight * loss_kd_out
if avg_factor is None:
if reduction == 'none':
loss = loss_kd_out
elif reduction == 'mean':
loss = loss_kd_out.mean()
elif reduction == 'sum':
loss = loss_kd_out.sum()
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
loss = loss_kd_out.sum() / avg_factor
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError(
'avg_factor can not be used with reduction="sum"')
loss_kd = self.loss_weight * loss
return loss_kd
@register
class DistillPPYOLOELoss(nn.Layer):
def __init__(
self,
loss_weight={'logits': 4.0,
'feat': 1.0},
logits_distill=True,
logits_loss_weight={'class': 1.0,
'iou': 2.5,
'dfl': 0.5},
logits_ld_distill=False,
logits_ld_params={'weight': 20000,
'T': 10},
feat_distill=True,
feat_distiller='fgd',
feat_distill_place='neck_feats',
teacher_width_mult=1.0, # L
student_width_mult=0.75, # M
feat_out_channels=[768, 384, 192]):
super(DistillPPYOLOELoss, self).__init__()
self.loss_weight_logits = loss_weight['logits']
self.loss_weight_feat = loss_weight['feat']
self.logits_distill = logits_distill
self.logits_ld_distill = logits_ld_distill
self.feat_distill = feat_distill
if logits_distill and self.loss_weight_logits > 0:
self.bbox_loss_weight = logits_loss_weight['iou']
self.dfl_loss_weight = logits_loss_weight['dfl']
self.qfl_loss_weight = logits_loss_weight['class']
self.loss_bbox = GIoULoss()
if logits_ld_distill:
self.loss_kd = KnowledgeDistillationKLDivLoss(
loss_weight=logits_ld_params['weight'], T=logits_ld_params['T'])
if feat_distill and self.loss_weight_feat > 0:
assert feat_distiller in ['cwd', 'fgd', 'pkd', 'mgd', 'mimic']
assert feat_distill_place in ['backbone_feats', 'neck_feats']
self.feat_distill_place = feat_distill_place
self.t_channel_list = [
int(c * teacher_width_mult) for c in feat_out_channels
]
self.s_channel_list = [
int(c * student_width_mult) for c in feat_out_channels
]
self.distill_feat_loss_modules = []
for i in range(len(feat_out_channels)):
if feat_distiller == 'cwd':
feat_loss_module = CWDFeatureLoss(
student_channels=self.s_channel_list[i],
teacher_channels=self.t_channel_list[i],
normalize=True)
elif feat_distiller == 'fgd':
feat_loss_module = FGDFeatureLoss(
student_channels=self.s_channel_list[i],
teacher_channels=self.t_channel_list[i],
normalize=True,
alpha_fgd=0.00001,
beta_fgd=0.000005,
gamma_fgd=0.00001,
lambda_fgd=0.00000005)
elif feat_distiller == 'pkd':
feat_loss_module = PKDFeatureLoss(
student_channels=self.s_channel_list[i],
teacher_channels=self.t_channel_list[i],
normalize=True,
resize_stu=True)
elif feat_distiller == 'mgd':
feat_loss_module = MGDFeatureLoss(
student_channels=self.s_channel_list[i],
teacher_channels=self.t_channel_list[i],
normalize=True,
loss_func='ssim')
elif feat_distiller == 'mimic':
feat_loss_module = MimicFeatureLoss(
student_channels=self.s_channel_list[i],
teacher_channels=self.t_channel_list[i],
normalize=True)
else:
raise ValueError
self.distill_feat_loss_modules.append(feat_loss_module)
def quality_focal_loss(self,
pred_logits,
soft_target_logits,
beta=2.0,
use_sigmoid=False,
num_total_pos=None):
if use_sigmoid:
func = F.binary_cross_entropy_with_logits
soft_target = F.sigmoid(soft_target_logits)
pred_sigmoid = F.sigmoid(pred_logits)
preds = pred_logits
else:
func = F.binary_cross_entropy
soft_target = soft_target_logits
pred_sigmoid = pred_logits
preds = pred_sigmoid
scale_factor = pred_sigmoid - soft_target
loss = func(
preds, soft_target, reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(1)
if num_total_pos is not None:
loss = loss.sum() / num_total_pos
else:
loss = loss.mean()
return loss
def bbox_loss(self, s_bbox, t_bbox, weight_targets=None):
# [x,y,w,h]
if weight_targets is not None:
loss = paddle.sum(self.loss_bbox(s_bbox, t_bbox) * weight_targets)
avg_factor = weight_targets.sum()
loss = loss / avg_factor
else:
loss = paddle.mean(self.loss_bbox(s_bbox, t_bbox))
return loss
def distribution_focal_loss(self,
pred_corners,
target_corners,
weight_targets=None):
target_corners_label = F.softmax(target_corners, axis=-1)
loss_dfl = F.cross_entropy(
pred_corners,
target_corners_label,
soft_label=True,
reduction='none')
loss_dfl = loss_dfl.sum(1)
if weight_targets is not None:
loss_dfl = loss_dfl * (weight_targets.expand([-1, 4]).reshape([-1]))
loss_dfl = loss_dfl.sum(-1) / weight_targets.sum()
else:
loss_dfl = loss_dfl.mean(-1)
return loss_dfl / 4.0 # 4 direction
def main_kd(self, mask_positive, pred_scores, soft_cls, num_classes):
num_pos = mask_positive.sum()
if num_pos > 0:
cls_mask = mask_positive.unsqueeze(-1).tile([1, 1, num_classes])
pred_scores_pos = paddle.masked_select(
pred_scores, cls_mask).reshape([-1, num_classes])
soft_cls_pos = paddle.masked_select(
soft_cls, cls_mask).reshape([-1, num_classes])
loss_kd = self.loss_kd(
pred_scores_pos, soft_cls_pos, avg_factor=num_pos)
else:
loss_kd = paddle.zeros([1])
return loss_kd
def forward(self, teacher_model, student_model):
teacher_distill_pairs = teacher_model.yolo_head.distill_pairs
student_distill_pairs = student_model.yolo_head.distill_pairs
if self.logits_distill and self.loss_weight_logits > 0:
distill_bbox_loss, distill_dfl_loss, distill_cls_loss = [], [], []
distill_cls_loss.append(
self.quality_focal_loss(
student_distill_pairs['pred_cls_scores'].reshape(
(-1, student_distill_pairs['pred_cls_scores'].shape[-1]
)),
teacher_distill_pairs['pred_cls_scores'].detach().reshape(
(-1, teacher_distill_pairs['pred_cls_scores'].shape[-1]
)),
num_total_pos=student_distill_pairs['pos_num'],
use_sigmoid=False))
distill_bbox_loss.append(
self.bbox_loss(student_distill_pairs['pred_bboxes_pos'],
teacher_distill_pairs['pred_bboxes_pos'].detach(),
weight_targets=student_distill_pairs['bbox_weight']
) if 'pred_bboxes_pos' in student_distill_pairs and \
'pred_bboxes_pos' in teacher_distill_pairs and \
'bbox_weight' in student_distill_pairs
else paddle.zeros([1]))
distill_dfl_loss.append(
self.distribution_focal_loss(
student_distill_pairs['pred_dist_pos'].reshape((-1, student_distill_pairs['pred_dist_pos'].shape[-1])),
teacher_distill_pairs['pred_dist_pos'].detach().reshape((-1, teacher_distill_pairs['pred_dist_pos'].shape[-1])), \
weight_targets=student_distill_pairs['bbox_weight']
) if 'pred_dist_pos' in student_distill_pairs and \
'pred_dist_pos' in teacher_distill_pairs and \
'bbox_weight' in student_distill_pairs
else paddle.zeros([1]))
distill_cls_loss = paddle.add_n(distill_cls_loss)
distill_bbox_loss = paddle.add_n(distill_bbox_loss)
distill_dfl_loss = paddle.add_n(distill_dfl_loss)
logits_loss = distill_bbox_loss * self.bbox_loss_weight + distill_cls_loss * self.qfl_loss_weight + distill_dfl_loss * self.dfl_loss_weight
if self.logits_ld_distill:
loss_kd = self.main_kd(
student_distill_pairs['mask_positive_select'],
student_distill_pairs['pred_cls_scores'],
teacher_distill_pairs['pred_cls_scores'],
student_model.yolo_head.num_classes, )
logits_loss += loss_kd
else:
logits_loss = paddle.zeros([1])
if self.feat_distill and self.loss_weight_feat > 0:
feat_loss_list = []
inputs = student_model.inputs
assert 'gt_bbox' in inputs
assert self.feat_distill_place in student_distill_pairs
assert self.feat_distill_place in teacher_distill_pairs
stu_feats = student_distill_pairs[self.feat_distill_place]
tea_feats = teacher_distill_pairs[self.feat_distill_place]
for i, loss_module in enumerate(self.distill_feat_loss_modules):
feat_loss_list.append(
loss_module(stu_feats[i], tea_feats[i], inputs))
feat_loss = paddle.add_n(feat_loss_list)
else:
feat_loss = paddle.zeros([1])
student_model.yolo_head.distill_pairs.clear()
teacher_model.yolo_head.distill_pairs.clear()
return logits_loss * self.loss_weight_logits, feat_loss * self.loss_weight_feat
@register
class CWDFeatureLoss(nn.Layer):
def __init__(self,
student_channels,
teacher_channels,
normalize=False,
tau=1.0,
weight=1.0):
super(CWDFeatureLoss, self).__init__()
self.normalize = normalize
self.tau = tau
self.loss_weight = weight
if student_channels != teacher_channels:
self.align = nn.Conv2D(
student_channels,
teacher_channels,
kernel_size=1,
stride=1,
padding=0)
else:
self.align = None
def distill_softmax(self, x, tau):
_, _, w, h = paddle.shape(x)
x = paddle.reshape(x, [-1, w * h])
x /= tau
return F.softmax(x, axis=1)
def forward(self, preds_s, preds_t, inputs=None):
assert preds_s.shape[-2:] == preds_t.shape[-2:]
N, C, H, W = preds_s.shape
eps = 1e-5
if self.align is not None:
preds_s = self.align(preds_s)
if self.normalize:
preds_s = feature_norm(preds_s)
preds_t = feature_norm(preds_t)
softmax_pred_s = self.distill_softmax(preds_s, self.tau)
softmax_pred_t = self.distill_softmax(preds_t, self.tau)
loss = paddle.sum(-softmax_pred_t * paddle.log(eps + softmax_pred_s) +
softmax_pred_t * paddle.log(eps + softmax_pred_t))
return self.loss_weight * loss / (C * N)
@register
class FGDFeatureLoss(nn.Layer):
"""
Focal and Global Knowledge Distillation for Detectors
The code is reference from https://github.com/yzd-v/FGD/blob/master/mmdet/distillation/losses/fgd.py
Args:
student_channels (int): The number of channels in the student's FPN feature map. Default to 256.
teacher_channels (int): The number of channels in the teacher's FPN feature map. Default to 256.
normalize (bool): Whether to normalize the feature maps.
temp (float, optional): The temperature coefficient. Defaults to 0.5.
alpha_fgd (float, optional): The weight of fg_loss. Defaults to 0.001
beta_fgd (float, optional): The weight of bg_loss. Defaults to 0.0005
gamma_fgd (float, optional): The weight of mask_loss. Defaults to 0.001
lambda_fgd (float, optional): The weight of relation_loss. Defaults to 0.000005
"""
def __init__(self,
student_channels,
teacher_channels,
normalize=False,
loss_weight=1.0,
temp=0.5,
alpha_fgd=0.001,
beta_fgd=0.0005,
gamma_fgd=0.001,
lambda_fgd=0.000005):
super(FGDFeatureLoss, self).__init__()
self.normalize = normalize
self.loss_weight = loss_weight
self.temp = temp
self.alpha_fgd = alpha_fgd
self.beta_fgd = beta_fgd
self.gamma_fgd = gamma_fgd
self.lambda_fgd = lambda_fgd
kaiming_init = parameter_init("kaiming")
zeros_init = parameter_init("constant", 0.0)
if student_channels != teacher_channels:
self.align = nn.Conv2D(
student_channels,
teacher_channels,
kernel_size=1,
stride=1,
padding=0,
weight_attr=kaiming_init)
student_channels = teacher_channels
else:
self.align = None
self.conv_mask_s = nn.Conv2D(
student_channels, 1, kernel_size=1, weight_attr=kaiming_init)
self.conv_mask_t = nn.Conv2D(
teacher_channels, 1, kernel_size=1, weight_attr=kaiming_init)
self.stu_conv_block = nn.Sequential(
nn.Conv2D(
student_channels,
student_channels // 2,
kernel_size=1,
weight_attr=zeros_init),
nn.LayerNorm([student_channels // 2, 1, 1]),
nn.ReLU(),
nn.Conv2D(
student_channels // 2,
student_channels,
kernel_size=1,
weight_attr=zeros_init))
self.tea_conv_block = nn.Sequential(
nn.Conv2D(
teacher_channels,
teacher_channels // 2,
kernel_size=1,
weight_attr=zeros_init),
nn.LayerNorm([teacher_channels // 2, 1, 1]),
nn.ReLU(),
nn.Conv2D(
teacher_channels // 2,
teacher_channels,
kernel_size=1,
weight_attr=zeros_init))
def spatial_channel_attention(self, x, t=0.5):
shape = paddle.shape(x)
N, C, H, W = shape
_f = paddle.abs(x)
spatial_map = paddle.reshape(
paddle.mean(
_f, axis=1, keepdim=True) / t, [N, -1])
spatial_map = F.softmax(spatial_map, axis=1, dtype="float32") * H * W
spatial_att = paddle.reshape(spatial_map, [N, H, W])
channel_map = paddle.mean(
paddle.mean(
_f, axis=2, keepdim=False), axis=2, keepdim=False)
channel_att = F.softmax(channel_map / t, axis=1, dtype="float32") * C
return [spatial_att, channel_att]
def spatial_pool(self, x, mode="teacher"):
batch, channel, width, height = x.shape
x_copy = x
x_copy = paddle.reshape(x_copy, [batch, channel, height * width])
x_copy = x_copy.unsqueeze(1)
if mode.lower() == "student":
context_mask = self.conv_mask_s(x)
else:
context_mask = self.conv_mask_t(x)
context_mask = paddle.reshape(context_mask, [batch, 1, height * width])
context_mask = F.softmax(context_mask, axis=2)
context_mask = context_mask.unsqueeze(-1)
context = paddle.matmul(x_copy, context_mask)
context = paddle.reshape(context, [batch, channel, 1, 1])
return context
def mask_loss(self, stu_channel_att, tea_channel_att, stu_spatial_att,
tea_spatial_att):
def _func(a, b):
return paddle.sum(paddle.abs(a - b)) / len(a)
mask_loss = _func(stu_channel_att, tea_channel_att) + _func(
stu_spatial_att, tea_spatial_att)
return mask_loss
def feature_loss(self, stu_feature, tea_feature, mask_fg, mask_bg,
tea_channel_att, tea_spatial_att):
mask_fg = mask_fg.unsqueeze(axis=1)
mask_bg = mask_bg.unsqueeze(axis=1)
tea_channel_att = tea_channel_att.unsqueeze(axis=-1).unsqueeze(axis=-1)
tea_spatial_att = tea_spatial_att.unsqueeze(axis=1)
fea_t = paddle.multiply(tea_feature, paddle.sqrt(tea_spatial_att))
fea_t = paddle.multiply(fea_t, paddle.sqrt(tea_channel_att))
fg_fea_t = paddle.multiply(fea_t, paddle.sqrt(mask_fg))
bg_fea_t = paddle.multiply(fea_t, paddle.sqrt(mask_bg))
fea_s = paddle.multiply(stu_feature, paddle.sqrt(tea_spatial_att))
fea_s = paddle.multiply(fea_s, paddle.sqrt(tea_channel_att))
fg_fea_s = paddle.multiply(fea_s, paddle.sqrt(mask_fg))
bg_fea_s = paddle.multiply(fea_s, paddle.sqrt(mask_bg))
fg_loss = F.mse_loss(fg_fea_s, fg_fea_t, reduction="sum") / len(mask_fg)
bg_loss = F.mse_loss(bg_fea_s, bg_fea_t, reduction="sum") / len(mask_bg)
return fg_loss, bg_loss
def relation_loss(self, stu_feature, tea_feature):
context_s = self.spatial_pool(stu_feature, "student")
context_t = self.spatial_pool(tea_feature, "teacher")
out_s = stu_feature + self.stu_conv_block(context_s)
out_t = tea_feature + self.tea_conv_block(context_t)
rela_loss = F.mse_loss(out_s, out_t, reduction="sum") / len(out_s)
return rela_loss
def mask_value(self, mask, xl, xr, yl, yr, value):
mask[xl:xr, yl:yr] = paddle.maximum(mask[xl:xr, yl:yr], value)
return mask
def forward(self, stu_feature, tea_feature, inputs):
assert stu_feature.shape[-2:] == tea_feature.shape[-2:]
assert "gt_bbox" in inputs.keys() and "im_shape" in inputs.keys()
gt_bboxes = inputs['gt_bbox']
ins_shape = [
inputs['im_shape'][i] for i in range(inputs['im_shape'].shape[0])
]
index_gt = []
for i in range(len(gt_bboxes)):
if gt_bboxes[i].size > 2:
index_gt.append(i)
# only distill feature with labeled GTbox
if len(index_gt) != len(gt_bboxes):
index_gt_t = paddle.to_tensor(index_gt)
stu_feature = paddle.index_select(stu_feature, index_gt_t)
tea_feature = paddle.index_select(tea_feature, index_gt_t)
ins_shape = [ins_shape[c] for c in index_gt]
gt_bboxes = [gt_bboxes[c] for c in index_gt]
assert len(gt_bboxes) == tea_feature.shape[0]
if self.align is not None:
stu_feature = self.align(stu_feature)
if self.normalize:
stu_feature = feature_norm(stu_feature)
tea_feature = feature_norm(tea_feature)
tea_spatial_att, tea_channel_att = self.spatial_channel_attention(
tea_feature, self.temp)
stu_spatial_att, stu_channel_att = self.spatial_channel_attention(
stu_feature, self.temp)
mask_fg = paddle.zeros(tea_spatial_att.shape)
mask_bg = paddle.ones_like(tea_spatial_att)
one_tmp = paddle.ones([*tea_spatial_att.shape[1:]])
zero_tmp = paddle.zeros([*tea_spatial_att.shape[1:]])
mask_fg.stop_gradient = True
mask_bg.stop_gradient = True
one_tmp.stop_gradient = True
zero_tmp.stop_gradient = True
wmin, wmax, hmin, hmax = [], [], [], []
if len(gt_bboxes) == 0:
loss = self.relation_loss(stu_feature, tea_feature)
return self.lambda_fgd * loss
N, _, H, W = stu_feature.shape
for i in range(N):
tmp_box = paddle.ones_like(gt_bboxes[i])
tmp_box.stop_gradient = True
tmp_box[:, 0] = gt_bboxes[i][:, 0] / ins_shape[i][1] * W
tmp_box[:, 2] = gt_bboxes[i][:, 2] / ins_shape[i][1] * W
tmp_box[:, 1] = gt_bboxes[i][:, 1] / ins_shape[i][0] * H
tmp_box[:, 3] = gt_bboxes[i][:, 3] / ins_shape[i][0] * H
zero = paddle.zeros_like(tmp_box[:, 0], dtype="int32")
ones = paddle.ones_like(tmp_box[:, 2], dtype="int32")
zero.stop_gradient = True
ones.stop_gradient = True
wmin.append(
paddle.cast(paddle.floor(tmp_box[:, 0]), "int32").maximum(zero))
wmax.append(paddle.cast(paddle.ceil(tmp_box[:, 2]), "int32"))
hmin.append(
paddle.cast(paddle.floor(tmp_box[:, 1]), "int32").maximum(zero))
hmax.append(paddle.cast(paddle.ceil(tmp_box[:, 3]), "int32"))
area_recip = 1.0 / (
hmax[i].reshape([1, -1]) + 1 - hmin[i].reshape([1, -1])) / (
wmax[i].reshape([1, -1]) + 1 - wmin[i].reshape([1, -1]))
for j in range(len(gt_bboxes[i])):
if gt_bboxes[i][j].sum() > 0:
mask_fg[i] = self.mask_value(
mask_fg[i], hmin[i][j], hmax[i][j] + 1, wmin[i][j],
wmax[i][j] + 1, area_recip[0][j])
mask_bg[i] = paddle.where(mask_fg[i] > zero_tmp, zero_tmp, one_tmp)
if paddle.sum(mask_bg[i]):
mask_bg[i] /= paddle.sum(mask_bg[i])
fg_loss, bg_loss = self.feature_loss(stu_feature, tea_feature, mask_fg,
mask_bg, tea_channel_att,
tea_spatial_att)
mask_loss = self.mask_loss(stu_channel_att, tea_channel_att,
stu_spatial_att, tea_spatial_att)
rela_loss = self.relation_loss(stu_feature, tea_feature)
loss = self.alpha_fgd * fg_loss + self.beta_fgd * bg_loss \
+ self.gamma_fgd * mask_loss + self.lambda_fgd * rela_loss
return loss * self.loss_weight
@register
class PKDFeatureLoss(nn.Layer):
"""
PKD: General Distillation Framework for Object Detectors via Pearson Correlation Coefficient.
Args:
loss_weight (float): Weight of loss. Defaults to 1.0.
resize_stu (bool): If True, we'll down/up sample the features of the
student model to the spatial size of those of the teacher model if
their spatial sizes are different. And vice versa. Defaults to
True.
"""
def __init__(self,
student_channels=256,
teacher_channels=256,
normalize=True,
loss_weight=1.0,
resize_stu=True):
super(PKDFeatureLoss, self).__init__()
self.normalize = normalize
self.loss_weight = loss_weight
self.resize_stu = resize_stu
def forward(self, stu_feature, tea_feature, inputs=None):
size_s, size_t = stu_feature.shape[2:], tea_feature.shape[2:]
if size_s[0] != size_t[0]:
if self.resize_stu:
stu_feature = F.interpolate(
stu_feature, size_t, mode='bilinear')
else:
tea_feature = F.interpolate(
tea_feature, size_s, mode='bilinear')
assert stu_feature.shape == tea_feature.shape
if self.normalize:
stu_feature = feature_norm(stu_feature)
tea_feature = feature_norm(tea_feature)
loss = F.mse_loss(stu_feature, tea_feature) / 2
return loss * self.loss_weight
@register
class MimicFeatureLoss(nn.Layer):
def __init__(self,
student_channels=256,
teacher_channels=256,
normalize=True,
loss_weight=1.0):
super(MimicFeatureLoss, self).__init__()
self.normalize = normalize
self.loss_weight = loss_weight
self.mse_loss = nn.MSELoss()
if student_channels != teacher_channels:
self.align = nn.Conv2D(
student_channels,
teacher_channels,
kernel_size=1,
stride=1,
padding=0)
else:
self.align = None
def forward(self, stu_feature, tea_feature, inputs=None):
if self.align is not None:
stu_feature = self.align(stu_feature)
if self.normalize:
stu_feature = feature_norm(stu_feature)
tea_feature = feature_norm(tea_feature)
loss = self.mse_loss(stu_feature, tea_feature)
return loss * self.loss_weight
@register
class MGDFeatureLoss(nn.Layer):
def __init__(self,
student_channels=256,
teacher_channels=256,
normalize=True,
loss_weight=1.0,
loss_func='mse'):
super(MGDFeatureLoss, self).__init__()
self.normalize = normalize
self.loss_weight = loss_weight
assert loss_func in ['mse', 'ssim']
self.loss_func = loss_func
self.mse_loss = nn.MSELoss(reduction='sum')
self.ssim_loss = SSIM(11)
kaiming_init = parameter_init("kaiming")
if student_channels != teacher_channels:
self.align = nn.Conv2D(
student_channels,
teacher_channels,
kernel_size=1,
stride=1,
padding=0,
weight_attr=kaiming_init,
bias_attr=False)
else:
self.align = None
self.generation = nn.Sequential(
nn.Conv2D(
teacher_channels, teacher_channels, kernel_size=3, padding=1),
nn.ReLU(),
nn.Conv2D(
teacher_channels, teacher_channels, kernel_size=3, padding=1))
def forward(self, stu_feature, tea_feature, inputs=None):
N = stu_feature.shape[0]
if self.align is not None:
stu_feature = self.align(stu_feature)
stu_feature = self.generation(stu_feature)
if self.normalize:
stu_feature = feature_norm(stu_feature)
tea_feature = feature_norm(tea_feature)
if self.loss_func == 'mse':
loss = self.mse_loss(stu_feature, tea_feature) / N
elif self.loss_func == 'ssim':
ssim_loss = self.ssim_loss(stu_feature, tea_feature)
loss = paddle.clip((1 - ssim_loss) / 2, 0, 1)
else:
raise ValueError
return loss * self.loss_weight
class SSIM(nn.Layer):
def __init__(self, window_size=11, size_average=True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = self.create_window(window_size, self.channel)
def gaussian(self, window_size, sigma):
gauss = paddle.to_tensor([
math.exp(-(x - window_size // 2)**2 / float(2 * sigma**2))
for x in range(window_size)
])
return gauss / gauss.sum()
def create_window(self, window_size, channel):
_1D_window = self.gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).unsqueeze(0).unsqueeze(0)
window = _2D_window.expand([channel, 1, window_size, window_size])
return window
def _ssim(self, img1, img2, window, window_size, channel,
size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(
img1 * img1, window, padding=window_size // 2,
groups=channel) - mu1_sq
sigma2_sq = F.conv2d(
img2 * img2, window, padding=window_size // 2,
groups=channel) - mu2_sq
sigma12 = F.conv2d(
img1 * img2, window, padding=window_size // 2,
groups=channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
1e-12 + (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean([1, 2, 3])
def forward(self, img1, img2):
channel = img1.shape[1]
if channel == self.channel and self.window.dtype == img1.dtype:
window = self.window
else:
window = self.create_window(self.window_size, channel)
self.window = window
self.channel = channel
return self._ssim(img1, img2, window, self.window_size, channel,
self.size_average)
| PaddleDetection/ppdet/slim/distill_loss.py/0 | {
"file_path": "PaddleDetection/ppdet/slim/distill_loss.py",
"repo_id": "PaddleDetection",
"token_count": 19190
} | 85 |
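A short sketch exercising two of the feature-distillation losses above on random feature maps. Channel sizes are made up; CWDFeatureLoss inserts a 1x1 conv to align a narrower student to the teacher, while PKDFeatureLoss assumes matching channels.
import paddle
from ppdet.slim.distill_loss import CWDFeatureLoss, PKDFeatureLoss
paddle.seed(0)
stu_feat = paddle.randn([2, 128, 20, 20])   # narrower student feature
tea_feat = paddle.randn([2, 256, 20, 20])   # wider teacher feature
cwd = CWDFeatureLoss(student_channels=128, teacher_channels=256, normalize=True, tau=1.0)
print(float(cwd(stu_feat, tea_feat)))
# PKD expects matching shapes, so feed a teacher-sized tensor as the student here.
pkd = PKDFeatureLoss(normalize=True)
print(float(pkd(paddle.randn([2, 256, 20, 20]), tea_feat)))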
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
import paddle.profiler as profiler
# A global variable to record the number of calling times for profiler
# functions. It is used to specify the tracing range of training steps.
_profiler_step_id = 0
# A global variable to avoid parsing from string every time.
_profiler_options = None
_prof = None
class ProfilerOptions(object):
'''
Use a string to initialize a ProfilerOptions.
The string should be in the format: "key1=value1;key2=value;key3=value3".
For example:
"profile_path=model.profile"
"batch_range=[50, 60]; profile_path=model.profile"
"batch_range=[50, 60]; tracer_option=OpDetail; profile_path=model.profile"
ProfilerOptions supports following key-value pair:
batch_range - an integer list, e.g. [100, 110].
state - a string, the optional values are 'CPU', 'GPU' or 'All'.
sorted_key - a string, the optional values are 'calls', 'total',
'max', 'min' or 'ave'.
tracer_option - a string, the optional values are 'Default', 'OpDetail',
'AllOpDetail'.
profile_path - a string, the path to save the serialized profile data,
which can be used to generate a timeline.
exit_on_finished - a boolean.
'''
def __init__(self, options_str):
assert isinstance(options_str, str)
self._options = {
'batch_range': [10, 20],
'state': 'All',
'sorted_key': 'total',
'tracer_option': 'Default',
'profile_path': '/tmp/profile',
'exit_on_finished': True,
'timer_only': True
}
self._parse_from_string(options_str)
def _parse_from_string(self, options_str):
for kv in options_str.replace(' ', '').split(';'):
key, value = kv.split('=')
if key == 'batch_range':
value_list = value.replace('[', '').replace(']', '').split(',')
value_list = list(map(int, value_list))
if len(value_list) >= 2 and value_list[0] >= 0 and value_list[
1] > value_list[0]:
self._options[key] = value_list
elif key == 'exit_on_finished':
self._options[key] = value.lower() in ("yes", "true", "t", "1")
elif key in [
'state', 'sorted_key', 'tracer_option', 'profile_path'
]:
self._options[key] = value
elif key == 'timer_only':
self._options[key] = value
def __getitem__(self, name):
if self._options.get(name, None) is None:
raise ValueError(
"ProfilerOptions does not have an option named %s." % name)
return self._options[name]
def add_profiler_step(options_str=None):
'''
Enable the operator-level timing using PaddlePaddle's profiler.
    The profiler uses an independent variable to count the profiler steps.
One call of this function is treated as a profiler step.
Args:
        options_str - a string to initialize the ProfilerOptions.
Default is None, and the profiler is disabled.
'''
if options_str is None:
return
global _prof
global _profiler_step_id
global _profiler_options
if _profiler_options is None:
_profiler_options = ProfilerOptions(options_str)
# profile : https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/performance_improving/profiling_model.html#chakanxingnengshujudetongjibiaodan
    # timer_only = True: only the model's throughput and time overhead are displayed.
    # timer_only = False: calling summary() prints statistical tables that present performance data from different perspectives,
    #                     and the exported Timeline information can be found in the profiler_log directory.
if _prof is None:
_timer_only = str(_profiler_options['timer_only']) == str(True)
_prof = profiler.Profiler(
scheduler = (_profiler_options['batch_range'][0], _profiler_options['batch_range'][1]),
on_trace_ready = profiler.export_chrome_tracing('./profiler_log'),
timer_only = _timer_only)
_prof.start()
else:
_prof.step()
if _profiler_step_id == _profiler_options['batch_range'][1]:
_prof.stop()
_prof.summary(
op_detail=True,
thread_sep=False,
time_unit='ms')
_prof = None
if _profiler_options['exit_on_finished']:
sys.exit(0)
_profiler_step_id += 1
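# A minimal usage sketch (hypothetical training loop; the loader and step names
# below are placeholders, only the options string format comes from this module):
#
#   for step_id, batch in enumerate(train_loader):
#       add_profiler_step("batch_range=[10, 20]; profile_path=model.profile")
#       ...  # forward / backward / optimizer step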
| PaddleDetection/ppdet/utils/profiler.py/0 | {
"file_path": "PaddleDetection/ppdet/utils/profiler.py",
"repo_id": "PaddleDetection",
"token_count": 2191
} | 86 |
_BASE_: [
'../../../../configs/datasets/spine_coco.yml',
'../../../../configs/runtime.yml',
'../../../../configs/rotate/ppyoloe_r/_base_/optimizer_3x.yml',
'../../../../configs/rotate/ppyoloe_r/_base_/ppyoloe_r_reader.yml',
'../../../../configs/rotate/ppyoloe_r/_base_/ppyoloe_r_crn.yml'
]
log_iter: 50
snapshot_epoch: 1
weights: output/ppyoloe_r_crn_s_3x_dota/model_final
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/CSPResNetb_s_pretrained.pdparams
depth_mult: 0.33
width_mult: 0.50
| PaddleDetection/test_tipc/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_spine_coco.yml/0 | {
"file_path": "PaddleDetection/test_tipc/configs/rotate/ppyoloe_r/ppyoloe_r_crn_s_3x_spine_coco.yml",
"repo_id": "PaddleDetection",
"token_count": 241
} | 87 |
#!/bin/bash
source test_tipc/utils_func.sh
FILENAME=$1
MODE="whole_infer"
# parser model_name
dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
echo "ppdet ptq: ${model_name}"
python=$(func_parser_value "${lines[2]}")
filename_key=$(func_parser_key "${lines[3]}")
# parser export params
save_export_key=$(func_parser_key "${lines[5]}")
save_export_value=$(func_parser_value "${lines[5]}")
export_weight_key=$(func_parser_key "${lines[6]}")
export_weight_value=$(func_parser_value "${lines[6]}")
kl_quant_export=$(func_parser_value "${lines[7]}")
export_param1_key=$(func_parser_key "${lines[8]}")
export_param1_value=$(func_parser_value "${lines[8]}")
# parser infer params
inference_py=$(func_parser_value "${lines[10]}")
device_key=$(func_parser_key "${lines[11]}")
device_list=$(func_parser_value "${lines[11]}")
use_mkldnn_key=$(func_parser_key "${lines[12]}")
use_mkldnn_list=$(func_parser_value "${lines[12]}")
cpu_threads_key=$(func_parser_key "${lines[13]}")
cpu_threads_list=$(func_parser_value "${lines[13]}")
batch_size_key=$(func_parser_key "${lines[14]}")
batch_size_list=$(func_parser_value "${lines[14]}")
run_mode_key=$(func_parser_key "${lines[15]}")
run_mode_list=$(func_parser_value "${lines[15]}")
model_dir_key=$(func_parser_key "${lines[16]}")
image_dir_key=$(func_parser_key "${lines[17]}")
image_dir_value=$(func_parser_value "${lines[17]}")
run_benchmark_key=$(func_parser_key "${lines[18]}")
run_benchmark_value=$(func_parser_value "${lines[18]}")
infer_param1_key=$(func_parser_key "${lines[19]}")
infer_param1_value=$(func_parser_value "${lines[19]}")
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_ptq_python.log"
function func_ptq_inference(){
IFS='|'
_python=$1
_log_path=$2
_script=$3
_set_model_dir=$4
set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
set_run_benchmark=$(func_set_params "${run_benchmark_key}" "${run_benchmark_value}")
set_infer_param1=$(func_set_params "${infer_param1_key}" "${infer_param1_value}")
# inference
for device in ${device_list[*]}; do
set_device=$(func_set_params "${device_key}" "${device}")
if [ ${device} = "cpu" ]; then
for use_mkldnn in ${use_mkldnn_list[*]}; do
set_use_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
for threads in ${cpu_threads_list[*]}; do
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
command="${_python} ${_script} ${set_device} ${set_use_mkldnn} ${set_cpu_threads} ${_set_model_dir} ${set_batchsize} ${set_image_dir} ${set_run_benchmark} ${set_infer_param1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
elif [ ${device} = "gpu" ]; then
for run_mode in ${run_mode_list[*]}; do
if [[ ${run_mode} = "paddle" ]] || [[ ${run_mode} = "trt_int8" ]]; then
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/python_infer_gpu_mode_${run_mode}_batchsize_${batch_size}.log"
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_run_mode=$(func_set_params "${run_mode_key}" "${run_mode}")
command="${_python} ${_script} ${set_device} ${set_run_mode} ${_set_model_dir} ${set_batchsize} ${set_image_dir} ${set_run_benchmark} ${set_infer_param1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
fi
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
}
IFS="|"
# run ptq
set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
set_filename=$(func_set_params "${filename_key}" "${model_name}")
export_log_path="${LOG_PATH}/export.log"
ptq_cmd="${python} ${kl_quant_export} ${set_export_weight} ${set_filename} ${set_save_export_dir}"
echo $ptq_cmd
eval "${ptq_cmd} > ${export_log_path} 2>&1"
status_export=$?
cat ${export_log_path}
status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
#run inference
set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
func_ptq_inference "${python}" "${LOG_PATH}" "${inference_py}" "${set_export_model_dir}"
| PaddleDetection/test_tipc/test_ptq_inference_python.sh/0 | {
"file_path": "PaddleDetection/test_tipc/test_ptq_inference_python.sh",
"repo_id": "PaddleDetection",
"token_count": 2546
} | 88 |
This section describes the scripts used to convert deep learning model files:
* `Dockerfile`: A Docker file used to construct an image based on Ubuntu 20.04 that includes the Transformer library.
* `download_and_convert_model.sh`: A shell script that downloads and converts the codegen-6B-multi model for the provided number of GPUs.
* `codegen_gptj_convert.py`: A Python script for converting Salesforce CodeGen models to GPT-J (e.g., Salesforce/codegen-350M-multi).
* `huggingface_gptj_convert.py`: A Python script for converting the HF model to the GPT-J format (e.g., GPTJForCausalLM model)
* `triton_config_gen.py`: A Python script that creates a config and weight file for running a CodeGen model with Triton.
* `config_template.pbtxt`: A template file for defining the config file's data format.
| fauxpilot/converter/README.md/0 | {
"file_path": "fauxpilot/converter/README.md",
"repo_id": "fauxpilot",
"token_count": 240
} | 89 |
"""
A simple script that sets up the model directory of a given model for Triton.
"""
import argparse
import os
import shutil
from pathlib import Path
from string import Template
SCRIPT_DIR = Path(__file__).parent
CONFIG_TEMPLATE_PATH = os.path.join(SCRIPT_DIR, 'config_template.pbtxt')
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", type=str, required=True)
parser.add_argument("--model_name", type=str, required=True)
parser.add_argument("--org_name", type=str, required=True)
parser.add_argument("--use_half", type=str, default="1")
parser.add_argument("--use_int8", type=str, default="0")
parser.add_argument("--use_auto_device_map", type=str, default="1")
args = parser.parse_args()
# Step 1: Make model directory
model_dir_path = Path(os.path.join(Path(args.model_dir), f"py-{args.org_name}-{args.model_name}/py-model/1"))
model_dir_path.mkdir(parents=True, exist_ok=True)
# Step 2: copy model.py
shutil.copy(os.path.join(SCRIPT_DIR, 'model.py'), os.path.join(model_dir_path, 'model.py'))
# Step 3: Generate config.pbtxt
with open(CONFIG_TEMPLATE_PATH, 'r') as f:
template = Template(f.read())
config = template.substitute(
org_name=args.org_name,
model_name=args.model_name,
use_half=args.use_half,
use_int8=args.use_int8,
use_auto_device_map=args.use_auto_device_map,
)
with open(os.path.join(model_dir_path, '../config.pbtxt'), 'w') as f:
f.write(config)
print(f"Config written to {os.path.abspath(f.name)}")
| fauxpilot/python_backend/init_model.py/0 | {
"file_path": "fauxpilot/python_backend/init_model.py",
"repo_id": "fauxpilot",
"token_count": 563
} | 90 |
{
"pipeline": [
{
"limit": -1,
"progress": false,
"text_key": "text",
"id_key": "id",
"adapter": "<bound method BaseReader._default_adapter of \ud83d\udcd6 - READER: \ud83d\udc3f Jsonl>",
"_empty_warning": false,
"default_metadata": null,
"data_folder": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/filtered_data', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"recursive": true,
"glob_pattern": null,
"shuffle_files": false,
"compression": "infer"
},
{
"_tokenizer": null,
"tokenizer_name_or_path": "gpt2",
"_post_processor": null,
"eos_token": null,
"count_eos_token": false,
"overwrite": true
},
{
"data_folder": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/remove_ids', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"exclusion_writer": {
"compression": "gzip",
"output_folder": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/removed', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"max_file_size": -1,
"file_id_counter": {},
"output_filename": "<string.Template object at 0x7efdde55bd90>",
"output_mg": "<datatrove.io.OutputFileManager object at 0x7efdde55bdd0>",
"adapter": "<bound method DiskWriter._default_adapter of \ud83d\udcbd - WRITER: \ud83d\udc3f Jsonl>",
"expand_metadata": false
},
"load_cluster_ids": false,
"lines_to_buffer": 5
},
{
"compression": "gzip",
"output_folder": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/hf_stack', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"max_file_size": -1,
"file_id_counter": {},
"output_filename": "<string.Template object at 0x7efdde560390>",
"output_mg": "<datatrove.io.OutputFileManager object at 0x7efdde5603d0>",
"adapter": "<bound method DiskWriter._default_adapter of \ud83d\udcbd - WRITER: \ud83d\udc3f Jsonl>",
"expand_metadata": false
}
],
"logging_dir": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/logs/2024-07-05_01-48-57_umniw', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"skip_completed": true,
"tasks": 16,
"workers": 16,
"start_method": "forkserver",
"local_tasks": 16,
"local_rank_offset": 0,
"depends": null,
"_launched": true,
"world_size": 16
} | get-data/logs/2024-07-05_01-48-57_umniw/executor.json/0 | {
"file_path": "get-data/logs/2024-07-05_01-48-57_umniw/executor.json",
"repo_id": "get-data",
"token_count": 1458
} | 91 |
[
{
"name": "\ud83e\udec2 - DEDUP: \ud83c\udfaf MinHash stage 2",
"time_stats": {
"total": 0.2595059520099312,
"n": 14,
"mean": 0.018536139429280802,
"variance": 8.963066932958969e-08,
"std_dev": 0.0002993838160782738,
"min": 0.01824019296327606,
"max": 0.019252316968049854,
"total_human": "0 seconds",
"mean_human": "18.54 milliseconds",
"std_dev_human": "0.30 milliseconds",
"min_human": "18.24 milliseconds",
"max_human": "19.25 milliseconds",
"global_mean": 0.018536139429280802,
"global_mean_human": "0 seconds",
"global_min": 0.01824019296327606,
"global_min_human": "0 seconds",
"global_max": 0.019252316968049854,
"global_max_human": "0 seconds",
"global_std_dev": 0.0002993838160782738,
"global_std_dev_human": "0 seconds"
},
"stats": {
"total_matches": 16905
}
}
] | get-data/logs/2024-07-05_01-48-57_zxkng/stats.json/0 | {
"file_path": "get-data/logs/2024-07-05_01-48-57_zxkng/stats.json",
"repo_id": "get-data",
"token_count": 613
} | 92 |
{
"pipeline": [
{
"limit": -1,
"progress": false,
"text_key": "text",
"id_key": "id",
"adapter": "<bound method BaseReader._default_adapter of \ud83d\udcd6 - READER: \ud83d\udc7e PersonalCopilot>",
"_empty_warning": false,
"default_metadata": null,
"data_folder": "DataFolder(path='C:/Users/hukai/IdeaProjects', fs=<fsspec.implementations.local.LocalFileSystem object at 0x000001EFEF775BA0>)",
"recursive": true,
"glob_pattern": null,
"shuffle_files": false,
"empty_warning": false
},
{
"exclusion_writer": null,
"max_line_length_threshold": 1000,
"mean_line_length_threshold": 100,
"alphanum_threshold": 0.25
},
{
"compression": "gzip",
"output_folder": "DataFolder(path='C:/Users/hukai/Desktop/get-data/filtered_data', fs=<fsspec.implementations.local.LocalFileSystem object at 0x000001EFEF775BA0>)",
"max_file_size": -1,
"file_id_counter": {},
"output_filename": "<string.Template object at 0x000001EFEF7772B0>",
"output_mg": "<datatrove.io.OutputFileManager object at 0x000001EFEF777340>",
"adapter": "<bound method DiskWriter._default_adapter of \ud83d\udcbd - WRITER: \ud83d\udc3f Jsonl>",
"expand_metadata": false
}
],
"logging_dir": "DataFolder(path='C:/Users/hukai/Desktop/get-data/logs/2024-07-05_09-35-12_nmwph', fs=<fsspec.implementations.local.LocalFileSystem object at 0x000001EFEF775BA0>)",
"skip_completed": true,
"tasks": 16,
"workers": 16,
"start_method": "forkserver",
"local_tasks": 16,
"local_rank_offset": 0,
"depends": null,
"_launched": true,
"world_size": 16
} | get-data/logs/2024-07-05_09-35-12_nmwph/executor.json/0 | {
"file_path": "get-data/logs/2024-07-05_09-35-12_nmwph/executor.json",
"repo_id": "get-data",
"token_count": 936
} | 93 |
# pylint: skip-file
import mxnet as mx
import numpy as np
import sys, os
import random
import math
import scipy.misc
import cv2
import logging
import sklearn
import datetime
import img_helper
from mxnet.io import DataIter
from mxnet import ndarray as nd
from mxnet import io
from mxnet import recordio
from PIL import Image
from config import config
from skimage import transform as tf
class FaceSegIter(DataIter):
def __init__(self,
batch_size,
per_batch_size=0,
path_imgrec=None,
aug_level=0,
force_mirror=False,
exf=1,
use_coherent=0,
args=None,
data_name="data",
label_name="softmax_label"):
self.aug_level = aug_level
self.force_mirror = force_mirror
self.use_coherent = use_coherent
self.exf = exf
self.batch_size = batch_size
self.per_batch_size = per_batch_size
self.data_name = data_name
self.label_name = label_name
assert path_imgrec
logging.info('loading recordio %s...', path_imgrec)
path_imgidx = path_imgrec[0:-4] + ".idx"
self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec,
'r') # pylint: disable=redefined-variable-type
self.oseq = list(self.imgrec.keys)
print('train size', len(self.oseq))
self.cur = 0
self.reset()
self.data_shape = (3, config.input_img_size, config.input_img_size)
self.num_classes = config.num_classes
self.input_img_size = config.input_img_size
#self.label_classes = self.num_classes
if config.losstype == 'heatmap':
if aug_level > 0:
self.output_label_size = config.output_label_size
self.label_shape = (self.num_classes, self.output_label_size,
self.output_label_size)
else:
self.output_label_size = self.input_img_size
#self.label_shape = (self.num_classes, 2)
self.label_shape = (self.num_classes, self.output_label_size,
self.output_label_size)
else:
if aug_level > 0:
self.output_label_size = config.output_label_size
self.label_shape = (self.num_classes, 2)
else:
self.output_label_size = self.input_img_size
#self.label_shape = (self.num_classes, 2)
self.label_shape = (self.num_classes, 2)
self.provide_data = [(data_name, (batch_size, ) + self.data_shape)]
self.provide_label = [(label_name, (batch_size, ) + self.label_shape)]
self.img_num = 0
self.invalid_num = 0
self.mode = 1
self.vis = 0
self.stats = [0, 0]
self.flip_order = [
16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 26, 25,
24, 23, 22, 21, 20, 19, 18, 17, 27, 28, 29, 30, 35, 34, 33, 32, 31,
45, 44, 43, 42, 47, 46, 39, 38, 37, 36, 41, 40, 54, 53, 52, 51, 50,
49, 48, 59, 58, 57, 56, 55, 64, 63, 62, 61, 60, 67, 66, 65
]
#self.mirror_set = [
# (22,23),
# (21,24),
# (20,25),
# (19,26),
# (18,27),
# (40,43),
# (39,44),
# (38,45),
# (37,46),
# (42,47),
# (41,48),
# (33,35),
# (32,36),
# (51,53),
# (50,54),
# (62,64),
# (61,65),
# (49,55),
# (49,55),
# (68,66),
# (60,56),
# (59,57),
# (1,17),
# (2,16),
# (3,15),
# (4,14),
# (5,13),
# (6,12),
# (7,11),
# (8,10),
# ]
def get_data_shape(self):
return self.data_shape
#def get_label_shape(self):
# return self.label_shape
def get_shape_dict(self):
D = {}
for (k, v) in self.provide_data:
D[k] = v
for (k, v) in self.provide_label:
D[k] = v
return D
def get_label_names(self):
D = []
for (k, v) in self.provide_label:
D.append(k)
return D
def reset(self):
#print('reset')
if self.aug_level == 0:
self.seq = self.oseq
else:
self.seq = []
for _ in range(self.exf):
_seq = self.oseq[:]
random.shuffle(_seq)
self.seq += _seq
print('train size after reset', len(self.seq))
self.cur = 0
def next_sample(self):
"""Helper function for reading in next sample."""
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
img = mx.image.imdecode(img).asnumpy()
hlabel = np.array(header.label).reshape((self.num_classes, 2))
if not config.label_xfirst:
hlabel = hlabel[:, ::-1] #convert to X/W first
annot = {'scale': config.base_scale}
#ul = np.array( (50000,50000), dtype=np.int32)
#br = np.array( (0,0), dtype=np.int32)
#for i in range(hlabel.shape[0]):
# h = int(hlabel[i][0])
# w = int(hlabel[i][1])
# key = np.array((h,w))
# ul = np.minimum(key, ul)
# br = np.maximum(key, br)
return img, hlabel, annot
def get_flip(self, data, label):
data_flip = np.zeros_like(data)
label_flip = np.zeros_like(label)
for k in range(data_flip.shape[2]):
data_flip[:, :, k] = np.fliplr(data[:, :, k])
for k in range(label_flip.shape[0]):
label_flip[k, :] = np.fliplr(label[k, :])
#print(label[0,:].shape)
label_flip = label_flip[self.flip_order, :]
return data_flip, label_flip
def get_data(self, data, label, annot):
if self.vis:
self.img_num += 1
#if self.img_num<=self.vis:
# filename = './vis/raw_%d.jpg' % (self.img_num)
# print('save', filename)
# draw = data.copy()
# for i in range(label.shape[0]):
# cv2.circle(draw, (label[i][1], label[i][0]), 1, (0, 0, 255), 2)
# scipy.misc.imsave(filename, draw)
rotate = 0
#scale = 1.0
if 'scale' in annot:
scale = annot['scale']
else:
scale = max(data.shape[0], data.shape[1])
if 'center' in annot:
center = annot['center']
else:
center = np.array((data.shape[1] / 2, data.shape[0] / 2))
max_retry = 3
if self.aug_level == 0: #validation mode
max_retry = 6
retry = 0
found = False
base_scale = scale
while retry < max_retry:
retry += 1
succ = True
_scale = base_scale
if self.aug_level > 0:
rotate = np.random.randint(-40, 40)
scale_config = 0.2
#rotate = 0
#scale_config = 0.0
scale_ratio = min(
1 + scale_config,
max(1 - scale_config,
(np.random.randn() * scale_config) + 1))
_scale = int(base_scale * scale_ratio)
#translate = np.random.randint(-5, 5, size=(2,))
#center += translate
data_out, trans = img_helper.transform(data, center,
self.input_img_size, _scale,
rotate)
#data_out = img_helper.crop2(data, center, _scale, (self.input_img_size, self.input_img_size), rot=rotate)
label_out = np.zeros(self.label_shape, dtype=np.float32)
#print('out shapes', data_out.shape, label_out.shape)
for i in range(label.shape[0]):
pt = label[i].copy()
#pt = pt[::-1]
npt = img_helper.transform_pt(pt, trans)
if npt[0] >= data_out.shape[1] or npt[1] >= data_out.shape[
0] or npt[0] < 0 or npt[1] < 0:
succ = False
#print('err npt', npt)
break
if config.losstype == 'heatmap':
pt_scale = float(
self.output_label_size) / self.input_img_size
npt *= pt_scale
npt = npt.astype(np.int32)
img_helper.gaussian(label_out[i], npt, config.gaussian)
else:
label_out[i] = (npt / self.input_img_size)
#print('before gaussian', label_out[i].shape, pt.shape)
#trans = img_helper.transform(pt, center, _scale, (self.output_label_size, self.output_label_size), rot=rotate)
#print(trans.shape)
#if not img_helper.gaussian(label_out[i], trans, _g):
# succ = False
# break
if not succ:
if self.aug_level == 0:
base_scale += 20
continue
flip_data_out = None
flip_label_out = None
if config.net_coherent:
flip_data_out, flip_label_out = self.get_flip(
data_out, label_out)
elif ((self.aug_level > 0 and np.random.rand() < 0.5)
or self.force_mirror): #flip aug
flip_data_out, flip_label_out = self.get_flip(
data_out, label_out)
data_out, label_out = flip_data_out, flip_label_out
found = True
break
#self.stats[0]+=1
if not found:
#self.stats[1]+=1
#print('find aug error', retry)
#print(self.stats)
#print('!!!ERR')
return None
#print('found with scale', _scale, rotate)
if self.vis > 0 and self.img_num <= self.vis:
print('crop', data.shape, center, _scale, rotate, data_out.shape)
filename = './vis/cropped_%d.jpg' % (self.img_num)
print('save', filename)
draw = data_out.copy()
alabel = label_out.copy()
for i in range(label.shape[0]):
a = cv2.resize(alabel[i],
(self.input_img_size, self.input_img_size))
ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
cv2.circle(draw, (ind[1], ind[0]), 1, (0, 0, 255), 2)
scipy.misc.imsave(filename, draw)
filename = './vis/raw_%d.jpg' % (self.img_num)
scipy.misc.imsave(filename, data)
return data_out, label_out, flip_data_out, flip_label_out
def next(self):
"""Returns the next batch of data."""
#print('next')
batch_size = self.batch_size
batch_data = nd.empty((batch_size, ) + self.data_shape)
batch_label = nd.empty((batch_size, ) + self.label_shape)
i = 0
#self.cutoff = random.randint(800,1280)
try:
while i < batch_size:
#print('N', i)
data, label, annot = self.next_sample()
R = self.get_data(data, label, annot)
if R is None:
continue
data_out, label_out, flip_data_out, flip_label_out = R
if not self.use_coherent:
data = nd.array(data_out)
data = nd.transpose(data, axes=(2, 0, 1))
label = nd.array(label_out)
#print(data.shape, label.shape)
batch_data[i][:] = data
batch_label[i][:] = label
i += 1
else:
data = nd.array(data_out)
data = nd.transpose(data, axes=(2, 0, 1))
label = nd.array(label_out)
data2 = nd.array(flip_data_out)
data2 = nd.transpose(data2, axes=(2, 0, 1))
label2 = nd.array(flip_label_out)
#M = nd.array(M)
#print(data.shape, label.shape)
batch_data[i][:] = data
batch_label[i][:] = label
#i+=1
j = i + self.per_batch_size // 2
batch_data[j][:] = data2
batch_label[j][:] = label2
i += 1
if j % self.per_batch_size == self.per_batch_size - 1:
i = j + 1
except StopIteration:
if i < batch_size:
raise StopIteration
#return {self.data_name : batch_data,
# self.label_name : batch_label}
#print(batch_data.shape, batch_label.shape)
return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)
| insightface/alignment/heatmap/data.py/0 | {
"file_path": "insightface/alignment/heatmap/data.py",
"repo_id": "insightface",
"token_count": 7678
} | 94 |
# Face Attribute Datasets
(Updating)
## Training Datasets
### CelebA
https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
## Test Datasets
| insightface/attribute/_datasets_/README.md/0 | {
"file_path": "insightface/attribute/_datasets_/README.md",
"repo_id": "insightface",
"token_count": 60
} | 95 |
import torch
from torch.nn import functional as F
import numpy as np
import torch.nn as nn
import math
parents = [1, 2, 13, 13, 3, 4, 7, 8, 12, 12, 9, 10, 14, 13, 13, 12, 15]
bone_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16]
def kl_criterion(mu1, sigma1, mu2, sigma2, mean_only=False):
if mean_only:
kld = (mu1 - mu2)**2 / (2*sigma2**2)
else:
kld = torch.log(sigma2/sigma1) + (sigma1**2 + (mu1 - mu2)**2)/(2*sigma2**2) - 1/2
return kld
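# kl_criterion above is the closed-form KL divergence between two Gaussians,
#   KL(N(mu1, sigma1^2) || N(mu2, sigma2^2))
#     = log(sigma2 / sigma1) + (sigma1^2 + (mu1 - mu2)^2) / (2 * sigma2^2) - 1/2,
# with mean_only=True keeping only the squared mean-difference term.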
def loss_bone(x, y):
bones_x = (x - x[:, parents])[:, bone_indices]
bones_y = (y - y[:, parents])[:, bone_indices]
loss_bone = (bones_x - bones_y).pow(2).sum(dim=-1).sqrt().mean()
return loss_bone
def loss_3d(pred3d, gt3d):
# pred3d, gt_3d = pred3d[..., :3], gt3d[..., :3]
# bones_pred = (pred3d - pred3d[:, parents])[:, bone_indices]
# bones_gt = (gt3d - gt3d[:, parents])[:, bone_indices]
# loss_loc = (torch.abs(pred3d - gt3d)).sum(dim=-1).sqrt().mean()
# loss_bone = (torch.abs(bones_pred - bones_gt)).sum(dim=-1).sqrt().mean()
# return loss_loc + loss_bone
# return torch.sum(torch.abs(pred3d[..., :3] - gt3d[..., :3]), dim=-1).mean()
return torch.sum((pred3d[..., :3] - gt3d[..., :3]) ** 2, dim=-1).sqrt().mean()
def loss_2d(pred2d, gt2d):
# return torch.sum(torch.abs(pred2d[..., :2] - gt2d[..., :2]), dim=-1).mean()
return torch.sum((pred2d[..., :2] - gt2d[..., :2]) ** 2, dim=-1).sqrt().mean()
def loss_gadv(fake_logits):
loss_func = torch.nn.BCEWithLogitsLoss().cuda()
fake_one = torch.ones_like(fake_logits).cuda()
loss_g = loss_func(fake_logits, fake_one)
# loss_g = loss_func(fake_one, fake_logits)
return loss_g
def loss_dadv(real_logits, fake_logits):
loss_func = torch.nn.BCEWithLogitsLoss().cuda()
real_one = torch.ones_like(real_logits).cuda()
fake_one = torch.zeros_like(fake_logits).cuda()
loss_d_real = loss_func(real_logits, real_one)
loss_d_fake = loss_func(fake_logits, fake_one)
# loss_d_real = loss_func(real_one, real_logits)
# loss_d_fake = loss_func(fake_one, fake_logits)
return loss_d_real + loss_d_fake
class Losses(nn.CrossEntropyLoss):
"""2D Cross Entropy Loss with Auxilary Loss"""
def __init__(self, parent, summary_writer):
super(Losses, self).__init__()
self.bone_idx = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16])
self.parent=parent
self.summary_writer=summary_writer
self.train_cnt=0
def forward(self, pred, lbl):
gt_bones = (lbl - lbl[:, self.parent])[:, self.bone_idx]
pred_bones = (pred - pred[:, self.parent])[:, self.bone_idx]
self.loss_3d = torch.sqrt(torch.sum((lbl - pred) ** 2, -1)).mean()
self.loss_bone = torch.sqrt(torch.sum((gt_bones - pred_bones) ** 2, -1)).mean()
self.train_cnt += 1
self.summary_writer.add_scalar('h36m_train3d_loss', self.loss_3d.item(), self.train_cnt)
self.summary_writer.add_scalar('h36m_bone_loss', self.loss_bone.item(), self.train_cnt)
return self.loss_3d + self.loss_bone
| insightface/body/human_pose/ambiguity_aware/lib/core/loss.py/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/lib/core/loss.py",
"repo_id": "insightface",
"token_count": 1465
} | 96 |
#!/bin/bash
rm -rf demo_output
python inference.py --indir demo_input --outdir demo_output --cfg ../cfg/h36m_gt_scale.yaml --pretrain ../models/tmc_klbone.pth.tar
| insightface/body/human_pose/ambiguity_aware/scripts/demo.sh/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/scripts/demo.sh",
"repo_id": "insightface",
"token_count": 65
} | 97 |
# Face Detection Datasets
(Updating)
## Training Datasets
### WiderFace
http://shuoyang1213.me/WIDERFACE/
## Test Datasets
### WiderFace
http://shuoyang1213.me/WIDERFACE/
### FDDB
http://vis-www.cs.umass.edu/fddb/
### AFW
### PASCAL FACE
### MALF
http://www.cbsr.ia.ac.cn/faceevaluation/
| insightface/detection/_datasets_/README.md/0 | {
"file_path": "insightface/detection/_datasets_/README.md",
"repo_id": "insightface",
"token_count": 136
} | 98 |
from __future__ import print_function
import numpy as np
import cv2
import os
import math
import sys
import random
from ..config import config
def brightness_aug(src, x):
alpha = 1.0 + random.uniform(-x, x)
src *= alpha
return src
def contrast_aug(src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
src *= alpha
src += gray
return src
def saturation_aug(src, x):
alpha = 1.0 + random.uniform(-x, x)
coef = np.array([[[0.299, 0.587, 0.114]]])
gray = src * coef
gray = np.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
def color_aug(img, x):
if config.COLOR_MODE > 1:
augs = [brightness_aug, contrast_aug, saturation_aug]
random.shuffle(augs)
else:
augs = [brightness_aug]
for aug in augs:
#print(img.shape)
img = aug(img, x)
#print(img.shape)
return img
def get_image(roidb, scale=False):
"""
preprocess image and return processed roidb
:param roidb: a list of roidb
:return: list of img as in mxnet format
roidb add new item['im_info']
0 --- x (width, second dim of im)
|
y (height, first dim of im)
"""
num_images = len(roidb)
processed_ims = []
processed_roidb = []
for i in range(num_images):
roi_rec = roidb[i]
if 'stream' in roi_rec:
im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
else:
assert os.path.exists(
roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
im = cv2.imread(roi_rec['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
new_rec = roi_rec.copy()
if scale:
scale_range = config.TRAIN.SCALE_RANGE
im_scale = np.random.uniform(scale_range[0], scale_range[1])
im = cv2.resize(im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR)
elif not config.ORIGIN_SCALE:
scale_ind = random.randrange(len(config.SCALES))
target_size = config.SCALES[scale_ind][0]
max_size = config.SCALES[scale_ind][1]
im, im_scale = resize(im,
target_size,
max_size,
stride=config.IMAGE_STRIDE)
else:
im_scale = 1.0
im_tensor = transform(im, config.PIXEL_MEANS, config.PIXEL_STDS)
if 'boxes_mask' in roi_rec:
im = im.astype(np.float32)
boxes_mask = roi_rec['boxes_mask'].copy() * im_scale
boxes_mask = boxes_mask.astype(np.int)
for j in range(boxes_mask.shape[0]):
m = boxes_mask[j]
im_tensor[:, :, m[1]:m[3], m[0]:m[2]] = 0.0
#print('find mask', m, file=sys.stderr)
processed_ims.append(im_tensor)
new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
if config.TRAIN.IMAGE_ALIGN > 0:
if im_tensor.shape[
2] % config.TRAIN.IMAGE_ALIGN != 0 or im_tensor.shape[
3] % config.TRAIN.IMAGE_ALIGN != 0:
new_height = math.ceil(
float(im_tensor.shape[2]) /
config.TRAIN.IMAGE_ALIGN) * config.TRAIN.IMAGE_ALIGN
new_width = math.ceil(
float(im_tensor.shape[3]) /
config.TRAIN.IMAGE_ALIGN) * config.TRAIN.IMAGE_ALIGN
new_im_tensor = np.zeros(
(1, 3, int(new_height), int(new_width)))
new_im_tensor[:, :, 0:im_tensor.shape[2],
0:im_tensor.shape[3]] = im_tensor
print(im_tensor.shape, new_im_tensor.shape, file=sys.stderr)
im_tensor = new_im_tensor
#print('boxes', new_rec['boxes'], file=sys.stderr)
im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
new_rec['im_info'] = im_info
processed_roidb.append(new_rec)
return processed_ims, processed_roidb
TMP_ID = -1
# backup method
def __get_crop_image(roidb):
"""
preprocess image and return processed roidb
:param roidb: a list of roidb
:return: list of img as in mxnet format
roidb add new item['im_info']
0 --- x (width, second dim of im)
|
y (height, first dim of im)
"""
#roidb and each roi_rec can not be changed as it will be reused in next epoch
num_images = len(roidb)
processed_ims = []
processed_roidb = []
for i in range(num_images):
roi_rec = roidb[i]
if 'stream' in roi_rec:
im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
else:
assert os.path.exists(
roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
im = cv2.imread(roi_rec['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
if 'boxes_mask' in roi_rec:
#im = im.astype(np.float32)
boxes_mask = roi_rec['boxes_mask'].copy()
boxes_mask = boxes_mask.astype(np.int)
for j in range(boxes_mask.shape[0]):
m = boxes_mask[j]
im[m[1]:m[3], m[0]:m[2], :] = 0
#print('find mask', m, file=sys.stderr)
new_rec = roi_rec.copy()
#choose one gt randomly
SIZE = config.SCALES[0][0]
TARGET_BOX_SCALES = np.array([16, 32, 64, 128, 256, 512])
assert roi_rec['boxes'].shape[0] > 0
candidates = []
for i in range(roi_rec['boxes'].shape[0]):
box = roi_rec['boxes'][i]
box_size = max(box[2] - box[0], box[3] - box[1])
if box_size < config.TRAIN.MIN_BOX_SIZE:
continue
#if box[0]<0 or box[1]<0:
# continue
#if box[2]>im.shape[1] or box[3]>im.shape[0]:
# continue;
candidates.append(i)
assert len(candidates) > 0
box_ind = random.choice(candidates)
box = roi_rec['boxes'][box_ind]
box_size = max(box[2] - box[0], box[3] - box[1])
dist = np.abs(TARGET_BOX_SCALES - box_size)
nearest = np.argmin(dist)
target_ind = random.randrange(min(len(TARGET_BOX_SCALES), nearest + 2))
target_box_size = TARGET_BOX_SCALES[target_ind]
im_scale = float(target_box_size) / box_size
#min_scale = float(SIZE)/np.min(im.shape[0:2])
#if im_scale<min_scale:
# im_scale = min_scale
im = cv2.resize(im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR)
new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
box_scale = new_rec['boxes'][box_ind].copy().astype(np.int)
ul_min = box_scale[2:4] - SIZE
ul_max = box_scale[0:2]
assert ul_min[0] <= ul_max[0]
assert ul_min[1] <= ul_max[1]
#print('ul', ul_min, ul_max, box)
up, left = np.random.randint(ul_min[1],
ul_max[1] + 1), np.random.randint(
ul_min[0], ul_max[0] + 1)
#print('box', box, up, left)
M = [
[1.0, 0.0, -left],
[0.0, 1.0, -up],
]
M = np.array(M)
im = cv2.warpAffine(im,
M, (SIZE, SIZE),
borderValue=tuple(config.PIXEL_MEANS))
#tbox = np.array([left, left+SIZE, up, up+SIZE], dtype=np.int)
#im_new = np.zeros( (SIZE, SIZE,3), dtype=im.dtype)
#for i in range(3):
# im_new[:,:,i] = config.PIXEL_MEANS[i]
new_rec['boxes'][:, 0] -= left
new_rec['boxes'][:, 2] -= left
new_rec['boxes'][:, 1] -= up
new_rec['boxes'][:, 3] -= up
box_trans = new_rec['boxes'][box_ind].copy().astype(np.int)
#print('sel box', im_scale, box, box_scale, box_trans, file=sys.stderr)
#print('before', new_rec['boxes'].shape[0])
boxes_new = []
classes_new = []
for i in range(new_rec['boxes'].shape[0]):
box = new_rec['boxes'][i]
box_size = max(box[2] - box[0], box[3] - box[1])
center = np.array(([box[0], box[1]] + [box[2], box[3]])) / 2
if center[0] < 0 or center[1] < 0 or center[0] >= im.shape[
1] or center[1] >= im.shape[0]:
continue
if box_size < config.TRAIN.MIN_BOX_SIZE:
continue
boxes_new.append(box)
classes_new.append(new_rec['gt_classes'][i])
new_rec['boxes'] = np.array(boxes_new)
new_rec['gt_classes'] = np.array(classes_new)
#print('after', new_rec['boxes'].shape[0])
#assert new_rec['boxes'].shape[0]>0
DEBUG = True
if DEBUG:
global TMP_ID
if TMP_ID < 10:
tim = im.copy()
for i in range(new_rec['boxes'].shape[0]):
box = new_rec['boxes'][i].copy().astype(np.int)
cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
(255, 0, 0), 1)
filename = './trainimages/train%d.png' % TMP_ID
TMP_ID += 1
cv2.imwrite(filename, tim)
im_tensor = transform(im, config.PIXEL_MEANS, config.PIXEL_STDS,
config.PIXEL_SCALE)
processed_ims.append(im_tensor)
#print('boxes', new_rec['boxes'], file=sys.stderr)
im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
new_rec['im_info'] = im_info
processed_roidb.append(new_rec)
return processed_ims, processed_roidb
def expand_bboxes(bboxes,
image_width,
image_height,
expand_left=2.,
expand_up=2.,
expand_right=2.,
expand_down=2.):
"""
    Expand bboxes, expanding 2 times by default.
"""
expand_boxes = []
for bbox in bboxes:
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[2]
ymax = bbox[3]
w = xmax - xmin
h = ymax - ymin
ex_xmin = max(xmin - w / expand_left, 0.)
ex_ymin = max(ymin - h / expand_up, 0.)
ex_xmax = min(xmax + w / expand_right, image_width)
ex_ymax = min(ymax + h / expand_down, image_height)
expand_boxes.append([ex_xmin, ex_ymin, ex_xmax, ex_ymax])
return expand_boxes
def get_crop_image1(roidb):
"""
preprocess image and return processed roidb
:param roidb: a list of roidb
:return: list of img as in mxnet format
roidb add new item['im_info']
0 --- x (width, second dim of im)
|
y (height, first dim of im)
"""
#roidb and each roi_rec can not be changed as it will be reused in next epoch
num_images = len(roidb)
processed_ims = []
processed_roidb = []
for i in range(num_images):
roi_rec = roidb[i]
if 'stream' in roi_rec:
im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
else:
assert os.path.exists(
roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
im = cv2.imread(roi_rec['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
if 'boxes_mask' in roi_rec:
#im = im.astype(np.float32)
boxes_mask = roi_rec['boxes_mask'].copy()
boxes_mask = boxes_mask.astype(np.int)
for j in range(boxes_mask.shape[0]):
m = boxes_mask[j]
im[m[1]:m[3], m[0]:m[2], :] = 127
#print('find mask', m, file=sys.stderr)
SIZE = config.SCALES[0][0]
PRE_SCALES = [0.3, 0.45, 0.6, 0.8, 1.0]
#PRE_SCALES = [0.3, 0.45, 0.6, 0.8, 1.0, 0.8, 1.0, 0.8, 1.0]
_scale = random.choice(PRE_SCALES)
#_scale = np.random.uniform(PRE_SCALES[0], PRE_SCALES[-1])
size = int(np.min(im.shape[0:2]) * _scale)
#size = int(np.round(_scale*np.min(im.shape[0:2])))
im_scale = float(SIZE) / size
#origin_im_scale = im_scale
#size = np.round(np.min(im.shape[0:2])*im_scale)
#im_scale *= (float(SIZE)/size)
origin_shape = im.shape
if _scale > 10.0: #avoid im.size<SIZE, never?
sizex = int(np.round(im.shape[1] * im_scale))
sizey = int(np.round(im.shape[0] * im_scale))
if sizex < SIZE:
sizex = SIZE
print('keepx', sizex)
if sizey < SIZE:
sizey = SIZE
                print('keepy', sizey)
im = cv2.resize(im, (sizex, sizey), interpolation=cv2.INTER_LINEAR)
else:
im = cv2.resize(im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR)
assert im.shape[0] >= SIZE and im.shape[1] >= SIZE
#print('image size', origin_shape, _scale, SIZE, size, im_scale)
new_rec = roi_rec.copy()
new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
if config.FACE_LANDMARK:
new_rec['landmarks'] = roi_rec['landmarks'].copy()
new_rec['landmarks'][:, :, 0:2] *= im_scale
retry = 0
LIMIT = 25
size = SIZE
while retry < LIMIT:
up, left = (np.random.randint(0, im.shape[0] - size + 1),
np.random.randint(0, im.shape[1] - size + 1))
boxes_new = new_rec['boxes'].copy()
im_new = im[up:(up + size), left:(left + size), :]
#print('crop', up, left, size, im_scale)
boxes_new[:, 0] -= left
boxes_new[:, 2] -= left
boxes_new[:, 1] -= up
boxes_new[:, 3] -= up
if config.FACE_LANDMARK:
landmarks_new = new_rec['landmarks'].copy()
landmarks_new[:, :, 0] -= left
landmarks_new[:, :, 1] -= up
#for i in range(0,10,2):
# landmarks_new[:,i] -= left
#for i in range(1,10,2):
# landmarks_new[:,i] -= up
valid_landmarks = []
#im_new = cv2.resize(im_new, (SIZE, SIZE), interpolation=cv2.INTER_LINEAR)
#boxes_new *= im_scale
#print(origin_shape, im_new.shape, im_scale)
valid = []
valid_boxes = []
for i in range(boxes_new.shape[0]):
box = boxes_new[i]
#center = np.array(([box[0], box[1]]+[box[2], box[3]]))/2
centerx = (box[0] + box[2]) / 2
centery = (box[1] + box[3]) / 2
#box[0] = max(0, box[0])
#box[1] = max(0, box[1])
#box[2] = min(im_new.shape[1], box[2])
#box[3] = min(im_new.shape[0], box[3])
box_size = max(box[2] - box[0], box[3] - box[1])
if centerx < 0 or centery < 0 or centerx >= im_new.shape[
1] or centery >= im_new.shape[0]:
continue
if box_size < config.TRAIN.MIN_BOX_SIZE:
continue
#filter by landmarks? TODO
valid.append(i)
valid_boxes.append(box)
if config.FACE_LANDMARK:
valid_landmarks.append(landmarks_new[i])
if len(valid) > 0 or retry == LIMIT - 1:
im = im_new
new_rec['boxes'] = np.array(valid_boxes)
new_rec['gt_classes'] = new_rec['gt_classes'][valid]
if config.FACE_LANDMARK:
new_rec['landmarks'] = np.array(valid_landmarks)
if config.HEAD_BOX:
face_box = new_rec['boxes']
head_box = expand_bboxes(face_box,
image_width=im.shape[1],
image_height=im.shape[0])
new_rec['boxes_head'] = np.array(head_box)
break
retry += 1
if config.COLOR_MODE > 0 and config.COLOR_JITTERING > 0.0:
im = im.astype(np.float32)
im = color_aug(im, config.COLOR_JITTERING)
#assert np.all(new_rec['landmarks'][:,10]>0.0)
global TMP_ID
if TMP_ID >= 0 and TMP_ID < 10:
tim = im.copy().astype(np.uint8)
for i in range(new_rec['boxes'].shape[0]):
box = new_rec['boxes'][i].copy().astype(np.int)
cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
(255, 0, 0), 1)
print('draw box:', box)
if config.FACE_LANDMARK:
for i in range(new_rec['landmarks'].shape[0]):
landmark = new_rec['landmarks'][i].copy()
if landmark[0][2] < 0:
print('zero', landmark)
continue
landmark = landmark.astype(np.int)
print('draw landmark', landmark)
for k in range(5):
color = (0, 0, 255)
if k == 0 or k == 3:
color = (0, 255, 0)
pp = (landmark[k][0], landmark[k][1])
cv2.circle(tim, (pp[0], pp[1]), 1, color, 2)
filename = './trainimages/train%d.png' % TMP_ID
print('write', filename)
cv2.imwrite(filename, tim)
TMP_ID += 1
im_tensor = transform(im, config.PIXEL_MEANS, config.PIXEL_STDS,
config.PIXEL_SCALE)
processed_ims.append(im_tensor)
#print('boxes', new_rec['boxes'], file=sys.stderr)
im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
new_rec['im_info'] = np.array(im_info, dtype=np.float32)
processed_roidb.append(new_rec)
return processed_ims, processed_roidb
def get_crop_image2(roidb):
"""
preprocess image and return processed roidb
:param roidb: a list of roidb
:return: list of img as in mxnet format
roidb add new item['im_info']
0 --- x (width, second dim of im)
|
y (height, first dim of im)
"""
#roidb and each roi_rec can not be changed as it will be reused in next epoch
num_images = len(roidb)
processed_ims = []
processed_roidb = []
for i in range(num_images):
roi_rec = roidb[i]
if 'stream' in roi_rec:
im = cv2.imdecode(roi_rec['stream'], cv2.IMREAD_COLOR)
else:
assert os.path.exists(
roi_rec['image']), '{} does not exist'.format(roi_rec['image'])
im = cv2.imread(roi_rec['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
if 'boxes_mask' in roi_rec:
#im = im.astype(np.float32)
boxes_mask = roi_rec['boxes_mask'].copy()
boxes_mask = boxes_mask.astype(np.int)
for j in range(boxes_mask.shape[0]):
m = boxes_mask[j]
im[m[1]:m[3], m[0]:m[2], :] = 0
#print('find mask', m, file=sys.stderr)
SIZE = config.SCALES[0][0]
scale_array = np.array([16, 32, 64, 128, 256, 512], dtype=np.float32)
candidates = []
for i in range(roi_rec['boxes'].shape[0]):
box = roi_rec['boxes'][i]
box_size = max(box[2] - box[0], box[3] - box[1])
if box_size < config.TRAIN.MIN_BOX_SIZE:
continue
#if box[0]<0 or box[1]<0:
# continue
#if box[2]>im.shape[1] or box[3]>im.shape[0]:
# continue;
candidates.append(i)
assert len(candidates) > 0
box_ind = random.choice(candidates)
box = roi_rec['boxes'][box_ind]
width = box[2] - box[0]
height = box[3] - box[1]
wid = width
hei = height
resize_width, resize_height = config.SCALES[0]
image_width = im.shape[0]
image_height = im.shape[1]
area = width * height
range_size = 0
for scale_ind in range(0, len(scale_array) - 1):
if area > scale_array[scale_ind] ** 2 and area < \
scale_array[scale_ind + 1] ** 2:
range_size = scale_ind + 1
break
if area > scale_array[len(scale_array) - 2]**2:
range_size = len(scale_array) - 2
scale_choose = 0.0
if range_size == 0:
rand_idx_size = 0
else:
# np.random.randint range: [low, high)
rng_rand_size = np.random.randint(0, range_size + 1)
rand_idx_size = rng_rand_size % (range_size + 1)
if rand_idx_size == range_size:
min_resize_val = scale_array[rand_idx_size] / 2.0
max_resize_val = min(2.0 * scale_array[rand_idx_size],
2 * math.sqrt(wid * hei))
scale_choose = random.uniform(min_resize_val, max_resize_val)
else:
min_resize_val = scale_array[rand_idx_size] / 2.0
max_resize_val = 2.0 * scale_array[rand_idx_size]
scale_choose = random.uniform(min_resize_val, max_resize_val)
sample_bbox_size = wid * resize_width / scale_choose
w_off_orig = 0.0
h_off_orig = 0.0
if sample_bbox_size < max(image_height, image_width):
if wid <= sample_bbox_size:
w_off_orig = np.random.uniform(xmin + wid - sample_bbox_size,
xmin)
else:
w_off_orig = np.random.uniform(xmin,
xmin + wid - sample_bbox_size)
if hei <= sample_bbox_size:
h_off_orig = np.random.uniform(ymin + hei - sample_bbox_size,
ymin)
else:
h_off_orig = np.random.uniform(ymin,
ymin + hei - sample_bbox_size)
else:
w_off_orig = np.random.uniform(image_width - sample_bbox_size, 0.0)
h_off_orig = np.random.uniform(image_height - sample_bbox_size,
0.0)
w_off_orig = math.floor(w_off_orig)
h_off_orig = math.floor(h_off_orig)
# Figure out top left coordinates.
w_off = 0.0
h_off = 0.0
w_off = float(w_off_orig / image_width)
h_off = float(h_off_orig / image_height)
im_new = im[up:(up + size), left:(left + size), :]
sampled_bbox = bbox(w_off, h_off,
w_off + float(sample_bbox_size / image_width),
h_off + float(sample_bbox_size / image_height))
return sampled_bbox
box_size = max(box[2] - box[0], box[3] - box[1])
dist = np.abs(TARGET_BOX_SCALES - box_size)
nearest = np.argmin(dist)
target_ind = random.randrange(min(len(TARGET_BOX_SCALES), nearest + 2))
target_box_size = TARGET_BOX_SCALES[target_ind]
im_scale = float(target_box_size) / box_size
PRE_SCALES = [0.3, 0.45, 0.6, 0.8, 1.0]
_scale = random.choice(PRE_SCALES)
#_scale = np.random.uniform(PRE_SCALES[0], PRE_SCALES[-1])
size = int(np.round(_scale * np.min(im.shape[0:2])))
im_scale = float(SIZE) / size
#origin_im_scale = im_scale
#size = np.round(np.min(im.shape[0:2])*im_scale)
#im_scale *= (float(SIZE)/size)
origin_shape = im.shape
if _scale > 10.0: #avoid im.size<SIZE, never?
sizex = int(np.round(im.shape[1] * im_scale))
sizey = int(np.round(im.shape[0] * im_scale))
if sizex < SIZE:
sizex = SIZE
print('keepx', sizex)
if sizey < SIZE:
sizey = SIZE
                print('keepy', sizey)
im = cv2.resize(im, (sizex, sizey), interpolation=cv2.INTER_LINEAR)
else:
im = cv2.resize(im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR)
assert im.shape[0] >= SIZE and im.shape[1] >= SIZE
new_rec = roi_rec.copy()
new_rec['boxes'] = roi_rec['boxes'].copy() * im_scale
if config.FACE_LANDMARK:
new_rec['landmarks'] = roi_rec['landmarks'].copy() * im_scale
retry = 0
LIMIT = 25
size = SIZE
while retry < LIMIT:
up, left = (np.random.randint(0, im.shape[0] - size + 1),
np.random.randint(0, im.shape[1] - size + 1))
boxes_new = new_rec['boxes'].copy()
im_new = im[up:(up + size), left:(left + size), :]
#print('crop', up, left, size, im_scale)
boxes_new[:, 0] -= left
boxes_new[:, 2] -= left
boxes_new[:, 1] -= up
boxes_new[:, 3] -= up
if config.FACE_LANDMARK:
landmarks_new = new_rec['landmarks'].copy()
for i in range(0, 10, 2):
landmarks_new[:, i] -= left
for i in range(1, 10, 2):
landmarks_new[:, i] -= up
valid_landmarks = []
#im_new = cv2.resize(im_new, (SIZE, SIZE), interpolation=cv2.INTER_LINEAR)
#boxes_new *= im_scale
#print(origin_shape, im_new.shape, im_scale)
valid = []
valid_boxes = []
for i in range(boxes_new.shape[0]):
box = boxes_new[i]
#center = np.array(([box[0], box[1]]+[box[2], box[3]]))/2
centerx = (box[0] + box[2]) / 2
centery = (box[1] + box[3]) / 2
#box[0] = max(0, box[0])
#box[1] = max(0, box[1])
#box[2] = min(im_new.shape[1], box[2])
#box[3] = min(im_new.shape[0], box[3])
box_size = max(box[2] - box[0], box[3] - box[1])
if centerx < 0 or centery < 0 or centerx >= im_new.shape[
1] or centery >= im_new.shape[0]:
continue
if box_size < config.TRAIN.MIN_BOX_SIZE:
continue
#filter by landmarks? TODO
valid.append(i)
valid_boxes.append(box)
if config.FACE_LANDMARK:
valid_landmarks.append(landmarks_new[i])
if len(valid) > 0 or retry == LIMIT - 1:
im = im_new
new_rec['boxes'] = np.array(valid_boxes)
new_rec['gt_classes'] = new_rec['gt_classes'][valid]
if config.FACE_LANDMARK:
new_rec['landmarks'] = np.array(valid_landmarks)
break
retry += 1
if config.COLOR_JITTERING > 0.0:
im = im.astype(np.float32)
im = color_aug(im, config.COLOR_JITTERING)
#assert np.all(new_rec['landmarks'][:,10]>0.0)
global TMP_ID
if TMP_ID >= 0 and TMP_ID < 10:
tim = im.copy().astype(np.uint8)
for i in range(new_rec['boxes'].shape[0]):
box = new_rec['boxes'][i].copy().astype(np.int)
cv2.rectangle(tim, (box[0], box[1]), (box[2], box[3]),
(255, 0, 0), 1)
print('draw box:', box)
if config.FACE_LANDMARK:
for i in range(new_rec['landmarks'].shape[0]):
landmark = new_rec['landmarks'][i].copy()
if landmark[10] == 0.0:
print('zero', landmark)
continue
landmark = landmark.astype(np.int)
print('draw landmark', landmark)
for k in range(5):
color = (0, 0, 255)
if k == 0 or k == 3:
color = (0, 255, 0)
pp = (landmark[k * 2], landmark[1 + k * 2])
cv2.circle(tim, (pp[0], pp[1]), 1, color, 2)
filename = './trainimages/train%d.png' % TMP_ID
print('write', filename)
cv2.imwrite(filename, tim)
TMP_ID += 1
im_tensor = transform(im, config.PIXEL_MEANS, config.PIXEL_STDS,
config.PIXEL_SCALE)
processed_ims.append(im_tensor)
#print('boxes', new_rec['boxes'], file=sys.stderr)
im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
new_rec['im_info'] = np.array(im_info, dtype=np.float32)
processed_roidb.append(new_rec)
return processed_ims, processed_roidb
def do_mixup(im1, roidb1, im2, roidb2):
im = (im1 + im2) / 2.0
roidb = {}
#print(roidb1.keys())
#for k in roidb1:
for k in ['boxes', 'landmarks', 'gt_classes', 'im_info']:
v1 = roidb1[k]
v2 = roidb2[k]
if k != 'im_info':
#print('try', k, v1.shape, v2.shape)
if v1.shape[0] > 0 and v2.shape[0] > 0:
v = np.concatenate((v1, v2), axis=0)
else:
v = v1
else:
v = v1
#print(k, v1.shape, v2.shape, v.shape)
roidb[k] = v
return im, roidb
def get_crop_image(roidb):
ims, roidbs = get_crop_image1(roidb)
if config.MIXUP > 0.0 and np.random.random() < config.MIXUP:
for i in range(len(ims)):
im = ims[i]
roidb = roidbs[i]
j = np.random.randint(0, len(ims) - 1)
if j >= i:
j += 1
im, roidb = do_mixup(im, roidb, ims[j], roidbs[j])
ims[i] = im
roidbs[i] = roidb
return ims, roidbs
def resize(im, target_size, max_size, stride=0, min_size=0):
"""
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param target_size: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:param stride: if given, pad the image to designated stride
:return:
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
if min_size > 0 and np.round(im_scale * im_size_min) < min_size:
im_scale = float(min_size) / float(im_size_min)
im = cv2.resize(im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR)
if stride == 0:
return im, im_scale
else:
# pad to product of stride
im_height = int(np.ceil(im.shape[0] / float(stride)) * stride)
im_width = int(np.ceil(im.shape[1] / float(stride)) * stride)
im_channel = im.shape[2]
padded_im = np.zeros((im_height, im_width, im_channel))
padded_im[:im.shape[0], :im.shape[1], :] = im
return padded_im, im_scale
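# A small worked example of the scaling rule above (hypothetical sizes): for a
# 480x640 image with target_size=600 and max_size=1000, im_scale = 600/480 = 1.25
# and the long side becomes 640*1.25 = 800 <= 1000, so that scale is kept; with
# max_size=700 the scale would instead be reduced to 700/640.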
def transform(im, pixel_means, pixel_stds, pixel_scale):
"""
transform into mxnet tensor,
subtract pixel size and transform to correct format
:param im: [height, width, channel] in BGR
:param pixel_means: [B, G, R pixel means]
:return: [batch, channel, height, width]
"""
im_tensor = np.zeros((1, 3, im.shape[0], im.shape[1]))
for i in range(3):
im_tensor[0, i, :, :] = (im[:, :, 2 - i] / pixel_scale -
pixel_means[2 - i]) / pixel_stds[2 - i]
return im_tensor
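# In other words, an (H, W, 3) BGR image becomes a (1, 3, H, W) tensor in RGB
# channel order, divided by pixel_scale and normalized per channel with
# pixel_means and pixel_stds.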
def transform_inverse(im_tensor, pixel_means):
"""
transform from mxnet im_tensor to ordinary RGB image
im_tensor is limited to one image
:param im_tensor: [batch, channel, height, width]
:param pixel_means: [B, G, R pixel means]
:return: im [height, width, channel(RGB)]
"""
assert im_tensor.shape[0] == 1
im_tensor = im_tensor.copy()
# put channel back
channel_swap = (0, 2, 3, 1)
im_tensor = im_tensor.transpose(channel_swap)
im = im_tensor[0]
assert im.shape[2] == 3
im += pixel_means[[2, 1, 0]]
im = im.astype(np.uint8)
return im
def tensor_vstack(tensor_list, pad=0):
"""
vertically stack tensors
:param tensor_list: list of tensor to be stacked vertically
:param pad: label to pad with
:return: tensor with max shape
"""
ndim = len(tensor_list[0].shape)
dtype = tensor_list[0].dtype
islice = tensor_list[0].shape[0]
dimensions = []
first_dim = sum([tensor.shape[0] for tensor in tensor_list])
dimensions.append(first_dim)
for dim in range(1, ndim):
dimensions.append(max([tensor.shape[dim] for tensor in tensor_list]))
if pad == 0:
all_tensor = np.zeros(tuple(dimensions), dtype=dtype)
elif pad == 1:
all_tensor = np.ones(tuple(dimensions), dtype=dtype)
else:
all_tensor = np.full(tuple(dimensions), pad, dtype=dtype)
if ndim == 1:
for ind, tensor in enumerate(tensor_list):
all_tensor[ind * islice:(ind + 1) * islice] = tensor
elif ndim == 2:
for ind, tensor in enumerate(tensor_list):
all_tensor[ind * islice:(ind + 1) *
islice, :tensor.shape[1]] = tensor
elif ndim == 3:
for ind, tensor in enumerate(tensor_list):
all_tensor[ind * islice:(ind + 1) *
islice, :tensor.shape[1], :tensor.shape[2]] = tensor
elif ndim == 4:
for ind, tensor in enumerate(tensor_list):
all_tensor[ind * islice:(ind + 1) * islice, :tensor.
shape[1], :tensor.shape[2], :tensor.shape[3]] = tensor
elif ndim == 5:
for ind, tensor in enumerate(tensor_list):
all_tensor[ind * islice:(ind + 1) *
islice, :tensor.shape[1], :tensor.shape[2], :tensor.
shape[3], :tensor.shape[4]] = tensor
else:
print(tensor_list[0].shape)
raise Exception('Sorry, unimplemented.')
return all_tensor
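# A small illustration of tensor_vstack's padding behaviour (hypothetical shapes):
#   tensor_vstack([np.zeros((1, 3)), np.zeros((1, 5))]) has shape (2, 5), and any
#   positions beyond an input tensor's own extent are filled with `pad`.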
| insightface/detection/retinaface/rcnn/io/image.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/io/image.py",
"repo_id": "insightface",
"token_count": 19452
} | 99 |
__author__ = 'tsungyi'
from rcnn.pycocotools import _mask
# Interface for manipulating masks stored in RLE format.
#
# RLE is a simple yet efficient format for storing binary masks. RLE
# first divides a vector (or vectorized image) into a series of piecewise
# constant regions and then for each piece simply stores the length of
# that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would
# be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1]
# (note that the odd counts are always the numbers of zeros). Instead of
# storing the counts directly, additional compression is achieved with a
# variable bitrate representation based on a common scheme called LEB128.
#
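# A minimal sketch of that counting idea in plain Python (illustration only, not
# part of this API; the real encoder additionally applies the LEB128-style
# variable-length packing mentioned above):
#
#   def rle_counts(m):
#       counts, prev, run = [], 0, 0
#       for v in m:
#           if v == prev:
#               run += 1
#           else:
#               counts.append(run)
#               prev, run = v, 1
#       counts.append(run)
#       return counts        # rle_counts([0,0,1,1,1,0,1]) == [2, 3, 1, 1]
#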
# Compression is greatest given large piecewise constant regions.
# Specifically, the size of the RLE is proportional to the number of
# *boundaries* in M (or for an image the number of boundaries in the y
# direction). Assuming fairly simple shapes, the RLE representation is
# O(sqrt(n)) where n is number of pixels in the object. Hence space usage
# is substantially lower, especially for large simple objects (large n).
#
# Many common operations on masks can be computed directly using the RLE
# (without need for decoding). This includes computations such as area,
# union, intersection, etc. All of these operations are linear in the
# size of the RLE, in other words they are O(sqrt(n)) where n is the area
# of the object. Computing these operations on the original mask is O(n).
# Thus, using the RLE can result in substantial computational savings.
#
# The following API functions are defined:
# encode - Encode binary masks using RLE.
# decode - Decode binary masks encoded via RLE.
# merge - Compute union or intersection of encoded masks.
# iou - Compute intersection over union between masks.
# area - Compute area of encoded masks.
# toBbox - Get bounding boxes surrounding encoded masks.
# frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask.
#
# Usage:
# Rs = encode( masks )
# masks = decode( Rs )
# R = merge( Rs, intersect=false )
# o = iou( dt, gt, iscrowd )
# a = area( Rs )
# bbs = toBbox( Rs )
# Rs = frPyObjects( [pyObjects], h, w )
#
# In the API the following formats are used:
# Rs - [dict] Run-length encoding of binary masks
# R - dict Run-length encoding of binary mask
# masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order)
# iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore
# bbs - [nx4] Bounding box(es) stored as [x y w h]
# poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list)
# dt,gt - May be either bounding boxes or encoded masks
# Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel).
#
# Finally, a note about the intersection over union (iou) computation.
# The standard iou of a ground truth (gt) and detected (dt) object is
# iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt))
# For "crowd" regions, we use a modified criteria. If a gt object is
# marked as "iscrowd", we allow a dt to match any subregion of the gt.
# Choosing gt' in the crowd gt that best matches the dt can be done using
# gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing
# iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt)
# For crowd gt regions we use this modified criteria above for the iou.
#
# To compile run "python setup.py build_ext --inplace"
# Please do not contact us for help with compiling.
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
iou = _mask.iou
merge = _mask.merge
frPyObjects = _mask.frPyObjects
def encode(bimask):
if len(bimask.shape) == 3:
return _mask.encode(bimask)
elif len(bimask.shape) == 2:
h, w = bimask.shape
return _mask.encode(bimask.reshape((h, w, 1), order='F'))[0]
def decode(rleObjs):
if type(rleObjs) == list:
return _mask.decode(rleObjs)
else:
return _mask.decode([rleObjs])[:, :, 0]
def area(rleObjs):
if type(rleObjs) == list:
return _mask.area(rleObjs)
else:
return _mask.area([rleObjs])[0]
def toBbox(rleObjs):
if type(rleObjs) == list:
return _mask.toBbox(rleObjs)
else:
return _mask.toBbox([rleObjs])[0]
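# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# A minimal round trip through the helpers above, assuming the compiled
# rcnn.pycocotools._mask extension has been built ("python setup.py build_ext
# --inplace"). Masks must be uint8 and stored in column-major (Fortran) order.
if __name__ == '__main__':
    import numpy as np
    m = np.zeros((4, 4), dtype=np.uint8, order='F')
    m[1:3, 1:3] = 1                    # a 2x2 square of foreground pixels
    rle = encode(m)                    # single 2D mask -> single RLE dict
    print(area(rle))                   # 4, the number of foreground pixels
    print(toBbox(rle))                 # [1. 1. 2. 2.] as [x y w h]
    assert (decode(rle) == m).all()    # decode inverts encode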
| insightface/detection/retinaface/rcnn/pycocotools/mask.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/pycocotools/mask.py",
"repo_id": "insightface",
"token_count": 1622
} | 100 |
import argparse
import pprint
import mxnet as mx
from ..logger import logger
from ..config import config, default, generate_config
from ..symbol import *
from ..dataset import *
from ..core.loader import TestLoader
from ..core.tester import Predictor, generate_proposals, test_proposals
from ..utils.load_model import load_param
def test_rpn(network,
dataset,
image_set,
root_path,
dataset_path,
ctx,
prefix,
epoch,
vis,
shuffle,
thresh,
test_output=False):
# rpn generate proposal config
config.TEST.HAS_RPN = True
# print config
logger.info(pprint.pformat(config))
# load symbol
sym = eval('get_' + network + '_rpn_test')()
# load dataset and prepare imdb for training
imdb = eval(dataset)(image_set, root_path, dataset_path)
roidb = imdb.gt_roidb()
test_data = TestLoader(roidb,
batch_size=1,
shuffle=shuffle,
has_rpn=True,
withlabel=True)
# load model
arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx)
# infer shape
data_shape_dict = dict(test_data.provide_data)
arg_shape, _, aux_shape = sym.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
# check parameters
for k in sym.list_arguments():
if k in data_shape_dict or 'label' in k:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
for k in sym.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
# decide maximum shape
data_names = [k[0] for k in test_data.provide_data]
label_names = None if test_data.provide_label is None else [
k[0] for k in test_data.provide_label
]
max_data_shape = [('data', (1, 3, max([v[1] for v in config.SCALES]),
max([v[1] for v in config.SCALES])))]
# create predictor
predictor = Predictor(sym,
data_names,
label_names,
context=ctx,
max_data_shapes=max_data_shape,
provide_data=test_data.provide_data,
provide_label=test_data.provide_label,
arg_params=arg_params,
aux_params=aux_params)
# start testing
if not test_output:
imdb_boxes = generate_proposals(predictor,
test_data,
imdb,
vis=vis,
thresh=thresh)
imdb.evaluate_recall(roidb, candidate_boxes=imdb_boxes)
else:
test_proposals(predictor, test_data, imdb, roidb, vis=vis)
def parse_args():
parser = argparse.ArgumentParser(
description='Test a Region Proposal Network')
# general
parser.add_argument('--network',
help='network name',
default=default.network,
type=str)
parser.add_argument('--dataset',
help='dataset name',
default=default.dataset,
type=str)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
parser.add_argument('--image_set',
help='image_set name',
default=default.test_image_set,
type=str)
parser.add_argument('--root_path',
help='output data folder',
default=default.root_path,
type=str)
parser.add_argument('--dataset_path',
help='dataset path',
default=default.dataset_path,
type=str)
# testing
parser.add_argument('--prefix',
help='model to test with',
default=default.rpn_prefix,
type=str)
parser.add_argument('--epoch',
help='model to test with',
default=default.rpn_epoch,
type=int)
# rpn
parser.add_argument('--gpu',
help='GPU device to test with',
default=0,
type=int)
parser.add_argument('--vis',
help='turn on visualization',
action='store_true')
parser.add_argument('--thresh',
help='rpn proposal threshold',
default=0,
type=float)
parser.add_argument('--shuffle',
help='shuffle data on visualization',
action='store_true')
args = parser.parse_args()
return args
def main():
args = parse_args()
logger.info('Called with argument: %s' % args)
ctx = mx.gpu(args.gpu)
test_rpn(args.network, args.dataset, args.image_set, args.root_path,
args.dataset_path, ctx, args.prefix, args.epoch, args.vis,
args.shuffle, args.thresh)
if __name__ == '__main__':
main()
| insightface/detection/retinaface/rcnn/tools/test_rpn.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/tools/test_rpn.py",
"repo_id": "insightface",
"token_count": 3069
} | 101 |
dataset_type = 'RetinaFaceDataset'
data_root = 'data/retinaface/'
train_root = data_root + 'train/'
val_root = data_root + 'val/'
#img_norm_cfg = dict(
# mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_norm_cfg = dict(
mean=[127.5, 127.5, 127.5], std=[128.0, 128.0, 128.0], to_rgb=True)
train_pipeline = [
#dict(type='LoadImageFromFile'),
#dict(type='LoadAnnotations', with_bbox=True),
#dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
#dict(type='RandomFlip', flip_ratio=0.5),
#dict(type='Normalize', **img_norm_cfg),
#dict(type='Pad', size_divisor=32),
#dict(type='DefaultFormatBundle'),
#dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_keypoints', 'gt_labels']),
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_keypoints=True),
dict(type='RandomSquareCrop',
crop_choice=[0.3, 0.45, 0.6, 0.8, 1.0]),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=(640, 640), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_bboxes_ignore', 'gt_keypointss']),
#dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_bboxes_ignore']),
]
test_pipeline = [
#dict(type='LoadImageFromFile'),
#dict(
# type='MultiScaleFlipAug',
# img_scale=(1333, 800),
# flip=False,
# transforms=[
# dict(type='Resize', keep_ratio=True),
# dict(type='RandomFlip'),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
# dict(type='ImageToTensor', keys=['img']),
# dict(type='Collect', keys=['img']),
# ])
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1100, 1650),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.0),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32, pad_val=0),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=train_root + 'labelv2.txt',
        img_prefix=train_root + 'images/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=val_root + 'labelv2.txt',
        img_prefix=val_root + 'images/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=val_root + 'labelv2.txt',
        img_prefix=val_root + 'images/',
pipeline=test_pipeline),
)
#evaluation = dict(interval=1, metric='bbox')
evaluation = dict(interval=10, metric='mAP')
| insightface/detection/scrfd/configs/_base_/datasets/retinaface.py/0 | {
"file_path": "insightface/detection/scrfd/configs/_base_/datasets/retinaface.py",
"repo_id": "insightface",
"token_count": 1482
} | 102 |
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 11])
total_epochs = 12
| insightface/detection/scrfd/configs/_base_/schedules/schedule_1x.py/0 | {
"file_path": "insightface/detection/scrfd/configs/_base_/schedules/schedule_1x.py",
"repo_id": "insightface",
"token_count": 123
} | 103 |
from mmcv.utils import Registry, build_from_cfg
ANCHOR_GENERATORS = Registry('Anchor generator')
def build_anchor_generator(cfg, default_args=None):
return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args)
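# --- Hedged usage sketch (added for illustration, not part of the original file) ---
# Building an anchor generator from a config dict through the registry above.
# The import goes through the package so that 'AnchorGenerator' is registered
# before the lookup; the field values are placeholders, not a recommended setting.
if __name__ == '__main__':
    from mmdet.core.anchor import build_anchor_generator as _build_ag
    cfg = dict(
        type='AnchorGenerator',
        ratios=[1.0],
        scales=[8],
        strides=[8, 16, 32])
    anchor_generator = _build_ag(cfg)
    print(anchor_generator.num_base_anchors)  # one base anchor per stride level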
| insightface/detection/scrfd/mmdet/core/anchor/builder.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/anchor/builder.py",
"repo_id": "insightface",
"token_count": 77
} | 104 |
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder."""
def __init__(self, **kwargs):
pass
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
pass
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
pass
| insightface/detection/scrfd/mmdet/core/bbox/coder/base_bbox_coder.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/coder/base_bbox_coder.py",
"repo_id": "insightface",
"token_count": 186
} | 105 |
import torch
from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
r"""Online Hard Example Mining Sampler described in `Training Region-based
Object Detectors with Online Hard Example Mining
<https://arxiv.org/abs/1604.03540>`_.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
**kwargs):
super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
add_gt_as_proposals)
self.context = context
if not hasattr(self.context, 'num_stages'):
self.bbox_head = self.context.bbox_head
else:
self.bbox_head = self.context.bbox_head[self.context.current_stage]
    def hard_mining(self, inds, num_expected, bboxes, labels, feats):
        """Rank the given candidates by their classification loss (computed
        without gradients) and keep the ``num_expected`` hardest ones."""
with torch.no_grad():
rois = bbox2roi([bboxes])
if not hasattr(self.context, 'num_stages'):
bbox_results = self.context._bbox_forward(feats, rois)
else:
bbox_results = self.context._bbox_forward(
self.context.current_stage, feats, rois)
cls_score = bbox_results['cls_score']
loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=rois,
labels=labels,
label_weights=cls_score.new_ones(cls_score.size(0)),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')['loss_cls']
_, topk_loss_inds = loss.topk(num_expected)
return inds[topk_loss_inds]
def _sample_pos(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample positive boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected positive samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of positive samples
"""
# Sample some hard positive samples
pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
if pos_inds.numel() != 0:
pos_inds = pos_inds.squeeze(1)
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
assign_result.labels[pos_inds], feats)
def _sample_neg(self,
assign_result,
num_expected,
bboxes=None,
feats=None,
**kwargs):
"""Sample negative boxes.
Args:
assign_result (:obj:`AssignResult`): Assigned results
num_expected (int): Number of expected negative samples
bboxes (torch.Tensor, optional): Boxes. Defaults to None.
feats (list[torch.Tensor], optional): Multi-level features.
Defaults to None.
Returns:
torch.Tensor: Indices of negative samples
"""
# Sample some hard negative samples
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
neg_labels = assign_result.labels.new_empty(
neg_inds.size(0)).fill_(self.bbox_head.num_classes)
return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
neg_labels, feats)
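# --- Hedged sketch (added for illustration, not part of the original file) ---
# The core selection step of online hard example mining, isolated from the
# detector context used by hard_mining() above: score each candidate by its
# unreduced loss and keep the indices of the top-k hardest ones. The guard means
# it only runs when this module is executed directly (e.g. with python -m).
if __name__ == '__main__':
    losses = torch.tensor([0.1, 2.3, 0.7, 1.5, 0.05])
    inds = torch.arange(losses.numel())
    num_expected = 2
    _, topk_loss_inds = losses.topk(num_expected)
    print(inds[topk_loss_inds])  # tensor([1, 3]): the two highest-loss candidates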
| insightface/detection/scrfd/mmdet/core/bbox/samplers/ohem_sampler.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/samplers/ohem_sampler.py",
"repo_id": "insightface",
"token_count": 2164
} | 106 |
import warnings
from mmcv.runner import (Fp16OptimizerHook, auto_fp16, force_fp32,
wrap_fp16_model)
class DeprecatedFp16OptimizerHook(Fp16OptimizerHook):
"""A wrapper class for the FP16 optimizer hook. This class wraps
:class:`Fp16OptimizerHook` in `mmcv.runner` and shows a warning that the
:class:`Fp16OptimizerHook` from `mmdet.core` will be deprecated.
Refer to :class:`Fp16OptimizerHook` in `mmcv.runner` for more details.
Args:
loss_scale (float): Scale factor multiplied with loss.
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
warnings.warn(
'Importing Fp16OptimizerHook from "mmdet.core" will be '
'deprecated in the future. Please import them from "mmcv.runner" '
'instead')
def deprecated_auto_fp16(*args, **kwargs):
warnings.warn(
'Importing auto_fp16 from "mmdet.core" will be '
'deprecated in the future. Please import them from "mmcv.runner" '
'instead')
return auto_fp16(*args, **kwargs)
def deprecated_force_fp32(*args, **kwargs):
warnings.warn(
'Importing force_fp32 from "mmdet.core" will be '
'deprecated in the future. Please import them from "mmcv.runner" '
'instead')
return force_fp32(*args, **kwargs)
def deprecated_wrap_fp16_model(*args, **kwargs):
warnings.warn(
'Importing wrap_fp16_model from "mmdet.core" will be '
'deprecated in the future. Please import them from "mmcv.runner" '
'instead')
wrap_fp16_model(*args, **kwargs)
| insightface/detection/scrfd/mmdet/core/fp16/deprecated_fp16_utils.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/fp16/deprecated_fp16_utils.py",
"repo_id": "insightface",
"token_count": 661
} | 107 |
import bisect
import math
from collections import defaultdict
import numpy as np
from mmcv.utils import print_log
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
"""A wrapper of concatenated dataset.
Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
concat the group flag for image aspect ratio.
Args:
datasets (list[:obj:`Dataset`]): A list of datasets.
separate_eval (bool): Whether to evaluate the results
separately if it is used as validation dataset.
Defaults to True.
"""
def __init__(self, datasets, separate_eval=True):
super(ConcatDataset, self).__init__(datasets)
self.CLASSES = datasets[0].CLASSES
self.separate_eval = separate_eval
if not separate_eval:
if any([isinstance(ds, CocoDataset) for ds in datasets]):
raise NotImplementedError(
'Evaluating concatenated CocoDataset as a whole is not'
' supported! Please set "separate_eval=True"')
elif len(set([type(ds) for ds in datasets])) != 1:
raise NotImplementedError(
'All the datasets should have same types')
if hasattr(datasets[0], 'flag'):
flags = []
for i in range(0, len(datasets)):
flags.append(datasets[i].flag)
self.flag = np.concatenate(flags)
def get_cat_ids(self, idx):
"""Get category ids of concatenated dataset by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
if idx < 0:
if -idx > len(self):
raise ValueError(
'absolute value of index should not exceed dataset length')
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.datasets[dataset_idx].get_cat_ids(sample_idx)
def evaluate(self, results, logger=None, **kwargs):
"""Evaluate the results.
Args:
results (list[list | tuple]): Testing results of the dataset.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
Returns:
dict[str: float]: AP results of the total dataset or each separate
dataset if `self.separate_eval=True`.
"""
assert len(results) == self.cumulative_sizes[-1], \
('Dataset and results have different sizes: '
f'{self.cumulative_sizes[-1]} v.s. {len(results)}')
# Check whether all the datasets support evaluation
for dataset in self.datasets:
assert hasattr(dataset, 'evaluate'), \
f'{type(dataset)} does not implement evaluate function'
if self.separate_eval:
dataset_idx = -1
total_eval_results = dict()
for size, dataset in zip(self.cumulative_sizes, self.datasets):
start_idx = 0 if dataset_idx == -1 else \
self.cumulative_sizes[dataset_idx]
end_idx = self.cumulative_sizes[dataset_idx + 1]
results_per_dataset = results[start_idx:end_idx]
print_log(
                    f'\nEvaluating {dataset.ann_file} with '
f'{len(results_per_dataset)} images now',
logger=logger)
eval_results_per_dataset = dataset.evaluate(
results_per_dataset, logger=logger, **kwargs)
dataset_idx += 1
for k, v in eval_results_per_dataset.items():
total_eval_results.update({f'{dataset_idx}_{k}': v})
return total_eval_results
elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
raise NotImplementedError(
'Evaluating concatenated CocoDataset as a whole is not'
' supported! Please set "separate_eval=True"')
elif len(set([type(ds) for ds in self.datasets])) != 1:
raise NotImplementedError(
'All the datasets should have same types')
else:
original_data_infos = self.datasets[0].data_infos
self.datasets[0].data_infos = sum(
[dataset.data_infos for dataset in self.datasets], [])
eval_results = self.datasets[0].evaluate(
results, logger=logger, **kwargs)
self.datasets[0].data_infos = original_data_infos
return eval_results
@DATASETS.register_module()
class RepeatDataset(object):
"""A wrapper of repeated dataset.
The length of repeated dataset will be `times` larger than the original
dataset. This is useful when the data loading time is long but the dataset
is small. Using RepeatDataset can reduce the data loading time between
epochs.
Args:
dataset (:obj:`Dataset`): The dataset to be repeated.
times (int): Repeat times.
"""
def __init__(self, dataset, times):
self.dataset = dataset
self.times = times
self.CLASSES = dataset.CLASSES
if hasattr(self.dataset, 'flag'):
self.flag = np.tile(self.dataset.flag, times)
self._ori_len = len(self.dataset)
def __getitem__(self, idx):
return self.dataset[idx % self._ori_len]
def get_cat_ids(self, idx):
"""Get category ids of repeat dataset by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
return self.dataset.get_cat_ids(idx % self._ori_len)
def __len__(self):
"""Length after repetition."""
return self.times * self._ori_len
# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
@DATASETS.register_module()
class ClassBalancedDataset(object):
"""A wrapper of repeated dataset with repeat factor.
Suitable for training on class imbalanced datasets like LVIS. Following
the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
in each epoch, an image may appear multiple times based on its
"repeat factor".
The repeat factor for an image is a function of the frequency the rarest
category labeled in that image. The "frequency of category c" in [0, 1]
is defined by the fraction of images in the training set (without repeats)
in which category c appears.
The dataset needs to instantiate :func:`self.get_cat_ids` to support
ClassBalancedDataset.
    The repeat factor is computed as follows.
1. For each category c, compute the fraction # of images
that contain it: :math:`f(c)`
2. For each category c, compute the category-level repeat factor:
:math:`r(c) = max(1, sqrt(t/f(c)))`
3. For each image I, compute the image-level repeat factor:
:math:`r(I) = max_{c in I} r(c)`
Args:
dataset (:obj:`CustomDataset`): The dataset to be repeated.
oversample_thr (float): frequency threshold below which data is
repeated. For categories with ``f_c >= oversample_thr``, there is
no oversampling. For categories with ``f_c < oversample_thr``, the
degree of oversampling following the square-root inverse frequency
heuristic above.
filter_empty_gt (bool, optional): If set true, images without bounding
boxes will not be oversampled. Otherwise, they will be categorized
as the pure background class and involved into the oversampling.
Default: True.
"""
def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
self.dataset = dataset
self.oversample_thr = oversample_thr
self.filter_empty_gt = filter_empty_gt
self.CLASSES = dataset.CLASSES
repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
repeat_indices = []
for dataset_idx, repeat_factor in enumerate(repeat_factors):
repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))
self.repeat_indices = repeat_indices
flags = []
if hasattr(self.dataset, 'flag'):
for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
flags.extend([flag] * int(math.ceil(repeat_factor)))
assert len(flags) == len(repeat_indices)
self.flag = np.asarray(flags, dtype=np.uint8)
def _get_repeat_factors(self, dataset, repeat_thr):
"""Get repeat factor for each images in the dataset.
Args:
dataset (:obj:`CustomDataset`): The dataset
repeat_thr (float): The threshold of frequency. If an image
contains the categories whose frequency below the threshold,
it would be repeated.
Returns:
list[float]: The repeat factors for each images in the dataset.
"""
# 1. For each category c, compute the fraction # of images
# that contain it: f(c)
category_freq = defaultdict(int)
num_images = len(dataset)
for idx in range(num_images):
cat_ids = set(self.dataset.get_cat_ids(idx))
if len(cat_ids) == 0 and not self.filter_empty_gt:
cat_ids = set([len(self.CLASSES)])
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
category_freq[k] = v / num_images
# 2. For each category c, compute the category-level repeat factor:
# r(c) = max(1, sqrt(t/f(c)))
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
# 3. For each image I, compute the image-level repeat factor:
# r(I) = max_{c in I} r(c)
repeat_factors = []
for idx in range(num_images):
cat_ids = set(self.dataset.get_cat_ids(idx))
if len(cat_ids) == 0 and not self.filter_empty_gt:
cat_ids = set([len(self.CLASSES)])
repeat_factor = 1
if len(cat_ids) > 0:
repeat_factor = max(
{category_repeat[cat_id]
for cat_id in cat_ids})
repeat_factors.append(repeat_factor)
return repeat_factors
def __getitem__(self, idx):
ori_index = self.repeat_indices[idx]
return self.dataset[ori_index]
def __len__(self):
"""Length after repetition."""
return len(self.repeat_indices)
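# --- Hedged worked example (added for illustration, not part of the original file) ---
# The repeat-factor arithmetic described in the ClassBalancedDataset docstring,
# on a toy two-category frequency table with oversample_thr t = 0.5. The category
# names and frequencies are made up for illustration only.
if __name__ == '__main__':
    t = 0.5
    category_freq = {'common': 1.0, 'rare': 0.25}  # fraction of images containing c
    category_repeat = {
        cat: max(1.0, math.sqrt(t / freq))
        for cat, freq in category_freq.items()
    }
    # 'common' -> max(1, sqrt(0.5)) = 1.0; 'rare' -> max(1, sqrt(2)) ~= 1.41.
    # An image containing only 'common' keeps r(I) = 1.0, while an image that
    # also contains 'rare' gets r(I) ~= 1.41 and is repeated ceil(1.41) = 2 times.
    print(category_repeat)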
| insightface/detection/scrfd/mmdet/datasets/dataset_wrappers.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/datasets/dataset_wrappers.py",
"repo_id": "insightface",
"token_count": 4971
} | 108 |
from collections import OrderedDict
from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor')
def __init__(self, **kwargs):
super(VOCDataset, self).__init__(**kwargs)
if 'VOC2007' in self.img_prefix:
self.year = 2007
elif 'VOC2012' in self.img_prefix:
self.year = 2012
else:
raise ValueError('Cannot infer dataset year from img_prefix')
def evaluate(self,
results,
metric='mAP',
logger=None,
proposal_nums=(100, 300, 1000),
iou_thr=0.5,
scale_ranges=None):
"""Evaluate in VOC protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'mAP', 'recall'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thr (float | list[float]): IoU threshold. It must be a float
when evaluating mAP, and can be a list when evaluating recall.
Default: 0.5.
scale_ranges (list[tuple], optional): Scale ranges for evaluating
mAP. If not specified, all bounding boxes would be included in
evaluation. Default: None.
Returns:
dict[str, float]: AP/recall metrics.
"""
if not isinstance(metric, str):
assert len(metric) == 1
metric = metric[0]
allowed_metrics = ['mAP', 'recall']
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
annotations = [self.get_ann_info(i) for i in range(len(self))]
eval_results = OrderedDict()
if metric == 'mAP':
assert isinstance(iou_thr, float)
if self.year == 2007:
ds_name = 'voc07'
else:
ds_name = self.CLASSES
mean_ap, _ = eval_map(
results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=ds_name,
logger=logger)
eval_results['mAP'] = mean_ap
elif metric == 'recall':
gt_bboxes = [ann['bboxes'] for ann in annotations]
if isinstance(iou_thr, float):
iou_thr = [iou_thr]
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
for i, num in enumerate(proposal_nums):
for j, iou in enumerate(iou_thr):
eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
if recalls.shape[1] > 1:
ar = recalls.mean(axis=1)
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
return eval_results
| insightface/detection/scrfd/mmdet/datasets/voc.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/datasets/voc.py",
"repo_id": "insightface",
"token_count": 1804
} | 109 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import VGG, constant_init, kaiming_init, normal_init, xavier_init
from mmcv.runner import load_checkpoint
from mmdet.utils import get_root_logger
from ..builder import BACKBONES
@BACKBONES.register_module()
class SSDVGG(VGG):
"""VGG Backbone network for single-shot-detection.
Args:
input_size (int): width and height of input, from {300, 512}.
depth (int): Depth of vgg, from {11, 13, 16, 19}.
out_indices (Sequence[int]): Output from which stages.
Example:
>>> self = SSDVGG(input_size=300, depth=11)
>>> self.eval()
>>> inputs = torch.rand(1, 3, 300, 300)
>>> level_outputs = self.forward(inputs)
>>> for level_out in level_outputs:
... print(tuple(level_out.shape))
(1, 1024, 19, 19)
(1, 512, 10, 10)
(1, 256, 5, 5)
(1, 256, 3, 3)
(1, 256, 1, 1)
"""
extra_setting = {
300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
}
def __init__(self,
input_size,
depth,
with_last_pool=False,
ceil_mode=True,
out_indices=(3, 4),
out_feature_indices=(22, 34),
l2_norm_scale=20.):
# TODO: in_channels for mmcv.VGG
super(SSDVGG, self).__init__(
depth,
with_last_pool=with_last_pool,
ceil_mode=ceil_mode,
out_indices=out_indices)
assert input_size in (300, 512)
self.input_size = input_size
self.features.add_module(
str(len(self.features)),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
self.features.add_module(
str(len(self.features)),
nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.features.add_module(
str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
self.features.add_module(
str(len(self.features)), nn.ReLU(inplace=True))
self.out_feature_indices = out_feature_indices
self.inplanes = 1024
self.extra = self._make_extra_layers(self.extra_setting[input_size])
self.l2_norm = L2Norm(
self.features[out_feature_indices[0] - 1].out_channels,
l2_norm_scale)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.features.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
elif isinstance(m, nn.Linear):
normal_init(m, std=0.01)
else:
raise TypeError('pretrained must be a str or None')
for m in self.extra.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
constant_init(self.l2_norm, self.l2_norm.scale)
def forward(self, x):
"""Forward function."""
outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_feature_indices:
outs.append(x)
for i, layer in enumerate(self.extra):
x = F.relu(layer(x), inplace=True)
if i % 2 == 1:
outs.append(x)
outs[0] = self.l2_norm(outs[0])
if len(outs) == 1:
return outs[0]
else:
return tuple(outs)
def _make_extra_layers(self, outplanes):
layers = []
kernel_sizes = (1, 3)
num_layers = 0
outplane = None
for i in range(len(outplanes)):
if self.inplanes == 'S':
self.inplanes = outplane
continue
k = kernel_sizes[num_layers % 2]
if outplanes[i] == 'S':
outplane = outplanes[i + 1]
conv = nn.Conv2d(
self.inplanes, outplane, k, stride=2, padding=1)
else:
outplane = outplanes[i]
conv = nn.Conv2d(
self.inplanes, outplane, k, stride=1, padding=0)
layers.append(conv)
self.inplanes = outplanes[i]
num_layers += 1
if self.input_size == 512:
layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))
return nn.Sequential(*layers)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
scale (float, optional): Defaults to 20..
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
# normalization layer convert to FP32 in FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
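# --- Hedged sanity check (added for illustration, not part of the original file) ---
# With every channel weight set to the same scale s, the L2Norm layer above
# rescales each spatial position so that its channel-wise L2 norm is close to s.
# The weight is filled manually here; inside SSDVGG it is set via constant_init.
if __name__ == '__main__':
    layer = L2Norm(n_dims=8, scale=20.)
    nn.init.constant_(layer.weight, layer.scale)
    x = torch.randn(2, 8, 5, 5)
    out = layer(x)
    print(out.pow(2).sum(dim=1).sqrt().mean())  # close to 20.0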
| insightface/detection/scrfd/mmdet/models/backbones/ssd_vgg.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/backbones/ssd_vgg.py",
"repo_id": "insightface",
"token_count": 3041
} | 110 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmcv.ops import nms
from ..builder import HEADS
from .guided_anchor_head import GuidedAnchorHead
from .rpn_test_mixin import RPNTestMixin
@HEADS.register_module()
class GARPNHead(RPNTestMixin, GuidedAnchorHead):
"""Guided-Anchor-based RPN head."""
def __init__(self, in_channels, **kwargs):
super(GARPNHead, self).__init__(1, in_channels, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.rpn_conv = nn.Conv2d(
self.in_channels, self.feat_channels, 3, padding=1)
super(GARPNHead, self)._init_layers()
def init_weights(self):
"""Initialize weights of the head."""
normal_init(self.rpn_conv, std=0.01)
super(GARPNHead, self).init_weights()
def forward_single(self, x):
"""Forward feature of a single scale level."""
x = self.rpn_conv(x)
x = F.relu(x, inplace=True)
(cls_score, bbox_pred, shape_pred,
loc_pred) = super(GARPNHead, self).forward_single(x)
return cls_score, bbox_pred, shape_pred, loc_pred
def loss(self,
cls_scores,
bbox_preds,
shape_preds,
loc_preds,
gt_bboxes,
img_metas,
gt_bboxes_ignore=None):
losses = super(GARPNHead, self).loss(
cls_scores,
bbox_preds,
shape_preds,
loc_preds,
gt_bboxes,
None,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
return dict(
loss_rpn_cls=losses['loss_cls'],
loss_rpn_bbox=losses['loss_bbox'],
loss_anchor_shape=losses['loss_shape'],
loss_anchor_loc=losses['loss_loc'])
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
mlvl_masks,
img_shape,
scale_factor,
cfg,
rescale=False):
cfg = self.test_cfg if cfg is None else cfg
mlvl_proposals = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
anchors = mlvl_anchors[idx]
mask = mlvl_masks[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
# if no location is kept, end.
if mask.sum() == 0:
continue
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
scores = rpn_cls_score.softmax(dim=1)[:, :-1]
# filter scores, bbox_pred w.r.t. mask.
# anchors are filtered in get_anchors() beforehand.
scores = scores[mask]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
4)[mask, :]
if scores.dim() == 0:
rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
anchors = anchors.unsqueeze(0)
scores = scores.unsqueeze(0)
# filter anchors, bbox_pred, scores w.r.t. scores
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
_, topk_inds = scores.topk(cfg.nms_pre)
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
scores = scores[topk_inds]
# get proposals w.r.t. anchors and rpn_bbox_pred
proposals = self.bbox_coder.decode(
anchors, rpn_bbox_pred, max_shape=img_shape)
# filter out too small bboxes
if cfg.min_bbox_size > 0:
w = proposals[:, 2] - proposals[:, 0]
h = proposals[:, 3] - proposals[:, 1]
valid_inds = torch.nonzero(
(w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size),
as_tuple=False).squeeze()
proposals = proposals[valid_inds, :]
scores = scores[valid_inds]
# NMS in current level
proposals, _ = nms(proposals, scores, cfg.nms_thr)
proposals = proposals[:cfg.nms_post, :]
mlvl_proposals.append(proposals)
proposals = torch.cat(mlvl_proposals, 0)
if cfg.nms_across_levels:
# NMS across multi levels
proposals, _ = nms(proposals[:, :4], proposals[:, -1], cfg.nms_thr)
proposals = proposals[:cfg.max_num, :]
else:
scores = proposals[:, 4]
num = min(cfg.max_num, proposals.shape[0])
_, topk_inds = scores.topk(num)
proposals = proposals[topk_inds, :]
return proposals
| insightface/detection/scrfd/mmdet/models/dense_heads/ga_rpn_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/ga_rpn_head.py",
"repo_id": "insightface",
"token_count": 2975
} | 111 |
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.ops import DeformConv2d
from mmcv.runner import force_fp32
from mmdet.core import (bbox2distance, bbox_overlaps, build_anchor_generator,
build_assigner, build_sampler, distance2bbox,
multi_apply, multiclass_nms, reduce_mean)
from ..builder import HEADS, build_loss
from .atss_head import ATSSHead
from .fcos_head import FCOSHead
INF = 1e8
@HEADS.register_module()
class VFNetHead(ATSSHead, FCOSHead):
"""Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
Detector.<https://arxiv.org/abs/2008.13367>`_.
The VFNet predicts IoU-aware classification scores which mix the
object presence confidence and object localization accuracy as the
detection score. It is built on the FCOS architecture and uses ATSS
for defining positive/negative training examples. The VFNet is trained
    with Varifocal Loss and employs star-shaped deformable convolution to
extract features for a bbox.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
level points.
center_sampling (bool): If true, use center sampling. Default: False.
center_sample_radius (float): Radius of center sampling. Default: 1.5.
sync_num_pos (bool): If true, synchronize the number of positive
examples across GPUs. Default: True
gradient_mul (float): The multiplier to gradients from bbox refinement
and recognition. Default: 0.1.
bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
'stride'. Default: reg_denom
loss_cls_fl (dict): Config of focal loss.
use_vfl (bool): If true, use varifocal loss for training.
Default: True.
loss_cls (dict): Config of varifocal loss.
loss_bbox (dict): Config of localization loss, GIoU Loss.
        loss_bbox_refine (dict): Config of localization refinement loss,
            GIoU Loss.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: norm_cfg=dict(type='GN', num_groups=32,
requires_grad=True).
use_atss (bool): If true, use ATSS to define positive/negative
examples. Default: True.
anchor_generator (dict): Config of anchor generator for ATSS.
Example:
>>> self = VFNetHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
        >>> cls_score, bbox_pred, bbox_pred_refine = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
""" # noqa: E501
def __init__(self,
num_classes,
in_channels,
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
center_sampling=False,
center_sample_radius=1.5,
sync_num_pos=True,
gradient_mul=0.1,
bbox_norm_type='reg_denom',
loss_cls_fl=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
use_vfl=True,
loss_cls=dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
use_atss=True,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
center_offset=0.0,
strides=[8, 16, 32, 64, 128]),
**kwargs):
# dcn base offsets, adapted from reppoints_head.py
self.num_dconv_points = 9
self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
self.dcn_pad = int((self.dcn_kernel - 1) / 2)
dcn_base = np.arange(-self.dcn_pad,
self.dcn_pad + 1).astype(np.float64)
dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
(-1))
self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
super(FCOSHead, self).__init__(
num_classes, in_channels, norm_cfg=norm_cfg, **kwargs)
self.regress_ranges = regress_ranges
self.reg_denoms = [
regress_range[-1] for regress_range in regress_ranges
]
self.reg_denoms[-1] = self.reg_denoms[-2] * 2
self.center_sampling = center_sampling
self.center_sample_radius = center_sample_radius
self.sync_num_pos = sync_num_pos
self.bbox_norm_type = bbox_norm_type
self.gradient_mul = gradient_mul
self.use_vfl = use_vfl
if self.use_vfl:
self.loss_cls = build_loss(loss_cls)
else:
self.loss_cls = build_loss(loss_cls_fl)
self.loss_bbox = build_loss(loss_bbox)
self.loss_bbox_refine = build_loss(loss_bbox_refine)
# for getting ATSS targets
self.use_atss = use_atss
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.anchor_generator = build_anchor_generator(anchor_generator)
self.anchor_center_offset = anchor_generator['center_offset']
self.num_anchors = self.anchor_generator.num_base_anchors[0]
self.sampling = False
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
def _init_layers(self):
"""Initialize layers of the head."""
super(FCOSHead, self)._init_cls_convs()
super(FCOSHead, self)._init_reg_convs()
self.relu = nn.ReLU(inplace=True)
self.vfnet_reg_conv = ConvModule(
self.feat_channels,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.conv_bias)
self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
self.vfnet_reg_refine_dconv = DeformConv2d(
self.feat_channels,
self.feat_channels,
self.dcn_kernel,
1,
padding=self.dcn_pad)
self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])
self.vfnet_cls_dconv = DeformConv2d(
self.feat_channels,
self.feat_channels,
self.dcn_kernel,
1,
padding=self.dcn_pad)
self.vfnet_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
for m in self.cls_convs:
if isinstance(m.conv, nn.Conv2d):
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
if isinstance(m.conv, nn.Conv2d):
normal_init(m.conv, std=0.01)
normal_init(self.vfnet_reg_conv.conv, std=0.01)
normal_init(self.vfnet_reg, std=0.01)
normal_init(self.vfnet_reg_refine_dconv, std=0.01)
normal_init(self.vfnet_reg_refine, std=0.01)
normal_init(self.vfnet_cls_dconv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.vfnet_cls, std=0.01, bias=bias_cls)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple:
cls_scores (list[Tensor]): Box iou-aware scores for each scale
level, each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box offsets for each
scale level, each is a 4D-tensor, the channel number is
num_points * 4.
bbox_preds_refine (list[Tensor]): Refined Box offsets for
each scale level, each is a 4D-tensor, the channel
number is num_points * 4.
"""
return multi_apply(self.forward_single, feats, self.scales,
self.scales_refine, self.strides, self.reg_denoms)
def forward_single(self, x, scale, scale_refine, stride, reg_denom):
"""Forward features of a single scale level.
Args:
x (Tensor): FPN feature maps of the specified stride.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to
resize the refined bbox prediction.
stride (int): The corresponding stride for feature maps,
used to normalize the bbox prediction when
bbox_norm_type = 'stride'.
reg_denom (int): The corresponding regression range for feature
maps, only used to normalize the bbox prediction when
bbox_norm_type = 'reg_denom'.
Returns:
tuple: iou-aware cls scores for each box, bbox predictions and
refined bbox predictions of input feature maps.
"""
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# predict the bbox_pred of different level
reg_feat_init = self.vfnet_reg_conv(reg_feat)
if self.bbox_norm_type == 'reg_denom':
bbox_pred = scale(
self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
elif self.bbox_norm_type == 'stride':
bbox_pred = scale(
self.vfnet_reg(reg_feat_init)).float().exp() * stride
else:
raise NotImplementedError
# compute star deformable convolution offsets
# converting dcn_offset to reg_feat.dtype thus VFNet can be
# trained with FP16
dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
stride).to(reg_feat.dtype)
# refine the bbox_pred
reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
bbox_pred_refine = scale_refine(
self.vfnet_reg_refine(reg_feat)).float().exp()
bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()
# predict the iou-aware cls score
cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
cls_score = self.vfnet_cls(cls_feat)
return cls_score, bbox_pred, bbox_pred_refine
def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
"""Compute the star deformable conv offsets.
Args:
bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b).
gradient_mul (float): Gradient multiplier.
stride (int): The corresponding stride for feature maps,
used to project the bbox onto the feature map.
Returns:
dcn_offsets (Tensor): The offsets for deformable convolution.
"""
dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
gradient_mul * bbox_pred
# map to the feature map scale
bbox_pred_grad_mul = bbox_pred_grad_mul / stride
N, C, H, W = bbox_pred.size()
x1 = bbox_pred_grad_mul[:, 0, :, :]
y1 = bbox_pred_grad_mul[:, 1, :, :]
x2 = bbox_pred_grad_mul[:, 2, :, :]
y2 = bbox_pred_grad_mul[:, 3, :, :]
bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
N, 2 * self.num_dconv_points, H, W)
bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1
bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1
bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1
bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1
bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2
bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1
bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2
bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2
bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1
bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2
bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2
bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2
dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset
return dcn_offset
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
def loss(self,
cls_scores,
bbox_preds,
bbox_preds_refine,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box iou-aware scores for each scale
level, each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box offsets for each
scale level, each is a 4D-tensor, the channel number is
num_points * 4.
bbox_preds_refine (list[Tensor]): Refined Box offsets for
each scale level, each is a 4D-tensor, the channel
number is num_points * 4.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Default: None.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and bbox_preds_refine
flatten_cls_scores = [
cls_score.permute(0, 2, 3,
1).reshape(-1,
self.cls_out_channels).contiguous()
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
for bbox_pred in bbox_preds
]
flatten_bbox_preds_refine = [
bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
for bbox_pred_refine in bbox_preds_refine
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
# FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = torch.where(
((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
num_pos = len(pos_inds)
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
pos_labels = flatten_labels[pos_inds]
# sync num_pos across all gpus
if self.sync_num_pos:
num_pos_avg_per_gpu = reduce_mean(
pos_inds.new_tensor(num_pos).float()).item()
num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
else:
num_pos_avg_per_gpu = num_pos
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
iou_targets_ini = bbox_overlaps(
pos_decoded_bbox_preds,
pos_decoded_target_preds.detach(),
is_aligned=True).clamp(min=1e-6)
bbox_weights_ini = iou_targets_ini.clone().detach()
iou_targets_ini_avg_per_gpu = reduce_mean(
bbox_weights_ini.sum()).item()
bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds.detach(),
weight=bbox_weights_ini,
avg_factor=bbox_avg_factor_ini)
pos_decoded_bbox_preds_refine = \
distance2bbox(pos_points, pos_bbox_preds_refine)
iou_targets_rf = bbox_overlaps(
pos_decoded_bbox_preds_refine,
pos_decoded_target_preds.detach(),
is_aligned=True).clamp(min=1e-6)
bbox_weights_rf = iou_targets_rf.clone().detach()
iou_targets_rf_avg_per_gpu = reduce_mean(
bbox_weights_rf.sum()).item()
bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0)
loss_bbox_refine = self.loss_bbox_refine(
pos_decoded_bbox_preds_refine,
pos_decoded_target_preds.detach(),
weight=bbox_weights_rf,
avg_factor=bbox_avg_factor_rf)
# build IoU-aware cls_score targets
if self.use_vfl:
pos_ious = iou_targets_rf.clone().detach()
cls_iou_targets = torch.zeros_like(flatten_cls_scores)
cls_iou_targets[pos_inds, pos_labels] = pos_ious
else:
loss_bbox = pos_bbox_preds.sum() * 0
loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
if self.use_vfl:
cls_iou_targets = torch.zeros_like(flatten_cls_scores)
if self.use_vfl:
loss_cls = self.loss_cls(
flatten_cls_scores,
cls_iou_targets,
avg_factor=num_pos_avg_per_gpu)
else:
loss_cls = self.loss_cls(
flatten_cls_scores,
flatten_labels,
weight=label_weights,
avg_factor=num_pos_avg_per_gpu)
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_bbox_rf=loss_bbox_refine)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
def get_bboxes(self,
cls_scores,
bbox_preds,
bbox_preds_refine,
img_metas,
cfg=None,
rescale=None,
with_nms=True):
"""Transform network outputs for a batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Box iou-aware scores for each scale
level with shape (N, num_points * num_classes, H, W).
bbox_preds (list[Tensor]): Box offsets for each scale
level with shape (N, num_points * 4, H, W).
bbox_preds_refine (list[Tensor]): Refined Box offsets for
each scale level with shape (N, num_points * 4, H, W).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used. Default: None.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before returning boxes.
Default: True.
Returns:
list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
The first item is an (n, 5) tensor, where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1. The second item is a
(n,) tensor where each item is the predicted class label of
the corresponding box.
"""
assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds_refine[i][img_id].detach()
for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self._get_bboxes_single(cls_score_list,
bbox_pred_list, mlvl_points,
img_shape, scale_factor, cfg,
rescale, with_nms)
result_list.append(det_bboxes)
return result_list
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into bbox predictions.
Args:
cls_scores (list[Tensor]): Box iou-aware scores for a single scale
level with shape (num_points * num_classes, H, W).
bbox_preds (list[Tensor]): Box offsets for a single scale
level with shape (num_points * 4, H, W).
mlvl_points (list[Tensor]): Box reference for a single scale level
with shape (num_total_points, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
scale_factor (ndarray): Scale factor of the image arrange as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before returning boxes.
Default: True.
Returns:
tuple(Tensor):
det_bboxes (Tensor): BBox predictions in shape (n, 5), where
the first 4 columns are bounding box positions
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
between 0 and 1.
det_labels (Tensor): A (n,) tensor where each item is the
predicted class label of the corresponding box.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds,
mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).contiguous().sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).contiguous()
nms_pre = cfg.get('nms_pre', -1)
if 0 < nms_pre < scores.shape[0]:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
if with_nms:
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
else:
return mlvl_bboxes, mlvl_scores
def _get_points_single(self,
featmap_size,
stride,
dtype,
device,
flatten=False):
"""Get points according to feature map sizes."""
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
# to be compatible with anchor points in ATSS
if self.use_atss:
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + \
stride * self.anchor_center_offset
else:
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
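    # Editor's note (illustrative, not part of the original mmdet code): for a
    # 2x3 feature map with stride 8 and the FCOS-style offset (stride // 2 = 4),
    # the generated points are (4, 4), (12, 4), (20, 4), (4, 12), (12, 12),
    # (20, 12) -- the centre of every stride-sized cell, expressed in input-image
    # coordinates. With `use_atss=True` the offset becomes
    # stride * anchor_center_offset instead, so the points line up with the ATSS
    # anchor centres.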
def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore):
"""A wrapper for computing ATSS and FCOS targets for points in multiple
images.
Args:
cls_scores (list[Tensor]): Box iou-aware scores for each scale
level with shape (N, num_points * num_classes, H, W).
mlvl_points (list[Tensor]): Points of each fpn level, each has
shape (num_points, 2).
gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
each has shape (num_gt, 4).
gt_labels (list[Tensor]): Ground truth labels of each box,
each has shape (num_gt,).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
Returns:
tuple:
labels_list (list[Tensor]): Labels of each level.
label_weights (Tensor/None): Label weights of all levels.
bbox_targets_list (list[Tensor]): Regression targets of each
level, (l, t, r, b).
bbox_weights (Tensor/None): Bbox weights of all levels.
"""
if self.use_atss:
return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,
gt_labels, img_metas,
gt_bboxes_ignore)
else:
self.norm_on_bbox = False
return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)
def _get_target_single(self, *args, **kwargs):
"""Avoid ambiguity in multiple inheritance."""
if self.use_atss:
return ATSSHead._get_target_single(self, *args, **kwargs)
else:
return FCOSHead._get_target_single(self, *args, **kwargs)
def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
"""Compute FCOS regression and classification targets for points in
multiple images.
Args:
points (list[Tensor]): Points of each fpn level, each has shape
(num_points, 2).
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
each has shape (num_gt, 4).
gt_labels_list (list[Tensor]): Ground truth labels of each box,
each has shape (num_gt,).
Returns:
tuple:
labels (list[Tensor]): Labels of each level.
label_weights: None, to be compatible with ATSS targets.
bbox_targets (list[Tensor]): BBox targets of each level.
bbox_weights: None, to be compatible with ATSS targets.
"""
labels, bbox_targets = FCOSHead.get_targets(self, points,
gt_bboxes_list,
gt_labels_list)
label_weights = None
bbox_weights = None
return labels, label_weights, bbox_targets, bbox_weights
def get_atss_targets(self,
cls_scores,
mlvl_points,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""A wrapper for computing ATSS targets for points in multiple images.
Args:
cls_scores (list[Tensor]): Box iou-aware scores for each scale
level with shape (N, num_points * num_classes, H, W).
mlvl_points (list[Tensor]): Points of each fpn level, each has
shape (num_points, 2).
gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
each has shape (num_gt, 4).
gt_labels (list[Tensor]): Ground truth labels of each box,
each has shape (num_gt,).
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4). Default: None.
Returns:
tuple:
labels_list (list[Tensor]): Labels of each level.
label_weights (Tensor): Label weights of all levels.
bbox_targets_list (list[Tensor]): Regression targets of each
level, (l, t, r, b).
bbox_weights (Tensor): Bbox weights of all levels.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = ATSSHead.get_targets(
self,
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
unmap_outputs=True)
if cls_reg_targets is None:
return None
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
bbox_targets_list = [
bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
]
num_imgs = len(img_metas)
# transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
bbox_targets_list = self.transform_bbox_targets(
bbox_targets_list, mlvl_points, num_imgs)
labels_list = [labels.reshape(-1) for labels in labels_list]
label_weights_list = [
label_weights.reshape(-1) for label_weights in label_weights_list
]
bbox_weights_list = [
bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
]
label_weights = torch.cat(label_weights_list)
bbox_weights = torch.cat(bbox_weights_list)
return labels_list, label_weights, bbox_targets_list, bbox_weights
def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
"""Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.
Args:
decoded_bboxes (list[Tensor]): Regression targets of each level,
in the form of (x1, y1, x2, y2).
mlvl_points (list[Tensor]): Points of each fpn level, each has
shape (num_points, 2).
num_imgs (int): the number of images in a batch.
Returns:
bbox_targets (list[Tensor]): Regression targets of each level in
the form of (l, t, r, b).
"""
# TODO: Re-implemented in Class PointCoder
assert len(decoded_bboxes) == len(mlvl_points)
num_levels = len(decoded_bboxes)
mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
bbox_targets = []
for i in range(num_levels):
bbox_target = bbox2distance(mlvl_points[i], decoded_bboxes[i])
bbox_targets.append(bbox_target)
return bbox_targets
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""Override the method in the parent class to avoid changing para's
name."""
pass
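# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The head regresses boxes as (l, t, r, b) distances from a point, while the
# targets arrive as (x1, y1, x2, y2) corners; `transform_bbox_targets` above
# bridges the two via `bbox2distance`. The helper below is a minimal,
# self-contained restatement of that encoding with plain tensors; it is not the
# mmdet implementation and exists only to make the round trip explicit.
def _demo_point_box_encoding():
    import torch

    points = torch.tensor([[32.0, 48.0]])              # one reference point (x, y)
    boxes = torch.tensor([[20.0, 30.0, 60.0, 90.0]])   # (x1, y1, x2, y2)

    # encode: distances from the point to the four box sides
    l = points[:, 0] - boxes[:, 0]
    t = points[:, 1] - boxes[:, 1]
    r = boxes[:, 2] - points[:, 0]
    b = boxes[:, 3] - points[:, 1]
    ltrb = torch.stack([l, t, r, b], dim=-1)            # -> [[12., 18., 28., 42.]]

    # decode: recover the corners from the distances
    decoded = torch.stack([points[:, 0] - ltrb[:, 0],
                           points[:, 1] - ltrb[:, 1],
                           points[:, 0] + ltrb[:, 2],
                           points[:, 1] + ltrb[:, 3]], dim=-1)
    assert torch.allclose(decoded, boxes)
    return ltrb, decoded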
| insightface/detection/scrfd/mmdet/models/dense_heads/vfnet_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/vfnet_head.py",
"repo_id": "insightface",
"token_count": 18724
} | 112 |
from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN
@DETECTORS.register_module()
class HybridTaskCascade(CascadeRCNN):
"""Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
def __init__(self, **kwargs):
super(HybridTaskCascade, self).__init__(**kwargs)
@property
def with_semantic(self):
"""bool: whether the detector has a semantic head"""
return self.roi_head.with_semantic
| insightface/detection/scrfd/mmdet/models/detectors/htc.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/detectors/htc.py",
"repo_id": "insightface",
"token_count": 173
} | 113 |
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,
bounded_iou_loss, iou_loss)
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',
'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',
'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',
'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',
'VarifocalLoss'
]
| insightface/detection/scrfd/mmdet/models/losses/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/losses/__init__.py",
"repo_id": "insightface",
"token_count": 635
} | 114 |
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, xavier_init
from mmcv.cnn.bricks import NonLocal2d
from ..builder import NECKS
@NECKS.register_module()
class BFP(nn.Module):
"""BFP (Balanced Feature Pyrmamids)
BFP takes multi-level features as inputs and gather them into a single one,
then refine the gathered feature and scatter the refined results to
multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
refine_type (str): Type of the refine op, currently support
[None, 'conv', 'non_local'].
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None):
super(BFP, self).__init__()
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def init_weights(self):
"""Initialize the weights of FPN module."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
        bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
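# --- Illustrative sketch (added by the editor, not part of the original file) ---
# A minimal shape walk-through of the gather -> refine -> scatter flow above,
# using random tensors instead of real FPN outputs. It assumes 4 levels whose
# resolutions halve between them and refine_level=2, purely for illustration.
def _demo_bfp_shapes():
    import torch
    import torch.nn.functional as F

    feats_in = [torch.randn(1, 8, s, s) for s in (64, 32, 16, 8)]
    refine_level = 2
    gather_size = feats_in[refine_level].shape[2:]          # (16, 16)

    gathered = []
    for i, feat in enumerate(feats_in):
        if i < refine_level:
            gathered.append(F.adaptive_max_pool2d(feat, output_size=gather_size))
        else:
            gathered.append(F.interpolate(feat, size=gather_size, mode='nearest'))
    bsf = sum(gathered) / len(gathered)                      # (1, 8, 16, 16)

    outs = []
    for i, feat in enumerate(feats_in):
        out_size = feat.shape[2:]
        if i < refine_level:
            residual = F.interpolate(bsf, size=out_size, mode='nearest')
        else:
            residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
        outs.append(residual + feat)
    return [o.shape for o in outs]                           # same shapes as the inputs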
| insightface/detection/scrfd/mmdet/models/necks/bfp.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/necks/bfp.py",
"repo_id": "insightface",
"token_count": 1768
} | 115 |
import torch.nn as nn
from mmcv.cnn import ConvModule, normal_init, xavier_init
from mmdet.models.backbones.resnet import Bottleneck
from mmdet.models.builder import HEADS
from .bbox_head import BBoxHead
class BasicResBlock(nn.Module):
"""Basic residual block.
This block is a little different from the block in the ResNet backbone.
The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
Args:
in_channels (int): Channels of the input feature map.
out_channels (int): Channels of the output feature map.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
"""
def __init__(self,
in_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(BasicResBlock, self).__init__()
# main path
self.conv1 = ConvModule(
in_channels,
in_channels,
kernel_size=3,
padding=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
self.conv2 = ConvModule(
in_channels,
out_channels,
kernel_size=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
# identity path
self.conv_identity = ConvModule(
in_channels,
out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
identity = self.conv_identity(identity)
out = x + identity
out = self.relu(out)
return out
@HEADS.register_module()
class DoubleConvFCBBoxHead(BBoxHead):
r"""Bbox head used in Double-Head R-CNN
.. code-block:: none
/-> cls
/-> shared convs ->
\-> reg
roi features
/-> cls
\-> shared fc ->
\-> reg
""" # noqa: W605
def __init__(self,
num_convs=0,
num_fcs=0,
conv_out_channels=1024,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=dict(type='BN'),
**kwargs):
kwargs.setdefault('with_avg_pool', True)
super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
assert self.with_avg_pool
assert num_convs > 0
assert num_fcs > 0
self.num_convs = num_convs
self.num_fcs = num_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# increase the channel of input features
self.res_block = BasicResBlock(self.in_channels,
self.conv_out_channels)
# add conv heads
self.conv_branch = self._add_conv_branch()
# add fc heads
self.fc_branch = self._add_fc_branch()
out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
self.relu = nn.ReLU(inplace=True)
def _add_conv_branch(self):
"""Add the fc branch which consists of a sequential of conv layers."""
branch_convs = nn.ModuleList()
for i in range(self.num_convs):
branch_convs.append(
Bottleneck(
inplanes=self.conv_out_channels,
planes=self.conv_out_channels // 4,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
return branch_convs
def _add_fc_branch(self):
"""Add the fc branch which consists of a sequential of fc layers."""
branch_fcs = nn.ModuleList()
for i in range(self.num_fcs):
fc_in_channels = (
self.in_channels *
self.roi_feat_area if i == 0 else self.fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
return branch_fcs
def init_weights(self):
# conv layers are already initialized by ConvModule
normal_init(self.fc_cls, std=0.01)
normal_init(self.fc_reg, std=0.001)
for m in self.fc_branch.modules():
if isinstance(m, nn.Linear):
xavier_init(m, distribution='uniform')
def forward(self, x_cls, x_reg):
# conv head
x_conv = self.res_block(x_reg)
for conv in self.conv_branch:
x_conv = conv(x_conv)
if self.with_avg_pool:
x_conv = self.avg_pool(x_conv)
x_conv = x_conv.view(x_conv.size(0), -1)
bbox_pred = self.fc_reg(x_conv)
# fc head
x_fc = x_cls.view(x_cls.size(0), -1)
for fc in self.fc_branch:
x_fc = self.relu(fc(x_fc))
cls_score = self.fc_cls(x_fc)
return cls_score, bbox_pred
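# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The diagram in the class docstring boils down to: the conv branch consumes the
# regression RoI features and ends in a box regressor, while the fc branch
# consumes the classification RoI features and ends in a classifier. The toy
# module below mimics that split with plain torch layers (no mmdet registry,
# no BasicResBlock/Bottleneck); it is a simplified stand-in, not the real head.
def _demo_double_branch():
    import torch
    import torch.nn as nn

    num_classes, channels, roi_size = 3, 16, 7

    conv_branch = nn.Sequential(
        nn.Conv2d(channels, channels, 3, padding=1), nn.ReLU(),
        nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        nn.Linear(channels, 4 * num_classes))                # box deltas per class
    fc_branch = nn.Sequential(
        nn.Flatten(),
        nn.Linear(channels * roi_size * roi_size, 32), nn.ReLU(),
        nn.Linear(32, num_classes + 1))                      # classes + background

    rois = torch.randn(5, channels, roi_size, roi_size)      # 5 RoI feature maps
    bbox_pred = conv_branch(rois)                            # (5, 12)
    cls_score = fc_branch(rois)                              # (5, 4)
    return cls_score.shape, bbox_pred.shape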
| insightface/detection/scrfd/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py",
"repo_id": "insightface",
"token_count": 2869
} | 116 |
from mmdet.core import bbox2roi
from ..builder import HEADS
from ..losses.pisa_loss import carl_loss, isr_p
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class PISARoIHead(StandardRoIHead):
r"""The RoI head for `Prime Sample Attention in Object Detection
<https://arxiv.org/abs/1904.04821>`_."""
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""Forward function for training.
Args:
x (list[Tensor]): List of multi-level img features.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposals (list[Tensors]): List of region proposals.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : True segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
neg_label_weights = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
# neg label weight is obtained by sampling when using ISR-N
neg_label_weight = None
if isinstance(sampling_result, tuple):
sampling_result, neg_label_weight = sampling_result
sampling_results.append(sampling_result)
neg_label_weights.append(neg_label_weight)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(
x,
sampling_results,
gt_bboxes,
gt_labels,
img_metas,
neg_label_weights=neg_label_weights)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
return losses
def _bbox_forward(self, x, rois):
"""Box forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def _bbox_forward_train(self,
x,
sampling_results,
gt_bboxes,
gt_labels,
img_metas,
neg_label_weights=None):
"""Run forward function and calculate loss for box head in training."""
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, self.train_cfg)
# neg_label_weights obtained by sampler is image-wise, mapping back to
# the corresponding location in label weights
if neg_label_weights[0] is not None:
label_weights = bbox_targets[1]
cur_num_rois = 0
for i in range(len(sampling_results)):
num_pos = sampling_results[i].pos_inds.size(0)
num_neg = sampling_results[i].neg_inds.size(0)
label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
num_neg] = neg_label_weights[i]
cur_num_rois += num_pos + num_neg
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Apply ISR-P
isr_cfg = self.train_cfg.get('isr', None)
if isr_cfg is not None:
bbox_targets = isr_p(
cls_score,
bbox_pred,
bbox_targets,
rois,
sampling_results,
self.bbox_head.loss_cls,
self.bbox_head.bbox_coder,
**isr_cfg,
num_class=self.bbox_head.num_classes)
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
*bbox_targets)
# Add CARL Loss
carl_cfg = self.train_cfg.get('carl', None)
if carl_cfg is not None:
loss_carl = carl_loss(
cls_score,
bbox_targets[0],
bbox_pred,
bbox_targets[2],
self.bbox_head.loss_bbox,
**carl_cfg,
num_class=self.bbox_head.num_classes)
loss_bbox.update(loss_carl)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
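# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The loop above writes the per-image negative-sample weights into the single
# flattened label_weights vector, whose per-image layout is
# [positives..., negatives...]. The toy numbers below replay that bookkeeping
# for two images so the cur_num_rois offsets are easy to follow.
def _demo_neg_weight_mapping():
    import torch

    num_pos = [2, 1]                          # positives per image
    num_neg = [3, 2]                          # negatives per image
    neg_label_weights = [torch.tensor([0.5, 0.4, 0.3]),
                         torch.tensor([0.9, 0.8])]

    label_weights = torch.ones(sum(num_pos) + sum(num_neg))
    cur = 0
    for p, n, w in zip(num_pos, num_neg, neg_label_weights):
        label_weights[cur + p:cur + p + n] = w
        cur += p + n
    # -> [1.0, 1.0, 0.5, 0.4, 0.3, 1.0, 0.9, 0.8]
    return label_weights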
| insightface/detection/scrfd/mmdet/models/roi_heads/pisa_roi_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/pisa_roi_head.py",
"repo_id": "insightface",
"token_count": 3612
} | 117 |
import torch
import torch.nn as nn
from mmcv.cnn import (Linear, build_activation_layer, build_norm_layer,
xavier_init)
from .builder import TRANSFORMER
class MultiheadAttention(nn.Module):
"""A warpper for torch.nn.MultiheadAttention.
This module implements MultiheadAttention with residual connection,
and positional encoding used in DETR is also passed as input.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
"""
def __init__(self, embed_dims, num_heads, dropout=0.0):
super(MultiheadAttention, self).__init__()
assert embed_dims % num_heads == 0, 'embed_dims must be ' \
f'divisible by num_heads. got {embed_dims} and {num_heads}.'
self.embed_dims = embed_dims
self.num_heads = num_heads
self.dropout = dropout
self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
self.dropout = nn.Dropout(dropout)
def forward(self,
x,
key=None,
value=None,
residual=None,
query_pos=None,
key_pos=None,
attn_mask=None,
key_padding_mask=None):
"""Forward function for `MultiheadAttention`.
Args:
x (Tensor): The input query with shape [num_query, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
key (Tensor): The key tensor with shape [num_key, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
Default None. If None, the `query` will be used.
value (Tensor): The value tensor with same shape as `key`.
Same in `nn.MultiheadAttention.forward`. Default None.
If None, the `key` will be used.
residual (Tensor): The tensor used for addition, with the
same shape as `x`. Default None. If None, `x` will be used.
query_pos (Tensor): The positional encoding for query, with
the same shape as `x`. Default None. If not None, it will
be added to `x` before forward function.
key_pos (Tensor): The positional encoding for `key`, with the
same shape as `key`. Default None. If not None, it will
be added to `key` before forward function. If None, and
`query_pos` has the same shape as `key`, then `query_pos`
will be used for `key_pos`.
attn_mask (Tensor): ByteTensor mask with shape [num_query,
num_key]. Same in `nn.MultiheadAttention.forward`.
Default None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
Same in `nn.MultiheadAttention.forward`. Default None.
Returns:
Tensor: forwarded results with shape [num_query, bs, embed_dims].
"""
query = x
if key is None:
key = query
if value is None:
value = key
if residual is None:
residual = x
if key_pos is None:
if query_pos is not None and key is not None:
if query_pos.shape == key.shape:
key_pos = query_pos
if query_pos is not None:
query = query + query_pos
if key_pos is not None:
key = key + key_pos
out = self.attn(
query,
key,
value=value,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask)[0]
return residual + self.dropout(out)
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'dropout={self.dropout})'
return repr_str
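# --- Illustrative sketch (added by the editor, not part of the original file) ---
# Minimal shape check of the wrapper above: inputs follow the
# [num_query, batch, embed_dims] convention of nn.MultiheadAttention, the
# positional encoding is simply added to query/key before attention, and the
# output keeps the query shape thanks to the residual connection.
def _demo_multihead_attention():
    import torch

    attn = MultiheadAttention(embed_dims=16, num_heads=4)
    x = torch.randn(10, 2, 16)        # 10 queries, batch of 2
    pos = torch.randn(10, 2, 16)      # positional encoding for the queries
    out = attn(x, query_pos=pos)      # self-attention: key/value default to x
    return out.shape                  # torch.Size([10, 2, 16])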
class FFN(nn.Module):
"""Implements feed-forward networks (FFNs) with residual connection.
Args:
embed_dims (int): The feature dimension. Same as
`MultiheadAttention`.
feedforward_channels (int): The hidden dimension of FFNs.
num_fcs (int): The number of fully-connected layers in FFNs.
act_cfg (dict): The activation config for FFNs.
dropout (float): Probability of an element to be zeroed. Default 0.0.
"""
def __init__(self,
embed_dims,
feedforward_channels,
num_fcs=2,
act_cfg=dict(type='ReLU', inplace=True),
dropout=0.0,
add_residual=True):
super(FFN, self).__init__()
assert num_fcs >= 2, 'num_fcs should be no less ' \
f'than 2. got {num_fcs}.'
self.embed_dims = embed_dims
self.feedforward_channels = feedforward_channels
self.num_fcs = num_fcs
self.act_cfg = act_cfg
self.dropout = dropout
self.activate = build_activation_layer(act_cfg)
layers = nn.ModuleList()
in_channels = embed_dims
for _ in range(num_fcs - 1):
layers.append(
nn.Sequential(
Linear(in_channels, feedforward_channels), self.activate,
nn.Dropout(dropout)))
in_channels = feedforward_channels
layers.append(Linear(feedforward_channels, embed_dims))
self.layers = nn.Sequential(*layers)
self.dropout = nn.Dropout(dropout)
self.add_residual = add_residual
def forward(self, x, residual=None):
"""Forward function for `FFN`."""
out = self.layers(x)
if not self.add_residual:
return out
if residual is None:
residual = x
return residual + self.dropout(out)
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'feedforward_channels={self.feedforward_channels}, '
repr_str += f'num_fcs={self.num_fcs}, '
repr_str += f'act_cfg={self.act_cfg}, '
repr_str += f'dropout={self.dropout}, '
repr_str += f'add_residual={self.add_residual})'
return repr_str
class TransformerEncoderLayer(nn.Module):
"""Implements one encoder layer in DETR transformer.
Args:
embed_dims (int): The feature dimension. Same as `FFN`.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
dropout (float): Probability of an element to be zeroed. Default 0.0.
order (tuple[str]): The order for encoder layer. Valid examples are
('selfattn', 'norm', 'ffn', 'norm') and ('norm', 'selfattn',
'norm', 'ffn'). Default ('selfattn', 'norm', 'ffn', 'norm').
        act_cfg (dict): The activation config for FFNs. Default ReLU.
norm_cfg (dict): Config dict for normalization layer. Default
layer normalization.
num_fcs (int): The number of fully-connected layers for FFNs.
Default 2.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
dropout=0.0,
order=('selfattn', 'norm', 'ffn', 'norm'),
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'),
num_fcs=2):
super(TransformerEncoderLayer, self).__init__()
assert isinstance(order, tuple) and len(order) == 4
assert set(order) == set(['selfattn', 'norm', 'ffn'])
self.embed_dims = embed_dims
self.num_heads = num_heads
self.feedforward_channels = feedforward_channels
self.dropout = dropout
self.order = order
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.num_fcs = num_fcs
self.pre_norm = order[0] == 'norm'
self.self_attn = MultiheadAttention(embed_dims, num_heads, dropout)
self.ffn = FFN(embed_dims, feedforward_channels, num_fcs, act_cfg,
dropout)
self.norms = nn.ModuleList()
self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
def forward(self, x, pos=None, attn_mask=None, key_padding_mask=None):
"""Forward function for `TransformerEncoderLayer`.
Args:
x (Tensor): The input query with shape [num_key, bs,
embed_dims]. Same in `MultiheadAttention.forward`.
pos (Tensor): The positional encoding for query. Default None.
Same as `query_pos` in `MultiheadAttention.forward`.
attn_mask (Tensor): ByteTensor mask with shape [num_key,
num_key]. Same in `MultiheadAttention.forward`. Default None.
key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
Same in `MultiheadAttention.forward`. Default None.
Returns:
Tensor: forwarded results with shape [num_key, bs, embed_dims].
"""
norm_cnt = 0
inp_residual = x
for layer in self.order:
if layer == 'selfattn':
# self attention
query = key = value = x
x = self.self_attn(
query,
key,
value,
inp_residual if self.pre_norm else None,
query_pos=pos,
key_pos=pos,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask)
inp_residual = x
elif layer == 'norm':
x = self.norms[norm_cnt](x)
norm_cnt += 1
elif layer == 'ffn':
x = self.ffn(x, inp_residual if self.pre_norm else None)
return x
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'feedforward_channels={self.feedforward_channels}, '
repr_str += f'dropout={self.dropout}, '
repr_str += f'order={self.order}, '
repr_str += f'act_cfg={self.act_cfg}, '
repr_str += f'norm_cfg={self.norm_cfg}, '
repr_str += f'num_fcs={self.num_fcs})'
return repr_str
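# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The `order` tuple controls whether normalisation runs before or after each
# sub-layer. With the default post-norm order the data path is
# x -> selfattn -> norm -> ffn -> norm; swapping to
# ('norm', 'selfattn', 'norm', 'ffn') gives the pre-norm variant. The small demo
# below just runs a random [num_key, batch, embed_dims] tensor through the
# default layer to show that the shape is preserved.
def _demo_encoder_layer():
    import torch

    layer = TransformerEncoderLayer(
        embed_dims=16, num_heads=4, feedforward_channels=32)
    x = torch.randn(6, 2, 16)
    return layer(x).shape             # torch.Size([6, 2, 16])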
class TransformerDecoderLayer(nn.Module):
"""Implements one decoder layer in DETR transformer.
Args:
embed_dims (int): The feature dimension. Same as
`TransformerEncoderLayer`.
num_heads (int): Parallel attention heads.
feedforward_channels (int): Same as `TransformerEncoderLayer`.
dropout (float): Same as `TransformerEncoderLayer`. Default 0.0.
order (tuple[str]): The order for decoder layer. Valid examples are
('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn', 'norm') and
('norm', 'selfattn', 'norm', 'multiheadattn', 'norm', 'ffn').
Default the former.
        act_cfg (dict): Same as `TransformerEncoderLayer`. Default ReLU.
norm_cfg (dict): Config dict for normalization layer. Default
layer normalization.
num_fcs (int): The number of fully-connected layers in FFNs.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
dropout=0.0,
order=('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn',
'norm'),
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'),
num_fcs=2):
super(TransformerDecoderLayer, self).__init__()
assert isinstance(order, tuple) and len(order) == 6
assert set(order) == set(['selfattn', 'norm', 'multiheadattn', 'ffn'])
self.embed_dims = embed_dims
self.num_heads = num_heads
self.feedforward_channels = feedforward_channels
self.dropout = dropout
self.order = order
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.num_fcs = num_fcs
self.pre_norm = order[0] == 'norm'
self.self_attn = MultiheadAttention(embed_dims, num_heads, dropout)
self.multihead_attn = MultiheadAttention(embed_dims, num_heads,
dropout)
self.ffn = FFN(embed_dims, feedforward_channels, num_fcs, act_cfg,
dropout)
self.norms = nn.ModuleList()
# 3 norm layers in official DETR's TransformerDecoderLayer
for _ in range(3):
self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
def forward(self,
x,
memory,
memory_pos=None,
query_pos=None,
memory_attn_mask=None,
target_attn_mask=None,
memory_key_padding_mask=None,
target_key_padding_mask=None):
"""Forward function for `TransformerDecoderLayer`.
Args:
x (Tensor): Input query with shape [num_query, bs, embed_dims].
memory (Tensor): Tensor got from `TransformerEncoder`, with shape
[num_key, bs, embed_dims].
memory_pos (Tensor): The positional encoding for `memory`. Default
None. Same as `key_pos` in `MultiheadAttention.forward`.
query_pos (Tensor): The positional encoding for `query`. Default
None. Same as `query_pos` in `MultiheadAttention.forward`.
memory_attn_mask (Tensor): ByteTensor mask for `memory`, with
shape [num_key, num_key]. Same as `attn_mask` in
`MultiheadAttention.forward`. Default None.
target_attn_mask (Tensor): ByteTensor mask for `x`, with shape
[num_query, num_query]. Same as `attn_mask` in
`MultiheadAttention.forward`. Default None.
memory_key_padding_mask (Tensor): ByteTensor for `memory`, with
shape [bs, num_key]. Same as `key_padding_mask` in
`MultiheadAttention.forward`. Default None.
target_key_padding_mask (Tensor): ByteTensor for `x`, with shape
[bs, num_query]. Same as `key_padding_mask` in
`MultiheadAttention.forward`. Default None.
Returns:
Tensor: forwarded results with shape [num_query, bs, embed_dims].
"""
norm_cnt = 0
inp_residual = x
for layer in self.order:
if layer == 'selfattn':
query = key = value = x
x = self.self_attn(
query,
key,
value,
inp_residual if self.pre_norm else None,
query_pos,
key_pos=query_pos,
attn_mask=target_attn_mask,
key_padding_mask=target_key_padding_mask)
inp_residual = x
elif layer == 'norm':
x = self.norms[norm_cnt](x)
norm_cnt += 1
elif layer == 'multiheadattn':
query = x
key = value = memory
x = self.multihead_attn(
query,
key,
value,
inp_residual if self.pre_norm else None,
query_pos,
key_pos=memory_pos,
attn_mask=memory_attn_mask,
key_padding_mask=memory_key_padding_mask)
inp_residual = x
elif layer == 'ffn':
x = self.ffn(x, inp_residual if self.pre_norm else None)
return x
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'feedforward_channels={self.feedforward_channels}, '
repr_str += f'dropout={self.dropout}, '
repr_str += f'order={self.order}, '
repr_str += f'act_cfg={self.act_cfg}, '
repr_str += f'norm_cfg={self.norm_cfg}, '
repr_str += f'num_fcs={self.num_fcs})'
return repr_str
class TransformerEncoder(nn.Module):
"""Implements the encoder in DETR transformer.
Args:
num_layers (int): The number of `TransformerEncoderLayer`.
embed_dims (int): Same as `TransformerEncoderLayer`.
num_heads (int): Same as `TransformerEncoderLayer`.
feedforward_channels (int): Same as `TransformerEncoderLayer`.
dropout (float): Same as `TransformerEncoderLayer`. Default 0.0.
order (tuple[str]): Same as `TransformerEncoderLayer`.
        act_cfg (dict): Same as `TransformerEncoderLayer`. Default ReLU.
norm_cfg (dict): Same as `TransformerEncoderLayer`. Default
layer normalization.
num_fcs (int): Same as `TransformerEncoderLayer`. Default 2.
"""
def __init__(self,
num_layers,
embed_dims,
num_heads,
feedforward_channels,
dropout=0.0,
order=('selfattn', 'norm', 'ffn', 'norm'),
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'),
num_fcs=2):
super(TransformerEncoder, self).__init__()
assert isinstance(order, tuple) and len(order) == 4
assert set(order) == set(['selfattn', 'norm', 'ffn'])
self.num_layers = num_layers
self.embed_dims = embed_dims
self.num_heads = num_heads
self.feedforward_channels = feedforward_channels
self.dropout = dropout
self.order = order
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.num_fcs = num_fcs
self.pre_norm = order[0] == 'norm'
self.layers = nn.ModuleList()
for _ in range(num_layers):
self.layers.append(
TransformerEncoderLayer(embed_dims, num_heads,
feedforward_channels, dropout, order,
act_cfg, norm_cfg, num_fcs))
self.norm = build_norm_layer(norm_cfg,
embed_dims)[1] if self.pre_norm else None
def forward(self, x, pos=None, attn_mask=None, key_padding_mask=None):
"""Forward function for `TransformerEncoder`.
Args:
x (Tensor): Input query. Same in `TransformerEncoderLayer.forward`.
pos (Tensor): Positional encoding for query. Default None.
Same in `TransformerEncoderLayer.forward`.
attn_mask (Tensor): ByteTensor attention mask. Default None.
Same in `TransformerEncoderLayer.forward`.
key_padding_mask (Tensor): Same in
`TransformerEncoderLayer.forward`. Default None.
Returns:
Tensor: Results with shape [num_key, bs, embed_dims].
"""
for layer in self.layers:
x = layer(x, pos, attn_mask, key_padding_mask)
if self.norm is not None:
x = self.norm(x)
return x
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(num_layers={self.num_layers}, '
repr_str += f'embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'feedforward_channels={self.feedforward_channels}, '
repr_str += f'dropout={self.dropout}, '
repr_str += f'order={self.order}, '
repr_str += f'act_cfg={self.act_cfg}, '
repr_str += f'norm_cfg={self.norm_cfg}, '
repr_str += f'num_fcs={self.num_fcs})'
return repr_str
class TransformerDecoder(nn.Module):
"""Implements the decoder in DETR transformer.
Args:
num_layers (int): The number of `TransformerDecoderLayer`.
embed_dims (int): Same as `TransformerDecoderLayer`.
num_heads (int): Same as `TransformerDecoderLayer`.
feedforward_channels (int): Same as `TransformerDecoderLayer`.
dropout (float): Same as `TransformerDecoderLayer`. Default 0.0.
order (tuple[str]): Same as `TransformerDecoderLayer`.
        act_cfg (dict): Same as `TransformerDecoderLayer`. Default ReLU.
norm_cfg (dict): Same as `TransformerDecoderLayer`. Default
layer normalization.
num_fcs (int): Same as `TransformerDecoderLayer`. Default 2.
"""
def __init__(self,
num_layers,
embed_dims,
num_heads,
feedforward_channels,
dropout=0.0,
order=('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn',
'norm'),
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'),
num_fcs=2,
return_intermediate=False):
super(TransformerDecoder, self).__init__()
assert isinstance(order, tuple) and len(order) == 6
assert set(order) == set(['selfattn', 'norm', 'multiheadattn', 'ffn'])
self.num_layers = num_layers
self.embed_dims = embed_dims
self.num_heads = num_heads
self.feedforward_channels = feedforward_channels
self.dropout = dropout
self.order = order
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.num_fcs = num_fcs
self.return_intermediate = return_intermediate
self.layers = nn.ModuleList()
for _ in range(num_layers):
self.layers.append(
TransformerDecoderLayer(embed_dims, num_heads,
feedforward_channels, dropout, order,
act_cfg, norm_cfg, num_fcs))
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
def forward(self,
x,
memory,
memory_pos=None,
query_pos=None,
memory_attn_mask=None,
target_attn_mask=None,
memory_key_padding_mask=None,
target_key_padding_mask=None):
"""Forward function for `TransformerDecoder`.
Args:
x (Tensor): Input query. Same in `TransformerDecoderLayer.forward`.
memory (Tensor): Same in `TransformerDecoderLayer.forward`.
memory_pos (Tensor): Same in `TransformerDecoderLayer.forward`.
Default None.
query_pos (Tensor): Same in `TransformerDecoderLayer.forward`.
Default None.
memory_attn_mask (Tensor): Same in
`TransformerDecoderLayer.forward`. Default None.
target_attn_mask (Tensor): Same in
`TransformerDecoderLayer.forward`. Default None.
memory_key_padding_mask (Tensor): Same in
`TransformerDecoderLayer.forward`. Default None.
target_key_padding_mask (Tensor): Same in
`TransformerDecoderLayer.forward`. Default None.
Returns:
Tensor: Results with shape [num_query, bs, embed_dims].
"""
intermediate = []
for layer in self.layers:
x = layer(x, memory, memory_pos, query_pos, memory_attn_mask,
target_attn_mask, memory_key_padding_mask,
target_key_padding_mask)
if self.return_intermediate:
intermediate.append(self.norm(x))
if self.norm is not None:
x = self.norm(x)
if self.return_intermediate:
intermediate.pop()
intermediate.append(x)
if self.return_intermediate:
return torch.stack(intermediate)
return x.unsqueeze(0)
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(num_layers={self.num_layers}, '
repr_str += f'embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'feedforward_channels={self.feedforward_channels}, '
repr_str += f'dropout={self.dropout}, '
repr_str += f'order={self.order}, '
repr_str += f'act_cfg={self.act_cfg}, '
repr_str += f'norm_cfg={self.norm_cfg}, '
repr_str += f'num_fcs={self.num_fcs}, '
repr_str += f'return_intermediate={self.return_intermediate})'
return repr_str
@TRANSFORMER.register_module()
class Transformer(nn.Module):
"""Implements the DETR transformer.
    Following the official DETR implementation, this module is copy-pasted
from torch.nn.Transformer with modifications:
* positional encodings are passed in MultiheadAttention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
See `paper: End-to-End Object Detection with Transformers
<https://arxiv.org/pdf/2005.12872>`_ for details.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
num_encoder_layers (int): Number of `TransformerEncoderLayer`.
num_decoder_layers (int): Number of `TransformerDecoderLayer`.
feedforward_channels (int): The hidden dimension for FFNs used in both
encoder and decoder.
dropout (float): Probability of an element to be zeroed. Default 0.0.
act_cfg (dict): Activation config for FFNs used in both encoder
            and decoder. Default ReLU.
norm_cfg (dict): Config dict for normalization used in both encoder
and decoder. Default layer normalization.
num_fcs (int): The number of fully-connected layers in FFNs, which is
used for both encoder and decoder.
pre_norm (bool): Whether the normalization layer is ordered
first in the encoder and decoder. Default False.
return_intermediate_dec (bool): Whether to return the intermediate
output from each TransformerDecoderLayer or only the last
TransformerDecoderLayer. Default False. If False, the returned
`hs` has shape [num_decoder_layers, bs, num_query, embed_dims].
If True, the returned `hs` will have shape [1, bs, num_query,
embed_dims].
"""
def __init__(self,
embed_dims=512,
num_heads=8,
num_encoder_layers=6,
num_decoder_layers=6,
feedforward_channels=2048,
dropout=0.0,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'),
num_fcs=2,
pre_norm=False,
return_intermediate_dec=False):
super(Transformer, self).__init__()
self.embed_dims = embed_dims
self.num_heads = num_heads
self.num_encoder_layers = num_encoder_layers
self.num_decoder_layers = num_decoder_layers
self.feedforward_channels = feedforward_channels
self.dropout = dropout
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.num_fcs = num_fcs
self.pre_norm = pre_norm
self.return_intermediate_dec = return_intermediate_dec
if self.pre_norm:
encoder_order = ('norm', 'selfattn', 'norm', 'ffn')
decoder_order = ('norm', 'selfattn', 'norm', 'multiheadattn',
'norm', 'ffn')
else:
encoder_order = ('selfattn', 'norm', 'ffn', 'norm')
decoder_order = ('selfattn', 'norm', 'multiheadattn', 'norm',
'ffn', 'norm')
self.encoder = TransformerEncoder(num_encoder_layers, embed_dims,
num_heads, feedforward_channels,
dropout, encoder_order, act_cfg,
norm_cfg, num_fcs)
self.decoder = TransformerDecoder(num_decoder_layers, embed_dims,
num_heads, feedforward_channels,
dropout, decoder_order, act_cfg,
norm_cfg, num_fcs,
return_intermediate_dec)
def init_weights(self, distribution='uniform'):
"""Initialize the transformer weights."""
# follow the official DETR to init parameters
for m in self.modules():
if hasattr(m, 'weight') and m.weight.dim() > 1:
xavier_init(m, distribution=distribution)
def forward(self, x, mask, query_embed, pos_embed):
"""Forward function for `Transformer`.
Args:
x (Tensor): Input query with shape [bs, c, h, w] where
c = embed_dims.
mask (Tensor): The key_padding_mask used for encoder and decoder,
with shape [bs, h, w].
query_embed (Tensor): The query embedding for decoder, with shape
[num_query, c].
pos_embed (Tensor): The positional encoding for encoder and
decoder, with the same shape as `x`.
Returns:
tuple[Tensor]: results of decoder containing the following tensor.
- out_dec: Output from decoder. If return_intermediate_dec \
is True output has shape [num_dec_layers, bs,
num_query, embed_dims], else has shape [1, bs, \
num_query, embed_dims].
- memory: Output results from encoder, with shape \
[bs, embed_dims, h, w].
"""
bs, c, h, w = x.shape
x = x.flatten(2).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c]
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(
1, bs, 1) # [num_query, dim] -> [num_query, bs, dim]
mask = mask.flatten(1) # [bs, h, w] -> [bs, h*w]
memory = self.encoder(
x, pos=pos_embed, attn_mask=None, key_padding_mask=mask)
target = torch.zeros_like(query_embed)
# out_dec: [num_layers, num_query, bs, dim]
out_dec = self.decoder(
target,
memory,
memory_pos=pos_embed,
query_pos=query_embed,
memory_attn_mask=None,
target_attn_mask=None,
memory_key_padding_mask=mask,
target_key_padding_mask=None)
out_dec = out_dec.transpose(1, 2)
memory = memory.permute(1, 2, 0).reshape(bs, c, h, w)
return out_dec, memory
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(embed_dims={self.embed_dims}, '
repr_str += f'num_heads={self.num_heads}, '
repr_str += f'num_encoder_layers={self.num_encoder_layers}, '
repr_str += f'num_decoder_layers={self.num_decoder_layers}, '
repr_str += f'feedforward_channels={self.feedforward_channels}, '
repr_str += f'dropout={self.dropout}, '
repr_str += f'act_cfg={self.act_cfg}, '
repr_str += f'norm_cfg={self.norm_cfg}, '
repr_str += f'num_fcs={self.num_fcs}, '
repr_str += f'pre_norm={self.pre_norm}, '
repr_str += f'return_intermediate_dec={self.return_intermediate_dec})'
return repr_str
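# --- Illustrative sketch (added by the editor, not part of the original file) ---
# The reshapes at the top of `Transformer.forward` are easy to get wrong, so
# the snippet below replays them on dummy tensors: the image feature map is
# flattened into a [h*w, bs, c] token sequence, the padding mask into
# [bs, h*w], and the learned query embedding is broadcast over the batch.
def _demo_detr_flattening():
    import torch

    bs, c, h, w, num_query = 2, 16, 4, 5, 7
    x = torch.randn(bs, c, h, w)
    mask = torch.zeros(bs, h, w, dtype=torch.bool)
    query_embed = torch.randn(num_query, c)

    tokens = x.flatten(2).permute(2, 0, 1)                # (h*w, bs, c) = (20, 2, 16)
    key_padding_mask = mask.flatten(1)                    # (bs, h*w)   = (2, 20)
    queries = query_embed.unsqueeze(1).repeat(1, bs, 1)   # (num_query, bs, c)
    return tokens.shape, key_padding_mask.shape, queries.shape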
| insightface/detection/scrfd/mmdet/models/utils/transformer.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/utils/transformer.py",
"repo_id": "insightface",
"token_count": 15746
} | 118 |
import datetime
import numpy as np
import os
import os.path as osp
import glob
import cv2
import insightface
assert insightface.__version__>='0.4'
def detect_person(img, detector):
bboxes, kpss = detector.detect(img)
    # np.int was removed in recent NumPy releases; the builtin int keeps the behaviour
    bboxes = np.round(bboxes[:,:4]).astype(int)
    kpss = np.round(kpss).astype(int)
kpss[:,:,0] = np.clip(kpss[:,:,0], 0, img.shape[1])
kpss[:,:,1] = np.clip(kpss[:,:,1], 0, img.shape[0])
vbboxes = bboxes.copy()
vbboxes[:,0] = kpss[:, 0, 0]
vbboxes[:,1] = kpss[:, 0, 1]
vbboxes[:,2] = kpss[:, 4, 0]
vbboxes[:,3] = kpss[:, 4, 1]
return bboxes, vbboxes
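# Editor's note (illustrative, not part of the original script): `bboxes` are the
# full-body detections, while `vbboxes` reuse two of the predicted keypoints
# (index 0 as the top-left corner, index 4 as the bottom-right corner) to form a
# second box that the demo below shades in blue. The exact meaning of those
# keypoints comes from the scrfd_person_2.5g model and is only inferred here
# from how the script uses them.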
if __name__ == '__main__':
import glob
detector = insightface.model_zoo.get_model('scrfd_person_2.5g.onnx', download=True)
detector.prepare(0, nms_thresh=0.5, input_size=(640, 640))
img_paths = glob.glob('data/images/*.jpg')
for img_path in img_paths:
img = cv2.imread(img_path)
bboxes, vbboxes = detect_person(img, detector)
for i in range(bboxes.shape[0]):
bbox = bboxes[i]
vbbox = vbboxes[i]
x1,y1,x2,y2 = bbox
vx1,vy1,vx2,vy2 = vbbox
cv2.rectangle(img, (x1,y1) , (x2,y2) , (0,255,0) , 1)
alpha = 0.8
color = (255, 0, 0)
for c in range(3):
img[vy1:vy2,vx1:vx2,c] = img[vy1:vy2, vx1:vx2, c]*alpha + color[c]*(1.0-alpha)
cv2.circle(img, (vx1,vy1) , 1, color , 2)
cv2.circle(img, (vx1,vy2) , 1, color , 2)
cv2.circle(img, (vx2,vy1) , 1, color , 2)
cv2.circle(img, (vx2,vy2) , 1, color , 2)
filename = img_path.split('/')[-1]
cv2.imwrite('./outputs/%s'%filename, img)
| insightface/examples/person_detection/scrfd_person.py/0 | {
"file_path": "insightface/examples/person_detection/scrfd_person.py",
"repo_id": "insightface",
"token_count": 943
} | 119 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author : Qingping Zheng
@Contact : qingpingzheng2014@gmail.com
@File : encoding.py
@Time : 10/01/21 00:00 PM
@Desc :
@License : Licensed under the Apache License, Version 2.0 (the "License");
@Copyright : Copyright 2022 The Authors. All Rights Reserved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import threading
import torch
import torch.cuda.comm as comm
from torch.autograd import Variable, Function
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import get_a_var
from torch.nn.parallel._functions import ReduceAddCoalesced, Broadcast
torch_ver = torch.__version__[:3]
__all__ = ['allreduce', 'DataParallelModel', 'DataParallelCriterion',
'patch_replication_callback']
def allreduce(*inputs):
"""Cross GPU all reduce autograd operation for calculate mean and
variance in SyncBN.
"""
return AllReduce.apply(*inputs)
class AllReduce(Function):
@staticmethod
def forward(ctx, num_inputs, *inputs):
ctx.num_inputs = num_inputs
ctx.target_gpus = [inputs[i].get_device() for i in range(0, len(inputs), num_inputs)]
inputs = [inputs[i:i + num_inputs]
for i in range(0, len(inputs), num_inputs)]
# sort before reduce sum
inputs = sorted(inputs, key=lambda i: i[0].get_device())
results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
return tuple([t for tensors in outputs for t in tensors])
@staticmethod
def backward(ctx, *inputs):
inputs = [i.data for i in inputs]
inputs = [inputs[i:i + ctx.num_inputs]
for i in range(0, len(inputs), ctx.num_inputs)]
results = comm.reduce_add_coalesced(inputs, ctx.target_gpus[0])
outputs = comm.broadcast_coalesced(results, ctx.target_gpus)
return (None,) + tuple([Variable(t) for tensors in outputs for t in tensors])
class Reduce(Function):
@staticmethod
def forward(ctx, *inputs):
ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]
inputs = sorted(inputs, key=lambda i: i.get_device())
return comm.reduce_add(inputs)
@staticmethod
def backward(ctx, gradOutput):
return Broadcast.apply(ctx.target_gpus, gradOutput)
class DataParallelModel(DataParallel):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
    Amit Agrawal. "Context Encoding for Semantic Segmentation."
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
def gather(self, outputs, output_device):
return outputs
def replicate(self, module, device_ids):
modules = super(DataParallelModel, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
class DataParallelCriterion(DataParallel):
"""
Calculate loss in multiple-GPUs, which balance the memory usage for
Semantic Segmentation.
The targets are splitted across the specified devices by chunking in
the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`.
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
    Amit Agrawal. "Context Encoding for Semantic Segmentation."
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = encoding.nn.DataParallelModel(model, device_ids=[0, 1, 2])
>>> criterion = encoding.nn.DataParallelCriterion(criterion, device_ids=[0, 1, 2])
>>> y = net(x)
>>> loss = criterion(y, target)
"""
def forward(self, inputs, *targets, **kwargs):
# input should be already scatterd
# scattering the targets instead
if not self.device_ids:
return self.module(inputs, *targets, **kwargs)
targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(inputs, *targets[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
return Reduce.apply(*outputs) / len(outputs)
#return self.gather(outputs, self.output_device).mean()
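# --- Illustrative sketch (added by the editor, not part of the original file) ---
# On the real multi-GPU path the per-device losses are summed with
# comm.reduce_add (via the Reduce autograd Function) and divided by the number
# of devices. The CPU-only toy below reproduces just that arithmetic so the
# averaging behaviour is explicit; it does not touch CUDA or the comm module.
def _demo_criterion_average():
    import torch

    per_device_losses = [torch.tensor(0.9), torch.tensor(1.1), torch.tensor(1.0)]
    mean_loss = sum(per_device_losses) / len(per_device_losses)
    return mean_loss                  # tensor(1.0)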
def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
assert len(modules) == len(inputs)
assert len(targets) == len(inputs)
if kwargs_tup:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
lock = threading.Lock()
results = {}
if torch_ver != "0.3":
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, target, kwargs, device=None):
if torch_ver != "0.3":
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
if not isinstance(input, tuple):
input = (input,)
with torch.cuda.device(device):
output = module(*(input + target), **kwargs)
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, target,
kwargs, device),)
for i, (module, input, target, kwargs, device) in
enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
        _worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs
###########################################################################
# Adapted from Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
#
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
Execute an replication callback `__data_parallel_replicate__` on each module created
by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphism, we assign each sub-module with a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead
of calling the callback of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
| insightface/parsing/dml_csr/utils/encoding.py/0 | {
"file_path": "insightface/parsing/dml_csr/utils/encoding.py",
"repo_id": "insightface",
"token_count": 3754
} | 120 |
import numbers
import os
from argparse import ArgumentParser, Namespace
import mxnet as mx
import numpy as np
from ..app import MaskRenderer
from ..data.rec_builder import RecBuilder
from . import BaseInsightFaceCLICommand
def rec_add_mask_param_command_factory(args: Namespace):
return RecAddMaskParamCommand(
args.input, args.output
)
class RecAddMaskParamCommand(BaseInsightFaceCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
_parser = parser.add_parser("rec.addmaskparam")
_parser.add_argument("input", type=str, help="input rec")
_parser.add_argument("output", type=str, help="output rec, with mask param")
_parser.set_defaults(func=rec_add_mask_param_command_factory)
def __init__(
self,
input: str,
output: str,
):
self._input = input
self._output = output
def run(self):
tool = MaskRenderer()
tool.prepare(ctx_id=0, det_size=(128,128))
root_dir = self._input
path_imgrec = os.path.join(root_dir, 'train.rec')
path_imgidx = os.path.join(root_dir, 'train.idx')
imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
save_path = self._output
wrec=RecBuilder(path=save_path)
s = imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
if header.flag > 0:
if len(header.label)==2:
imgidx = np.array(range(1, int(header.label[0])))
else:
                imgidx = np.array(list(imgrec.keys))
        else:
            imgidx = np.array(list(imgrec.keys))
stat = [0, 0]
print('total:', len(imgidx))
for iid, idx in enumerate(imgidx):
#if iid==500000:
# break
if iid%1000==0:
print('processing:', iid)
s = imgrec.read_idx(idx)
header, img = mx.recordio.unpack(s)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
sample = mx.image.imdecode(img).asnumpy()
bgr = sample[:,:,::-1]
params = tool.build_params(bgr)
#if iid<10:
# mask_out = tool.render_mask(bgr, 'mask_blue', params)
# cv2.imwrite('maskout_%d.jpg'%iid, mask_out)
stat[1] += 1
if params is None:
wlabel = [label] + [-1.0]*236
stat[0] += 1
else:
#print(0, params[0].shape, params[0].dtype)
#print(1, params[1].shape, params[1].dtype)
#print(2, params[2])
#print(3, len(params[3]), params[3][0].__class__)
#print(4, params[4].shape, params[4].dtype)
mask_label = tool.encode_params(params)
wlabel = [label, 0.0]+mask_label # 237 including idlabel, total mask params size is 235
if iid==0:
print('param size:', len(mask_label), len(wlabel), label)
assert len(wlabel)==237
wrec.add_image(img, wlabel)
#print(len(params))
wrec.close()
print('finished on', self._output, ', failed:', stat[0])
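        # Typical CLI usage sketch (illustrative; assumes this subcommand is exposed
        # through the package's insightface-cli entry point):
        #   insightface-cli rec.addmaskparam /path/to/ms1m_dir /path/to/output_dir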
| insightface/python-package/insightface/commands/rec_add_mask_param.py/0 | {
"file_path": "insightface/python-package/insightface/commands/rec_add_mask_param.py",
"repo_id": "insightface",
"token_count": 1658
} | 121 |
# -*- coding: utf-8 -*-
# @Organization : insightface.ai
# @Author : Jia Guo
# @Time : 2021-05-04
# @Function :
from __future__ import division
import numpy as np
import cv2
import onnx
import onnxruntime
from ..utils import face_align
from ..utils import transform
from ..data import get_object
__all__ = [
'Landmark',
]
class Landmark:
def __init__(self, model_file=None, session=None):
assert model_file is not None
self.model_file = model_file
self.session = session
find_sub = False
find_mul = False
model = onnx.load(self.model_file)
graph = model.graph
for nid, node in enumerate(graph.node[:8]):
#print(nid, node.name)
if node.name.startswith('Sub') or node.name.startswith('_minus'):
find_sub = True
if node.name.startswith('Mul') or node.name.startswith('_mul'):
find_mul = True
if nid<3 and node.name=='bn_data':
find_sub = True
find_mul = True
if find_sub and find_mul:
#mxnet arcface model
input_mean = 0.0
input_std = 1.0
else:
input_mean = 127.5
input_std = 128.0
self.input_mean = input_mean
self.input_std = input_std
#print('input mean and std:', model_file, self.input_mean, self.input_std)
if self.session is None:
self.session = onnxruntime.InferenceSession(self.model_file, None)
input_cfg = self.session.get_inputs()[0]
input_shape = input_cfg.shape
input_name = input_cfg.name
self.input_size = tuple(input_shape[2:4][::-1])
self.input_shape = input_shape
outputs = self.session.get_outputs()
output_names = []
for out in outputs:
output_names.append(out.name)
self.input_name = input_name
self.output_names = output_names
assert len(self.output_names)==1
output_shape = outputs[0].shape
self.require_pose = False
#print('init output_shape:', output_shape)
if output_shape[1]==3309:
self.lmk_dim = 3
self.lmk_num = 68
self.mean_lmk = get_object('meanshape_68.pkl')
self.require_pose = True
else:
self.lmk_dim = 2
self.lmk_num = output_shape[1]//self.lmk_dim
self.taskname = 'landmark_%dd_%d'%(self.lmk_dim, self.lmk_num)
def prepare(self, ctx_id, **kwargs):
if ctx_id<0:
self.session.set_providers(['CPUExecutionProvider'])
def get(self, img, face):
bbox = face.bbox
w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
rotate = 0
_scale = self.input_size[0] / (max(w, h)*1.5)
#print('param:', img.shape, bbox, center, self.input_size, _scale, rotate)
aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
input_size = tuple(aimg.shape[0:2][::-1])
#assert input_size==self.input_size
blob = cv2.dnn.blobFromImage(aimg, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
pred = self.session.run(self.output_names, {self.input_name : blob})[0][0]
if pred.shape[0] >= 3000:
pred = pred.reshape((-1, 3))
else:
pred = pred.reshape((-1, 2))
if self.lmk_num < pred.shape[0]:
pred = pred[self.lmk_num*-1:,:]
pred[:, 0:2] += 1
pred[:, 0:2] *= (self.input_size[0] // 2)
if pred.shape[1] == 3:
pred[:, 2] *= (self.input_size[0] // 2)
IM = cv2.invertAffineTransform(M)
pred = face_align.trans_points(pred, IM)
face[self.taskname] = pred
if self.require_pose:
P = transform.estimate_affine_matrix_3d23d(self.mean_lmk, pred)
s, R, t = transform.P2sRt(P)
rx, ry, rz = transform.matrix2angle(R)
pose = np.array( [rx, ry, rz], dtype=np.float32 )
face['pose'] = pose #pitch, yaw, roll
return pred
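# Minimal usage sketch (not part of the original file; the model path and the
# `face` object with a valid `bbox`, e.g. a detection result from an insightface
# detector, are assumptions):
#
#   import cv2
#   handler = Landmark(model_file='2d106det.onnx')
#   handler.prepare(ctx_id=0)
#   img = cv2.imread('face.jpg')
#   lmk = handler.get(img, face)   # (lmk_num, 2) or (lmk_num, 3) landmark array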
| insightface/python-package/insightface/model_zoo/landmark.py/0 | {
"file_path": "insightface/python-package/insightface/model_zoo/landmark.py",
"repo_id": "insightface",
"token_count": 2121
} | 122 |
DEFAULT_MP_NAME = 'buffalo_l'
| insightface/python-package/insightface/utils/constant.py/0 | {
"file_path": "insightface/python-package/insightface/utils/constant.py",
"repo_id": "insightface",
"token_count": 15
} | 123 |
Download megaface testsuite from [baiducloud](https://pan.baidu.com/s/1Vdxc2GgbY8wIW0hVcObIwg)(code:0n6w) or [gdrive](https://drive.google.com/file/d/1KBwp0U9oZgZj7SYDXRxUnnH7Lwvd9XMy/view?usp=sharing). The official devkit is also included.
| insightface/recognition/_evaluation_/megaface/README.md/0 | {
"file_path": "insightface/recognition/_evaluation_/megaface/README.md",
"repo_id": "insightface",
"token_count": 115
} | 124 |
import numpy as np
import mxnet as mx
class AccMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(AccMetric, self).__init__('acc',
axis=self.axis,
output_names=None,
label_names=None)
self.losses = []
self.count = 0
def update(self, labels, preds):
self.count += 1
label = labels[0]
pred_label = preds[1]
#print('ACC', label.shape, pred_label.shape)
if pred_label.shape != label.shape:
pred_label = mx.ndarray.argmax(pred_label, axis=self.axis)
pred_label = pred_label.asnumpy().astype('int32').flatten()
label = label.asnumpy()
if label.ndim == 2:
label = label[:, 0]
label = label.astype('int32').flatten()
assert label.shape == pred_label.shape
self.sum_metric += (pred_label.flat == label.flat).sum()
self.num_inst += len(pred_label.flat)
class LossValueMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(LossValueMetric, self).__init__('lossvalue',
axis=self.axis,
output_names=None,
label_names=None)
self.losses = []
def update(self, labels, preds):
#label = labels[0].asnumpy()
pred = preds[-1].asnumpy()
#print('in loss', pred.shape)
#print(pred)
loss = pred[0]
self.sum_metric += loss
self.num_inst += 1.0
#gt_label = preds[-2].asnumpy()
#print(gt_label)
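# Sketch of how these metrics are typically attached to an MXNet module fit loop
# (illustrative only; `model` and `train_iter` are assumed to exist):
#
#   eval_metrics = mx.metric.CompositeEvalMetric()
#   eval_metrics.add(AccMetric())
#   eval_metrics.add(LossValueMetric())
#   model.fit(train_iter, eval_metric=eval_metrics)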
| insightface/recognition/arcface_mxnet/metric.py/0 | {
"file_path": "insightface/recognition/arcface_mxnet/metric.py",
"repo_id": "insightface",
"token_count": 952
} | 125 |
# InsightFace in OneFlow
[English](README.md) **|** [简体中文](README_CH.md)
This document describes how to train InsightFace in OneFlow and how to run verification on the validation datasets with the trained networks.
## Contents
- [InsightFace in OneFlow](#insightface-in-oneflow)
  - [Contents](#contents)
  - [Background](#background)
    - [InsightFace opensource project](#insightface-opensource-project)
    - [Implementation in OneFlow](#implementation-in-oneflow)
  - [Preparations](#preparations)
    - [Install OneFlow](#install-oneflow)
    - [Data preparations](#data-preparations)
      - [1. Download datasets](#1-download-datasets)
      - [2. Transformation from MS1M recordio to OFRecord](#2-transformation-from-ms1m-recordio-to-ofrecord)
  - [Training and verification](#training-and-verification)
    - [Training](#training)
  - [OneFlow2ONNX](#oneflow2onnx)
## Background
### InsightFace opensource project
[InsightFace](https://github.com/deepinsight/insightface) is an open-source 2D&3D deep face analysis toolbox, mainly based on MXNet.
InsightFace supports:
- Datasets commonly used for face recognition, such as CASIA-WebFace, MS1M and VGG2, provided as binary files that can be read by MXNet; see [here](https://github.com/deepinsight/insightface/wiki/Dataset-Zoo) for details about the datasets and how to download them.
- Backbones such as ResNet, MobileFaceNet, InceptionResNet_v2 and other deep-learning networks for face recognition.
- Implementations of different loss functions, including Softmax Loss, SphereFace Loss, etc.
### Implementation in OneFlow
Based on the existing InsightFace work, OneFlow has ported the basic models and now supports:
- Training datasets MS1M and Glint360k, validation datasets LFW, CFP-FP and AgeDB-30, with scripts for training and validation.
- ResNet100 and MobileFaceNet backbones for face recognition.
- Loss functions, e.g. Softmax Loss and Margin Softmax Loss (including ArcFace, CosFace and Combined Loss).
- Model parallelism and [Partial FC](https://github.com/deepinsight/insightface/tree/760d6de043d7f654c5963391271f215dab461547/recognition/partial_fc#partial-fc) optimization.
- Model conversion from MXNet.
Planned next:
- Conversion support for additional datasets.
- More backbones.
- Full-scale loss function implementations.
- An incremental tutorial on distributed configuration.
This project is open to contributions from every developer; pull requests with new implementations and active discussion are most welcome.
## Preparations
First of all, before execution, please make sure that:
1. Install OneFlow
2. Prepare the training and validation datasets in OFRecord format.
### Install OneFlow
Follow the steps in [Install OneFlow](https://github.com/Oneflow-Inc/oneflow#install-oneflow) to install the latest master wheel package.
```
python3 -m pip install oneflow -f https://oneflow-staging.oss-cn-beijing.aliyuncs.com/branch/master/cu102/6aa719d70119b65837b25cc5f186eb19ef2b7891/index.html --user
```
### Data preparations
Following [Load and Prepare OFRecord Datasets](https://docs.oneflow.org/en/extended_topics/how_to_make_ofdataset.html), the datasets should be converted into OFRecord format to test InsightFace.
[InsightFace](https://github.com/deepinsight/insightface) already provides a set of face-recognition datasets that have been pre-processed by face alignment and related steps. The corresponding datasets can be downloaded from [here](https://github.com/deepinsight/insightface/wiki/Dataset-Zoo) and should be converted into OFRecord, which performs better in OneFlow. Since the conversion steps are cumbersome, it is recommended to download the already converted OFRecord datasets:
[MS1M-ArcFace(face_emore)](http://oneflow-public.oss-cn-beijing.aliyuncs.com/face_dataset/train_ofrecord.tar.gz)
[MS1MV3](https://oneflow-public.oss-cn-beijing.aliyuncs.com/facedata/MS1V3/oneflow/ms1m-retinaface-t1.zip)
The following shows how to convert the downloaded datasets into OFRecord, taking MS1M-ArcFace as an example.
#### 1. Download datasets
The structure of the downloaded MS1M-ArcFace dataset is as follows:
```
faces_emore/
train.idx
train.rec
property
lfw.bin
cfp_fp.bin
agedb_30.bin
```
The first three files are MXNet recordio format files of the MS1M training dataset, and the last three `.bin` files are different validation datasets.
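For reference, each `.bin` verification file in the original InsightFace tooling is typically a pickled pair of encoded image bytes and same/different pair labels. A minimal sketch of inspecting one (the file name is illustrative):

```python
import pickle

with open('lfw.bin', 'rb') as f:
    bins, issame_list = pickle.load(f, encoding='bytes')
print(len(bins), len(issame_list))  # e.g. 12000 images and 6000 pair labels
```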
#### 2. Transformation from MS1M recordio to OFRecord
You only need to execute either 2.1 or 2.2.
2.1 Use Python scripts directly
Run
```
python tools/mx_recordio_2_ofrecord_shuffled_npart.py --data_dir datasets/faces_emore --output_filepath faces_emore/ofrecord/train --num_part 16
```
This produces `num_part` parts of OFRecord files (16 parts in this example), arranged as follows:
```
tree ofrecord/test/
ofrecord/test/
|-- _SUCCESS
|-- part-00000
|-- part-00001
|-- part-00002
|-- part-00003
|-- part-00004
|-- part-00005
|-- part-00006
|-- part-00007
|-- part-00008
|-- part-00009
|-- part-00010
|-- part-00011
|-- part-00012
|-- part-00013
|-- part-00014
`-- part-00015
0 directories, 17 files
```
2.2 Use Python scripts + Spark Shuffle + Spark partition
Run
```
python tools/dataset_convert/mx_recordio_2_ofrecord.py --data_dir datasets/faces_emore --output_filepath faces_emore/ofrecord/train
```
You will get a single OFRecord part (`part-0`) containing all the data. Then use Spark to shuffle and partition it.
1. Get the jar package
You can download spark-oneflow-connector-assembly-0.1.0.jar from [Github](https://github.com/Oneflow-Inc/spark-oneflow-connector) or [OSS](https://oneflow-public.oss-cn-beijing.aliyuncs.com/spark-oneflow-connector/spark-oneflow-connector-assembly-0.1.1.jar)
2. Run in Spark
This assumes you have already installed and configured Spark.
Run
```
// start Spark
./spark-2.4.3-bin-hadoop2.7/bin/spark-shell --jars ~/spark-oneflow-connector-assembly-0.1.0.jar --driver-memory=64G --conf spark.local.dir=/tmp/
// shuffle and repartition into 16 parts
import org.oneflow.spark.functions._
spark.read.chunk("data_path").shuffle().repartition(16).write.chunk("new_data_path")
sc.formatFilenameAsOneflowStyle("new_data_path")
```
You will then get 16 parts of OFRecord files, arranged as follows:
```
tree ofrecord/test/
ofrecord/test/
|-- _SUCCESS
|-- part-00000
|-- part-00001
|-- part-00002
|-- part-00003
|-- part-00004
|-- part-00005
|-- part-00006
|-- part-00007
|-- part-00008
|-- part-00009
|-- part-00010
|-- part-00011
|-- part-00012
|-- part-00013
|-- part-00014
`-- part-00015
0 directories, 17 files
```
## Training and verification
### Training
To make the scripts easier to use, OneFlow keeps them close to the Torch style; you can modify parameters directly in `configs/*.py`.
#### Eager
```
./train_ddp.sh
```
#### Graph
```
./train_graph_distributed.sh
```
### Verification
OneFlow also provides a standalone validation script, `val.py`, which makes it easy to check the accuracy of a saved pre-trained model.
```
./val.sh
```
## OneFlow2ONNX
```
pip install oneflow-onnx==0.5.1
./convert.sh
``` | insightface/recognition/arcface_oneflow/README.md/0 | {
"file_path": "insightface/recognition/arcface_oneflow/README.md",
"repo_id": "insightface",
"token_count": 2372
} | 126 |
python3 oneflow2onnx.py configs/ms1mv3_r50 --model_path /workdir/epoch_0
| insightface/recognition/arcface_oneflow/convert.sh/0 | {
"file_path": "insightface/recognition/arcface_oneflow/convert.sh",
"repo_id": "insightface",
"token_count": 35
} | 127 |
import oneflow as flow
import oneflow.nn as nn
import os
from typing import List, Union
class OFRecordDataLoader(nn.Module):
def __init__(
self,
ofrecord_root: str = "./ofrecord",
mode: str = "train", # "val"
dataset_size: int = 9469,
batch_size: int = 1,
total_batch_size: int = 1,
data_part_num: int = 8,
placement: flow.placement = None,
sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
):
super().__init__()
channel_last = False
output_layout = "NHWC" if channel_last else "NCHW"
        assert os.path.exists(os.path.join(ofrecord_root, mode))
self.train_record_reader = flow.nn.OfrecordReader(
os.path.join(ofrecord_root, mode),
batch_size=batch_size,
data_part_num=data_part_num,
part_name_suffix_length=5,
random_shuffle=True if mode == "train" else False,
shuffle_after_epoch=True if mode == "train" else False,
placement=placement,
sbp=sbp,
)
self.record_label_decoder = flow.nn.OfrecordRawDecoder(
"label", shape=(), dtype=flow.int32
)
color_space = "RGB"
height = 112
width = 112
self.record_image_decoder = flow.nn.OFRecordImageDecoder(
"encoded", color_space=color_space
)
self.resize = (
flow.nn.image.Resize(target_size=[height, width])
if mode == "train"
else flow.nn.image.Resize(
resize_side="shorter", keep_aspect_ratio=True, target_size=112
)
)
self.flip = (
flow.nn.CoinFlip(batch_size=batch_size, placement=placement, sbp=sbp)
if mode == "train"
else None
)
rgb_mean = [127.5, 127.5, 127.5]
rgb_std = [127.5, 127.5, 127.5]
self.crop_mirror_norm = (
flow.nn.CropMirrorNormalize(
color_space=color_space,
output_layout=output_layout,
mean=rgb_mean,
std=rgb_std,
output_dtype=flow.float,
)
if mode == "train"
else flow.nn.CropMirrorNormalize(
color_space=color_space,
output_layout=output_layout,
crop_h=0,
crop_w=0,
crop_pos_y=0.5,
crop_pos_x=0.5,
mean=rgb_mean,
std=rgb_std,
output_dtype=flow.float,
)
)
self.batch_size = batch_size
self.total_batch_size = total_batch_size
self.dataset_size = dataset_size
def __len__(self):
return self.dataset_size // self.total_batch_size
def forward(self):
train_record = self.train_record_reader()
label = self.record_label_decoder(train_record)
image_raw_buffer = self.record_image_decoder(train_record)
image = self.resize(image_raw_buffer)[0]
        rng = self.flip() if self.flip is not None else None
image = self.crop_mirror_norm(image, rng)
return image, label
class SyntheticDataLoader(flow.nn.Module):
def __init__(
self, batch_size, image_size=112, num_classes=10000, placement=None, sbp=None,
):
super().__init__()
self.image_shape = (batch_size, 3, image_size, image_size)
self.label_shape = (batch_size,)
self.num_classes = num_classes
self.placement = placement
self.sbp = sbp
if self.placement is not None and self.sbp is not None:
self.image = flow.nn.Parameter(
flow.randint(
0,
high=255,
size=self.image_shape,
dtype=flow.float32,
placement=self.placement,
sbp=self.sbp,
),
requires_grad=False,
)
self.label = flow.nn.Parameter(
flow.randint(
0,
high=self.num_classes,
size=self.label_shape,
placement=self.placement,
sbp=self.sbp,
).to(dtype=flow.int32),
requires_grad=False,
)
else:
self.image = flow.randint(
0, high=255, size=self.image_shape, dtype=flow.float32, device="cuda"
)
self.label = flow.randint(
0, high=self.num_classes, size=self.label_shape, device="cuda",
).to(dtype=flow.int32)
def __len__(self):
return 10000
def forward(self):
return self.image, self.label
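# Rough usage sketch (illustrative; the ofrecord root, dataset size and part count
# are placeholders, and single-device defaults are assumed):
#
#   loader = OFRecordDataLoader(ofrecord_root="./ofrecord", mode="train",
#                               dataset_size=5822653, batch_size=128,
#                               total_batch_size=128, data_part_num=16)
#   image, label = loader()          # one decoded, normalized batch per call
#
#   # or, for a smoke test without data on disk:
#   synth = SyntheticDataLoader(batch_size=128, num_classes=10000)
#   image, label = synth()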
| insightface/recognition/arcface_oneflow/utils/ofrecord_data_utils.py/0 | {
"file_path": "insightface/recognition/arcface_oneflow/utils/ofrecord_data_utils.py",
"repo_id": "insightface",
"token_count": 2560
} | 128 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import nn
import math
__all__ = ['MobileFaceNet_128']
MobileFaceNet_BottleNeck_Setting = [
# t, c , n ,s
[2, 64, 5, 2],
[4, 128, 1, 2],
[2, 128, 6, 1],
[4, 128, 1, 2],
[2, 128, 2, 1]
]
class BottleNeck(nn.Layer):
def __init__(self, inp, oup, stride, expansion):
super().__init__()
self.connect = stride == 1 and inp == oup
self.conv = nn.Sequential(
# 1*1 conv
nn.Conv2D(
inp, inp * expansion, 1, 1, 0, bias_attr=False),
nn.BatchNorm2D(inp * expansion),
nn.PReLU(inp * expansion),
# 3*3 depth wise conv
nn.Conv2D(
inp * expansion,
inp * expansion,
3,
stride,
1,
groups=inp * expansion,
bias_attr=False),
nn.BatchNorm2D(inp * expansion),
nn.PReLU(inp * expansion),
# 1*1 conv
nn.Conv2D(
inp * expansion, oup, 1, 1, 0, bias_attr=False),
nn.BatchNorm2D(oup), )
def forward(self, x):
if self.connect:
return x + self.conv(x)
else:
return self.conv(x)
class ConvBlock(nn.Layer):
def __init__(self, inp, oup, k, s, p, dw=False, linear=False):
super().__init__()
self.linear = linear
if dw:
self.conv = nn.Conv2D(
inp, oup, k, s, p, groups=inp, bias_attr=False)
else:
self.conv = nn.Conv2D(inp, oup, k, s, p, bias_attr=False)
self.bn = nn.BatchNorm2D(oup)
if not linear:
self.prelu = nn.PReLU(oup)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
if self.linear:
return x
else:
return self.prelu(x)
class MobileFaceNet(nn.Layer):
def __init__(self,
feature_dim=128,
bottleneck_setting=MobileFaceNet_BottleNeck_Setting,
**args):
super().__init__()
self.conv1 = ConvBlock(3, 64, 3, 2, 1)
self.dw_conv1 = ConvBlock(64, 64, 3, 1, 1, dw=True)
self.cur_channel = 64
block = BottleNeck
self.blocks = self._make_layer(block, bottleneck_setting)
self.conv2 = ConvBlock(128, 512, 1, 1, 0)
self.linear7 = ConvBlock(512, 512, 7, 1, 0, dw=True, linear=True)
self.linear1 = ConvBlock(512, feature_dim, 1, 1, 0, linear=True)
for m in self.sublayers():
if isinstance(m, nn.Conv2D):
                # fan-in: (in_channels // groups) * kernel_h * kernel_w
n = m.weight.shape[1] * m.weight.shape[2] * m.weight.shape[3]
m.weight = paddle.create_parameter(
shape=m.weight.shape,
dtype=m.weight.dtype,
default_initializer=nn.initializer.Normal(
mean=0.0, std=math.sqrt(2.0 / n)))
elif isinstance(m, (nn.BatchNorm, nn.BatchNorm2D, nn.GroupNorm)):
m.weight = paddle.create_parameter(
shape=m.weight.shape,
dtype=m.weight.dtype,
default_initializer=nn.initializer.Constant(value=1.0))
m.bias = paddle.create_parameter(
shape=m.bias.shape,
dtype=m.bias.dtype,
default_initializer=nn.initializer.Constant(value=0.0))
def _make_layer(self, block, setting):
layers = []
for t, c, n, s in setting:
for i in range(n):
if i == 0:
layers.append(block(self.cur_channel, c, s, t))
else:
layers.append(block(self.cur_channel, c, 1, t))
self.cur_channel = c
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.dw_conv1(x)
x = self.blocks(x)
x = self.conv2(x)
x = self.linear7(x)
x = self.linear1(x)
x = x.reshape([x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]])
return x
def MobileFaceNet_128(num_features=128, **args):
model = MobileFaceNet(feature_dim=num_features, **args)
return model
# if __name__ == "__main__":
# paddle.set_device("cpu")
# x = paddle.rand([2, 3, 112, 112])
# net = MobileFaceNet()
# print(net)
# x = net(x)
# print(x.shape)
| insightface/recognition/arcface_paddle/dynamic/backbones/mobilefacenet.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/dynamic/backbones/mobilefacenet.py",
"repo_id": "insightface",
"token_count": 2666
} | 129 |
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from six.moves import reduce
from collections import OrderedDict
import paddle
__all__ = ["LargeScaleClassifier"]
class LargeScaleClassifier(object):
"""
Author: {Xiang An, Yang Xiao, XuHan Zhu} in DeepGlint,
Partial FC: Training 10 Million Identities on a Single Machine
See the original paper:
https://arxiv.org/abs/2010.05222
"""
def __init__(self,
feature,
label,
rank,
world_size,
num_classes,
margin1=1.0,
margin2=0.5,
margin3=0.0,
scale=64.0,
sample_ratio=1.0,
embedding_size=512,
name=None):
super(LargeScaleClassifier, self).__init__()
self.num_classes: int = num_classes
self.rank: int = rank
self.world_size: int = world_size
self.sample_ratio: float = sample_ratio
self.embedding_size: int = embedding_size
self.num_local: int = (num_classes + world_size - 1) // world_size
if num_classes % world_size != 0 and rank == world_size - 1:
self.num_local = num_classes % self.num_local
self.num_sample: int = int(self.sample_ratio * self.num_local)
self.margin1 = margin1
self.margin2 = margin2
self.margin3 = margin3
self.logit_scale = scale
self.input_dict = OrderedDict()
self.input_dict['feature'] = feature
self.input_dict['label'] = label
self.output_dict = OrderedDict()
if name is None:
name = 'dist@fc@rank@%05d.w' % rank
assert '.w' in name
stddev = math.sqrt(2.0 / (self.embedding_size + self.num_local))
param_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.Normal(std=stddev))
weight_dtype = 'float16' if feature.dtype == paddle.float16 else 'float32'
weight = paddle.static.create_parameter(
shape=[self.embedding_size, self.num_local],
dtype=weight_dtype,
name=name,
attr=param_attr,
is_bias=False)
# avoid allreducing gradients for distributed parameters
weight.is_distributed = True
# avoid broadcasting distributed parameters in startup program
paddle.static.default_startup_program().global_block().vars[
weight.name].is_distributed = True
if self.world_size > 1:
feature_list = []
paddle.distributed.all_gather(feature_list, feature)
total_feature = paddle.concat(feature_list, axis=0)
label_list = []
paddle.distributed.all_gather(label_list, label)
total_label = paddle.concat(label_list, axis=0)
total_label.stop_gradient = True
else:
total_feature = feature
total_label = label
total_label.stop_gradient = True
if self.sample_ratio < 1.0:
# partial fc sample process
total_label, sampled_class_index = paddle.nn.functional.class_center_sample(
total_label, self.num_local, self.num_sample)
sampled_class_index.stop_gradient = True
weight = paddle.gather(weight, sampled_class_index, axis=1)
norm_feature = paddle.fluid.layers.l2_normalize(total_feature, axis=1)
norm_weight = paddle.fluid.layers.l2_normalize(weight, axis=0)
local_logit = paddle.matmul(norm_feature, norm_weight)
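        # The combined margin applied by margin_cross_entropy below follows the
        # unified ArcFace/CosFace formulation: the target-class logit becomes
        #     s * (cos(m1 * theta + m2) - m3)
        # so (m1, m2, m3) = (1.0, 0.5, 0.0) corresponds to ArcFace and
        # (1.0, 0.0, 0.35) to CosFace.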
loss = paddle.nn.functional.margin_cross_entropy(
local_logit,
total_label,
margin1=self.margin1,
margin2=self.margin2,
margin3=self.margin3,
scale=self.logit_scale,
return_softmax=False,
reduction=None, )
loss.desc.set_dtype(paddle.fluid.core.VarDesc.VarType.FP32)
loss = paddle.mean(loss)
self.output_dict['loss'] = loss
| insightface/recognition/arcface_paddle/static/classifiers/lsc.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/static/classifiers/lsc.py",
"repo_id": "insightface",
"token_count": 2052
} | 130 |
## 1. Environment Setup
This tutorial covers setting up the runtime environment for the basic functional tests under the TIPC directory.
Recommended environment:
- CUDA 10.1/10.2
- cuDNN 7.6 / cuDNN 8.1
- TensorRT 6.1.0.5 / 7.1 / 7.2
The environment can be configured either with a Docker image or by building a local Python environment. The Docker image is recommended to avoid unnecessary environment configuration.
## 2. Docker Image Installation
Docker installation is recommended. Create the container with the following command; the current directory is mapped to the `/paddle` directory inside the container.
```
nvidia-docker run --name paddle -it -v $PWD:/paddle paddlepaddle/paddle:latest-dev-cuda10.1-cudnn7-gcc82 /bin/bash
cd /paddle
# install Paddle with TensorRT support
pip3.7 install https://paddle-wheel.bj.bcebos.com/with-trt/2.1.3/linux-gpu-cuda10.1-cudnn7-mkl-gcc8.2-trt6-avx/paddlepaddle_gpu-2.1.3.post101-cp37-cp37m-linux_x86_64.whl
```
## 3 Python Environment Setup
Outside Docker, the environment configuration is more flexible. Recommended combinations:
- CUDA 10.1 + cuDNN 7.6 + TensorRT 6
- CUDA 10.2 + cuDNN 8.1 + TensorRT 7
- CUDA 11.1 + cuDNN 8.1 + TensorRT 7
The following takes CUDA 10.2 + cuDNN 8.1 + TensorRT 7 as an example to describe the configuration process.
### 3.1 Install cuDNN
If your current environment already meets the cuDNN version requirement, you can skip this step.
Taking the cuDNN 8.1 installation as an example, first download cuDNN 8.1 from the [Nvidia website](https://developer.nvidia.com/rdp/cudnn-archive), choosing the three deb files that match your system version:
- cuDNN Runtime Library, e.g. libcudnn8_8.1.0.77-1+cuda10.2_amd64.deb
- cuDNN Developer Library, e.g. libcudnn8-dev_8.1.0.77-1+cuda10.2_amd64.deb
- cuDNN Code Samples, e.g. libcudnn8-samples_8.1.0.77-1+cuda10.2_amd64.deb
The deb installation can follow the [official documentation](https://docs.nvidia.com/deeplearning/cudnn/install-guide/index.html#installlinux-deb); the steps are as follows:
```
# x.x.x is the downloaded version number
# $HOME is the working directory
sudo dpkg -i libcudnn8_x.x.x-1+cudax.x_arm64.deb
sudo dpkg -i libcudnn8-dev_8.x.x.x-1+cudax.x_arm64.deb
sudo dpkg -i libcudnn8-samples_8.x.x.x-1+cudax.x_arm64.deb
# verify the installation
cp -r /usr/src/cudnn_samples_v8/ $HOME
cd $HOME/cudnn_samples_v8/mnistCUDNN
# build
make clean && make
./mnistCUDNN
```
If mnistCUDNN finishes and reports success, the installation is correct. If freeimage-related errors appear, install the freeimage library as prompted:
```
sudo apt-get install libfreeimage-dev
sudo apt-get install libfreeimage
```
### 3.2 Install TensorRT
First, download TensorRT from the [Nvidia TensorRT page](https://developer.nvidia.com/tensorrt-getting-started); version 7.1.3.4 is used here. Make sure to choose a TensorRT build that matches your system and CUDA version, and downloading the TAR package is recommended.
Taking Ubuntu 16.04 + CUDA 10.2 as an example, after downloading and extracting, follow the installation steps in the [official documentation](https://docs.nvidia.com/deeplearning/tensorrt/archives/tensorrt-713/install-guide/index.html#installing-tar):
```
# In the commands below, '${version}' is the downloaded TensorRT version, e.g. 7.1.3.4
# Set the environment variable; <TensorRT-${version}/lib> is the lib directory of the extracted TensorRT
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<TensorRT-${version}/lib>
# install TensorRT
cd TensorRT-${version}/python
pip3.7 install tensorrt-*-cp3x-none-linux_x86_64.whl
# install graphsurgeon
cd TensorRT-${version}/graphsurgeon
```
### 3.3 Install PaddlePaddle
Download a Paddle package built with TensorRT support; note that the TensorRT version of the package must match the local TensorRT. Download [link](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html#python)
Choose the linux-cuda10.2-trt7-gcc8.2 Python 3.7 build of Paddle:
```
# as the download link shows, this is the paddle2.1.1-cuda10.2-cudnn8.1 build
wget https://paddle-wheel.bj.bcebos.com/with-trt/2.1.1-gpu-cuda10.2-cudnn8.1-mkl-gcc8.2/paddlepaddle_gpu-2.1.1-cp37-cp37m-linux_x86_64.whl
pip3.7 install -U paddlepaddle_gpu-2.1.1-cp37-cp37m-linux_x86_64.whl
```
## 4. Install Dependencies
```
# install AutoLog
git clone https://github.com/LDOUBLEV/AutoLog
cd AutoLog
pip3.7 install -r requirements.txt
python3.7 setup.py bdist_wheel
pip3.7 install ./dist/auto_log-1.0.0-py3-none-any.whl
# clone the insightface code
cd ../
git clone https://github.com/deepinsight/insightface
```
Install the ArcFace dependencies:
```
cd insightface/recognition/arcface_paddle
pip3.7 install -r requirements.txt
```
## FAQ:
Q. You are using Paddle compiled with TensorRT, but TensorRT dynamic library is not found. Ignore this if TensorRT is not needed.
A. This usually means that the installed Paddle was built with TensorRT but the TensorRT inference library cannot be found in the local environment. Download the TensorRT library, extract it, and set the LD_LIBRARY_PATH environment variable,
e.g.:
```
export LD_LIBRARY_PATH=/usr/local/python3.7.0/lib:/usr/local/nvidia/lib:/usr/local/nvidia/lib64:/paddle/package/TensorRT-6.0.1.5/lib
```
Alternatively, the downloaded TensorRT version may not match the TensorRT version that Paddle was compiled with; in that case download the matching TensorRT version and reinstall it.
| insightface/recognition/arcface_paddle/test_tipc/docs/install.md/0 | {
"file_path": "insightface/recognition/arcface_paddle/test_tipc/docs/install.md",
"repo_id": "insightface",
"token_count": 2809
} | 131 |
from easydict import EasyDict as edict
# make training faster
# our RAM is 256G
# mount -t tmpfs -o size=140G tmpfs /train_tmp
config = edict()
# Margin Base Softmax
config.margin_list = (1.0, 0.5, 0.0)
config.network = "r50"
config.resume = False
config.save_all_states = False
config.output = "ms1mv3_arcface_r50"
config.embedding_size = 512
# Partial FC
config.sample_rate = 1
config.interclass_filtering_threshold = 0
config.fp16 = False
config.batch_size = 128
# For SGD
config.optimizer = "sgd"
config.lr = 0.1
config.momentum = 0.9
config.weight_decay = 5e-4
# For AdamW
# config.optimizer = "adamw"
# config.lr = 0.001
# config.weight_decay = 0.1
config.verbose = 2000
config.frequent = 10
# For Large Sacle Dataset, such as WebFace42M
config.dali = False
config.dali_aug = False
# Gradient ACC
config.gradient_acc = 1
# setup seed
config.seed = 2048
# dataload numworkers
config.num_workers = 2
# WandB Logger
config.wandb_key = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
config.suffix_run_name = None
config.using_wandb = False
config.wandb_entity = "entity"
config.wandb_project = "project"
config.wandb_log_all = True
config.save_artifacts = False
config.wandb_resume = False # resume wandb run: only if you want to resume the last run that was interrupted | insightface/recognition/arcface_torch/configs/base.py/0 | {
"file_path": "insightface/recognition/arcface_torch/configs/base.py",
"repo_id": "insightface",
"token_count": 474
} | 132 |
First, your face images need to be detected and aligned. In addition, all face images belonging to the same identity must be placed together in a separate folder, one folder per id.
```shell
# directories and files for your datasets
/image_folder
├── 0_0_0000000
│ ├── 0_0.jpg
│ ├── 0_1.jpg
│ ├── 0_2.jpg
│ ├── 0_3.jpg
│ └── 0_4.jpg
├── 0_0_0000001
│ ├── 0_5.jpg
│ ├── 0_6.jpg
│ ├── 0_7.jpg
│ ├── 0_8.jpg
│ └── 0_9.jpg
├── 0_0_0000002
│ ├── 0_10.jpg
│ ├── 0_11.jpg
│ ├── 0_12.jpg
│ ├── 0_13.jpg
│ ├── 0_14.jpg
│ ├── 0_15.jpg
│ ├── 0_16.jpg
│ └── 0_17.jpg
├── 0_0_0000003
│ ├── 0_18.jpg
│ ├── 0_19.jpg
│ └── 0_20.jpg
├── 0_0_0000004
# 0) Dependencies installation
pip install opencv-python
apt-get update
apt-get install ffmpeg libsm6 libxext6 -y
# 1) create train.lst using follow command
python -m mxnet.tools.im2rec --list --recursive train image_folder
# 2) create train.rec and train.idx using train.lst using following command
python -m mxnet.tools.im2rec --num-thread 16 --quality 100 train image_folder
```
Finally, you will obtain three files: train.lst, train.rec, and train.idx, where train.idx and train.rec are utilized for training.
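A quick way to sanity-check the generated files is to read one record back with MXNet (a minimal sketch, assuming the three files are in the current directory):

```python
import mxnet as mx

imgrec = mx.recordio.MXIndexedRecordIO('train.idx', 'train.rec', 'r')
header, img = mx.recordio.unpack(imgrec.read_idx(imgrec.keys[0]))
print(header.label)  # identity label of the first sample
```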
| insightface/recognition/arcface_torch/docs/prepare_custom_dataset.md/0 | {
"file_path": "insightface/recognition/arcface_torch/docs/prepare_custom_dataset.md",
"repo_id": "insightface",
"token_count": 491
} | 133 |
import numpy as np
import onnx
import torch
def convert_onnx(net, path_module, output, opset=11, simplify=False):
assert isinstance(net, torch.nn.Module)
img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
    img = img.astype(float)
img = (img / 255. - 0.5) / 0.5 # torch style norm
img = img.transpose((2, 0, 1))
img = torch.from_numpy(img).unsqueeze(0).float()
weight = torch.load(path_module)
net.load_state_dict(weight, strict=True)
net.eval()
torch.onnx.export(net, img, output, input_names=["data"], keep_initializers_as_inputs=False, verbose=False, opset_version=opset)
model = onnx.load(output)
graph = model.graph
graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
if simplify:
from onnxsim import simplify
model, check = simplify(model)
assert check, "Simplified ONNX model could not be validated"
onnx.save(model, output)
if __name__ == '__main__':
import os
import argparse
from backbones import get_model
parser = argparse.ArgumentParser(description='ArcFace PyTorch to onnx')
parser.add_argument('input', type=str, help='input backbone.pth file or path')
parser.add_argument('--output', type=str, default=None, help='output onnx path')
parser.add_argument('--network', type=str, default=None, help='backbone network')
parser.add_argument('--simplify', type=bool, default=False, help='onnx simplify')
args = parser.parse_args()
input_file = args.input
if os.path.isdir(input_file):
input_file = os.path.join(input_file, "model.pt")
assert os.path.exists(input_file)
# model_name = os.path.basename(os.path.dirname(input_file)).lower()
# params = model_name.split("_")
# if len(params) >= 3 and params[1] in ('arcface', 'cosface'):
# if args.network is None:
# args.network = params[2]
assert args.network is not None
print(args)
backbone_onnx = get_model(args.network, dropout=0.0, fp16=False, num_features=512)
if args.output is None:
args.output = os.path.join(os.path.dirname(args.input), "model.onnx")
convert_onnx(backbone_onnx, input_file, args.output, simplify=args.simplify)
| insightface/recognition/arcface_torch/torch2onnx.py/0 | {
"file_path": "insightface/recognition/arcface_torch/torch2onnx.py",
"repo_id": "insightface",
"token_count": 893
} | 134 |
import numpy as np
import pandas as pd
import os,sys
sys.path.append(os.getcwd())
print(sys.path)
import argparse
import torch
from PIL import Image
from network.lightcnn112 import LightCNN_29Layers
from evaluate import evaluate2
fars = [10 ** -4, 10 ** -3, 10 ** -2]
parser = argparse.ArgumentParser()
parser.add_argument('--test_fold_id', default=1, type=int)
parser.add_argument('--input_mode', default='grey', choices=['grey'], type=str)
parser.add_argument('--model_mode', default='29', choices=['29'], type=str)
parser.add_argument('--model_name', default='', type=str)
parser.add_argument('--img_root', default='', type=str)
parser.add_argument('--test_mode', default='pretrain', type=str)
args = parser.parse_args()
INPUT_MODE = args.input_mode
MODEL_MODE = args.model_mode
model_name = args.model_name
test_mode = args.test_mode
img_root = args.img_root
num_classes = 725
test_list_dir = './data/oulu/'
model_dir = f'./models/{test_mode}/'
model_path = os.path.join(model_dir, model_name)
def load_model(model, pretrained):
weights = torch.load(pretrained)
weights = weights['state_dict']
model_dict = model.state_dict()
weights = {k.replace('module.',''): v for k, v in weights.items() if k.replace('module.','') in model_dict.keys() and 'fc2' not in k}
print("==> len of weights to be loaded: {}. \n".format(len(weights)))
model.load_state_dict(weights, strict=False)
model.eval()
class Embedding:
def __init__(self, root, model):
self.model = model
self.root = root
self.image_size = (112, 112)
self.batch_size = 1
def get(self, img):
img_flip = np.fliplr(img)
img = np.transpose(img, (2, 0, 1)) # 1*112*112
img_flip = np.transpose(img_flip, (2, 0, 1))
input_blob = np.zeros((2, 1, self.image_size[1], self.image_size[0]),
dtype=np.uint8)
input_blob[0] = img
input_blob[1] = img_flip
return input_blob
@torch.no_grad()
def forward_db(self, batch_data):
imgs = torch.Tensor(batch_data).cuda()
imgs.div_(255)
feat = self.model(imgs)
feat = feat.reshape([self.batch_size, 2 * feat.shape[1]])
return feat.cpu().numpy()
def extract_feats_labels(self, data_list):
img_feats = []
pids = []
for (imgPath, pid) in data_list:
img = Image.open(os.path.join(self.root, imgPath)).convert('L')
img = np.array(img)
img = img[..., np.newaxis]
img_feats.append(self.forward_db(self.get(img)).flatten())
pids.append(pid)
img_feats = np.array(img_feats).astype(np.float32)
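        # fuse the embeddings of the original and horizontally flipped crops (stored
        # as the first/second half of each feature vector) and L2-normalize the result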
img_input_feats = img_feats[:, 0:img_feats.shape[1] //2] + img_feats[:, img_feats.shape[1] // 2:]
img_input_feats = img_input_feats / np.sqrt(np.sum(img_input_feats ** 2, -1, keepdims=True))
pids = np.array(pids)
return img_input_feats, pids
def get_vis_nir_info_csv():
vis = pd.read_csv(test_list_dir + 'vis_test_paths.csv', header=None, sep=' ')
vis_labels = [int(s.strip().split(',')[-1].split('P')[-1]) for s in vis[0]]
vis = [s.strip().split(',')[0] for s in vis[0]]
nir = pd.read_csv(test_list_dir + 'nir_test_paths.csv', header=None, sep=' ')
nir_labels = [int(s.strip().split(',')[-1].split('P')[-1]) for s in nir[0]]
nir = [s.strip().split(',')[0] for s in nir[0]]
vis = [(p,l) for (p,l) in zip(vis, vis_labels)]
nir = [(p,l) for (p,l) in zip(nir, nir_labels)]
return vis,nir
def get_vis_nir_info_txt():
def read_file(file_name):
with open(test_list_dir + file_name, 'r') as f:
lines = f.readlines()
paths = [s.strip().split(' ')[0] for s in lines]
labels = [int(s.strip().split(' ')[1]) for s in lines]
info = [(p,l) for (p,l) in zip(paths, labels)]
return info
vis = read_file('test_vis_paths.txt')
nir = read_file('test_nir_paths.txt')
return vis, nir
### Testing pretrain/finetune model
if test_mode == 'pretrain':
vis, nir = get_vis_nir_info_csv()
elif test_mode == "finetune":
vis, nir = get_vis_nir_info_txt()
else:
print("Wrong test_mode!!!")
if MODEL_MODE == '29':
model = LightCNN_29Layers(num_classes=num_classes)
model.cuda()
embedding = Embedding(img_root, model)
if not os.path.exists(model_path):
print("cannot find model ",model_path)
sys.exit()
load_model(embedding.model, model_path)
feat_vis, label_vis = embedding.extract_feats_labels(vis)
feat_nir, label_nir = embedding.extract_feats_labels(nir)
labels = np.equal.outer(label_vis, label_nir).astype(np.float32)
print("*" * 16)
print("INPUT_MODE: ", INPUT_MODE)
print("MODEL_MODE: ", MODEL_MODE)
print("model path: ", model_path)
print("*" * 16)
print("[query] feat_nir.shape ",feat_nir.shape)
print("[gallery] feat_vis.shape ",feat_vis.shape)
print("*" * 16)
acc, tarfar = evaluate2(feat_vis, feat_nir, labels, fars=fars) | insightface/recognition/idmmd/evaluate/eval_oulu_112.py/0 | {
"file_path": "insightface/recognition/idmmd/evaluate/eval_oulu_112.py",
"repo_id": "insightface",
"token_count": 2242
} | 135 |
export CUDA_VISIBLE_DEVICES='0,1,2,3,4,5,6,7'
export HOROVOD_GPU_ALLREDUCE=NCCL
export HOROVOD_GPU_ALLGATHER=NCCL
export HOROVOD_GPU_BROADCAST=NCCL
export MXNET_CPU_WORKER_NTHREADS=3
# use `which python` to get the absolute path of your python interpreter
#
PYTHON_EXEC=/usr/bin/python
${PYTHON_EXEC} train_memory.py \
--dataset glint360k_8GPU \
--loss cosface \
--network r100 \
--models-root /data/anxiang/opensource/glint360k_8GPU_r100FC_1.0_fp32_cosface
| insightface/recognition/partial_fc/mxnet/config.sh/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/config.sh",
"repo_id": "insightface",
"token_count": 198
} | 136 |
#!/bin/bash
mpirun -np 8 \
-hostfile hosts/host_8 --allow-run-as-root \
-bind-to none -map-by slot \
-x LD_LIBRARY_PATH -x PATH \
-mca pml ob1 -mca btl ^openib \
-mca btl_tcp_if_include eth0 \
-x OMP_NUM_THREADS=2 \
bash config.sh
| insightface/recognition/partial_fc/mxnet/run.sh/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/run.sh",
"repo_id": "insightface",
"token_count": 111
} | 137 |
import os
import torch
import numpy as np
import utils.general as utils
from utils import rend_util
class IFDataset(torch.utils.data.Dataset):
"""Dataset for a class of objects, where each datapoint is a SceneInstanceDataset."""
def __init__(self,
train_cameras,
data_dir,
img_res,
scan_id=0,
cam_file=None
):
self.instance_dir = os.path.join('../data', data_dir, 'scan{0}'.format(scan_id))
self.total_pixels = img_res[0] * img_res[1]
self.img_res = img_res
assert os.path.exists(self.instance_dir), "Data directory is empty"
self.sampling_idx = None
self.train_cameras = train_cameras
image_dir = '{0}/image'.format(self.instance_dir)
image_paths = sorted(utils.glob_imgs(image_dir))
mask_dir = '{0}/mask'.format(self.instance_dir)
mask_paths = sorted(utils.glob_imgs(mask_dir))
self.n_images = len(image_paths)
self.cam_file = '{0}/cameras.npz'.format(self.instance_dir)
if cam_file is not None:
self.cam_file = '{0}/{1}'.format(self.instance_dir, cam_file)
camera_dict = np.load(self.cam_file)
scale_mats = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
world_mats = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
self.intrinsics_all = []
self.pose_all = []
for scale_mat, world_mat in zip(scale_mats, world_mats):
P = world_mat @ scale_mat
P = P[:3, :4]
intrinsics, pose = rend_util.load_K_Rt_from_P(None, P)
self.intrinsics_all.append(torch.from_numpy(intrinsics).float())
self.pose_all.append(torch.from_numpy(pose).float())
self.rgb_images = []
for path in image_paths:
rgb = rend_util.load_rgb(path)
rgb = rgb.reshape(3, -1).transpose(1, 0)
self.rgb_images.append(torch.from_numpy(rgb).float())
self.object_masks = []
for path in mask_paths:
object_mask = rend_util.load_mask_white_bg(path)
object_mask = object_mask.reshape(-1)
self.object_masks.append(torch.from_numpy(object_mask).bool())
def __len__(self):
return self.n_images
def __getitem__(self, idx):
uv = np.mgrid[0:self.img_res[0], 0:self.img_res[1]].astype(np.int32)
uv = torch.from_numpy(np.flip(uv, axis=0).copy()).float()
uv = uv.reshape(2, -1).transpose(1, 0)
sample = {
"object_mask": self.object_masks[idx],
"uv": uv,
"intrinsics": self.intrinsics_all[idx],
}
ground_truth = {
"rgb": self.rgb_images[idx]
}
if self.sampling_idx is not None:
ground_truth["rgb"] = self.rgb_images[idx][self.sampling_idx, :]
sample["object_mask"] = self.object_masks[idx][self.sampling_idx]
sample["uv"] = uv[self.sampling_idx, :]
if not self.train_cameras:
sample["pose"] = self.pose_all[idx]
return idx, sample, ground_truth
def collate_fn(self, batch_list):
        # gets a list of dictionaries and returns input, ground_truth as dictionaries for all batch instances
batch_list = zip(*batch_list)
all_parsed = []
for entry in batch_list:
if type(entry[0]) is dict:
# make them all into a new dict
ret = {}
for k in entry[0].keys():
ret[k] = torch.stack([obj[k] for obj in entry])
all_parsed.append(ret)
else:
all_parsed.append(torch.LongTensor(entry))
return tuple(all_parsed)
def change_sampling_idx(self, sampling_size):
if sampling_size == -1:
self.sampling_idx = None
else:
self.sampling_idx = torch.randperm(self.total_pixels)[:sampling_size]
def get_scale_mat(self):
return np.load(self.cam_file)['scale_mat_0']
def get_gt_pose(self, scaled=False):
# Load gt pose without normalization to unit sphere
camera_dict = np.load(self.cam_file)
world_mats = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
scale_mats = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
pose_all = []
for scale_mat, world_mat in zip(scale_mats, world_mats):
P = world_mat
if scaled:
P = world_mat @ scale_mat
P = P[:3, :4]
_, pose = rend_util.load_K_Rt_from_P(None, P)
pose_all.append(torch.from_numpy(pose).float())
return torch.cat([p.float().unsqueeze(0) for p in pose_all], 0)
def get_pose_init(self):
# get noisy initializations obtained with the linear method
cam_file = '{0}/cameras_linear_init.npz'.format(self.instance_dir)
camera_dict = np.load(cam_file)
scale_mats = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
world_mats = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]
init_pose = []
for scale_mat, world_mat in zip(scale_mats, world_mats):
P = world_mat @ scale_mat
P = P[:3, :4]
_, pose = rend_util.load_K_Rt_from_P(None, P)
init_pose.append(pose)
init_pose = torch.cat([torch.Tensor(pose).float().unsqueeze(0) for pose in init_pose], 0).cuda()
init_quat = rend_util.rot_to_quat(init_pose[:, :3, :3])
init_quat = torch.cat([init_quat, init_pose[:, :3, 3]], 1)
return init_quat
| insightface/reconstruction/PBIDR/code/datasets/dataset.py/0 | {
"file_path": "insightface/reconstruction/PBIDR/code/datasets/dataset.py",
"repo_id": "insightface",
"token_count": 2919
} | 138 |
import numpy as np
import imageio
import skimage
import cv2
import torch
from torch.nn import functional as F
def load_rgb(path):
img = imageio.imread(path)
img = skimage.img_as_float32(img)
# pixel values between [-1,1]
img -= 0.5
img *= 2.
img = img.transpose(2, 0, 1)
return img
def load_mask(path):
alpha = imageio.imread(path, as_gray=True)
alpha = skimage.img_as_float32(alpha)
object_mask = alpha > 127.5
return object_mask
def load_mask_white_bg(path):
alpha = imageio.imread(path, as_gray=True)
alpha = skimage.img_as_float32(alpha)
object_mask = alpha < 250.5
return object_mask
def load_K_Rt_from_P(filename, P=None):
if P is None:
lines = open(filename).read().splitlines()
if len(lines) == 4:
lines = lines[1:]
lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)]
P = np.asarray(lines).astype(np.float32).squeeze()
out = cv2.decomposeProjectionMatrix(P)
K = out[0]
R = out[1]
t = out[2]
K = K/K[2,2]
intrinsics = np.eye(4)
intrinsics[:3, :3] = K
pose = np.eye(4, dtype=np.float32)
to_gl = np.eye(3, dtype=np.float32)
to_gl[0, 0] = -1.
to_gl[1, 1] = -1.
pose[:3, :3] = np.dot(R.transpose(), to_gl)
pose[:3,3] = (t[:3] / t[3])[:,0]
return intrinsics, pose
def get_camera_params(uv, pose, intrinsics):
if pose.shape[1] == 7: #In case of quaternion vector representation
cam_loc = pose[:, 4:]
R = quat_to_rot(pose[:,:4])
p = torch.eye(4).repeat(pose.shape[0],1,1).cuda().float()
p[:, :3, :3] = R
p[:, :3, 3] = cam_loc
else: # In case of pose matrix representation
cam_loc = pose[:, :3, 3]
p = pose
batch_size, num_samples, _ = uv.shape
depth = torch.ones((batch_size, num_samples)).cuda()
x_cam = uv[:, :, 0].view(batch_size, -1)
y_cam = uv[:, :, 1].view(batch_size, -1)
z_cam = depth.view(batch_size, -1)
pixel_points_cam = lift(x_cam, y_cam, z_cam, intrinsics=intrinsics)
# permute for batch matrix product
pixel_points_cam = pixel_points_cam.permute(0, 2, 1)
world_coords = torch.bmm(p, pixel_points_cam).permute(0, 2, 1)[:, :, :3]
ray_dirs = world_coords - cam_loc[:, None, :]
ray_dirs = F.normalize(ray_dirs, dim=2)
return ray_dirs, cam_loc
def get_camera_for_plot(pose):
if pose.shape[1] == 7: #In case of quaternion vector representation
cam_loc = pose[:, 4:].detach()
R = quat_to_rot(pose[:,:4].detach())
else: # In case of pose matrix representation
cam_loc = pose[:, :3, 3]
R = pose[:, :3, :3]
cam_dir = R[:, :3, 2]
return cam_loc, cam_dir
def lift(x, y, z, intrinsics):
# parse intrinsics
intrinsics = intrinsics.cuda()
fx = intrinsics[:, 0, 0]
fy = intrinsics[:, 1, 1]
cx = intrinsics[:, 0, 2]
cy = intrinsics[:, 1, 2]
sk = intrinsics[:, 0, 1]
x_lift = (x - cx.unsqueeze(-1) + cy.unsqueeze(-1)*sk.unsqueeze(-1)/fy.unsqueeze(-1) - sk.unsqueeze(-1)*y/fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z
y_lift = (y - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z
# homogeneous
return torch.stack((x_lift, y_lift, z, torch.ones_like(z).cuda()), dim=-1)
def quat_to_rot(q):
batch_size, _ = q.shape
q = F.normalize(q, dim=1)
R = torch.ones((batch_size, 3,3)).cuda()
qr=q[:,0]
qi = q[:, 1]
qj = q[:, 2]
qk = q[:, 3]
R[:, 0, 0]=1-2 * (qj**2 + qk**2)
R[:, 0, 1] = 2 * (qj *qi -qk*qr)
R[:, 0, 2] = 2 * (qi * qk + qr * qj)
R[:, 1, 0] = 2 * (qj * qi + qk * qr)
R[:, 1, 1] = 1-2 * (qi**2 + qk**2)
R[:, 1, 2] = 2*(qj*qk - qi*qr)
R[:, 2, 0] = 2 * (qk * qi-qj * qr)
R[:, 2, 1] = 2 * (qj*qk + qi*qr)
R[:, 2, 2] = 1-2 * (qi**2 + qj**2)
return R
def rot_to_quat(R):
batch_size, _,_ = R.shape
q = torch.ones((batch_size, 4)).cuda()
R00 = R[:, 0,0]
R01 = R[:, 0, 1]
R02 = R[:, 0, 2]
R10 = R[:, 1, 0]
R11 = R[:, 1, 1]
R12 = R[:, 1, 2]
R20 = R[:, 2, 0]
R21 = R[:, 2, 1]
R22 = R[:, 2, 2]
q[:,0]=torch.sqrt(1.0+R00+R11+R22)/2
q[:, 1]=(R21-R12)/(4*q[:,0])
q[:, 2] = (R02 - R20) / (4 * q[:, 0])
q[:, 3] = (R10 - R01) / (4 * q[:, 0])
return q
def get_sphere_intersection(cam_loc, ray_directions, r = 1.0):
# Input: n_images x 4 x 4 ; n_images x n_rays x 3
# Output: n_images * n_rays x 2 (close and far) ; n_images * n_rays
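    # Solves ||cam_loc + t * d||^2 = r^2 for unit-norm ray directions d, i.e.
    #   t^2 + 2 t <d, cam_loc> + ||cam_loc||^2 - r^2 = 0,
    # giving t = -<d, cam_loc> +/- sqrt(<d, cam_loc>^2 - (||cam_loc||^2 - r^2)).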
n_imgs, n_pix, _ = ray_directions.shape
cam_loc = cam_loc.unsqueeze(-1)
ray_cam_dot = torch.bmm(ray_directions, cam_loc).squeeze()
under_sqrt = ray_cam_dot ** 2 - (cam_loc.norm(2,1) ** 2 - r ** 2)
under_sqrt = under_sqrt.reshape(-1)
mask_intersect = under_sqrt > 0
sphere_intersections = torch.zeros(n_imgs * n_pix, 2).cuda().float()
sphere_intersections[mask_intersect] = torch.sqrt(under_sqrt[mask_intersect]).unsqueeze(-1) * torch.Tensor([-1, 1]).cuda().float()
sphere_intersections[mask_intersect] -= ray_cam_dot.reshape(-1)[mask_intersect].unsqueeze(-1)
sphere_intersections = sphere_intersections.reshape(n_imgs, n_pix, 2)
sphere_intersections = sphere_intersections.clamp_min(0.0)
mask_intersect = mask_intersect.reshape(n_imgs, n_pix)
return sphere_intersections, mask_intersect
def get_depth(points, pose):
''' Retruns depth from 3D points according to camera pose '''
batch_size, num_samples, _ = points.shape
if pose.shape[1] == 7: # In case of quaternion vector representation
cam_loc = pose[:, 4:]
R = quat_to_rot(pose[:, :4])
pose = torch.eye(4).unsqueeze(0).repeat(batch_size, 1, 1).cuda().float()
pose[:, :3, 3] = cam_loc
pose[:, :3, :3] = R
points_hom = torch.cat((points, torch.ones((batch_size, num_samples, 1)).cuda()), dim=2)
# permute for batch matrix product
points_hom = points_hom.permute(0, 2, 1)
points_cam = torch.inverse(pose).bmm(points_hom)
depth = points_cam[:, 2, :][:, :, None]
return depth
| insightface/reconstruction/PBIDR/code/utils/rend_util.py/0 | {
"file_path": "insightface/reconstruction/PBIDR/code/utils/rend_util.py",
"repo_id": "insightface",
"token_count": 2919
} | 139 |
from easydict import EasyDict as edict
import numpy as np
config = edict()
config.embedding_size = 512
config.sample_rate = 1
config.fp16 = 0
config.tf32 = False
config.backbone_wd = None
config.batch_size = 128
config.clip_grad = None
config.dropout = 0.0
#config.warmup_epoch = -1
config.loss = 'cosface'
config.margin = 0.4
config.hard_margin = False
config.network = 'r50'
config.prelu = True
config.stem_type = ''
config.dropblock = 0.0
config.output = None
config.input_size = 112
config.width_mult = 1.0
config.kaiming_init = True
config.use_se = False
config.aug_modes = []
config.checkpoint_segments = [1, 1, 1, 1]
config.sampling_id = True
config.id_sampling_ratio = None
metric_loss = edict()
metric_loss.enable = False
metric_loss.lambda_n = 0.0
metric_loss.lambda_c = 0.0
metric_loss.lambda_t = 0.0
metric_loss.margin_c = 1.0
metric_loss.margin_t = 1.0
metric_loss.margin_n = 0.4
config.metric_loss = metric_loss
config.opt = 'sgd'
config.lr = 0.1 # when batch size is 512
config.momentum = 0.9
config.weight_decay = 5e-4
config.fc_mom = 0.9
config.warmup_epochs = 0
config.max_warmup_steps = 6000
config.num_epochs = 24
config.resume = False
config.resume_path = None
config.resume_from = None
config.save_every_epochs = True
config.lr_func = None
config.lr_epochs = None
config.save_pfc = False
config.save_onnx = False
config.save_opt = False
config.label_6dof_mean = np.array([-0.018197, -0.017891, 0.025348, -0.005368, 0.001176, -0.532206], dtype=np.float32) # mean of pitch, yaw, roll, tx, ty, tz
config.label_6dof_std = np.array([0.314015, 0.271809, 0.081881, 0.022173, 0.048839, 0.065444], dtype=np.float32) # std of pitch, yaw, roll, tx, ty, tz
config.num_verts = 1220
config.flipindex_file = 'cache_align/flip_index.npy'
config.enable_flip = True
config.verts3d_central_index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 956, 975, 1022, 1041, 1047, 1048, 1049]
config.task = 0
config.ckpt = None
config.loss_hard = False
config.sampling_hard = False
config.loss_pip = False
config.net_stride = 32
config.loss_bone3d = False
config.loss_bone2d = False
config.lossw_verts3d = 8.0
config.lossw_verts2d = 16.0
config.lossw_bone3d = 10.0
config.lossw_bone2d = 10.0
config.lossw_project = 10.0
config.lossw_eyes3d = 8.0
config.lossw_eyes2d = 16.0
config.align_face = False
config.no_gap = False
config.use_trainval = False
config.project_loss = False
config.use_onenetwork = True
config.use_rtloss = False
config.use_arcface = False
config.eyes = None
| insightface/reconstruction/jmlr/configs/base.py/0 | {
"file_path": "insightface/reconstruction/jmlr/configs/base.py",
"repo_id": "insightface",
"token_count": 1075
} | 140 |
import importlib
import os
import os.path as osp
import numpy as np
def get_config(config_file):
assert config_file.startswith('configs/'), 'config file setting must start with configs/'
temp_config_name = osp.basename(config_file)
temp_module_name = osp.splitext(temp_config_name)[0]
#print('A:', config_file, temp_config_name, temp_module_name)
config1 = importlib.import_module("configs.base")
importlib.reload(config1)
cfg = config1.config
#print('B1:', cfg)
config2 = importlib.import_module("configs.%s"%temp_module_name)
importlib.reload(config2)
#reload(config2)
job_cfg = config2.config
#print('B2:', job_cfg)
cfg.update(job_cfg)
cfg.job_name = temp_module_name
#print('B:', cfg)
if cfg.output is None:
cfg.output = osp.join('work_dirs', temp_module_name)
#print('C:', cfg.output)
cfg.flipindex = np.load(cfg.flipindex_file)
return cfg
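# Typical call site (illustrative; any job config file under configs/ works):
#
#   from utils.utils_config import get_config
#   cfg = get_config('configs/base.py')
#   print(cfg.output, cfg.batch_size)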
| insightface/reconstruction/jmlr/utils/utils_config.py/0 | {
"file_path": "insightface/reconstruction/jmlr/utils/utils_config.py",
"repo_id": "insightface",
"token_count": 394
} | 141 |
# Copyright (c) 2020, Baris Gecer. All rights reserved.
#
# This work is made available under the CC BY-NC-SA 4.0.
# To view a copy of this license, see LICENSE
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import cv2
import os
import inspect
class Face_Detector(object):
def __init__(self, gpuid = -1):
self.minsize = 40 # minimum size of face
self.threshold = [0.6, 0.7, 0.7] # three steps's threshold
self.factor = 0.709 # scale factor
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
with tf.device('/'+('cpu' if gpuid<0 else 'gpu')+':'+('0' if gpuid<0 else str(gpuid))):
with tf.Graph().as_default():
sess = tf.Session(config= tf.ConfigProto(device_count = {'GPU': 0 if gpuid<0 else 1}))
with sess.as_default():
self.pnet, self.rnet, self.onet = create_detector(sess, '{}/mtcnn'.format(current_dir))
print('MTCNN loaded')
def face_detection(self,img):
bounding_boxes, points = detect_face(img, self.minsize,
self.pnet, self.rnet, self.onet, self.threshold,
self.factor)
return bounding_boxes, points.reshape([2,-1]).T
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.terminals) == 0:
raise RuntimeError('No input variables found for layer %s.' % name)
elif len(self.terminals) == 1:
layer_input = self.terminals[0]
else:
layer_input = list(self.terminals)
# Perform the operation and get the output.
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
self.layers[name] = layer_output
# This output is now the input for the next layer.
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
# The input nodes for this network
self.inputs = inputs
# The current list of terminal nodes
self.terminals = []
# Mapping from layer names to layers
self.layers = dict(inputs)
# If true, the resulting variables are set as trainable
self.trainable = trainable
self.setup()
def setup(self):
'''Construct the network. '''
raise NotImplementedError('Must be implemented by the subclass.')
def load(self, data_path, session, ignore_missing=False):
'''Load network weights.
data_path: The path to the numpy-serialized network weights
session: The current TensorFlow session
ignore_missing: If true, serialized weights for missing layers are ignored.
'''
        data_dict = np.load(data_path, encoding='latin1', allow_pickle=True).item() #pylint: disable=no-member
for op_name in data_dict:
with tf.variable_scope(op_name, reuse=True):
for param_name, data in data_dict[op_name].items():
try:
var = tf.get_variable(param_name)
session.run(var.assign(data))
except ValueError:
if not ignore_missing:
raise
def feed(self, *args):
'''Set the input(s) for the next operation by replacing the terminal nodes.
The arguments can be either layer names or the actual layers.
'''
assert len(args) != 0
self.terminals = []
for fed_layer in args:
if isinstance(fed_layer, str):
try:
fed_layer = self.layers[fed_layer]
except KeyError:
raise KeyError('Unknown layer name fed: %s' % fed_layer)
self.terminals.append(fed_layer)
return self
def get_output(self):
'''Returns the current network output.'''
return self.terminals[-1]
def get_unique_name(self, prefix):
'''Returns an index-suffixed unique name for the given prefix.
This is used for auto-generating layer names based on the type-prefix.
'''
ident = sum(t.startswith(prefix) for t, _ in self.layers.items()) + 1
return '%s_%d' % (prefix, ident)
def make_var(self, name, shape):
'''Creates a new TensorFlow variable.'''
return tf.get_variable(name, shape, trainable=self.trainable)
def validate_padding(self, padding):
'''Verifies that the padding is one of the supported ones.'''
assert padding in ('SAME', 'VALID')
@layer
def conv(self,
inp,
k_h,
k_w,
c_o,
s_h,
s_w,
name,
relu=True,
padding='SAME',
group=1,
biased=True):
# Verify that the padding is acceptable
self.validate_padding(padding)
# Get the number of channels in the input
c_i = inp.get_shape()[-1]
# Verify that the grouping parameter is valid
assert c_i % group == 0
assert c_o % group == 0
# Convolution for a given input and kernel
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
kernel = self.make_var('weights', shape=[k_h, k_w, c_i // group, c_o])
# This is the common-case. Convolve the input without any further complications.
output = convolve(inp, kernel)
# Add the biases
if biased:
biases = self.make_var('biases', [c_o])
output = tf.nn.bias_add(output, biases)
if relu:
# ReLU non-linearity
output = tf.nn.relu(output, name=scope.name)
return output
@layer
def prelu(self, inp, name):
with tf.variable_scope(name):
i = inp.get_shape().as_list()
alpha = self.make_var('alpha', shape=(i[-1]))
output = tf.nn.relu(inp) + tf.multiply(alpha, -tf.nn.relu(-inp))
return output
@layer
def max_pool(self, inp, k_h, k_w, s_h, s_w, name, padding='SAME'):
self.validate_padding(padding)
return tf.nn.max_pool(inp,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def fc(self, inp, num_out, name, relu=True):
with tf.variable_scope(name):
input_shape = inp.get_shape()
if input_shape.ndims == 4:
# The input is spatial. Vectorize it first.
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(inp, [-1, dim])
else:
feed_in, dim = (inp, input_shape[-1].value)
weights = self.make_var('weights', shape=[dim, num_out])
biases = self.make_var('biases', [num_out])
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name=name)
return fc
"""
Multi dimensional softmax,
refer to https://github.com/tensorflow/tensorflow/issues/210
compute softmax along the dimension of target
the native softmax only supports batch_size x dimension
"""
@layer
def softmax(self, target, axis, name=None):
max_axis = tf.reduce_max(target, axis, keepdims=True)
target_exp = tf.exp(target-max_axis)
normalize = tf.reduce_sum(target_exp, axis, keepdims=True)
softmax = tf.div(target_exp, normalize, name)
return softmax
class PNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 10, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='PReLU1')
.max_pool(2, 2, 2, 2, name='pool1')
.conv(3, 3, 16, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='PReLU2')
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='PReLU3')
.conv(1, 1, 2, 1, 1, relu=False, name='conv4-1')
.softmax(3,name='prob1'))
(self.feed('PReLU3') #pylint: disable=no-value-for-parameter
.conv(1, 1, 4, 1, 1, relu=False, name='conv4-2'))
class RNet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 28, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 48, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(2, 2, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.fc(128, relu=False, name='conv4')
.prelu(name='prelu4')
.fc(2, relu=False, name='conv5-1')
.softmax(1,name='prob1'))
(self.feed('prelu4') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv5-2'))
class ONet(Network):
def setup(self):
(self.feed('data') #pylint: disable=no-value-for-parameter, no-member
.conv(3, 3, 32, 1, 1, padding='VALID', relu=False, name='conv1')
.prelu(name='prelu1')
.max_pool(3, 3, 2, 2, name='pool1')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv2')
.prelu(name='prelu2')
.max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 64, 1, 1, padding='VALID', relu=False, name='conv3')
.prelu(name='prelu3')
.max_pool(2, 2, 2, 2, name='pool3')
.conv(2, 2, 128, 1, 1, padding='VALID', relu=False, name='conv4')
.prelu(name='prelu4')
.fc(256, relu=False, name='conv5')
.prelu(name='prelu5')
.fc(2, relu=False, name='conv6-1')
.softmax(1, name='prob1'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(4, relu=False, name='conv6-2'))
(self.feed('prelu5') #pylint: disable=no-value-for-parameter
.fc(10, relu=False, name='conv6-3'))
def create_detector(sess, model_path):
with tf.variable_scope('pnet'):
data = tf.placeholder(tf.float32, (None,None,None,3), 'input')
pnet = PNet({'data':data})
pnet.load(os.path.join(model_path, 'cas1.npy'), sess)
with tf.variable_scope('rnet'):
data = tf.placeholder(tf.float32, (None,24,24,3), 'input')
rnet = RNet({'data':data})
rnet.load(os.path.join(model_path, 'cas2.npy'), sess)
with tf.variable_scope('onet'):
data = tf.placeholder(tf.float32, (None,48,48,3), 'input')
onet = ONet({'data':data})
onet.load(os.path.join(model_path, 'cas3.npy'), sess)
pnet_fun = lambda img : sess.run(('pnet/conv4-2/BiasAdd:0', 'pnet/prob1:0'), feed_dict={'pnet/input:0':img})
rnet_fun = lambda img : sess.run(('rnet/conv5-2/conv5-2:0', 'rnet/prob1:0'), feed_dict={'rnet/input:0':img})
onet_fun = lambda img : sess.run(('onet/conv6-2/conv6-2:0', 'onet/conv6-3/conv6-3:0', 'onet/prob1:0'), feed_dict={'onet/input:0':img})
return pnet_fun, rnet_fun, onet_fun
def detect_face(img, minsize, pnet, rnet, onet, threshold, factor):
    # img: input image
    # minsize: minimum face size to detect
    # pnet, rnet, onet: the three cascade stage models
    # threshold: [th1, th2, th3], score thresholds for the three stages
    # factor: scale factor used to resize img when building the pyramid
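    # The detector runs as a three-stage cascade: P-Net scans an image pyramid for candidate
    # windows, R-Net re-scores 24x24 crops of the survivors, and O-Net refines 48x48 crops and
    # regresses 5 facial landmarks; NMS is applied between and after the stages.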
factor_count=0
total_boxes=np.empty((0,9))
points=[]
h=img.shape[0]
w=img.shape[1]
minl=np.amin([h, w])
m=12.0/minsize
minl=minl*m
    # create scale pyramid
scales=[]
while minl>=12:
scales += [m*np.power(factor, factor_count)]
minl = minl*factor
factor_count += 1
# first stage
for j in range(len(scales)):
scale=scales[j]
hs=int(np.ceil(h*scale))
ws=int(np.ceil(w*scale))
im_data = imresample(img, (hs, ws))
im_data = (im_data-127.5)*0.0078125
img_x = np.expand_dims(im_data, 0)
img_y = np.transpose(img_x, (0,2,1,3))
out = pnet(img_y)
out0 = np.transpose(out[0], (0,2,1,3))
out1 = np.transpose(out[1], (0,2,1,3))
boxes, _ = generateBoundingBox(out1[0,:,:,1].copy(), out0[0,:,:,:].copy(), scale, threshold[0])
# inter-scale nms
pick = nms(boxes.copy(), 0.5, 'Union')
if boxes.size>0 and pick.size>0:
boxes = boxes[pick,:]
total_boxes = np.append(total_boxes, boxes, axis=0)
numbox = total_boxes.shape[0]
if numbox>0:
pick = nms(total_boxes.copy(), 0.7, 'Union')
total_boxes = total_boxes[pick,:]
regw = total_boxes[:,2]-total_boxes[:,0]
regh = total_boxes[:,3]-total_boxes[:,1]
qq1 = total_boxes[:,0]+total_boxes[:,5]*regw
qq2 = total_boxes[:,1]+total_boxes[:,6]*regh
qq3 = total_boxes[:,2]+total_boxes[:,7]*regw
qq4 = total_boxes[:,3]+total_boxes[:,8]*regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:,4]]))
total_boxes = rerec(total_boxes.copy())
total_boxes[:,0:4] = np.fix(total_boxes[:,0:4]).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
numbox = total_boxes.shape[0]
if numbox>0:
# second stage
tempimg = np.zeros((24,24,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (24, 24))
else:
                return np.empty(0)  # degenerate crop size; return an empty result
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = rnet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1,:]
ipass = np.where(score>threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
if total_boxes.shape[0]>0:
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick,:]
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv[:,pick]))
total_boxes = rerec(total_boxes.copy())
numbox = total_boxes.shape[0]
if numbox>0:
# third stage
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48,48,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (48, 48))
else:
                return np.empty(0)  # degenerate crop size; return an empty result
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1,:]
points = out1
ipass = np.where(score>threshold[2])
points = points[:,ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
w = total_boxes[:,2]-total_boxes[:,0]+1
h = total_boxes[:,3]-total_boxes[:,1]+1
points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
if total_boxes.shape[0]>0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick,:]
points = points[:,pick]
return total_boxes, points
def box_regression(img, onet, total_boxes, threshold):
    # img: input image
    # onet: the third-stage model
    # total_boxes: Nx5 array of [x1 y1 x2 y2 score]
    # threshold: score threshold, e.g. 0.7
points=[]
h=img.shape[0]
w=img.shape[1]
numbox = total_boxes.shape[0]
if numbox>0:
total_boxes = rerec(total_boxes)
total_boxes = np.fix(total_boxes).astype(np.int32)
dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph = pad(total_boxes.copy(), w, h)
tempimg = np.zeros((48,48,3,numbox))
for k in range(0,numbox):
tmp = np.zeros((int(tmph[k]),int(tmpw[k]),3))
tmp[dy[k]-1:edy[k],dx[k]-1:edx[k],:] = img[y[k]-1:ey[k],x[k]-1:ex[k],:]
if tmp.shape[0]>0 and tmp.shape[1]>0 or tmp.shape[0]==0 and tmp.shape[1]==0:
tempimg[:,:,:,k] = imresample(tmp, (48, 48))
else:
                return np.empty(0)  # degenerate crop size; return an empty result
tempimg = (tempimg-127.5)*0.0078125
tempimg1 = np.transpose(tempimg, (3,1,0,2))
out = onet(tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1,:]
points = out1
ipass = np.where(score>threshold)
points = points[:,ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0],0:4].copy(), np.expand_dims(score[ipass].copy(),1)])
mv = out0[:,ipass[0]]
w = total_boxes[:,2]-total_boxes[:,0]+1
h = total_boxes[:,3]-total_boxes[:,1]+1
points[0:5,:] = np.tile(w,(5, 1))*points[0:5,:] + np.tile(total_boxes[:,0],(5, 1))-1
points[5:10,:] = np.tile(h,(5, 1))*points[5:10,:] + np.tile(total_boxes[:,1],(5, 1))-1
if total_boxes.shape[0]>0:
total_boxes = bbreg(total_boxes.copy(), np.transpose(mv))
pick = nms(total_boxes.copy(), 0.7, 'Min')
total_boxes = total_boxes[pick,:]
points = points[:,pick]
return total_boxes, points
def bbreg(boundingbox,reg):
# calibrate bounding boxes
if reg.shape[1]==1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:,2]-boundingbox[:,0]+1
h = boundingbox[:,3]-boundingbox[:,1]+1
b1 = boundingbox[:,0]+reg[:,0]*w
b2 = boundingbox[:,1]+reg[:,1]*h
b3 = boundingbox[:,2]+reg[:,2]*w
b4 = boundingbox[:,3]+reg[:,3]*h
boundingbox[:,0:4] = np.transpose(np.vstack([b1, b2, b3, b4 ]))
return boundingbox
def generateBoundingBox(imap, reg, scale, t):
# use heatmap to generate bounding boxes
stride=2
cellsize=12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:,:,0])
dy1 = np.transpose(reg[:,:,1])
dx2 = np.transpose(reg[:,:,2])
dy2 = np.transpose(reg[:,:,3])
y, x = np.where(imap >= t)
if y.shape[0]==1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y,x)]
reg = np.transpose(np.vstack([ dx1[(y,x)], dy1[(y,x)], dx2[(y,x)], dy2[(y,x)] ]))
if reg.size==0:
reg = np.empty((0,3))
bb = np.transpose(np.vstack([y,x]))
q1 = np.fix((stride*bb+1)/scale)
q2 = np.fix((stride*bb+cellsize-1+1)/scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score,1), reg])
return boundingbox, reg
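# Greedy non-maximum suppression over [x1 y1 x2 y2 score] rows. 'Union' measures overlap as
# IoU = inter / (areaA + areaB - inter); 'Min' uses inter / min(areaA, areaB), which also
# suppresses boxes nested inside larger ones.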
def nms(boxes, threshold, method):
if boxes.size==0:
return np.empty((0,3))
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
s = boxes[:,4]
area = (x2-x1+1) * (y2-y1+1)
I = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while I.size>0:
i = I[-1]
pick[counter] = i
counter += 1
idx = I[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2-xx1+1)
h = np.maximum(0.0, yy2-yy1+1)
inter = w * h
        if method == 'Min':
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
I = I[np.where(o<=threshold)]
pick = pick[0:counter]
return pick
def pad(total_boxes, w, h):
# compute the padding coordinates (pad the bounding boxes to square)
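    # Returns 1-based (MATLAB-style) source coordinates (y, ey, x, ex) clipped to the image and
    # destination coordinates (dy, edy, dx, edx) into each tmpw x tmph crop, so that regions
    # falling outside the image stay zero-padded when the caller copies pixels.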
tmpw = (total_boxes[:,2]-total_boxes[:,0]+1).astype(np.int32)
tmph = (total_boxes[:,3]-total_boxes[:,1]+1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones((numbox), dtype=np.int32)
dy = np.ones((numbox), dtype=np.int32)
edx = tmpw.copy().astype(np.int32)
edy = tmph.copy().astype(np.int32)
x = total_boxes[:,0].copy().astype(np.int32)
y = total_boxes[:,1].copy().astype(np.int32)
ex = total_boxes[:,2].copy().astype(np.int32)
ey = total_boxes[:,3].copy().astype(np.int32)
tmp = np.where(ex>w)
edx[tmp] = np.expand_dims(-ex[tmp]+w+tmpw[tmp],0)
ex[tmp] = w
tmp = np.where(ey>h)
edy[tmp] = np.expand_dims(-ey[tmp]+h+tmph[tmp],0)
ey[tmp] = h
tmp = np.where(x<1)
dx[tmp] = np.expand_dims(2-x[tmp],0)
x[tmp] = 1
tmp = np.where(y<1)
dy[tmp] = np.expand_dims(2-y[tmp],0)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
def rerec(bboxA):
# convert bboxA to square
h = bboxA[:,3]-bboxA[:,1]
w = bboxA[:,2]-bboxA[:,0]
l = np.maximum(w, h)
bboxA[:,0] = bboxA[:,0]+w*0.5-l*0.5
bboxA[:,1] = bboxA[:,1]+h*0.5-l*0.5
bboxA[:,2:4] = bboxA[:,0:2] + np.transpose(np.tile(l,(2,1)))
return bboxA
def imresample(img, sz):
im_data = cv2.resize(img, (sz[1], sz[0]), interpolation=cv2.INTER_AREA)
return im_data
| insightface/reconstruction/ostec/external/face_detector/detect_face.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/face_detector/detect_face.py",
"repo_id": "insightface",
"token_count": 11624
} | 142 |
import tensorflow as tf
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_float('initial_learning_rate', 0.0001, '''Initial learning rate.''')
tf.app.flags.DEFINE_float('num_epochs_per_decay', 5.0, '''Epochs after which learning rate decays.''')
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.97, '''Learning rate decay factor.''')
tf.app.flags.DEFINE_float('learning_rate_decay_step', 30000,'''Learning rate decay step.''')
tf.app.flags.DEFINE_integer('batch_size', 4, '''The batch size to use.''')
tf.app.flags.DEFINE_integer('eval_size', 4, '''The evaluation batch size to use.''')
tf.app.flags.DEFINE_integer('num_iterations', 2, '''The number of iterations to unfold the pose machine.''')
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,'''How many preprocess threads to use.''')
tf.app.flags.DEFINE_integer('n_landmarks', 84,'''number of landmarks.''')
tf.app.flags.DEFINE_integer('rescale', 256,'''Image scale.''')
tf.app.flags.DEFINE_string('dataset_dir', './data', '''Directory where to load datas.''')
tf.app.flags.DEFINE_string('train_dir', 'ckpt/train', '''Directory where to write event logs and checkpoint.''')
tf.app.flags.DEFINE_string('eval_dir', '','''Directory where to write event logs and checkpoint.''')
tf.app.flags.DEFINE_string('graph_dir', 'model/weight.pkl','''If specified, restore this pretrained model.''')
tf.app.flags.DEFINE_integer('max_steps', 1000000,'''Number of batches to run.''')
tf.app.flags.DEFINE_string('train_device', '/gpu:0','''Device to train with.''')
tf.app.flags.DEFINE_integer('flip_pred', 0,'''Whether to flip predictions.''')
tf.app.flags.DEFINE_string('train_model', '', '''training model.''')
tf.app.flags.DEFINE_string('pretrained_model_checkpoint_path', '', '''Restore pretrained model.''')
tf.app.flags.DEFINE_string('testset_name', '', '''test set name.''')
tf.app.flags.DEFINE_string('model_name', '', '''test model name.''')
tf.app.flags.DEFINE_string('savemat_name', '', '''save_mat_name''') | insightface/reconstruction/ostec/external/landmark_detector/flags.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/landmark_detector/flags.py",
"repo_id": "insightface",
"token_count": 702
} | 143 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""TensorFlow custom ops builder.
"""
import os
import re
import uuid
import hashlib
import tempfile
import shutil
import tensorflow as tf
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
#----------------------------------------------------------------------------
# Global options.
cuda_cache_path = os.path.join(os.path.dirname(__file__), '_cudacache')
cuda_cache_version_tag = 'v1'
do_not_hash_included_headers = False # Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!
verbose = True # Print status messages to stdout.
compiler_bindir_search_path = [
'C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin',
]
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
for compiler_path in compiler_bindir_search_path:
if os.path.isdir(compiler_path):
return compiler_path
return None
def _get_compute_cap(device):
caps_str = device.physical_device_desc
m = re.search('compute capability: (\\d+).(\\d+)', caps_str)
major = m.group(1)
minor = m.group(2)
return (major, minor)
def _get_cuda_gpu_arch_string():
gpus = [x for x in device_lib.list_local_devices() if x.device_type == 'GPU']
if len(gpus) == 0:
raise RuntimeError('No GPU devices found')
(major, minor) = _get_compute_cap(gpus[0])
return 'sm_%s%s' % (major, minor)
def _run_cmd(cmd):
with os.popen(cmd) as pipe:
output = pipe.read()
status = pipe.close()
if status is not None:
raise RuntimeError('NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s' % (cmd, output))
def _prepare_nvcc_cli(opts):
cmd = 'nvcc --std=c++11 -DNDEBUG ' + opts.strip()
cmd += ' --disable-warnings'
cmd += ' --include-path "%s"' % tf.sysconfig.get_include()
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src')
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'com_google_absl')
cmd += ' --include-path "%s"' % os.path.join(tf.sysconfig.get_include(), 'external', 'eigen_archive')
compiler_bindir = _find_compiler_bindir()
if compiler_bindir is None:
# Require that _find_compiler_bindir succeeds on Windows. Allow
# nvcc to use whatever is the default on Linux.
if os.name == 'nt':
raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__)
else:
cmd += ' --compiler-bindir "%s"' % compiler_bindir
cmd += ' 2>&1'
return cmd
#----------------------------------------------------------------------------
# Main entry point.
_plugin_cache = dict()
def get_plugin(cuda_file):
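    # Compiles the .cu file with NVCC on first use, caches the shared library under
    # cuda_cache_path, and loads it via tf.load_op_library. Illustrative usage (the op and
    # file names below are hypothetical):
    #   plugin = get_plugin(os.path.join(os.path.dirname(__file__), 'my_op.cu'))
    #   y = plugin.my_op(x)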
cuda_file_base = os.path.basename(cuda_file)
cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)
# Already in cache?
if cuda_file in _plugin_cache:
return _plugin_cache[cuda_file]
# Setup plugin.
if verbose:
print('Setting up TensorFlow plugin "%s": ' % cuda_file_base, end='', flush=True)
try:
# Hash CUDA source.
md5 = hashlib.md5()
with open(cuda_file, 'rb') as f:
md5.update(f.read())
md5.update(b'\n')
# Hash headers included by the CUDA code by running it through the preprocessor.
if not do_not_hash_included_headers:
if verbose:
print('Preprocessing... ', end='', flush=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)
_run_cmd(_prepare_nvcc_cli('"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)))
with open(tmp_file, 'rb') as f:
bad_file_str = ('"' + cuda_file.replace('\\', '/') + '"').encode('utf-8') # __FILE__ in error check macros
good_file_str = ('"' + cuda_file_base + '"').encode('utf-8')
for ln in f:
if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas
ln = ln.replace(bad_file_str, good_file_str)
md5.update(ln)
md5.update(b'\n')
# Select compiler options.
compile_opts = ''
if os.name == 'nt':
compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')
elif os.name == 'posix':
compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')
compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\''
else:
assert False # not Windows or Linux, w00t?
compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()
compile_opts += ' --use_fast_math'
nvcc_cmd = _prepare_nvcc_cli(compile_opts)
# Hash build configuration.
md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n')
md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n')
md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n')
# Compile if not already compiled.
bin_file_ext = '.dll' if os.name == 'nt' else '.so'
bin_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + md5.hexdigest() + bin_file_ext)
if not os.path.isfile(bin_file):
if verbose:
print('Compiling... ', end='', flush=True)
with tempfile.TemporaryDirectory() as tmp_dir:
tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)
_run_cmd(nvcc_cmd + ' "%s" --shared -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))
os.makedirs(cuda_cache_path, exist_ok=True)
intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)
shutil.copyfile(tmp_file, intermediate_file)
os.rename(intermediate_file, bin_file) # atomic
# Load.
if verbose:
print('Loading... ', end='', flush=True)
plugin = tf.load_op_library(bin_file)
# Add to cache.
_plugin_cache[cuda_file] = plugin
if verbose:
print('Done.', flush=True)
return plugin
except:
if verbose:
print('Failed!', flush=True)
raise
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/dnnlib/tflib/custom_ops.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/dnnlib/tflib/custom_ops.py",
"repo_id": "insightface",
"token_count": 3131
} | 144 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Frechet Inception Distance (FID)."""
import os
import numpy as np
import scipy
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
class FID(metric_base.MetricBase):
def __init__(self, num_images, minibatch_per_gpu, **kwargs):
super().__init__(**kwargs)
self.num_images = num_images
self.minibatch_per_gpu = minibatch_per_gpu
def _evaluate(self, Gs, Gs_kwargs, num_gpus):
minibatch_size = num_gpus * self.minibatch_per_gpu
inception = misc.load_pkl('https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn') # inception_v3_features.pkl
activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)
# Calculate statistics for reals.
cache_file = self._get_cache_file_for_reals(num_images=self.num_images)
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
if os.path.isfile(cache_file):
mu_real, sigma_real = misc.load_pkl(cache_file)
else:
for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)):
begin = idx * minibatch_size
end = min(begin + minibatch_size, self.num_images)
activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True)
if end == self.num_images:
break
mu_real = np.mean(activations, axis=0)
sigma_real = np.cov(activations, rowvar=False)
misc.save_pkl((mu_real, sigma_real), cache_file)
# Construct TensorFlow graph.
result_expr = []
for gpu_idx in range(num_gpus):
with tf.device('/gpu:%d' % gpu_idx):
Gs_clone = Gs.clone()
inception_clone = inception.clone()
latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
labels = self._get_random_labels_tf(self.minibatch_per_gpu)
images = Gs_clone.get_output_for(latents, labels, **Gs_kwargs)
images = tflib.convert_images_to_uint8(images)
result_expr.append(inception_clone.get_output_for(images))
# Calculate statistics for fakes.
for begin in range(0, self.num_images, minibatch_size):
self._report_progress(begin, self.num_images)
end = min(begin + minibatch_size, self.num_images)
activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin]
mu_fake = np.mean(activations, axis=0)
sigma_fake = np.cov(activations, rowvar=False)
# Calculate FID.
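        # Frechet distance between the Gaussians fitted to real and fake activations:
        #   FID = ||mu_fake - mu_real||^2 + Tr(sigma_fake + sigma_real - 2*(sigma_fake @ sigma_real)^(1/2))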
m = np.square(mu_fake - mu_real).sum()
s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member
dist = m + np.trace(sigma_fake + sigma_real - 2*s)
self._report_result(np.real(dist))
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/metrics/frechet_inception_distance.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/metrics/frechet_inception_distance.py",
"repo_id": "insightface",
"token_count": 1485
} | 145 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Loss functions."""
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
#----------------------------------------------------------------------------
# Logistic loss from the paper
# "Generative Adversarial Nets", Goodfellow et al. 2014
def G_logistic(G, D, opt, training_set, minibatch_size):
_ = opt
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = training_set.get_random_labels_tf(minibatch_size)
fake_images_out = G.get_output_for(latents, labels, is_training=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
loss = -tf.nn.softplus(fake_scores_out) # log(1-sigmoid(fake_scores_out)) # pylint: disable=invalid-unary-operand-type
return loss, None
def G_logistic_ns(G, D, opt, training_set, minibatch_size):
_ = opt
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = training_set.get_random_labels_tf(minibatch_size)
fake_images_out = G.get_output_for(latents, labels, is_training=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
return loss, None
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels):
_ = opt, training_set
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = D.get_output_for(reals, labels, is_training=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = tf.nn.softplus(fake_scores_out) # -log(1-sigmoid(fake_scores_out))
loss += tf.nn.softplus(-real_scores_out) # -log(sigmoid(real_scores_out)) # pylint: disable=invalid-unary-operand-type
return loss, None
#----------------------------------------------------------------------------
# R1 and R2 regularizers from the paper
# "Which Training Methods for GANs do actually Converge?", Mescheder et al. 2018
def D_logistic_r1(G, D, opt, training_set, minibatch_size, reals, labels, gamma=10.0):
_ = opt, training_set
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = D.get_output_for(reals, labels, is_training=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = tf.nn.softplus(fake_scores_out) # -log(1-sigmoid(fake_scores_out))
loss += tf.nn.softplus(-real_scores_out) # -log(sigmoid(real_scores_out)) # pylint: disable=invalid-unary-operand-type
with tf.name_scope('GradientPenalty'):
real_grads = tf.gradients(tf.reduce_sum(real_scores_out), [reals])[0]
gradient_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1,2,3])
gradient_penalty = autosummary('Loss/gradient_penalty', gradient_penalty)
reg = gradient_penalty * (gamma * 0.5)
return loss, reg
def D_logistic_r2(G, D, opt, training_set, minibatch_size, reals, labels, gamma=10.0):
_ = opt, training_set
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = D.get_output_for(reals, labels, is_training=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = tf.nn.softplus(fake_scores_out) # -log(1-sigmoid(fake_scores_out))
loss += tf.nn.softplus(-real_scores_out) # -log(sigmoid(real_scores_out)) # pylint: disable=invalid-unary-operand-type
with tf.name_scope('GradientPenalty'):
fake_grads = tf.gradients(tf.reduce_sum(fake_scores_out), [fake_images_out])[0]
gradient_penalty = tf.reduce_sum(tf.square(fake_grads), axis=[1,2,3])
gradient_penalty = autosummary('Loss/gradient_penalty', gradient_penalty)
reg = gradient_penalty * (gamma * 0.5)
return loss, reg
#----------------------------------------------------------------------------
# WGAN loss from the paper
# "Wasserstein Generative Adversarial Networks", Arjovsky et al. 2017
def G_wgan(G, D, opt, training_set, minibatch_size):
_ = opt
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = training_set.get_random_labels_tf(minibatch_size)
fake_images_out = G.get_output_for(latents, labels, is_training=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
loss = -fake_scores_out
return loss, None
def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, wgan_epsilon=0.001):
_ = opt, training_set
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = D.get_output_for(reals, labels, is_training=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = fake_scores_out - real_scores_out
with tf.name_scope('EpsilonPenalty'):
epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
loss += epsilon_penalty * wgan_epsilon
return loss, None
#----------------------------------------------------------------------------
# WGAN-GP loss from the paper
# "Improved Training of Wasserstein GANs", Gulrajani et al. 2017
def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, wgan_lambda=10.0, wgan_epsilon=0.001, wgan_target=1.0):
_ = opt, training_set
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
fake_images_out = G.get_output_for(latents, labels, is_training=True)
real_scores_out = D.get_output_for(reals, labels, is_training=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
real_scores_out = autosummary('Loss/scores/real', real_scores_out)
fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
loss = fake_scores_out - real_scores_out
with tf.name_scope('EpsilonPenalty'):
epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
loss += epsilon_penalty * wgan_epsilon
with tf.name_scope('GradientPenalty'):
mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
mixed_scores_out = D.get_output_for(mixed_images_out, labels, is_training=True)
mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
mixed_grads = tf.gradients(tf.reduce_sum(mixed_scores_out), [mixed_images_out])[0]
mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
gradient_penalty = tf.square(mixed_norms - wgan_target)
reg = gradient_penalty * (wgan_lambda / (wgan_target**2))
return loss, reg
#----------------------------------------------------------------------------
# Non-saturating logistic loss with path length regularizer from the paper
# "Analyzing and Improving the Image Quality of StyleGAN", Karras et al. 2019
def G_logistic_ns_pathreg(G, D, opt, training_set, minibatch_size, pl_minibatch_shrink=2, pl_decay=0.01, pl_weight=2.0):
_ = opt
latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
labels = training_set.get_random_labels_tf(minibatch_size)
fake_images_out, fake_dlatents_out = G.get_output_for(latents, labels, is_training=True, return_dlatents=True)
fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
# Path length regularization.
with tf.name_scope('PathReg'):
# Evaluate the regularization term using a smaller minibatch to conserve memory.
if pl_minibatch_shrink > 1:
pl_minibatch = minibatch_size // pl_minibatch_shrink
pl_latents = tf.random_normal([pl_minibatch] + G.input_shapes[0][1:])
pl_labels = training_set.get_random_labels_tf(pl_minibatch)
fake_images_out, fake_dlatents_out = G.get_output_for(pl_latents, pl_labels, is_training=True, return_dlatents=True)
# Compute |J*y|.
pl_noise = tf.random_normal(tf.shape(fake_images_out)) / np.sqrt(np.prod(G.output_shape[2:]))
pl_grads = tf.gradients(tf.reduce_sum(fake_images_out * pl_noise), [fake_dlatents_out])[0]
pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis=2), axis=1))
pl_lengths = autosummary('Loss/pl_lengths', pl_lengths)
# Track exponential moving average of |J*y|.
with tf.control_dependencies(None):
pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0.0, dtype=tf.float32)
pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
pl_update = tf.assign(pl_mean_var, pl_mean)
# Calculate (|J*y|-a)^2.
with tf.control_dependencies([pl_update]):
pl_penalty = tf.square(pl_lengths - pl_mean)
pl_penalty = autosummary('Loss/pl_penalty', pl_penalty)
# Apply weight.
#
# Note: The division in pl_noise decreases the weight by num_pixels, and the reduce_mean
# in pl_lengths decreases it by num_affine_layers. The effective weight then becomes:
#
# gamma_pl = pl_weight / num_pixels / num_affine_layers
# = 2 / (r^2) / (log2(r) * 2 - 2)
# = 1 / (r^2 * (log2(r) - 1))
# = ln(2) / (r^2 * (ln(r) - ln(2))
#
reg = pl_penalty * pl_weight
return loss, reg
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/training/loss.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/training/loss.py",
"repo_id": "insightface",
"token_count": 4400
} | 146 |
#from __future__ import print_function
import sys
import caffe
import onnx
import numpy as np
from caffe.proto import caffe_pb2
caffe.set_mode_cpu()
from onnx2caffe._transformers import ConvAddFuser,ConstantsToInitializers
from onnx2caffe._graph import Graph
import onnx2caffe._operators as cvt
import onnx2caffe._weightloader as wlr
from onnx2caffe._error_utils import ErrorHandling
from collections import OrderedDict
from onnx import shape_inference
import importlib
USE_DECONV_AS_UPSAMPLE = True
transformers = [
ConstantsToInitializers(),
ConvAddFuser(),
]
def convertToCaffe(graph, prototxt_save_path, caffe_model_save_path):
exist_edges = []
layers = []
exist_nodes = []
err = ErrorHandling()
for i in graph.inputs:
edge_name = i[0]
input_layer = cvt.make_input(i)
layers.append(input_layer)
exist_edges.append(i[0])
graph.channel_dims[edge_name] = graph.shape_dict[edge_name][1]
for id, node in enumerate(graph.nodes):
node_name = node.name
op_type = node.op_type
inputs = node.inputs
inputs_tensor = node.input_tensors
input_non_exist_flag = False
for inp in inputs:
if inp not in exist_edges and inp not in inputs_tensor:
input_non_exist_flag = True
break
if input_non_exist_flag:
continue
if op_type not in cvt._ONNX_NODE_REGISTRY:
err.unsupported_op(node)
continue
converter_fn = cvt._ONNX_NODE_REGISTRY[op_type]
layer = converter_fn(node,graph,err)
if type(layer)==tuple:
for l in layer:
layers.append(l)
else:
layers.append(layer)
outs = node.outputs
for out in outs:
exist_edges.append(out)
net = caffe_pb2.NetParameter()
for id,layer in enumerate(layers):
layers[id] = layer._to_proto()
net.layer.extend(layers)
with open(prototxt_save_path, 'w') as f:
print(net,file=f)
caffe.set_mode_cpu()
deploy = prototxt_save_path
net = caffe.Net(deploy,
caffe.TEST)
for id, node in enumerate(graph.nodes):
node_name = node.name
op_type = node.op_type
inputs = node.inputs
inputs_tensor = node.input_tensors
input_non_exist_flag = False
if op_type not in wlr._ONNX_NODE_REGISTRY:
err.unsupported_op(node)
continue
converter_fn = wlr._ONNX_NODE_REGISTRY[op_type]
converter_fn(net, node, graph, err)
net.save(caffe_model_save_path)
return net
def getGraph(onnx_path):
model = onnx.load(onnx_path)
output_names = [node.name for node in model.graph.output]
model = shape_inference.infer_shapes(model)
model_graph = model.graph
graph = Graph.from_onnx(model_graph)
graph = graph.transformed(transformers)
graph.channel_dims = {}
return graph, output_names
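# Command-line usage (the three paths are placeholders):
#   python convertCaffe.py model.onnx out.prototxt out.caffemodel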
if __name__ == "__main__":
cvt.USE_DECONV_AS_UPSAMPLE = USE_DECONV_AS_UPSAMPLE
wlr.USE_DECONV_AS_UPSAMPLE = USE_DECONV_AS_UPSAMPLE
onnx_path = sys.argv[1]
prototxt_path = sys.argv[2]
caffemodel_path = sys.argv[3]
graph, output_names = getGraph(onnx_path)
convertToCaffe(graph, prototxt_path, caffemodel_path)
print('output_names:', output_names)
| insightface/tools/onnx2caffe/convertCaffe.py/0 | {
"file_path": "insightface/tools/onnx2caffe/convertCaffe.py",
"repo_id": "insightface",
"token_count": 1554
} | 147 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="CompilerConfiguration">
<annotationProcessing>
<profile default="true" name="Default" enabled="true" />
<profile name="Maven default annotation processors profile" enabled="true">
<sourceOutputDir name="target/generated-sources/annotations" />
<sourceTestOutputDir name="target/generated-test-sources/test-annotations" />
<outputRelativeToContentRoot value="true" />
<module name="mybatis-native-demo" />
</profile>
</annotationProcessing>
</component>
<component name="JavacSettings">
<option name="ADDITIONAL_OPTIONS_OVERRIDE">
<module name="mybatis-native-demo" options="-parameters" />
</option>
</component>
</project> | mybatis-native-demo/.idea/compiler.xml/0 | {
"file_path": "mybatis-native-demo/.idea/compiler.xml",
"repo_id": "mybatis-native-demo",
"token_count": 278
} | 148 |
package com.example.nativedemo;
import com.example.nativedemo.controller.DemoController;
import org.graalvm.nativeimage.hosted.Feature;
import org.graalvm.nativeimage.hosted.RuntimeSerialization;
/**
 * Registers lambda-capturing classes with GraalVM native image.
* @author ztp
*/
public class LambdaRegistrationFeature implements Feature {
@Override
public void duringSetup(DuringSetupAccess access) {
        // TODO: register every class whose lambda expressions need to be captured here; the exact set
        // depends on the project, and classes annotated with @Controller and @Service usually account for most of them.
RuntimeSerialization.registerLambdaCapturingClass(DemoController.class);
}
}
| mybatis-native-demo/src/main/java/com/example/nativedemo/LambdaRegistrationFeature.java/0 | {
"file_path": "mybatis-native-demo/src/main/java/com/example/nativedemo/LambdaRegistrationFeature.java",
"repo_id": "mybatis-native-demo",
"token_count": 259
} | 149 |
[
{
"name": "graphql.relay.DefaultConnection",
"allDeclaredMethods": true,
"condition": {
"typeReachable": "graphql.relay.Connection"
}
},
{
"name": "graphql.relay.DefaultEdge",
"allDeclaredMethods": true,
"condition": {
"typeReachable": "graphql.relay.Connection"
}
},
{
"name": "graphql.relay.DefaultPageInfo",
"allDeclaredMethods": true,
"condition": {
"typeReachable": "graphql.relay.Connection"
}
},
{
"name": "graphql.relay.DefaultConnectionCursor",
"allDeclaredMethods": true,
"condition": {
"typeReachable": "graphql.relay.Connection"
}
},
{
"name": "graphql.schema.GraphQLArgument",
"allPublicMethods": true,
"condition": {
"typeReachable": "graphql.GraphQL"
}
},
{
"name": "graphql.schema.GraphQLDirective",
"allPublicMethods": true,
"condition": {
"typeReachable": "graphql.GraphQL"
}
},
{
"name": "graphql.schema.GraphQLEnumValueDefinition",
"allPublicMethods": true,
"condition": {
"typeReachable": "graphql.GraphQL"
}
},
{
"name": "graphql.schema.GraphQLFieldDefinition",
"allPublicMethods": true,
"condition": {
"typeReachable": "graphql.GraphQL"
}
},
{
"name": "graphql.schema.GraphQLInputObjectField",
"allPublicMethods": true,
"condition": {
"typeReachable": "graphql.GraphQL"
}
},
{
"name": "graphql.schema.GraphQLOutputType",
"allPublicMethods": true,
"condition": {
"typeReachable": "graphql.GraphQL"
}
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.graphql-java/graphql-java/19.2/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.graphql-java/graphql-java/19.2/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 684
} | 150 |
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.grpc/grpc-netty/1.51.0/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.grpc/grpc-netty/1.51.0/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 151 |
[
{
"name": "io.nats.client.impl.SocketDataPort",
"condition": {
"typeReachable": "io.nats.client.Options$Builder"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.nats/jnats/2.16.11/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.nats/jnats/2.16.11/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 133
} | 152 |
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-jaeger/1.19.0/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-jaeger/1.19.0/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 153 |
[
{
"condition": {
"typeReachable": "io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueConsumerIndexField"
},
"name": "io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueConsumerIndexField",
"fields": [
{
"name": "consumerIndex"
}
]
},
{
"condition": {
"typeReachable": "io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerIndexField"
},
"name": "io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerIndexField",
"fields": [
{
"name": "producerIndex"
}
]
},
{
"condition": {
"typeReachable": "io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerLimitField"
},
"name": "io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerLimitField",
"fields": [
{
"name": "producerLimit"
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-sdk-trace/1.19.0/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-sdk-trace/1.19.0/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 415
} | 154 |
[
{
"latest": true,
"metadata-version": "8.0.29",
"module": "mysql:mysql-connector-java",
"tested-versions": [
"8.0.29"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/mysql/mysql-connector-java/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/mysql/mysql-connector-java/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 82
} | 155 |
{
"resources": {
"includes": [
{
"pattern": "\\QMETA-INF/services/org.apache.activemq.artemis.spi.core.remoting.ssl.OpenSSLContextFactory\\E",
"condition": {
"typeReachable": "org.apache.activemq.artemis.spi.core.remoting.ssl.OpenSSLContextFactoryProvider"
}
},
{
"pattern": "\\QMETA-INF/services/org.apache.activemq.artemis.spi.core.remoting.ssl.SSLContextFactory\\E",
"condition": {
"typeReachable": "org.apache.activemq.artemis.spi.core.remoting.ssl.SSLContextFactoryProvider"
}
},
{
"pattern": "\\Qactivemq-version.properties\\E",
"condition": {
"typeReachable": "org.apache.activemq.artemis.utils.VersionLoader"
}
}
]
},
"bundles": [
]
}
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.activemq/artemis-jms-client/2.28.0/resource-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.activemq/artemis-jms-client/2.28.0/resource-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 545
} | 156 |
[
{
"latest": true,
"metadata-version": "3.5.1",
"module": "org.apache.kafka:kafka-clients",
"tested-versions": [
"3.5.1"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.kafka/kafka-clients/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.kafka/kafka-clients/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 86
} | 157 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jetty/jetty-server/11.0.12/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jetty/jetty-server/11.0.12/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 158 |
[
"jni-config.json",
"proxy-config.json",
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 41
} | 159 |
[
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.freemarker/freemarker/2.3.31/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.freemarker/freemarker/2.3.31/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 160 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate/hibernate-core/5.6.14.Final/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate/hibernate-core/5.6.14.Final/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 161 |
[
{
"latest": true,
"metadata-version": "1.7.10",
"module": "org.jetbrains.kotlin:kotlin-stdlib",
"tested-versions": [
"1.7.10"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jetbrains.kotlin/kotlin-stdlib/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jetbrains.kotlin/kotlin-stdlib/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 88
} | 162 |
[
{
"condition": {
"typeReachable": "org.opengauss.log.LogFactory"
},
"name": "JdkLogger"
},
{
"condition": {
"typeReachable": "org.opengauss.core.v3.ConnectionFactoryImpl"
},
"name": "java.security.SecureRandomParameters"
},
{
"condition": {
"typeReachable": "org.opengauss.core.Oid"
},
"name": "org.opengauss.core.Oid",
"allPublicFields": true
},
{
"condition": {
"typeReachable": "org.opengauss.jdbc.PgConnection"
},
"name": "org.opengauss.geometric.PGbox",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.jdbc.PgConnection"
},
"name": "org.opengauss.geometric.PGcircle",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.jdbc.PgConnection"
},
"name": "org.opengauss.geometric.PGlseg",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.jdbc.PgConnection"
},
"name": "org.opengauss.geometric.PGpath",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.jdbc.PgConnection"
},
"name": "org.opengauss.geometric.PGpoint",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.jdbc.PgConnection"
},
"name": "org.opengauss.geometric.PGpolygon",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.log.LogFactory"
},
"name": "org.opengauss.log.JdkLogger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.jdbc.PgConnection"
},
"name": "org.opengauss.util.PGInterval",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.util.MD5Digest"
},
"name": "sun.security.provider.MD5",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.core.v3.ConnectionFactoryImpl"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.opengauss.core.v3.ConnectionFactoryImpl"
},
"name": "sun.security.provider.SHA",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.opengauss/opengauss-jdbc/3.1.0-og/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.opengauss/opengauss-jdbc/3.1.0-og/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 1573
} | 163 |
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf.extras/thymeleaf-extras-springsecurity6/3.1.0.M1/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf.extras/thymeleaf-extras-springsecurity6/3.1.0.M1/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 164 |
artifactId=mybatis-native-demo
groupId=com.example
version=0.0.1-SNAPSHOT
| mybatis-native-demo/target/maven-archiver/pom.properties/0 | {
"file_path": "mybatis-native-demo/target/maven-archiver/pom.properties",
"repo_id": "mybatis-native-demo",
"token_count": 33
} | 165 |
package com.baomidou.mybatisplus.autoconfigure;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link MybatisPlusLanguageDriverAutoConfiguration}.
*/
public class MybatisPlusLanguageDriverAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'mybatisPlusLanguageDriverAutoConfiguration'.
*/
public static BeanDefinition getMybatisPlusLanguageDriverAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(MybatisPlusLanguageDriverAutoConfiguration.class);
beanDefinition.setLazyInit(true);
beanDefinition.setInstanceSupplier(MybatisPlusLanguageDriverAutoConfiguration::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/com/baomidou/mybatisplus/autoconfigure/MybatisPlusLanguageDriverAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/com/baomidou/mybatisplus/autoconfigure/MybatisPlusLanguageDriverAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 219
} | 166 |
package com.github.yulichang.autoconfigure;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link MybatisPlusJoinProperties}.
*/
public class MybatisPlusJoinProperties__BeanDefinitions {
/**
* Get the bean definition for 'mybatisPlusJoinProperties'.
*/
public static BeanDefinition getMybatisPlusJoinPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(MybatisPlusJoinProperties.class);
beanDefinition.setInstanceSupplier(MybatisPlusJoinProperties::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/com/github/yulichang/autoconfigure/MybatisPlusJoinProperties__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/com/github/yulichang/autoconfigure/MybatisPlusJoinProperties__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 196
} | 167 |
package org.springframework.boot.autoconfigure.jdbc;

import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;

/**
 * Bean definitions for {@link DataSourceAutoConfiguration}.
 */
public class DataSourceAutoConfiguration__BeanDefinitions {
  /**
   * Get the bean definition for 'dataSourceAutoConfiguration'.
   */
  public static BeanDefinition getDataSourceAutoConfigurationBeanDefinition() {
    RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceAutoConfiguration.class);
    beanDefinition.setInstanceSupplier(DataSourceAutoConfiguration::new);
    return beanDefinition;
  }

  /**
   * Bean definitions for {@link DataSourceAutoConfiguration.PooledDataSourceConfiguration}.
   */
  public static class PooledDataSourceConfiguration {
    /**
     * Get the bean definition for 'pooledDataSourceConfiguration'.
     */
    public static BeanDefinition getPooledDataSourceConfigurationBeanDefinition() {
      RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceAutoConfiguration.PooledDataSourceConfiguration.class);
      beanDefinition.setInstanceSupplier(DataSourceAutoConfiguration.PooledDataSourceConfiguration::new);
      return beanDefinition;
    }

    /**
     * Get the bean instance supplier for 'jdbcConnectionDetails'.
     */
    private static BeanInstanceSupplier<PropertiesJdbcConnectionDetails> getJdbcConnectionDetailsInstanceSupplier() {
      return BeanInstanceSupplier.<PropertiesJdbcConnectionDetails>forFactoryMethod(DataSourceAutoConfiguration.PooledDataSourceConfiguration.class, "jdbcConnectionDetails", DataSourceProperties.class)
          .withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(DataSourceAutoConfiguration.PooledDataSourceConfiguration.class).jdbcConnectionDetails(args.get(0)));
    }

    /**
     * Get the bean definition for 'jdbcConnectionDetails'.
     */
    public static BeanDefinition getJdbcConnectionDetailsBeanDefinition() {
      RootBeanDefinition beanDefinition = new RootBeanDefinition(PropertiesJdbcConnectionDetails.class);
      beanDefinition.setInstanceSupplier(getJdbcConnectionDetailsInstanceSupplier());
      return beanDefinition;
    }
  }
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 671
} | 168 |
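The `jdbcConnectionDetails` supplier above illustrates the general AOT pattern for `@Bean` factory methods: `forFactoryMethod` records the declaring class, method name, and parameter types, and `withGenerator` resolves the arguments and invokes the method at runtime. The self-contained sketch below restates that pattern with hypothetical `Demo*` types (all invented for illustration) so the shape is visible without the Spring Boot specifics.

import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;

public class FactoryMethodSupplierSketch {

  // Hypothetical configuration class, dependency, and product type.
  public static class DemoDependency {}

  public static class DemoService {
    DemoService(DemoDependency dependency) {}
  }

  public static class DemoConfiguration {
    public DemoService demoService(DemoDependency dependency) {
      return new DemoService(dependency);
    }
  }

  /**
   * Mirrors the generated style: the supplier names the factory method and its
   * parameter types, and the generator resolves those arguments at runtime.
   */
  private static BeanInstanceSupplier<DemoService> getDemoServiceInstanceSupplier() {
    return BeanInstanceSupplier.<DemoService>forFactoryMethod(
            DemoConfiguration.class, "demoService", DemoDependency.class)
        .withGenerator((registeredBean, args) -> registeredBean.getBeanFactory()
            .getBean(DemoConfiguration.class)
            .demoService(args.get(0)));
  }

  public static BeanDefinition getDemoServiceBeanDefinition() {
    RootBeanDefinition beanDefinition = new RootBeanDefinition(DemoService.class);
    beanDefinition.setInstanceSupplier(getDemoServiceInstanceSupplier());
    return beanDefinition;
  }
}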
package org.springframework.boot.autoconfigure.task;

import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;

/**
 * Bean definitions for {@link TaskExecutionAutoConfiguration}.
 */
public class TaskExecutionAutoConfiguration__BeanDefinitions {
  /**
   * Get the bean definition for 'taskExecutionAutoConfiguration'.
   */
  public static BeanDefinition getTaskExecutionAutoConfigurationBeanDefinition() {
    RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskExecutionAutoConfiguration.class);
    beanDefinition.setInstanceSupplier(TaskExecutionAutoConfiguration::new);
    return beanDefinition;
  }
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/task/TaskExecutionAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/task/TaskExecutionAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 184
} | 169 |
package org.springframework.boot.web.server;

import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;

/**
 * Bean definitions for {@link WebServerFactoryCustomizerBeanPostProcessor}.
 */
public class WebServerFactoryCustomizerBeanPostProcessor__BeanDefinitions {
  /**
   * Get the bean definition for 'webServerFactoryCustomizerBeanPostProcessor'.
   */
  public static BeanDefinition getWebServerFactoryCustomizerBeanPostProcessorBeanDefinition() {
    RootBeanDefinition beanDefinition = new RootBeanDefinition(WebServerFactoryCustomizerBeanPostProcessor.class);
    beanDefinition.setSynthetic(true);
    beanDefinition.setInstanceSupplier(WebServerFactoryCustomizerBeanPostProcessor::new);
    return beanDefinition;
  }
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/web/server/WebServerFactoryCustomizerBeanPostProcessor__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/web/server/WebServerFactoryCustomizerBeanPostProcessor__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 226
} | 170 |
package org.springframework.cloud.commons.security;

import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;

/**
 * Bean definitions for {@link ResourceServerTokenRelayAutoConfiguration}.
 */
public class ResourceServerTokenRelayAutoConfiguration__BeanDefinitions {
  /**
   * Get the bean definition for 'resourceServerTokenRelayAutoConfiguration'.
   */
  public static BeanDefinition getResourceServerTokenRelayAutoConfigurationBeanDefinition() {
    RootBeanDefinition beanDefinition = new RootBeanDefinition(ResourceServerTokenRelayAutoConfiguration.class);
    beanDefinition.setInstanceSupplier(ResourceServerTokenRelayAutoConfiguration::new);
    return beanDefinition;
  }

  /**
   * Bean definitions for {@link ResourceServerTokenRelayAutoConfiguration.ResourceServerTokenRelayRegistrationAutoConfiguration}.
   */
  public static class ResourceServerTokenRelayRegistrationAutoConfiguration {
    /**
     * Get the bean definition for 'resourceServerTokenRelayRegistrationAutoConfiguration'.
     */
    public static BeanDefinition getResourceServerTokenRelayRegistrationAutoConfigurationBeanDefinition() {
      RootBeanDefinition beanDefinition = new RootBeanDefinition(ResourceServerTokenRelayAutoConfiguration.ResourceServerTokenRelayRegistrationAutoConfiguration.class);
      beanDefinition.setInstanceSupplier(ResourceServerTokenRelayAutoConfiguration.ResourceServerTokenRelayRegistrationAutoConfiguration::new);
      return beanDefinition;
    }
  }
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/commons/security/ResourceServerTokenRelayAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/commons/security/ResourceServerTokenRelayAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 395
} | 171 |
#!/bin/bash
# Copyright 1999-2018 Alibaba Group Holding Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -x
export CUSTOM_SEARCH_NAMES="application"
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/
export MEMBER_LIST=""
PLUGINS_DIR="/home/nacos/plugins/peer-finder"
function print_servers() {
  if [[ ! -d "${PLUGINS_DIR}" ]]; then
    echo "" >"$CLUSTER_CONF"
    for server in ${NACOS_SERVERS}; do
      echo "$server" >>"$CLUSTER_CONF"
    done
  else
    bash $PLUGINS_DIR/plugin.sh
    sleep 30
  fi
}
#===========================================================================================
# JVM Configuration
#===========================================================================================
JAVA_OPT="${JAVA_OPT} -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled -XX:SoftRefLRUPolicyMSPerMB=0 -XX:+CMSClassUnloadingEnabled -XX:SurvivorRatio=8 "
if [[ "${MODE}" == "standalone" ]]; then
JAVA_OPT="${JAVA_OPT} -Xms${JVM_XMS} -Xmx${JVM_XMX} -Xmn${JVM_XMN}"
JAVA_OPT="${JAVA_OPT} -Dnacos.standalone=true"
else
if [[ "${EMBEDDED_STORAGE}" == "embedded" ]]; then
JAVA_OPT="${JAVA_OPT} -DembeddedStorage=true"
fi
JAVA_OPT="${JAVA_OPT} -server -Xms${JVM_XMS} -Xmx${JVM_XMX} -Xmn${JVM_XMN} -XX:MetaspaceSize=${JVM_MS} -XX:MaxMetaspaceSize=${JVM_MMS}"
if [[ "${NACOS_DEBUG}" == "y" ]]; then
JAVA_OPT="${JAVA_OPT} -Xdebug -Xrunjdwp:transport=dt_socket,address=9555,server=y,suspend=n"
fi
JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"
JAVA_OPT="${JAVA_OPT} -XX:-UseLargePages"
print_servers
fi
#===========================================================================================
# Setting system properties
#===========================================================================================
# set mode that Nacos Server function of split
if [[ "${FUNCTION_MODE}" == "config" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.functionMode=config"
elif [[ "${FUNCTION_MODE}" == "naming" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.functionMode=naming"
fi
# set nacos server ip
if [[ ! -z "${NACOS_SERVER_IP}" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.server.ip=${NACOS_SERVER_IP}"
fi
if [[ ! -z "${USE_ONLY_SITE_INTERFACES}" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.inetutils.use-only-site-local-interfaces=${USE_ONLY_SITE_INTERFACES}"
fi
if [[ ! -z "${PREFERRED_NETWORKS}" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.inetutils.preferred-networks=${PREFERRED_NETWORKS}"
fi
if [[ ! -z "${IGNORED_INTERFACES}" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.inetutils.ignored-interfaces=${IGNORED_INTERFACES}"
fi
### If turn on auth system:
if [[ ! -z "${NACOS_AUTH_ENABLE}" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.core.auth.enabled=${NACOS_AUTH_ENABLE}"
fi
if [[ "${PREFER_HOST_MODE}" == "hostname" ]]; then
JAVA_OPT="${JAVA_OPT} -Dnacos.preferHostnameOverIp=true"
fi
JAVA_OPT="${JAVA_OPT} -Dnacos.member.list=${MEMBER_LIST}"
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]]; then
JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/nacos_gc.log:time,tags:filecount=10,filesize=102400"
else
JAVA_OPT_EXT_FIX="-Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/nacos_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi
JAVA_OPT="${JAVA_OPT} -Dloader.path=${BASE_DIR}/plugins,${BASE_DIR}/plugins/health,${BASE_DIR}/plugins/cmdb,${BASE_DIR}/plugins/selector"
JAVA_OPT="${JAVA_OPT} -Dnacos.home=${BASE_DIR}"
JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/target/nacos-server.jar"
JAVA_OPT="${JAVA_OPT} ${JAVA_OPT_EXT}"
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
JAVA_OPT="${JAVA_OPT} --spring.config.name=${CUSTOM_SEARCH_NAMES}"
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/nacos-logback.xml"
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"
echo "Nacos is starting, you can docker logs your container"
exec $JAVA ${JAVA_OPT}
| nacos-docker/build/bin/docker-startup.sh/0 | {
"file_path": "nacos-docker/build/bin/docker-startup.sh",
"repo_id": "nacos-docker",
"token_count": 2030
} | 172 |
#
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*************** Spring Boot Related Configurations ***************#
### Default web context path:
server.servlet.contextPath=/nacos
### Include message field
server.error.include-message=ALWAYS
### Default web server port:
server.port=8848
#*************** Network Related Configurations ***************#
### If prefer hostname over ip for Nacos server addresses in cluster.conf:
# nacos.inetutils.prefer-hostname-over-ip=false
### Specify local server's IP:
# nacos.inetutils.ip-address=
#*************** Config Module Related Configurations ***************#
### Deprecated configuration property; it is recommended to use `spring.sql.init.platform` instead.
#spring.datasource.platform=${SPRING_DATASOURCE_PLATFORM:}
spring.sql.init.platform=${SPRING_DATASOURCE_PLATFORM:}
# nacos.plugin.datasource.log.enabled=true
### Count of DB:
db.num=1
### Connect URL of DB:
db.url.0=jdbc:mysql://mysql:3306/nacos_devtest?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC&useSSL=false&allowPublicKeyRetrieval=true
db.user.0=nacos
db.password.0=nacos
### Connection pool configuration: hikariCP
db.pool.config.connectionTimeout=30000
db.pool.config.validationTimeout=10000
db.pool.config.maximumPoolSize=20
db.pool.config.minimumIdle=2
#*************** Naming Module Related Configurations ***************#
### Data dispatch task execution period in milliseconds:
### If enable data warmup. If set to false, the server would accept request without local data preparation:
# nacos.naming.data.warmup=true
### If enable the instance auto expiration, kind like of health check of instance:
# nacos.naming.expireInstance=true
### will be removed and replaced by `nacos.naming.clean` properties
nacos.naming.empty-service.auto-clean=true
nacos.naming.empty-service.clean.initial-delay-ms=50000
nacos.naming.empty-service.clean.period-time-ms=30000
### Add in 2.0.0
### The interval to clean empty service, unit: milliseconds.
# nacos.naming.clean.empty-service.interval=60000
### The expired time to clean empty service, unit: milliseconds.
# nacos.naming.clean.empty-service.expired-time=60000
### The interval to clean expired metadata, unit: milliseconds.
# nacos.naming.clean.expired-metadata.interval=5000
### The expired time to clean metadata, unit: milliseconds.
# nacos.naming.clean.expired-metadata.expired-time=60000
### The delay time before push task to execute from service changed, unit: milliseconds.
# nacos.naming.push.pushTaskDelay=500
### The timeout for push task execute, unit: milliseconds.
# nacos.naming.push.pushTaskTimeout=5000
### The delay time for retrying failed push task, unit: milliseconds.
# nacos.naming.push.pushTaskRetryDelay=1000
### Since 2.0.3
### The expired time for inactive client, unit: milliseconds.
# nacos.naming.client.expired.time=180000
#*************** CMDB Module Related Configurations ***************#
### The interval to dump external CMDB in seconds:
# nacos.cmdb.dumpTaskInterval=3600
### The interval of polling data change event in seconds:
# nacos.cmdb.eventTaskInterval=10
### The interval of loading labels in seconds:
# nacos.cmdb.labelTaskInterval=300
### If turn on data loading task:
# nacos.cmdb.loadDataAtStart=false
#*************** Metrics Related Configurations ***************#
### Metrics for prometheus
#management.endpoints.web.exposure.include=*
### Metrics for elastic search
management.metrics.export.elastic.enabled=false
#management.metrics.export.elastic.host=http://localhost:9200
### Metrics for influx
management.metrics.export.influx.enabled=false
#management.metrics.export.influx.db=springboot
#management.metrics.export.influx.uri=http://localhost:8086
#management.metrics.export.influx.auto-create-db=true
#management.metrics.export.influx.consistency=one
#management.metrics.export.influx.compressed=true
#*************** Access Log Related Configurations ***************#
### If turn on the access log:
server.tomcat.accesslog.enabled=true
### accesslog automatic cleaning time
server.tomcat.accesslog.max-days=30
### The access log pattern:
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i
### The directory of access log:
server.tomcat.basedir=file:.
#*************** Access Control Related Configurations ***************#
### If enable spring security, this option is deprecated in 1.2.0:
#spring.security.enabled=false
### The ignore urls of auth, is deprecated in 1.2.0:
nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**
### The auth system to use, currently only 'nacos' and 'ldap' is supported:
nacos.core.auth.system.type=nacos
### If turn on auth system:
nacos.core.auth.enabled=false
### Turn on/off caching of auth information. Turning this switch on delays updates of auth information by up to 15 seconds.
nacos.core.auth.caching.enabled=true
### Since 1.4.1, turn on/off white-list auth for user-agent: nacos-server; only for upgrades from old versions.
nacos.core.auth.enable.userAgentAuthWhite=false
### Since 1.4.1, effective when nacos.core.auth.enabled=true and nacos.core.auth.enable.userAgentAuthWhite=false.
### These two properties form the white list for auth and are used to identify requests from other servers.
nacos.core.auth.server.identity.key=serverIdentity
nacos.core.auth.server.identity.value=security
### worked when nacos.core.auth.system.type=nacos
### The token expiration in seconds:
nacos.core.auth.plugin.nacos.token.expire.seconds=18000
### The default token (Base64 string):
nacos.core.auth.plugin.nacos.token.secret.key=SecretKey012345678901234567890123456789012345678901234567890123456789
### Worked when nacos.core.auth.system.type=ldap; {0} is a placeholder replaced by the login username
#nacos.core.auth.ldap.url=ldap://localhost:389
#nacos.core.auth.ldap.basedc=dc=example,dc=org
#nacos.core.auth.ldap.userDn=cn=admin,${nacos.core.auth.ldap.basedc}
#nacos.core.auth.ldap.password=admin
#nacos.core.auth.ldap.userdn=cn={0},dc=example,dc=org
#nacos.core.auth.ldap.filter.prefix=uid
#nacos.core.auth.ldap.case.sensitive=true
#*************** Istio Related Configurations ***************#
### If turn on the MCP server:
nacos.istio.mcp.server.enabled=false
###*************** Add from 1.3.0 ***************###
#*************** Core Related Configurations ***************#
### set the WorkerID manually
# nacos.core.snowflake.worker-id=
### Member-MetaData
# nacos.core.member.meta.site=
# nacos.core.member.meta.adweight=
# nacos.core.member.meta.weight=
### MemberLookup
### Addressing pattern category, If set, the priority is highest
# nacos.core.member.lookup.type=[file,address-server]
## Set the cluster list with a configuration file or command-line argument
# nacos.member.list=192.168.16.101:8847?raft_port=8807,192.168.16.101?raft_port=8808,192.168.16.101:8849?raft_port=8809
## for AddressServerMemberLookup
# Maximum number of retries to query the address server upon initialization
# nacos.core.address-server.retry=5
## Server domain name address of [address-server] mode
# address.server.domain=jmenv.tbsite.net
## Server port of [address-server] mode
# address.server.port=8080
## Request address of [address-server] mode
# address.server.url=/nacos/serverlist
#*************** JRaft Related Configurations ***************#
### Sets the Raft cluster election timeout, default value is 5 second
# nacos.core.protocol.raft.data.election_timeout_ms=5000
### Sets the amount of time the Raft snapshot will execute periodically, default is 30 minute
# nacos.core.protocol.raft.data.snapshot_interval_secs=30
### raft internal worker threads
# nacos.core.protocol.raft.data.core_thread_num=8
### Number of threads required for raft business request processing
# nacos.core.protocol.raft.data.cli_service_thread_num=4
### raft linear read strategy. Safe linear reads are used by default, that is, the Leader tenure is confirmed by heartbeat
# nacos.core.protocol.raft.data.read_index_type=ReadOnlySafe
### rpc request timeout, default 5 seconds
# nacos.core.protocol.raft.data.rpc_request_timeout_ms=5000
#*************** Distro Related Configurations ***************#
### Distro data sync delay time, when sync task delayed, task will be merged for same data key. Default 1 second.
# nacos.core.protocol.distro.data.sync.delayMs=1000
### Distro data sync timeout for one sync data, default 3 seconds.
# nacos.core.protocol.distro.data.sync.timeoutMs=3000
### Distro data sync retry delay time when sync data failed or timeout, same behavior with delayMs, default 3 seconds.
# nacos.core.protocol.distro.data.sync.retryDelayMs=3000
### Distro data verify interval, to check whether synced data has expired. Default 5 seconds.
# nacos.core.protocol.distro.data.verify.intervalMs=5000
### Distro data verify timeout for one verify, default 3 seconds.
# nacos.core.protocol.distro.data.verify.timeoutMs=3000
### Distro data load retry delay when load snapshot data failed, default 30 seconds.
# nacos.core.protocol.distro.data.load.retryDelayMs=30000
### enable to support prometheus service discovery
#nacos.prometheus.metrics.enabled=true
| nacos-docker/example/init.d/application.properties/0 | {
"file_path": "nacos-docker/example/init.d/application.properties",
"repo_id": "nacos-docker",
"token_count": 3073
} | 173 |
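The datasource block in this file uses Nacos's numbered-key convention: `db.num` gives the count, and `db.url.N`, `db.user.N`, `db.password.N` supply one datasource per index. The sketch below only demonstrates reading that convention with `java.util.Properties`; it is not how Nacos itself loads the file (Nacos goes through Spring's `Environment`), and the relative path is an assumption.

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

public class DatasourceKeysSketch {
  public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    // Illustrative path; inside the container the file sits under ${BASE_DIR}/conf/.
    try (FileInputStream in = new FileInputStream("conf/application.properties")) {
      props.load(in);
    }

    // db.num says how many numbered datasources follow.
    int dbCount = Integer.parseInt(props.getProperty("db.num", "1").trim());
    for (int i = 0; i < dbCount; i++) {
      String url = props.getProperty("db.url." + i);
      String user = props.getProperty("db.user." + i);
      System.out.printf("datasource %d -> %s (user=%s)%n", i, url, user);
    }
  }
}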
2024-06-12 10:08:08,258|5|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|unknown
2024-06-12 10:08:11,350|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:11:11,657|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:12:20,070|56|true|10.20.28.245|publish|remote.yml|DEFAULT_GROUP|pgvector|9bf4f8d111bb4bbc63de484dc04158f5|null
2024-06-12 10:12:20,211|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:12:20,293|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|9bf4f8d111bb4bbc63de484dc04158f5|unknown
2024-06-12 10:12:20,441|2|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|9bf4f8d111bb4bbc63de484dc04158f5|unknown
2024-06-12 10:12:20,512|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:12:33,297|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|9bf4f8d111bb4bbc63de484dc04158f5|unknown
2024-06-12 10:13:11,961|4|true|10.20.28.245|publish|remote.yml|DEFAULT_GROUP|pgvector|87694188aa4ef4e7b518d586a9aee34b|null
2024-06-12 10:13:22,619|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|87694188aa4ef4e7b518d586a9aee34b|unknown
2024-06-12 10:13:24,916|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:14:21,742|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|87694188aa4ef4e7b518d586a9aee34b|unknown
2024-06-12 10:14:25,007|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:17:25,396|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:19:04,265|1|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|87694188aa4ef4e7b518d586a9aee34b|unknown
2024-06-12 10:19:07,822|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:22:03,151|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:23:40,447|1|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|87694188aa4ef4e7b518d586a9aee34b|unknown
2024-06-12 10:23:42,829|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:26:43,198|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:28:00,451|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|87694188aa4ef4e7b518d586a9aee34b|unknown
2024-06-12 10:28:02,794|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:31:03,154|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:34:03,507|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:35:05,197|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|87694188aa4ef4e7b518d586a9aee34b|unknown
2024-06-12 10:35:07,510|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:35:46,285|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|87694188aa4ef4e7b518d586a9aee34b|unknown
2024-06-12 10:35:48,567|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:37:56,672|24|true|10.20.28.245|publish|remote.yml|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|null
2024-06-12 10:37:56,849|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:37:56,878|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|unknown
2024-06-12 10:37:57,079|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|unknown
2024-06-12 10:37:57,151|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:38:08,527|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|unknown
2024-06-12 10:38:10,881|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:41:11,231|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:44:11,571|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:47:11,939|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:50:12,282|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:53:12,550|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:56:12,891|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 10:59:13,187|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:02:13,463|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:05:13,791|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:08:14,118|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:11:14,454|1|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:14:14,839|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:17:15,170|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:20:15,455|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:21:56,562|1|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|unknown
2024-06-12 11:22:45,044|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|unknown
2024-06-12 11:23:15,770|0|200|172.20.192.1|listen|1|true|||unknown
2024-06-12 11:24:08,557|0|200|172.20.192.1|get|remote.yml|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|unknown
| nacos-docker/example/standalone-logs/config-client-request.log.2024-06-12.0/0 | {
"file_path": "nacos-docker/example/standalone-logs/config-client-request.log.2024-06-12.0",
"repo_id": "nacos-docker",
"token_count": 2510
} | 174 |
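Each line in this access log is pipe-delimited. Judging from the rows above, the fields appear to be: timestamp, cost in milliseconds, result (an HTTP-style status, or `true` for publishes), client IP, operation, dataId, group, namespace/tenant, content MD5, and request source — an inference from the data, not an official schema. A minimal parsing sketch:

public class ConfigRequestLogSketch {
  public static void main(String[] args) {
    String line = "2024-06-12 10:08:08,258|5|200|172.20.192.1|get|remote.yml"
        + "|DEFAULT_GROUP|pgvector|83267b8356f558ebb72b0da2ebdde5f0|unknown";

    // Limit -1 keeps trailing empty fields, which occur in lines such as "listen|1|true|||unknown".
    String[] f = line.split("\\|", -1);

    String timestamp = f[0];
    String costMs = f[1];
    String result = f[2];
    String clientIp = f[3];
    String operation = f[4];
    String dataId = f[5];
    String group = f[6];
    String tenant = f[7];
    String md5 = f[8];
    String source = f[9];

    System.out.printf("%s %s %s/%s/%s from %s in %sms (result=%s, md5=%s, source=%s)%n",
        timestamp, operation, tenant, group, dataId, clientIp, costMs, result, md5, source);
  }
}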
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|83267b8356f558ebb72b0da2ebdde5f0|2024-06-12 10:08:08
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|9bf4f8d111bb4bbc63de484dc04158f5|2024-06-12 10:12:20
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|9bf4f8d111bb4bbc63de484dc04158f5|2024-06-12 10:12:20
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|9bf4f8d111bb4bbc63de484dc04158f5|2024-06-12 10:12:33
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|87694188aa4ef4e7b518d586a9aee34b|2024-06-12 10:13:22
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|87694188aa4ef4e7b518d586a9aee34b|2024-06-12 10:14:21
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|87694188aa4ef4e7b518d586a9aee34b|2024-06-12 10:19:04
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|87694188aa4ef4e7b518d586a9aee34b|2024-06-12 10:23:40
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|87694188aa4ef4e7b518d586a9aee34b|2024-06-12 10:28:00
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|87694188aa4ef4e7b518d586a9aee34b|2024-06-12 10:35:05
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|87694188aa4ef4e7b518d586a9aee34b|2024-06-12 10:35:46
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|83267b8356f558ebb72b0da2ebdde5f0|2024-06-12 10:37:56
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|83267b8356f558ebb72b0da2ebdde5f0|2024-06-12 10:37:57
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|83267b8356f558ebb72b0da2ebdde5f0|2024-06-12 10:38:08
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|83267b8356f558ebb72b0da2ebdde5f0|2024-06-12 11:21:56
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|83267b8356f558ebb72b0da2ebdde5f0|2024-06-12 11:22:45
remote.yml+DEFAULT_GROUP+pgvector|172.20.192.1|83267b8356f558ebb72b0da2ebdde5f0|2024-06-12 11:24:08
| nacos-docker/example/standalone-logs/config-pull-check.log.2024-06-12.0/0 | {
"file_path": "nacos-docker/example/standalone-logs/config-pull-check.log.2024-06-12.0",
"repo_id": "nacos-docker",
"token_count": 908
} | 175 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ExternalStorageConfigurationManager" enabled="true" />
<component name="FrameworkDetectionExcludesConfiguration">
<file type="web" url="file://$PROJECT_DIR$" />
</component>
<component name="ProjectRootManager" version="2" languageLevel="JDK_21" default="true" project-jdk-name="corretto-21" project-jdk-type="JavaSDK">
<output url="file://$PROJECT_DIR$/out" />
</component>
</project> | pgvector/.idea/misc.xml/0 | {
"file_path": "pgvector/.idea/misc.xml",
"repo_id": "pgvector",
"token_count": 158
} | 176 |
package org.springframework.boot.autoconfigure.aop;

import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;

/**
 * Bean definitions for {@link AopAutoConfiguration}.
 */
@Generated
public class AopAutoConfiguration__BeanDefinitions {
  /**
   * Get the bean definition for 'aopAutoConfiguration'.
   */
  public static BeanDefinition getAopAutoConfigurationBeanDefinition() {
    RootBeanDefinition beanDefinition = new RootBeanDefinition(AopAutoConfiguration.class);
    beanDefinition.setInstanceSupplier(AopAutoConfiguration::new);
    return beanDefinition;
  }

  /**
   * Bean definitions for {@link AopAutoConfiguration.AspectJAutoProxyingConfiguration}.
   */
  @Generated
  public static class AspectJAutoProxyingConfiguration {
    /**
     * Get the bean definition for 'aspectJAutoProxyingConfiguration'.
     */
    public static BeanDefinition getAspectJAutoProxyingConfigurationBeanDefinition() {
      RootBeanDefinition beanDefinition = new RootBeanDefinition(AopAutoConfiguration.AspectJAutoProxyingConfiguration.class);
      beanDefinition.setInstanceSupplier(AopAutoConfiguration.AspectJAutoProxyingConfiguration::new);
      return beanDefinition;
    }

    /**
     * Bean definitions for {@link AopAutoConfiguration.AspectJAutoProxyingConfiguration.CglibAutoProxyConfiguration}.
     */
    @Generated
    public static class CglibAutoProxyConfiguration {
      /**
       * Get the bean definition for 'cglibAutoProxyConfiguration'.
       */
      public static BeanDefinition getCglibAutoProxyConfigurationBeanDefinition() {
        RootBeanDefinition beanDefinition = new RootBeanDefinition(AopAutoConfiguration.AspectJAutoProxyingConfiguration.CglibAutoProxyConfiguration.class);
        beanDefinition.setInstanceSupplier(AopAutoConfiguration.AspectJAutoProxyingConfiguration.CglibAutoProxyConfiguration::new);
        return beanDefinition;
      }
    }
  }
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/aop/AopAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/aop/AopAutoConfiguration__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 616
} | 177 |
package org.springframework.boot.autoconfigure.info;

import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;

/**
 * Bean definitions for {@link ProjectInfoProperties}.
 */
@Generated
public class ProjectInfoProperties__BeanDefinitions {
  /**
   * Get the bean definition for 'projectInfoProperties'.
   */
  public static BeanDefinition getProjectInfoPropertiesBeanDefinition() {
    RootBeanDefinition beanDefinition = new RootBeanDefinition(ProjectInfoProperties.class);
    beanDefinition.setInstanceSupplier(ProjectInfoProperties::new);
    return beanDefinition;
  }
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/info/ProjectInfoProperties__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/info/ProjectInfoProperties__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 198
} | 178 |