text | id | metadata | __index_level_0__
---|---|---|---|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
from paddle import ParamAttr
import paddle.nn.functional as F
from paddle.nn import Conv2D, MaxPool2D, AdaptiveAvgPool2D, BatchNorm2D
from paddle.nn.initializer import KaimingNormal
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register, serializable
from numbers import Integral
from ..shape_spec import ShapeSpec
from ppdet.modeling.ops import channel_shuffle
__all__ = ['ShuffleNetV2']
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=1,
act=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
weight_attr=ParamAttr(initializer=KaimingNormal()),
bias_attr=False)
self._batch_norm = BatchNorm2D(
out_channels,
weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
if act == "hard_swish":
act = 'hardswish'
self.act = act
def forward(self, inputs):
y = self._conv(inputs)
y = self._batch_norm(y)
if self.act:
y = getattr(F, self.act)(y)
return y
class InvertedResidual(nn.Layer):
def __init__(self, in_channels, out_channels, stride, act="relu"):
super(InvertedResidual, self).__init__()
self._conv_pw = ConvBNLayer(
in_channels=in_channels // 2,
out_channels=out_channels // 2,
kernel_size=1,
stride=1,
padding=0,
groups=1,
act=act)
self._conv_dw = ConvBNLayer(
in_channels=out_channels // 2,
out_channels=out_channels // 2,
kernel_size=3,
stride=stride,
padding=1,
groups=out_channels // 2,
act=None)
self._conv_linear = ConvBNLayer(
in_channels=out_channels // 2,
out_channels=out_channels // 2,
kernel_size=1,
stride=1,
padding=0,
groups=1,
act=act)
def forward(self, inputs):
x1, x2 = paddle.split(
inputs,
num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2],
axis=1)
x2 = self._conv_pw(x2)
x2 = self._conv_dw(x2)
x2 = self._conv_linear(x2)
out = paddle.concat([x1, x2], axis=1)
return channel_shuffle(out, 2)
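# Illustrative note (not part of the original file): channel_shuffle is imported from
# ppdet.modeling.ops; it interleaves channels across the two branches so information can
# mix between them. A minimal sketch of an equivalent implementation, assuming NCHW input:
#
#   def channel_shuffle(x, groups):
#       n, c, h, w = x.shape
#       x = paddle.reshape(x, [n, groups, c // groups, h, w])
#       x = paddle.transpose(x, [0, 2, 1, 3, 4])
#       return paddle.reshape(x, [n, c, h, w])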
class InvertedResidualDS(nn.Layer):
def __init__(self, in_channels, out_channels, stride, act="relu"):
super(InvertedResidualDS, self).__init__()
# branch1
self._conv_dw_1 = ConvBNLayer(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=3,
stride=stride,
padding=1,
groups=in_channels,
act=None)
self._conv_linear_1 = ConvBNLayer(
in_channels=in_channels,
out_channels=out_channels // 2,
kernel_size=1,
stride=1,
padding=0,
groups=1,
act=act)
# branch2
self._conv_pw_2 = ConvBNLayer(
in_channels=in_channels,
out_channels=out_channels // 2,
kernel_size=1,
stride=1,
padding=0,
groups=1,
act=act)
self._conv_dw_2 = ConvBNLayer(
in_channels=out_channels // 2,
out_channels=out_channels // 2,
kernel_size=3,
stride=stride,
padding=1,
groups=out_channels // 2,
act=None)
self._conv_linear_2 = ConvBNLayer(
in_channels=out_channels // 2,
out_channels=out_channels // 2,
kernel_size=1,
stride=1,
padding=0,
groups=1,
act=act)
def forward(self, inputs):
x1 = self._conv_dw_1(inputs)
x1 = self._conv_linear_1(x1)
x2 = self._conv_pw_2(inputs)
x2 = self._conv_dw_2(x2)
x2 = self._conv_linear_2(x2)
out = paddle.concat([x1, x2], axis=1)
return channel_shuffle(out, 2)
@register
@serializable
class ShuffleNetV2(nn.Layer):
def __init__(self, scale=1.0, act="relu", feature_maps=[5, 13, 17]):
super(ShuffleNetV2, self).__init__()
self.scale = scale
if isinstance(feature_maps, Integral):
feature_maps = [feature_maps]
self.feature_maps = feature_maps
stage_repeats = [4, 8, 4]
if scale == 0.25:
stage_out_channels = [-1, 24, 24, 48, 96, 512]
elif scale == 0.33:
stage_out_channels = [-1, 24, 32, 64, 128, 512]
elif scale == 0.5:
stage_out_channels = [-1, 24, 48, 96, 192, 1024]
elif scale == 1.0:
stage_out_channels = [-1, 24, 116, 232, 464, 1024]
elif scale == 1.5:
stage_out_channels = [-1, 24, 176, 352, 704, 1024]
elif scale == 2.0:
stage_out_channels = [-1, 24, 244, 488, 976, 2048]
else:
raise NotImplementedError("This scale size:[" + str(scale) +
"] is not implemented!")
self._out_channels = []
self._feature_idx = 0
# 1. conv1
self._conv1 = ConvBNLayer(
in_channels=3,
out_channels=stage_out_channels[1],
kernel_size=3,
stride=2,
padding=1,
act=act)
self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1)
self._feature_idx += 1
# 2. bottleneck sequences
self._block_list = []
for stage_id, num_repeat in enumerate(stage_repeats):
for i in range(num_repeat):
if i == 0:
block = self.add_sublayer(
name=str(stage_id + 2) + '_' + str(i + 1),
sublayer=InvertedResidualDS(
in_channels=stage_out_channels[stage_id + 1],
out_channels=stage_out_channels[stage_id + 2],
stride=2,
act=act))
else:
block = self.add_sublayer(
name=str(stage_id + 2) + '_' + str(i + 1),
sublayer=InvertedResidual(
in_channels=stage_out_channels[stage_id + 2],
out_channels=stage_out_channels[stage_id + 2],
stride=1,
act=act))
self._block_list.append(block)
self._feature_idx += 1
self._update_out_channels(stage_out_channels[stage_id + 2],
self._feature_idx, self.feature_maps)
def _update_out_channels(self, channel, feature_idx, feature_maps):
if feature_idx in feature_maps:
self._out_channels.append(channel)
def forward(self, inputs):
y = self._conv1(inputs['image'])
y = self._max_pool(y)
outs = []
for i, inv in enumerate(self._block_list):
y = inv(y)
if i + 2 in self.feature_maps:
outs.append(y)
return outs
@property
def out_shape(self):
return [ShapeSpec(channels=c) for c in self._out_channels]
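# Usage sketch (illustrative, not part of the original file). With scale=1.0 and the
# default feature_maps=[5, 13, 17], the backbone yields three feature maps with
# 116, 232 and 464 channels at strides 8, 16 and 32:
#
#   backbone = ShuffleNetV2(scale=1.0, feature_maps=[5, 13, 17])
#   feats = backbone({'image': paddle.randn([1, 3, 320, 320])})
#   # [f.shape for f in feats] -> [[1, 116, 40, 40], [1, 232, 20, 20], [1, 464, 10, 10]]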
| PaddleDetection/ppdet/modeling/backbones/shufflenet_v2.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/backbones/shufflenet_v2.py",
"repo_id": "PaddleDetection",
"token_count": 4470
} | 71 |
import math
import paddle
import numpy as np
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.initializer import normal_
from ppdet.modeling.lane_utils import Lane
from ppdet.modeling.losses import line_iou
from ppdet.modeling.clrnet_utils import ROIGather, LinearModule, SegDecoder
__all__ = ['CLRHead']
@register
class CLRHead(nn.Layer):
__inject__ = ['loss']
__shared__ = [
'img_w', 'img_h', 'ori_img_h', 'num_classes', 'cut_height',
'num_points', "max_lanes"
]
def __init__(self,
num_points=72,
prior_feat_channels=64,
fc_hidden_dim=64,
num_priors=192,
img_w=800,
img_h=320,
ori_img_h=590,
cut_height=270,
num_classes=5,
num_fc=2,
refine_layers=3,
sample_points=36,
conf_threshold=0.4,
nms_thres=0.5,
max_lanes=4,
loss='CLRNetLoss'):
super(CLRHead, self).__init__()
self.img_w = img_w
self.img_h = img_h
self.n_strips = num_points - 1
self.n_offsets = num_points
self.num_priors = num_priors
self.sample_points = sample_points
self.refine_layers = refine_layers
self.num_classes = num_classes
self.fc_hidden_dim = fc_hidden_dim
self.ori_img_h = ori_img_h
self.cut_height = cut_height
self.conf_threshold = conf_threshold
self.nms_thres = nms_thres
self.max_lanes = max_lanes
self.prior_feat_channels = prior_feat_channels
self.loss = loss
self.register_buffer(
name='sample_x_indexs',
tensor=(paddle.linspace(
start=0, stop=1, num=self.sample_points,
dtype=paddle.float32) * self.n_strips).astype(dtype='int64'))
self.register_buffer(
name='prior_feat_ys',
tensor=paddle.flip(
x=(1 - self.sample_x_indexs.astype('float32') / self.n_strips),
axis=[-1]))
self.register_buffer(
name='prior_ys',
tensor=paddle.linspace(
start=1, stop=0, num=self.n_offsets).astype('float32'))
self.prior_feat_channels = prior_feat_channels
self._init_prior_embeddings()
init_priors, priors_on_featmap = self.generate_priors_from_embeddings()
self.register_buffer(name='priors', tensor=init_priors)
self.register_buffer(name='priors_on_featmap', tensor=priors_on_featmap)
self.seg_decoder = SegDecoder(self.img_h, self.img_w, self.num_classes,
self.prior_feat_channels,
self.refine_layers)
reg_modules = list()
cls_modules = list()
for _ in range(num_fc):
reg_modules += [*LinearModule(self.fc_hidden_dim)]
cls_modules += [*LinearModule(self.fc_hidden_dim)]
self.reg_modules = nn.LayerList(sublayers=reg_modules)
self.cls_modules = nn.LayerList(sublayers=cls_modules)
self.roi_gather = ROIGather(self.prior_feat_channels, self.num_priors,
self.sample_points, self.fc_hidden_dim,
self.refine_layers)
self.reg_layers = nn.Linear(
in_features=self.fc_hidden_dim,
out_features=self.n_offsets + 1 + 2 + 1,
bias_attr=True)
self.cls_layers = nn.Linear(
in_features=self.fc_hidden_dim, out_features=2, bias_attr=True)
self.init_weights()
def init_weights(self):
for m in self.cls_layers.parameters():
normal_(m, mean=0.0, std=0.001)
for m in self.reg_layers.parameters():
normal_(m, mean=0.0, std=0.001)
def pool_prior_features(self, batch_features, num_priors, prior_xs):
"""
Pool prior features from the feature map via grid_sample.
Args:
batch_features (Tensor): Input feature maps, shape: (B, C, H, W)
num_priors (int): Number of lane priors.
prior_xs (Tensor): Normalized x coordinates of the sample points of each prior.
"""
batch_size = batch_features.shape[0]
prior_xs = prior_xs.reshape([batch_size, num_priors, -1, 1])
prior_ys = self.prior_feat_ys.tile(repeat_times=[
batch_size * num_priors
]).reshape([batch_size, num_priors, -1, 1])
prior_xs = prior_xs * 2.0 - 1.0
prior_ys = prior_ys * 2.0 - 1.0
grid = paddle.concat(x=(prior_xs, prior_ys), axis=-1)
feature = F.grid_sample(
x=batch_features, grid=grid,
align_corners=True).transpose(perm=[0, 2, 1, 3])
feature = feature.reshape([
batch_size * num_priors, self.prior_feat_channels,
self.sample_points, 1
])
return feature
def generate_priors_from_embeddings(self):
predictions = self.prior_embeddings.weight
# 2 scores, 1 start_y, 1 start_x, 1 theta, 1 length, 72 coordinates, score[0] = negative prob, score[1] = positive prob
priors = paddle.zeros(
(self.num_priors, 2 + 2 + 2 + self.n_offsets),
dtype=predictions.dtype)
priors[:, 2:5] = predictions.clone()
priors[:, 6:] = (
priors[:, 3].unsqueeze(1).clone().tile([1, self.n_offsets]) *
(self.img_w - 1) +
((1 - self.prior_ys.tile([self.num_priors, 1]) -
priors[:, 2].unsqueeze(1).clone().tile([1, self.n_offsets])) *
self.img_h / paddle.tan(x=priors[:, 4].unsqueeze(1).clone().tile(
[1, self.n_offsets]) * math.pi + 1e-05))) / (self.img_w - 1)
priors_on_featmap = paddle.index_select(
priors, 6 + self.sample_x_indexs, axis=-1)
return priors, priors_on_featmap
def _init_prior_embeddings(self):
self.prior_embeddings = nn.Embedding(self.num_priors, 3)
bottom_priors_nums = self.num_priors * 3 // 4
left_priors_nums, _ = self.num_priors // 8, self.num_priors // 8
strip_size = 0.5 / (left_priors_nums // 2 - 1)
bottom_strip_size = 1 / (bottom_priors_nums // 4 + 1)
with paddle.no_grad():
for i in range(left_priors_nums):
self.prior_embeddings.weight[i, 0] = i // 2 * strip_size
self.prior_embeddings.weight[i, 1] = 0.0
self.prior_embeddings.weight[i,
2] = 0.16 if i % 2 == 0 else 0.32
for i in range(left_priors_nums,
left_priors_nums + bottom_priors_nums):
self.prior_embeddings.weight[i, 0] = 0.0
self.prior_embeddings.weight[i, 1] = (
(i - left_priors_nums) // 4 + 1) * bottom_strip_size
self.prior_embeddings.weight[i, 2] = 0.2 * (i % 4 + 1)
for i in range(left_priors_nums + bottom_priors_nums,
self.num_priors):
self.prior_embeddings.weight[i, 0] = (
i - left_priors_nums - bottom_priors_nums) // 2 * strip_size
self.prior_embeddings.weight[i, 1] = 1.0
self.prior_embeddings.weight[i,
2] = 0.68 if i % 2 == 0 else 0.84
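# Layout note (added for clarity): each embedding row holds (start_y, start_x, theta),
# all normalized to [0, 1]. The first num_priors // 8 priors start on the left border
# (start_x = 0) with theta in {0.16, 0.32}, the next 3 * num_priors // 4 start on the
# bottom border (start_y = 0) with four theta values, and the remaining priors start on
# the right border (start_x = 1) with theta in {0.68, 0.84}, so the initial priors fan
# out across the image before being refined by the head.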
def forward(self, x, inputs=None):
"""
Take pyramid features as input to perform Cross Layer Refinement and finally output the prediction lanes.
Each feature is a 4D tensor.
Args:
x: input features (list[Tensor])
Return:
prediction_list: each layer's prediction result
seg: segmentation result for auxiliary loss
"""
batch_features = list(x[len(x) - self.refine_layers:])
batch_features.reverse()
batch_size = batch_features[-1].shape[0]
if self.training:
self.priors, self.priors_on_featmap = self.generate_priors_from_embeddings(
)
priors, priors_on_featmap = self.priors.tile(
[batch_size, 1,
1]), self.priors_on_featmap.tile([batch_size, 1, 1])
predictions_lists = []
prior_features_stages = []
for stage in range(self.refine_layers):
num_priors = priors_on_featmap.shape[1]
prior_xs = paddle.flip(x=priors_on_featmap, axis=[2])
batch_prior_features = self.pool_prior_features(
batch_features[stage], num_priors, prior_xs)
prior_features_stages.append(batch_prior_features)
fc_features = self.roi_gather(prior_features_stages,
batch_features[stage], stage)
# return fc_features
fc_features = fc_features.reshape(
[num_priors, batch_size, -1]).reshape(
[batch_size * num_priors, self.fc_hidden_dim])
cls_features = fc_features.clone()
reg_features = fc_features.clone()
for cls_layer in self.cls_modules:
cls_features = cls_layer(cls_features)
# return cls_features
for reg_layer in self.reg_modules:
reg_features = reg_layer(reg_features)
cls_logits = self.cls_layers(cls_features)
reg = self.reg_layers(reg_features)
cls_logits = cls_logits.reshape(
[batch_size, -1, cls_logits.shape[1]])
reg = reg.reshape([batch_size, -1, reg.shape[1]])
predictions = priors.clone()
predictions[:, :, :2] = cls_logits
predictions[:, :, 2:5] += reg[:, :, :3]
predictions[:, :, 5] = reg[:, :, 3]
def tran_tensor(t):
return t.unsqueeze(axis=2).clone().tile([1, 1, self.n_offsets])
predictions[..., 6:] = (
tran_tensor(predictions[..., 3]) * (self.img_w - 1) +
((1 - self.prior_ys.tile([batch_size, num_priors, 1]) -
tran_tensor(predictions[..., 2])) * self.img_h / paddle.tan(
tran_tensor(predictions[..., 4]) * math.pi + 1e-05))) / (
self.img_w - 1)
prediction_lines = predictions.clone()
predictions[..., 6:] += reg[..., 4:]
predictions_lists.append(predictions)
if stage != self.refine_layers - 1:
priors = prediction_lines.detach().clone()
priors_on_featmap = priors.index_select(
6 + self.sample_x_indexs, axis=-1)
if self.training:
seg = None
seg_features = paddle.concat(
[
F.interpolate(
feature,
size=[
batch_features[-1].shape[2],
batch_features[-1].shape[3]
],
mode='bilinear',
align_corners=False) for feature in batch_features
],
axis=1)
seg = self.seg_decoder(seg_features)
output = {'predictions_lists': predictions_lists, 'seg': seg}
return self.loss(output, inputs)
return predictions_lists[-1]
def predictions_to_pred(self, predictions):
"""
Convert predictions to internal Lane structure for evaluation.
"""
self.prior_ys = paddle.to_tensor(self.prior_ys)
self.prior_ys = self.prior_ys.astype('float64')
lanes = []
for lane in predictions:
lane_xs = lane[6:].clone()
start = min(
max(0, int(round(lane[2].item() * self.n_strips))),
self.n_strips)
length = int(round(lane[5].item()))
end = start + length - 1
end = min(end, len(self.prior_ys) - 1)
if start > 0:
mask = ((lane_xs[:start] >= 0.) &
(lane_xs[:start] <= 1.)).cpu().detach().numpy()[::-1]
mask = ~((mask.cumprod()[::-1]).astype(np.bool_))
lane_xs[:start][mask] = -2
if end < len(self.prior_ys) - 1:
lane_xs[end + 1:] = -2
lane_ys = self.prior_ys[lane_xs >= 0].clone()
lane_xs = lane_xs[lane_xs >= 0]
lane_xs = lane_xs.flip(axis=0).astype('float64')
lane_ys = lane_ys.flip(axis=0)
lane_ys = (lane_ys *
(self.ori_img_h - self.cut_height) + self.cut_height
) / self.ori_img_h
if len(lane_xs) <= 1:
continue
points = paddle.stack(
x=(lane_xs.reshape([-1, 1]), lane_ys.reshape([-1, 1])),
axis=1).squeeze(axis=2)
lane = Lane(
points=points.cpu().numpy(),
metadata={
'start_x': lane[3],
'start_y': lane[2],
'conf': lane[1]
})
lanes.append(lane)
return lanes
def lane_nms(self, predictions, scores, nms_overlap_thresh, top_k):
"""
NMS for lane detection.
predictions: paddle.Tensor [num_lanes, conf, y, x, length, 72 offsets] [12, 77]
scores: paddle.Tensor [num_lanes]
nms_overlap_thresh: float
top_k: int
"""
# sort by scores to get idx
idx = scores.argsort(descending=True)
keep = []
candidates = predictions.clone()
candidates = candidates.index_select(idx)
while len(candidates) > 0:
keep.append(idx[0])
if len(keep) >= top_k or len(candidates) == 1:
break
ious = []
for i in range(1, len(candidates)):
ious.append(1 - line_iou(
candidates[i].unsqueeze(0),
candidates[0].unsqueeze(0),
img_w=self.img_w,
length=15))
ious = paddle.to_tensor(ious)
mask = ious <= nms_overlap_thresh
id = paddle.where(mask == False)[0]
if id.shape[0] == 0:
break
candidates = candidates[1:].index_select(id)
idx = idx[1:].index_select(id)
keep = paddle.stack(keep)
return keep
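# Note on line_iou (an assumption about ppdet.modeling.losses.line_iou, stated here for
# readability): each lane is treated as a set of horizontal segments of half-width
# `length` pixels around its per-row x offsets, and the IoU is the ratio of summed
# segment intersections to summed unions, so `1 - line_iou(...)` above acts as a
# distance between two lanes.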
def get_lanes(self, output, as_lanes=True):
"""
Convert model output to lanes.
"""
softmax = nn.Softmax(axis=1)
decoded = []
for predictions in output:
threshold = self.conf_threshold
scores = softmax(predictions[:, :2])[:, 1]
keep_inds = scores >= threshold
predictions = predictions[keep_inds]
scores = scores[keep_inds]
if predictions.shape[0] == 0:
decoded.append([])
continue
nms_predictions = predictions.detach().clone()
nms_predictions = paddle.concat(
x=[nms_predictions[..., :4], nms_predictions[..., 5:]], axis=-1)
nms_predictions[..., 4] = nms_predictions[..., 4] * self.n_strips
nms_predictions[..., 5:] = nms_predictions[..., 5:] * (
self.img_w - 1)
keep = self.lane_nms(
nms_predictions[..., 5:],
scores,
nms_overlap_thresh=self.nms_thres,
top_k=self.max_lanes)
predictions = predictions.index_select(keep)
if predictions.shape[0] == 0:
decoded.append([])
continue
predictions[:, 5] = paddle.round(predictions[:, 5] * self.n_strips)
if as_lanes:
pred = self.predictions_to_pred(predictions)
else:
pred = predictions
decoded.append(pred)
return decoded
| PaddleDetection/ppdet/modeling/heads/clrnet_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/heads/clrnet_head.py",
"repo_id": "PaddleDetection",
"token_count": 8628
} | 72 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The code is based on:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/yolox_head.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from functools import partial
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Normal, Constant
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import distance2bbox, bbox2distance
from ppdet.data.transform.atss_assigner import bbox_overlaps
from .gfl_head import GFLHead
@register
class OTAHead(GFLHead):
"""
OTAHead
Args:
conv_feat (object): Instance of 'FCOSFeat'
num_classes (int): Number of classes
fpn_stride (list): The stride of each FPN Layer
prior_prob (float): Used to set the bias init for the class prediction layer
loss_qfl (object): Instance of QualityFocalLoss.
loss_dfl (object): Instance of DistributionFocalLoss.
loss_bbox (object): Instance of bbox loss.
assigner (object): Instance of label assigner.
reg_max (int): Max value of integral set :math:`{0, ..., reg_max}`
in QFL setting. Default: 16.
"""
__inject__ = [
'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
'assigner', 'nms'
]
__shared__ = ['num_classes']
def __init__(self,
conv_feat='FCOSFeat',
dgqp_module=None,
num_classes=80,
fpn_stride=[8, 16, 32, 64, 128],
prior_prob=0.01,
loss_class='QualityFocalLoss',
loss_dfl='DistributionFocalLoss',
loss_bbox='GIoULoss',
assigner='SimOTAAssigner',
reg_max=16,
feat_in_chan=256,
nms=None,
nms_pre=1000,
cell_offset=0):
super(OTAHead, self).__init__(
conv_feat=conv_feat,
dgqp_module=dgqp_module,
num_classes=num_classes,
fpn_stride=fpn_stride,
prior_prob=prior_prob,
loss_class=loss_class,
loss_dfl=loss_dfl,
loss_bbox=loss_bbox,
reg_max=reg_max,
feat_in_chan=feat_in_chan,
nms=nms,
nms_pre=nms_pre,
cell_offset=cell_offset)
self.conv_feat = conv_feat
self.dgqp_module = dgqp_module
self.num_classes = num_classes
self.fpn_stride = fpn_stride
self.prior_prob = prior_prob
self.loss_qfl = loss_class
self.loss_dfl = loss_dfl
self.loss_bbox = loss_bbox
self.reg_max = reg_max
self.feat_in_chan = feat_in_chan
self.nms = nms
self.nms_pre = nms_pre
self.cell_offset = cell_offset
self.use_sigmoid = self.loss_qfl.use_sigmoid
self.assigner = assigner
def _get_target_single(self, flatten_cls_pred, flatten_center_and_stride,
flatten_bbox, gt_bboxes, gt_labels):
"""Compute targets for priors in a single image.
"""
pos_num, label, label_weight, bbox_target = self.assigner(
F.sigmoid(flatten_cls_pred), flatten_center_and_stride,
flatten_bbox, gt_bboxes, gt_labels)
return (pos_num, label, label_weight, bbox_target)
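# Background note (not part of the original file): SimOTAAssigner follows the SimOTA
# scheme introduced in YOLOX. For every gt box it builds a cost matrix over candidate
# priors near the gt center, combining classification cost and IoU cost, estimates a
# dynamic k from the sum of the highest IoUs, and marks the k lowest-cost priors as
# positives; everything else is labeled background.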
def get_loss(self, head_outs, gt_meta):
cls_scores, bbox_preds = head_outs
num_level_anchors = [
featmap.shape[-2] * featmap.shape[-1] for featmap in cls_scores
]
num_imgs = gt_meta['im_id'].shape[0]
featmap_sizes = [[featmap.shape[-2], featmap.shape[-1]]
for featmap in cls_scores]
decode_bbox_preds = []
center_and_strides = []
for featmap_size, stride, bbox_pred in zip(featmap_sizes,
self.fpn_stride, bbox_preds):
# center in origin image
yy, xx = self.get_single_level_center_point(featmap_size, stride,
self.cell_offset)
center_and_stride = paddle.stack([xx, yy, stride, stride], -1).tile(
[num_imgs, 1, 1])
center_and_strides.append(center_and_stride)
center_in_feature = center_and_stride.reshape(
[-1, 4])[:, :-2] / stride
bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
[num_imgs, -1, 4 * (self.reg_max + 1)])
pred_distances = self.distribution_project(bbox_pred)
decode_bbox_pred_wo_stride = distance2bbox(
center_in_feature, pred_distances).reshape([num_imgs, -1, 4])
decode_bbox_preds.append(decode_bbox_pred_wo_stride * stride)
flatten_cls_preds = [
cls_pred.transpose([0, 2, 3, 1]).reshape(
[num_imgs, -1, self.cls_out_channels])
for cls_pred in cls_scores
]
flatten_cls_preds = paddle.concat(flatten_cls_preds, axis=1)
flatten_bboxes = paddle.concat(decode_bbox_preds, axis=1)
flatten_center_and_strides = paddle.concat(center_and_strides, axis=1)
gt_boxes, gt_labels = gt_meta['gt_bbox'], gt_meta['gt_class']
pos_num_l, label_l, label_weight_l, bbox_target_l = [], [], [], []
for flatten_cls_pred,flatten_center_and_stride,flatten_bbox,gt_box, gt_label \
in zip(flatten_cls_preds.detach(),flatten_center_and_strides.detach(), \
flatten_bboxes.detach(),gt_boxes, gt_labels):
pos_num, label, label_weight, bbox_target = self._get_target_single(
flatten_cls_pred, flatten_center_and_stride, flatten_bbox,
gt_box, gt_label)
pos_num_l.append(pos_num)
label_l.append(label)
label_weight_l.append(label_weight)
bbox_target_l.append(bbox_target)
labels = paddle.to_tensor(np.stack(label_l, axis=0))
label_weights = paddle.to_tensor(np.stack(label_weight_l, axis=0))
bbox_targets = paddle.to_tensor(np.stack(bbox_target_l, axis=0))
center_and_strides_list = self._images_to_levels(
flatten_center_and_strides, num_level_anchors)
labels_list = self._images_to_levels(labels, num_level_anchors)
label_weights_list = self._images_to_levels(label_weights,
num_level_anchors)
bbox_targets_list = self._images_to_levels(bbox_targets,
num_level_anchors)
num_total_pos = sum(pos_num_l)
try:
paddle.distributed.all_reduce(paddle.to_tensor(num_total_pos))
num_total_pos = paddle.clip(
num_total_pos / paddle.distributed.get_world_size(), min=1.)
except:
num_total_pos = max(num_total_pos, 1)
loss_bbox_list, loss_dfl_list, loss_qfl_list, avg_factor = [], [], [], []
for cls_score, bbox_pred, center_and_strides, labels, label_weights, bbox_targets, stride in zip(
cls_scores, bbox_preds, center_and_strides_list, labels_list,
label_weights_list, bbox_targets_list, self.fpn_stride):
center_and_strides = center_and_strides.reshape([-1, 4])
cls_score = cls_score.transpose([0, 2, 3, 1]).reshape(
[-1, self.cls_out_channels])
bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
[-1, 4 * (self.reg_max + 1)])
bbox_targets = bbox_targets.reshape([-1, 4])
labels = labels.reshape([-1])
label_weights = label_weights.reshape([-1])
bg_class_ind = self.num_classes
pos_inds = paddle.nonzero(
paddle.logical_and((labels >= 0), (labels < bg_class_ind)),
as_tuple=False).squeeze(1)
score = np.zeros(labels.shape)
if len(pos_inds) > 0:
pos_bbox_targets = paddle.gather(bbox_targets, pos_inds, axis=0)
pos_bbox_pred = paddle.gather(bbox_pred, pos_inds, axis=0)
pos_centers = paddle.gather(
center_and_strides[:, :-2], pos_inds, axis=0) / stride
weight_targets = F.sigmoid(cls_score.detach())
weight_targets = paddle.gather(
weight_targets.max(axis=1, keepdim=True), pos_inds, axis=0)
pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)
pos_decode_bbox_pred = distance2bbox(pos_centers,
pos_bbox_pred_corners)
pos_decode_bbox_targets = pos_bbox_targets / stride
bbox_iou = bbox_overlaps(
pos_decode_bbox_pred.detach().numpy(),
pos_decode_bbox_targets.detach().numpy(),
is_aligned=True)
score[pos_inds.numpy()] = bbox_iou
pred_corners = pos_bbox_pred.reshape([-1, self.reg_max + 1])
target_corners = bbox2distance(pos_centers,
pos_decode_bbox_targets,
self.reg_max).reshape([-1])
# regression loss
loss_bbox = paddle.sum(
self.loss_bbox(pos_decode_bbox_pred,
pos_decode_bbox_targets) * weight_targets)
# dfl loss
loss_dfl = self.loss_dfl(
pred_corners,
target_corners,
weight=weight_targets.expand([-1, 4]).reshape([-1]),
avg_factor=4.0)
else:
loss_bbox = bbox_pred.sum() * 0
loss_dfl = bbox_pred.sum() * 0
weight_targets = paddle.to_tensor([0], dtype='float32')
# qfl loss
score = paddle.to_tensor(score)
loss_qfl = self.loss_qfl(
cls_score, (labels, score),
weight=label_weights,
avg_factor=num_total_pos)
loss_bbox_list.append(loss_bbox)
loss_dfl_list.append(loss_dfl)
loss_qfl_list.append(loss_qfl)
avg_factor.append(weight_targets.sum())
avg_factor = sum(avg_factor)
try:
paddle.distributed.all_reduce(paddle.to_tensor(avg_factor))
avg_factor = paddle.clip(
avg_factor / paddle.distributed.get_world_size(), min=1)
except:
avg_factor = max(avg_factor.item(), 1)
if avg_factor <= 0:
loss_qfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
loss_bbox = paddle.to_tensor(
0, dtype='float32', stop_gradient=False)
loss_dfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
else:
losses_bbox = list(map(lambda x: x / avg_factor, loss_bbox_list))
losses_dfl = list(map(lambda x: x / avg_factor, loss_dfl_list))
loss_qfl = sum(loss_qfl_list)
loss_bbox = sum(losses_bbox)
loss_dfl = sum(losses_dfl)
loss_states = dict(
loss_qfl=loss_qfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
return loss_states
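# Summary (added for clarity): OTAHead is trained with three terms -- a quality focal
# loss on the classification branch, a GIoU loss on the decoded boxes and a distribution
# focal loss on the discretized offsets. The QFL term is averaged over the number of
# positive samples synchronized across GPUs, while the GIoU and DFL terms are normalized
# by the summed quality weights (avg_factor), i.e. the max sigmoid class scores of the
# positive samples.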
@register
class OTAVFLHead(OTAHead):
__inject__ = [
'conv_feat', 'dgqp_module', 'loss_class', 'loss_dfl', 'loss_bbox',
'assigner', 'nms'
]
__shared__ = ['num_classes']
def __init__(self,
conv_feat='FCOSFeat',
dgqp_module=None,
num_classes=80,
fpn_stride=[8, 16, 32, 64, 128],
prior_prob=0.01,
loss_class='VarifocalLoss',
loss_dfl='DistributionFocalLoss',
loss_bbox='GIoULoss',
assigner='SimOTAAssigner',
reg_max=16,
feat_in_chan=256,
nms=None,
nms_pre=1000,
cell_offset=0):
super(OTAVFLHead, self).__init__(
conv_feat=conv_feat,
dgqp_module=dgqp_module,
num_classes=num_classes,
fpn_stride=fpn_stride,
prior_prob=prior_prob,
loss_class=loss_class,
loss_dfl=loss_dfl,
loss_bbox=loss_bbox,
reg_max=reg_max,
feat_in_chan=feat_in_chan,
nms=nms,
nms_pre=nms_pre,
cell_offset=cell_offset)
self.conv_feat = conv_feat
self.dgqp_module = dgqp_module
self.num_classes = num_classes
self.fpn_stride = fpn_stride
self.prior_prob = prior_prob
self.loss_vfl = loss_class
self.loss_dfl = loss_dfl
self.loss_bbox = loss_bbox
self.reg_max = reg_max
self.feat_in_chan = feat_in_chan
self.nms = nms
self.nms_pre = nms_pre
self.cell_offset = cell_offset
self.use_sigmoid = self.loss_vfl.use_sigmoid
self.assigner = assigner
def get_loss(self, head_outs, gt_meta):
cls_scores, bbox_preds = head_outs
num_level_anchors = [
featmap.shape[-2] * featmap.shape[-1] for featmap in cls_scores
]
num_imgs = gt_meta['im_id'].shape[0]
featmap_sizes = [[featmap.shape[-2], featmap.shape[-1]]
for featmap in cls_scores]
decode_bbox_preds = []
center_and_strides = []
for featmap_size, stride, bbox_pred in zip(featmap_sizes,
self.fpn_stride, bbox_preds):
# center in origin image
yy, xx = self.get_single_level_center_point(featmap_size, stride,
self.cell_offset)
strides = paddle.full((len(xx), ), stride)
center_and_stride = paddle.stack([xx, yy, strides, strides],
-1).tile([num_imgs, 1, 1])
center_and_strides.append(center_and_stride)
center_in_feature = center_and_stride.reshape(
[-1, 4])[:, :-2] / stride
bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
[num_imgs, -1, 4 * (self.reg_max + 1)])
pred_distances = self.distribution_project(bbox_pred)
decode_bbox_pred_wo_stride = distance2bbox(
center_in_feature, pred_distances).reshape([num_imgs, -1, 4])
decode_bbox_preds.append(decode_bbox_pred_wo_stride * stride)
flatten_cls_preds = [
cls_pred.transpose([0, 2, 3, 1]).reshape(
[num_imgs, -1, self.cls_out_channels])
for cls_pred in cls_scores
]
flatten_cls_preds = paddle.concat(flatten_cls_preds, axis=1)
flatten_bboxes = paddle.concat(decode_bbox_preds, axis=1)
flatten_center_and_strides = paddle.concat(center_and_strides, axis=1)
gt_boxes, gt_labels = gt_meta['gt_bbox'], gt_meta['gt_class']
pos_num_l, label_l, label_weight_l, bbox_target_l = [], [], [], []
for flatten_cls_pred, flatten_center_and_stride, flatten_bbox,gt_box,gt_label \
in zip(flatten_cls_preds.detach(), flatten_center_and_strides.detach(), \
flatten_bboxes.detach(),gt_boxes,gt_labels):
pos_num, label, label_weight, bbox_target = self._get_target_single(
flatten_cls_pred, flatten_center_and_stride, flatten_bbox,
gt_box, gt_label)
pos_num_l.append(pos_num)
label_l.append(label)
label_weight_l.append(label_weight)
bbox_target_l.append(bbox_target)
labels = paddle.to_tensor(np.stack(label_l, axis=0))
label_weights = paddle.to_tensor(np.stack(label_weight_l, axis=0))
bbox_targets = paddle.to_tensor(np.stack(bbox_target_l, axis=0))
center_and_strides_list = self._images_to_levels(
flatten_center_and_strides, num_level_anchors)
labels_list = self._images_to_levels(labels, num_level_anchors)
label_weights_list = self._images_to_levels(label_weights,
num_level_anchors)
bbox_targets_list = self._images_to_levels(bbox_targets,
num_level_anchors)
num_total_pos = sum(pos_num_l)
try:
paddle.distributed.all_reduce(paddle.to_tensor(num_total_pos))
num_total_pos = paddle.clip(
num_total_pos / paddle.distributed.get_world_size(), min=1.)
except:
num_total_pos = max(num_total_pos, 1)
loss_bbox_list, loss_dfl_list, loss_vfl_list, avg_factor = [], [], [], []
for cls_score, bbox_pred, center_and_strides, labels, label_weights, bbox_targets, stride in zip(
cls_scores, bbox_preds, center_and_strides_list, labels_list,
label_weights_list, bbox_targets_list, self.fpn_stride):
center_and_strides = center_and_strides.reshape([-1, 4])
cls_score = cls_score.transpose([0, 2, 3, 1]).reshape(
[-1, self.cls_out_channels])
bbox_pred = bbox_pred.transpose([0, 2, 3, 1]).reshape(
[-1, 4 * (self.reg_max + 1)])
bbox_targets = bbox_targets.reshape([-1, 4])
labels = labels.reshape([-1])
bg_class_ind = self.num_classes
pos_inds = paddle.nonzero(
paddle.logical_and((labels >= 0), (labels < bg_class_ind)),
as_tuple=False).squeeze(1)
# vfl
vfl_score = np.zeros(cls_score.shape)
if len(pos_inds) > 0:
pos_bbox_targets = paddle.gather(bbox_targets, pos_inds, axis=0)
pos_bbox_pred = paddle.gather(bbox_pred, pos_inds, axis=0)
pos_centers = paddle.gather(
center_and_strides[:, :-2], pos_inds, axis=0) / stride
weight_targets = F.sigmoid(cls_score.detach())
weight_targets = paddle.gather(
weight_targets.max(axis=1, keepdim=True), pos_inds, axis=0)
pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)
pos_decode_bbox_pred = distance2bbox(pos_centers,
pos_bbox_pred_corners)
pos_decode_bbox_targets = pos_bbox_targets / stride
bbox_iou = bbox_overlaps(
pos_decode_bbox_pred.detach().numpy(),
pos_decode_bbox_targets.detach().numpy(),
is_aligned=True)
# vfl
pos_labels = paddle.gather(labels, pos_inds, axis=0)
vfl_score[pos_inds.numpy(), pos_labels] = bbox_iou
pred_corners = pos_bbox_pred.reshape([-1, self.reg_max + 1])
target_corners = bbox2distance(pos_centers,
pos_decode_bbox_targets,
self.reg_max).reshape([-1])
# regression loss
loss_bbox = paddle.sum(
self.loss_bbox(pos_decode_bbox_pred,
pos_decode_bbox_targets) * weight_targets)
# dfl loss
loss_dfl = self.loss_dfl(
pred_corners,
target_corners,
weight=weight_targets.expand([-1, 4]).reshape([-1]),
avg_factor=4.0)
else:
loss_bbox = bbox_pred.sum() * 0
loss_dfl = bbox_pred.sum() * 0
weight_targets = paddle.to_tensor([0], dtype='float32')
# vfl loss
num_pos_avg_per_gpu = num_total_pos
vfl_score = paddle.to_tensor(vfl_score)
loss_vfl = self.loss_vfl(
cls_score, vfl_score, avg_factor=num_pos_avg_per_gpu)
loss_bbox_list.append(loss_bbox)
loss_dfl_list.append(loss_dfl)
loss_vfl_list.append(loss_vfl)
avg_factor.append(weight_targets.sum())
avg_factor = sum(avg_factor)
try:
paddle.distributed.all_reduce(paddle.to_tensor(avg_factor))
avg_factor = paddle.clip(
avg_factor / paddle.distributed.get_world_size(), min=1)
except:
avg_factor = max(avg_factor.item(), 1)
if avg_factor <= 0:
loss_vfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
loss_bbox = paddle.to_tensor(
0, dtype='float32', stop_gradient=False)
loss_dfl = paddle.to_tensor(0, dtype='float32', stop_gradient=False)
else:
losses_bbox = list(map(lambda x: x / avg_factor, loss_bbox_list))
losses_dfl = list(map(lambda x: x / avg_factor, loss_dfl_list))
loss_vfl = sum(loss_vfl_list)
loss_bbox = sum(losses_bbox)
loss_dfl = sum(losses_dfl)
loss_states = dict(
loss_vfl=loss_vfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
return loss_states
| PaddleDetection/ppdet/modeling/heads/simota_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/heads/simota_head.py",
"repo_id": "PaddleDetection",
"token_count": 12041
} | 73 |
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.clrnet_utils import accuracy
from ppdet.modeling.assigners.clrnet_assigner import assign
from ppdet.modeling.losses.clrnet_line_iou_loss import liou_loss
__all__ = ['CLRNetLoss']
class SoftmaxFocalLoss(nn.Layer):
def __init__(self, gamma, ignore_lb=255, *args, **kwargs):
super(SoftmaxFocalLoss, self).__init__()
self.gamma = gamma
self.nll = nn.NLLLoss(ignore_index=ignore_lb)
def forward(self, logits, labels):
scores = F.softmax(logits, axis=1)
factor = paddle.pow(1. - scores, self.gamma)
log_score = F.log_softmax(logits, axis=1)
log_score = factor * log_score
loss = self.nll(log_score, labels)
return loss
def focal_loss(input: paddle.Tensor,
target: paddle.Tensor,
alpha: float,
gamma: float=2.0,
reduction: str='none',
eps: float=1e-8) -> paddle.Tensor:
r"""Function that computes Focal loss.
See :class:`FocalLoss` below for details.
"""
if not paddle.is_tensor(input):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(input)))
if not len(input.shape) >= 2:
raise ValueError("Invalid input shape, we expect BxCx*. Got: {}".format(
input.shape))
if input.shape[0] != target.shape[0]:
raise ValueError(
'Expected input batch_size ({}) to match target batch_size ({}).'.
format(input.shape[0], target.shape[0]))
n = input.shape[0]
out_size = (n, ) + tuple(input.shape[2:])
if target.shape[1:] != input.shape[2:]:
raise ValueError('Expected target size {}, got {}'.format(out_size,
target.shape))
if (isinstance(input.place, paddle.CUDAPlace) and
isinstance(target.place, paddle.CPUPlace)) | (isinstance(
input.place, paddle.CPUPlace) and isinstance(target.place,
paddle.CUDAPlace)):
raise ValueError(
"input and target must be in the same device. Got: {} and {}".
format(input.place, target.place))
# compute softmax over the classes axis
input_soft: paddle.Tensor = F.softmax(input, axis=1) + eps
# create the labels one hot tensor
target_one_hot: paddle.Tensor = paddle.to_tensor(
F.one_hot(
target, num_classes=input.shape[1]).cast(input.dtype),
place=input.place)
# compute the actual focal loss
weight = paddle.pow(-input_soft + 1., gamma)
focal = -alpha * weight * paddle.log(input_soft)
loss_tmp = paddle.sum(target_one_hot * focal, axis=1)
if reduction == 'none':
loss = loss_tmp
elif reduction == 'mean':
loss = paddle.mean(loss_tmp)
elif reduction == 'sum':
loss = paddle.sum(loss_tmp)
else:
raise NotImplementedError("Invalid reduction mode: {}".format(
reduction))
return loss
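# Worked example (added for illustration, alpha=0.25, gamma=2): a well-classified sample
# with p_t = 0.9 gets modulating factor (1 - p_t)^2 = 0.01, so its loss is roughly
# 0.25 * 0.01 * -log(0.9) ~= 2.6e-4, while a hard sample with p_t = 0.1 contributes about
# 0.25 * 0.81 * -log(0.1) ~= 0.47; easy examples are therefore strongly down-weighted.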
class FocalLoss(nn.Layer):
r"""Criterion that computes Focal loss.
According to [1], the Focal loss is computed as follows:
.. math::
\text{FL}(p_t) = -\alpha_t (1 - p_t)^{\gamma} \, \text{log}(p_t)
where:
- :math:`p_t` is the model's estimated probability for each class.
Arguments:
alpha (float): Weighting factor :math:`\alpha \in [0, 1]`.
gamma (float): Focusing parameter :math:`\gamma >= 0`.
reduction (str, optional): Specifies the reduction to apply to the
output: ‘none’ | ‘mean’ | ‘sum’. ‘none’: no reduction will be applied,
‘mean’: the sum of the output will be divided by the number of elements
in the output, ‘sum’: the output will be summed. Default: ‘none’.
Shape:
- Input: :math:`(N, C, *)` where C = number of classes.
- Target: :math:`(N, *)` where each value is
:math:`0 ≤ targets[i] ≤ C−1`.
Examples:
>>> N = 5 # num_classes
>>> kwargs = {"alpha": 0.5, "gamma": 2.0, "reduction": 'mean'}
>>> loss = FocalLoss(**kwargs)
>>> input = paddle.randn([1, N, 3, 5])
>>> input.stop_gradient = False
>>> target = paddle.randint(0, N, [1, 3, 5], dtype='int64')
>>> output = loss(input, target)
>>> output.backward()
References:
[1] https://arxiv.org/abs/1708.02002
"""
def __init__(self, alpha: float, gamma: float=2.0,
reduction: str='none') -> None:
super(FocalLoss, self).__init__()
self.alpha: float = alpha
self.gamma: float = gamma
self.reduction: str = reduction
self.eps: float = 1e-6
def forward( # type: ignore
self, input: paddle.Tensor, target: paddle.Tensor) -> paddle.Tensor:
return focal_loss(input, target, self.alpha, self.gamma, self.reduction,
self.eps)
@register
class CLRNetLoss(nn.Layer):
__shared__ = ['img_w', 'img_h', 'num_classes', 'num_points']
def __init__(self,
cls_loss_weight=2.0,
xyt_loss_weight=0.2,
iou_loss_weight=2.0,
seg_loss_weight=1.0,
refine_layers=3,
num_points=72,
img_w=800,
img_h=320,
num_classes=5,
ignore_label=255,
bg_weight=0.4):
super(CLRNetLoss, self).__init__()
self.cls_loss_weight = cls_loss_weight
self.xyt_loss_weight = xyt_loss_weight
self.iou_loss_weight = iou_loss_weight
self.seg_loss_weight = seg_loss_weight
self.refine_layers = refine_layers
self.img_w = img_w
self.img_h = img_h
self.n_strips = num_points - 1
self.num_classes = num_classes
self.ignore_label = ignore_label
weights = paddle.ones(shape=[self.num_classes])
weights[0] = bg_weight
self.criterion = nn.NLLLoss(
ignore_index=self.ignore_label, weight=weights)
def forward(self, output, batch):
predictions_lists = output['predictions_lists']
targets = batch['lane_line'].clone()
cls_criterion = FocalLoss(alpha=0.25, gamma=2.0)
cls_loss = paddle.to_tensor(0.0)
reg_xytl_loss = paddle.to_tensor(0.0)
iou_loss = paddle.to_tensor(0.0)
cls_acc = []
cls_acc_stage = []
for stage in range(self.refine_layers):
predictions_list = predictions_lists[stage]
for predictions, target in zip(predictions_list, targets):
target = target[target[:, 1] == 1]
if len(target) == 0:
# If there are no targets, all predictions have to be negatives (i.e., 0 confidence)
cls_target = paddle.zeros(
[predictions.shape[0]], dtype='int64')
cls_pred = predictions[:, :2]
cls_loss = cls_loss + cls_criterion(cls_pred,
cls_target).sum()
continue
with paddle.no_grad():
matched_row_inds, matched_col_inds = assign(
predictions, target, self.img_w, self.img_h)
# classification targets
cls_target = paddle.zeros([predictions.shape[0]], dtype='int64')
cls_target[matched_row_inds] = 1
cls_pred = predictions[:, :2]
# regression targets -> [start_y, start_x, theta, length] (all transformed to absolute values), only on matched pairs
reg_yxtl = predictions.index_select(matched_row_inds)[..., 2:6]
reg_yxtl[:, 0] *= self.n_strips
reg_yxtl[:, 1] *= (self.img_w - 1)
reg_yxtl[:, 2] *= 180
reg_yxtl[:, 3] *= self.n_strips
target_yxtl = target.index_select(matched_col_inds)[..., 2:
6].clone()
# regression targets -> S coordinates (all transformed to absolute values)
reg_pred = predictions.index_select(matched_row_inds)[..., 6:]
reg_pred *= (self.img_w - 1)
reg_targets = target.index_select(matched_col_inds)[...,
6:].clone()
with paddle.no_grad():
predictions_starts = paddle.clip(
(predictions.index_select(matched_row_inds)[..., 2] *
self.n_strips).round().cast("int64"),
min=0,
max=self.
n_strips) # ensure the predictions starts is valid
target_starts = (
target.index_select(matched_col_inds)[..., 2] *
self.n_strips).round().cast("int64")
target_yxtl[:, -1] -= (
predictions_starts - target_starts) # reg length
# Loss calculation
cls_loss = cls_loss + cls_criterion(
cls_pred, cls_target).sum() / target.shape[0]
target_yxtl[:, 0] *= self.n_strips
target_yxtl[:, 2] *= 180
reg_xytl_loss = reg_xytl_loss + F.smooth_l1_loss(
input=reg_yxtl, label=target_yxtl, reduction='none').mean()
iou_loss = iou_loss + liou_loss(
reg_pred, reg_targets, self.img_w, length=15)
cls_accuracy = accuracy(cls_pred, cls_target)
cls_acc_stage.append(cls_accuracy)
cls_acc.append(sum(cls_acc_stage) / (len(cls_acc_stage) + 1e-5))
# extra segmentation loss
seg_loss = self.criterion(
F.log_softmax(
output['seg'], axis=1), batch['seg'].cast('int64'))
cls_loss /= (len(targets) * self.refine_layers)
reg_xytl_loss /= (len(targets) * self.refine_layers)
iou_loss /= (len(targets) * self.refine_layers)
loss = cls_loss * self.cls_loss_weight \
+ reg_xytl_loss * self.xyt_loss_weight \
+ seg_loss * self.seg_loss_weight \
+ iou_loss * self.iou_loss_weight
return_value = {
'loss': loss,
'cls_loss': cls_loss * self.cls_loss_weight,
'reg_xytl_loss': reg_xytl_loss * self.xyt_loss_weight,
'seg_loss': seg_loss * self.seg_loss_weight,
'iou_loss': iou_loss * self.iou_loss_weight
}
for i in range(self.refine_layers):
if not isinstance(cls_acc[i], paddle.Tensor):
cls_acc[i] = paddle.to_tensor(cls_acc[i])
return_value['stage_{}_acc'.format(i)] = cls_acc[i]
return return_value
| PaddleDetection/ppdet/modeling/losses/clrnet_loss.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/losses/clrnet_loss.py",
"repo_id": "PaddleDetection",
"token_count": 5651
} | 74 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
__all__ = ['SOLOv2Loss']
@register
@serializable
class SOLOv2Loss(object):
"""
SOLOv2Loss
Args:
ins_loss_weight (float): Weight of instance loss.
focal_loss_gamma (float): Gamma parameter for focal loss.
focal_loss_alpha (float): Alpha parameter for focal loss.
"""
def __init__(self,
ins_loss_weight=3.0,
focal_loss_gamma=2.0,
focal_loss_alpha=0.25):
self.ins_loss_weight = ins_loss_weight
self.focal_loss_gamma = focal_loss_gamma
self.focal_loss_alpha = focal_loss_alpha
def _dice_loss(self, input, target):
input = paddle.reshape(input, shape=(paddle.shape(input)[0], -1))
target = paddle.reshape(target, shape=(paddle.shape(target)[0], -1))
a = paddle.sum(input * target, axis=1)
b = paddle.sum(input * input, axis=1) + 0.001
c = paddle.sum(target * target, axis=1) + 0.001
d = (2 * a) / (b + c)
return 1 - d
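# Reference for the computation above: the dice coefficient per instance is
# d = 2 * sum(p * t) / (sum(p * p) + sum(t * t)), evaluated over the flattened mask with
# 0.001 added to each denominator term for numerical stability; the returned loss 1 - d
# approaches 0 for a perfect mask and 1 for a completely wrong one.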
def __call__(self, ins_pred_list, ins_label_list, cate_preds, cate_labels,
num_ins):
"""
Get loss of network of SOLOv2.
Args:
ins_pred_list (list): Variable list of instance branch output.
ins_label_list (list): List of instance labels per batch.
cate_preds (list): Concat Variable list of category branch output.
cate_labels (list): Concat list of category labels per batch.
num_ins (int): Number of positive samples in a mini-batch.
Returns:
loss_ins (Variable): The instance loss Variable of SOLOv2 network.
loss_cate (Variable): The category loss Variable of SOLOv2 network.
"""
# 1. Use dice_loss to calculate instance loss
loss_ins = []
total_weights = paddle.zeros(shape=[1], dtype='float32')
for input, target in zip(ins_pred_list, ins_label_list):
if input is None:
continue
target = paddle.cast(target, 'float32')
target = paddle.reshape(
target,
shape=[-1, paddle.shape(input)[-2], paddle.shape(input)[-1]])
weights = paddle.cast(
paddle.sum(target, axis=[1, 2]) > 0, 'float32')
input = F.sigmoid(input)
dice_out = paddle.multiply(self._dice_loss(input, target), weights)
total_weights += paddle.sum(weights)
loss_ins.append(dice_out)
loss_ins = paddle.sum(paddle.concat(loss_ins)) / total_weights
loss_ins = loss_ins * self.ins_loss_weight
# 2. Use sigmoid_focal_loss to calculate category loss
# expand onehot labels
num_classes = cate_preds.shape[-1]
cate_labels_bin = F.one_hot(cate_labels, num_classes=num_classes + 1)
cate_labels_bin = cate_labels_bin[:, 1:]
loss_cate = F.sigmoid_focal_loss(
cate_preds,
label=cate_labels_bin,
normalizer=num_ins + 1.,
gamma=self.focal_loss_gamma,
alpha=self.focal_loss_alpha)
return loss_ins, loss_cate
| PaddleDetection/ppdet/modeling/losses/solov2_loss.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/losses/solov2_loss.py",
"repo_id": "PaddleDetection",
"token_count": 1704
} | 75 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import copy
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
from ppdet.modeling.layers import DropBlock, MultiHeadAttention
from ppdet.modeling.ops import get_act_fn
from ..backbones.cspresnet import ConvBNLayer, BasicBlock
from ..shape_spec import ShapeSpec
from ..initializer import linear_init_
__all__ = ['CustomCSPPAN']
def _get_clones(module, N):
return nn.LayerList([copy.deepcopy(module) for _ in range(N)])
class SPP(nn.Layer):
def __init__(self,
ch_in,
ch_out,
k,
pool_size,
act='swish',
data_format='NCHW'):
super(SPP, self).__init__()
self.pool = []
self.data_format = data_format
for i, size in enumerate(pool_size):
pool = self.add_sublayer(
'pool{}'.format(i),
nn.MaxPool2D(
kernel_size=size,
stride=1,
padding=size // 2,
data_format=data_format,
ceil_mode=False))
self.pool.append(pool)
self.conv = ConvBNLayer(ch_in, ch_out, k, padding=k // 2, act=act)
def forward(self, x):
outs = [x]
for pool in self.pool:
outs.append(pool(x))
if self.data_format == 'NCHW':
y = paddle.concat(outs, axis=1)
else:
y = paddle.concat(outs, axis=-1)
y = self.conv(y)
return y
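# Note (added for clarity): with the pool sizes used below (5, 9, 13), the forward pass
# concatenates the input with three max-pooled copies of itself, quadrupling the channel
# count before the trailing conv fuses context from several receptive-field sizes without
# changing spatial resolution (stride 1, same padding).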
class CSPStage(nn.Layer):
def __init__(self,
block_fn,
ch_in,
ch_out,
n,
act='swish',
spp=False,
use_alpha=False):
super(CSPStage, self).__init__()
ch_mid = int(ch_out // 2)
self.conv1 = ConvBNLayer(ch_in, ch_mid, 1, act=act)
self.conv2 = ConvBNLayer(ch_in, ch_mid, 1, act=act)
self.convs = nn.Sequential()
next_ch_in = ch_mid
for i in range(n):
self.convs.add_sublayer(
str(i),
eval(block_fn)(next_ch_in,
ch_mid,
act=act,
shortcut=False,
use_alpha=use_alpha))
if i == (n - 1) // 2 and spp:
self.convs.add_sublayer(
'spp', SPP(ch_mid * 4, ch_mid, 1, [5, 9, 13], act=act))
next_ch_in = ch_mid
self.conv3 = ConvBNLayer(ch_mid * 2, ch_out, 1, act=act)
def forward(self, x):
y1 = self.conv1(x)
y2 = self.conv2(x)
y2 = self.convs(y2)
y = paddle.concat([y1, y2], axis=1)
y = self.conv3(y)
return y
class TransformerEncoderLayer(nn.Layer):
def __init__(self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
attn_dropout=None,
act_dropout=None,
normalize_before=False):
super(TransformerEncoderLayer, self).__init__()
attn_dropout = dropout if attn_dropout is None else attn_dropout
act_dropout = dropout if act_dropout is None else act_dropout
self.normalize_before = normalize_before
self.self_attn = MultiHeadAttention(d_model, nhead, attn_dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(act_dropout, mode="upscale_in_train")
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout, mode="upscale_in_train")
self.dropout2 = nn.Dropout(dropout, mode="upscale_in_train")
self.activation = getattr(F, activation)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.linear1)
linear_init_(self.linear2)
@staticmethod
def with_pos_embed(tensor, pos_embed):
return tensor if pos_embed is None else tensor + pos_embed
def forward(self, src, src_mask=None, pos_embed=None):
residual = src
if self.normalize_before:
src = self.norm1(src)
q = k = self.with_pos_embed(src, pos_embed)
src = self.self_attn(q, k, value=src, attn_mask=src_mask)
src = residual + self.dropout1(src)
if not self.normalize_before:
src = self.norm1(src)
residual = src
if self.normalize_before:
src = self.norm2(src)
src = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = residual + self.dropout2(src)
if not self.normalize_before:
src = self.norm2(src)
return src
class TransformerEncoder(nn.Layer):
def __init__(self, encoder_layer, num_layers, norm=None):
super(TransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src, src_mask=None, pos_embed=None):
output = src
for layer in self.layers:
output = layer(output, src_mask=src_mask, pos_embed=pos_embed)
if self.norm is not None:
output = self.norm(output)
return output
@register
@serializable
class CustomCSPPAN(nn.Layer):
__shared__ = [
'norm_type', 'data_format', 'width_mult', 'depth_mult', 'trt',
'eval_size'
]
def __init__(self,
in_channels=[256, 512, 1024],
out_channels=[1024, 512, 256],
norm_type='bn',
act='leaky',
stage_fn='CSPStage',
block_fn='BasicBlock',
stage_num=1,
block_num=3,
drop_block=False,
block_size=3,
keep_prob=0.9,
spp=False,
data_format='NCHW',
width_mult=1.0,
depth_mult=1.0,
use_alpha=False,
trt=False,
dim_feedforward=2048,
dropout=0.1,
activation='gelu',
nhead=4,
num_layers=4,
attn_dropout=None,
act_dropout=None,
normalize_before=False,
use_trans=False,
eval_size=None):
super(CustomCSPPAN, self).__init__()
out_channels = [max(round(c * width_mult), 1) for c in out_channels]
block_num = max(round(block_num * depth_mult), 1)
act = get_act_fn(
act, trt=trt) if act is None or isinstance(act,
(str, dict)) else act
self.num_blocks = len(in_channels)
self.data_format = data_format
self._out_channels = out_channels
self.hidden_dim = in_channels[-1]
in_channels = in_channels[::-1]
self.use_trans = use_trans
self.eval_size = eval_size
if use_trans:
if eval_size is not None:
self.pos_embed = self.build_2d_sincos_position_embedding(
eval_size[1] // 32,
eval_size[0] // 32,
embed_dim=self.hidden_dim)
else:
self.pos_embed = None
encoder_layer = TransformerEncoderLayer(
self.hidden_dim, nhead, dim_feedforward, dropout, activation,
attn_dropout, act_dropout, normalize_before)
encoder_norm = nn.LayerNorm(
self.hidden_dim) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_layers,
encoder_norm)
fpn_stages = []
fpn_routes = []
for i, (ch_in, ch_out) in enumerate(zip(in_channels, out_channels)):
if i > 0:
ch_in += ch_pre // 2
stage = nn.Sequential()
for j in range(stage_num):
stage.add_sublayer(
str(j),
eval(stage_fn)(block_fn,
ch_in if j == 0 else ch_out,
ch_out,
block_num,
act=act,
spp=(spp and i == 0),
use_alpha=use_alpha))
if drop_block:
stage.add_sublayer('drop', DropBlock(block_size, keep_prob))
fpn_stages.append(stage)
if i < self.num_blocks - 1:
fpn_routes.append(
ConvBNLayer(
ch_in=ch_out,
ch_out=ch_out // 2,
filter_size=1,
stride=1,
padding=0,
act=act))
ch_pre = ch_out
self.fpn_stages = nn.LayerList(fpn_stages)
self.fpn_routes = nn.LayerList(fpn_routes)
pan_stages = []
pan_routes = []
for i in reversed(range(self.num_blocks - 1)):
pan_routes.append(
ConvBNLayer(
ch_in=out_channels[i + 1],
ch_out=out_channels[i + 1],
filter_size=3,
stride=2,
padding=1,
act=act))
ch_in = out_channels[i] + out_channels[i + 1]
ch_out = out_channels[i]
stage = nn.Sequential()
for j in range(stage_num):
stage.add_sublayer(
str(j),
eval(stage_fn)(block_fn,
ch_in if j == 0 else ch_out,
ch_out,
block_num,
act=act,
spp=False,
use_alpha=use_alpha))
if drop_block:
stage.add_sublayer('drop', DropBlock(block_size, keep_prob))
pan_stages.append(stage)
self.pan_stages = nn.LayerList(pan_stages[::-1])
self.pan_routes = nn.LayerList(pan_routes[::-1])
def build_2d_sincos_position_embedding(
self,
w,
h,
embed_dim=1024,
temperature=10000., ):
grid_w = paddle.arange(int(w), dtype=paddle.float32)
grid_h = paddle.arange(int(h), dtype=paddle.float32)
grid_w, grid_h = paddle.meshgrid(grid_w, grid_h)
assert embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
pos_dim = embed_dim // 4
omega = paddle.arange(pos_dim, dtype=paddle.float32) / pos_dim
omega = 1. / (temperature**omega)
out_w = grid_w.flatten()[..., None] @omega[None]
out_h = grid_h.flatten()[..., None] @omega[None]
pos_emb = paddle.concat(
[
paddle.sin(out_w), paddle.cos(out_w), paddle.sin(out_h),
paddle.cos(out_h)
],
axis=1)[None, :, :]
return pos_emb
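# Reference for the embedding above: with pos_dim = embed_dim // 4 and frequencies
# omega_k = 1 / temperature ** (k / pos_dim), each flattened grid position (x, y) is
# encoded as [sin(x * omega), cos(x * omega), sin(y * omega), cos(y * omega)], producing
# a fixed (non-learned) tensor of shape [1, w * h, embed_dim].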
def forward(self, blocks, for_mot=False):
if self.use_trans:
last_feat = blocks[-1]
n, c, h, w = last_feat.shape
# flatten [B, C, H, W] to [B, HxW, C]
src_flatten = last_feat.flatten(2).transpose([0, 2, 1])
if self.eval_size is not None and not self.training:
pos_embed = self.pos_embed
else:
pos_embed = self.build_2d_sincos_position_embedding(
w=w, h=h, embed_dim=self.hidden_dim)
memory = self.encoder(src_flatten, pos_embed=pos_embed)
last_feat_encode = memory.transpose([0, 2, 1]).reshape([n, c, h, w])
blocks[-1] = last_feat_encode
blocks = blocks[::-1]
fpn_feats = []
for i, block in enumerate(blocks):
if i > 0:
block = paddle.concat([route, block], axis=1)
route = self.fpn_stages[i](block)
fpn_feats.append(route)
if i < self.num_blocks - 1:
route = self.fpn_routes[i](route)
route = F.interpolate(
route, scale_factor=2., data_format=self.data_format)
pan_feats = [fpn_feats[-1], ]
route = fpn_feats[-1]
for i in reversed(range(self.num_blocks - 1)):
block = fpn_feats[i]
route = self.pan_routes[i](route)
block = paddle.concat([route, block], axis=1)
route = self.pan_stages[i](block)
pan_feats.append(route)
return pan_feats[::-1]
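# Flow summary (added for clarity): the deepest feature is optionally refined by a
# transformer encoder, then a top-down FPN pass upsamples by 2x and concatenates with the
# next shallower level, a bottom-up PAN pass walks back down with stride-2 convs, and the
# fused features are returned ordered from the deepest (lowest-resolution) level to the
# shallowest.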
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape], }
@property
def out_shape(self):
return [ShapeSpec(channels=c) for c in self._out_channels]
| PaddleDetection/ppdet/modeling/necks/custom_pan.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/necks/custom_pan.py",
"repo_id": "PaddleDetection",
"token_count": 7674
} | 76 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
from ppdet.core.workspace import register, serializable
from .target import rpn_anchor_target, generate_proposal_target, generate_mask_target, libra_generate_proposal_target
import numpy as np
@register
@serializable
class RPNTargetAssign(object):
__shared__ = ['assign_on_cpu']
"""
RPN targets assignment module
The assignment consists of three steps:
1. Match anchor and ground-truth box, label the anchor with foreground
or background sample
        2. Sample anchors to keep a proper ratio between foreground and
background
3. Generate the targets for classification and regression branch
Args:
batch_size_per_im (int): Total number of RPN samples per image.
default 256
fg_fraction (float): Fraction of anchors that is labeled
foreground, default 0.5
positive_overlap (float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be
a foreground sample. default 0.7
negative_overlap (float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be
a background sample. default 0.3
ignore_thresh(float): Threshold for ignoring the is_crowd ground-truth
if the value is larger than zero.
use_random (bool): Use random sampling to choose foreground and
background boxes, default true.
assign_on_cpu (bool): In case the number of gt box is too large,
compute IoU on CPU, default false.
"""
def __init__(self,
batch_size_per_im=256,
fg_fraction=0.5,
positive_overlap=0.7,
negative_overlap=0.3,
ignore_thresh=-1.,
use_random=True,
assign_on_cpu=False):
super(RPNTargetAssign, self).__init__()
self.batch_size_per_im = batch_size_per_im
self.fg_fraction = fg_fraction
self.positive_overlap = positive_overlap
self.negative_overlap = negative_overlap
self.ignore_thresh = ignore_thresh
self.use_random = use_random
self.assign_on_cpu = assign_on_cpu
def __call__(self, inputs, anchors):
"""
inputs: ground-truth instances.
anchor_box (Tensor): [num_anchors, 4], num_anchors are all anchors in all feature maps.
"""
gt_boxes = inputs['gt_bbox']
is_crowd = inputs.get('is_crowd', None)
batch_size = len(gt_boxes)
tgt_labels, tgt_bboxes, tgt_deltas = rpn_anchor_target(
anchors,
gt_boxes,
self.batch_size_per_im,
self.positive_overlap,
self.negative_overlap,
self.fg_fraction,
self.use_random,
batch_size,
self.ignore_thresh,
is_crowd,
assign_on_cpu=self.assign_on_cpu)
norm = self.batch_size_per_im * batch_size
return tgt_labels, tgt_bboxes, tgt_deltas, norm
@register
class BBoxAssigner(object):
__shared__ = ['num_classes', 'assign_on_cpu']
"""
RCNN targets assignment module
The assignment consists of three steps:
1. Match RoIs and ground-truth box, label the RoIs with foreground
or background sample
        2. Sample anchors to keep a proper ratio between foreground and
background
3. Generate the targets for classification and regression branch
Args:
batch_size_per_im (int): Total number of RoIs per image.
default 512
fg_fraction (float): Fraction of RoIs that is labeled
foreground, default 0.25
fg_thresh (float): Minimum overlap required between a RoI
and ground-truth box for the (roi, gt box) pair to be
a foreground sample. default 0.5
bg_thresh (float): Maximum overlap allowed between a RoI
and ground-truth box for the (roi, gt box) pair to be
a background sample. default 0.5
ignore_thresh(float): Threshold for ignoring the is_crowd ground-truth
if the value is larger than zero.
use_random (bool): Use random sampling to choose foreground and
background boxes, default true
        cascade_iou (list[iou]): The list of overlap thresholds used to select
            foreground and background at each stage; only used in Cascade RCNN.
num_classes (int): The number of class.
assign_on_cpu (bool): In case the number of gt box is too large,
compute IoU on CPU, default false.
"""
def __init__(self,
batch_size_per_im=512,
fg_fraction=.25,
fg_thresh=.5,
bg_thresh=.5,
ignore_thresh=-1.,
use_random=True,
cascade_iou=[0.5, 0.6, 0.7],
num_classes=80,
assign_on_cpu=False):
super(BBoxAssigner, self).__init__()
self.batch_size_per_im = batch_size_per_im
self.fg_fraction = fg_fraction
self.fg_thresh = fg_thresh
self.bg_thresh = bg_thresh
self.ignore_thresh = ignore_thresh
self.use_random = use_random
self.cascade_iou = cascade_iou
self.num_classes = num_classes
self.assign_on_cpu = assign_on_cpu
def __call__(self,
rpn_rois,
rpn_rois_num,
inputs,
stage=0,
is_cascade=False,
add_gt_as_proposals=True):
gt_classes = inputs['gt_class']
gt_boxes = inputs['gt_bbox']
is_crowd = inputs.get('is_crowd', None)
# rois, tgt_labels, tgt_bboxes, tgt_gt_inds
# new_rois_num
outs = generate_proposal_target(
rpn_rois, gt_classes, gt_boxes, self.batch_size_per_im,
self.fg_fraction, self.fg_thresh, self.bg_thresh, self.num_classes,
self.ignore_thresh, is_crowd, self.use_random, is_cascade,
self.cascade_iou[stage], self.assign_on_cpu, add_gt_as_proposals)
rois = outs[0]
rois_num = outs[-1]
# tgt_labels, tgt_bboxes, tgt_gt_inds
targets = outs[1:4]
return rois, rois_num, targets
@register
class BBoxLibraAssigner(object):
__shared__ = ['num_classes']
"""
Libra-RCNN targets assignment module
The assignment consists of three steps:
1. Match RoIs and ground-truth box, label the RoIs with foreground
or background sample
        2. Sample anchors to keep a proper ratio between foreground and
background
3. Generate the targets for classification and regression branch
Args:
batch_size_per_im (int): Total number of RoIs per image.
default 512
fg_fraction (float): Fraction of RoIs that is labeled
foreground, default 0.25
fg_thresh (float): Minimum overlap required between a RoI
and ground-truth box for the (roi, gt box) pair to be
a foreground sample. default 0.5
bg_thresh (float): Maximum overlap allowed between a RoI
and ground-truth box for the (roi, gt box) pair to be
a background sample. default 0.5
use_random (bool): Use random sampling to choose foreground and
background boxes, default true
        cascade_iou (list[iou]): The list of overlap thresholds used to select
            foreground and background at each stage; only used in Cascade RCNN.
num_classes (int): The number of class.
num_bins (int): The number of libra_sample.
"""
def __init__(self,
batch_size_per_im=512,
fg_fraction=.25,
fg_thresh=.5,
bg_thresh=.5,
use_random=True,
cascade_iou=[0.5, 0.6, 0.7],
num_classes=80,
num_bins=3):
super(BBoxLibraAssigner, self).__init__()
self.batch_size_per_im = batch_size_per_im
self.fg_fraction = fg_fraction
self.fg_thresh = fg_thresh
self.bg_thresh = bg_thresh
self.use_random = use_random
self.cascade_iou = cascade_iou
self.num_classes = num_classes
self.num_bins = num_bins
def __call__(self,
rpn_rois,
rpn_rois_num,
inputs,
stage=0,
is_cascade=False):
gt_classes = inputs['gt_class']
gt_boxes = inputs['gt_bbox']
# rois, tgt_labels, tgt_bboxes, tgt_gt_inds
outs = libra_generate_proposal_target(
rpn_rois, gt_classes, gt_boxes, self.batch_size_per_im,
self.fg_fraction, self.fg_thresh, self.bg_thresh, self.num_classes,
self.use_random, is_cascade, self.cascade_iou[stage], self.num_bins)
rois = outs[0]
rois_num = outs[-1]
# tgt_labels, tgt_bboxes, tgt_gt_inds
targets = outs[1:4]
return rois, rois_num, targets
@register
@serializable
class MaskAssigner(object):
__shared__ = ['num_classes', 'mask_resolution']
"""
Mask targets assignment module
    The assignment consists of two steps:
        1. Select the RoIs that are labeled as foreground.
        2. Encode the RoIs and the corresponding gt polygons to generate
           the mask targets
Args:
num_classes (int): The number of class
mask_resolution (int): The resolution of mask target, default 14
"""
def __init__(self, num_classes=80, mask_resolution=14):
super(MaskAssigner, self).__init__()
self.num_classes = num_classes
self.mask_resolution = mask_resolution
def __call__(self, rois, tgt_labels, tgt_gt_inds, inputs):
gt_segms = inputs['gt_poly']
outs = generate_mask_target(gt_segms, rois, tgt_labels, tgt_gt_inds,
self.num_classes, self.mask_resolution)
# mask_rois, mask_rois_num, tgt_classes, tgt_masks, mask_index, tgt_weights
return outs
@register
class RBoxAssigner(object):
"""
assigner of rbox
Args:
pos_iou_thr (float): threshold of pos samples
neg_iou_thr (float): threshold of neg samples
min_iou_thr (float): the min threshold of samples
ignore_iof_thr (int): the ignored threshold
"""
def __init__(self,
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_iou_thr=0.0,
ignore_iof_thr=-2):
super(RBoxAssigner, self).__init__()
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_iou_thr = min_iou_thr
self.ignore_iof_thr = ignore_iof_thr
def anchor_valid(self, anchors):
"""
Args:
anchor: M x 4
Returns:
"""
if anchors.ndim == 3:
anchors = anchors.reshape(-1, anchors.shape[-1])
assert anchors.ndim == 2
anchor_num = anchors.shape[0]
anchor_valid = np.ones((anchor_num), np.int32)
anchor_inds = np.arange(anchor_num)
return anchor_inds
def rbox2delta(self,
proposals,
gt,
means=[0, 0, 0, 0, 0],
stds=[1, 1, 1, 1, 1]):
"""
Args:
proposals: tensor [N, 5]
gt: gt [N, 5]
means: means [5]
stds: stds [5]
Returns:
"""
proposals = proposals.astype(np.float64)
PI = np.pi
gt_widths = gt[..., 2]
gt_heights = gt[..., 3]
gt_angle = gt[..., 4]
proposals_widths = proposals[..., 2]
proposals_heights = proposals[..., 3]
proposals_angle = proposals[..., 4]
coord = gt[..., 0:2] - proposals[..., 0:2]
dx = (np.cos(proposals[..., 4]) * coord[..., 0] +
np.sin(proposals[..., 4]) * coord[..., 1]) / proposals_widths
dy = (-np.sin(proposals[..., 4]) * coord[..., 0] +
np.cos(proposals[..., 4]) * coord[..., 1]) / proposals_heights
dw = np.log(gt_widths / proposals_widths)
dh = np.log(gt_heights / proposals_heights)
da = (gt_angle - proposals_angle)
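        # wrap the raw angle difference into [-pi/4, 3*pi/4) and scale it by pi so
        # the regression target stays within a small, bounded range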
da = (da + PI / 4) % PI - PI / 4
da /= PI
deltas = np.stack([dx, dy, dw, dh, da], axis=-1)
means = np.array(means, dtype=deltas.dtype)
stds = np.array(stds, dtype=deltas.dtype)
deltas = (deltas - means) / stds
deltas = deltas.astype(np.float32)
return deltas
def assign_anchor(self,
anchors,
gt_bboxes,
gt_labels,
pos_iou_thr,
neg_iou_thr,
min_iou_thr=0.0,
ignore_iof_thr=-2):
assert anchors.shape[1] == 4 or anchors.shape[1] == 5
assert gt_bboxes.shape[1] == 4 or gt_bboxes.shape[1] == 5
anchors_xc_yc = anchors
gt_bboxes_xc_yc = gt_bboxes
# calc rbox iou
anchors_xc_yc = anchors_xc_yc.astype(np.float32)
gt_bboxes_xc_yc = gt_bboxes_xc_yc.astype(np.float32)
anchors_xc_yc = paddle.to_tensor(anchors_xc_yc)
gt_bboxes_xc_yc = paddle.to_tensor(gt_bboxes_xc_yc)
try:
from ext_op import rbox_iou
except Exception as e:
print("import custom_ops error, try install ext_op " \
"following ppdet/ext_op/README.md", e)
sys.stdout.flush()
sys.exit(-1)
iou = rbox_iou(gt_bboxes_xc_yc, anchors_xc_yc)
iou = iou.numpy()
iou = iou.T
# every gt's anchor's index
gt_bbox_anchor_inds = iou.argmax(axis=0)
gt_bbox_anchor_iou = iou[gt_bbox_anchor_inds, np.arange(iou.shape[1])]
gt_bbox_anchor_iou_inds = np.where(iou == gt_bbox_anchor_iou)[0]
# every anchor's gt bbox's index
anchor_gt_bbox_inds = iou.argmax(axis=1)
anchor_gt_bbox_iou = iou[np.arange(iou.shape[0]), anchor_gt_bbox_inds]
# (1) set labels=-2 as default
labels = np.ones((iou.shape[0], ), dtype=np.int32) * ignore_iof_thr
# (2) assign ignore
labels[anchor_gt_bbox_iou < min_iou_thr] = ignore_iof_thr
# (3) assign neg_ids -1
assign_neg_ids1 = anchor_gt_bbox_iou >= min_iou_thr
assign_neg_ids2 = anchor_gt_bbox_iou < neg_iou_thr
assign_neg_ids = np.logical_and(assign_neg_ids1, assign_neg_ids2)
labels[assign_neg_ids] = -1
# anchor_gt_bbox_iou_inds
# (4) assign max_iou as pos_ids >=0
anchor_gt_bbox_iou_inds = anchor_gt_bbox_inds[gt_bbox_anchor_iou_inds]
# gt_bbox_anchor_iou_inds = np.logical_and(gt_bbox_anchor_iou_inds, anchor_gt_bbox_iou >= min_iou_thr)
labels[gt_bbox_anchor_iou_inds] = gt_labels[anchor_gt_bbox_iou_inds]
# (5) assign >= pos_iou_thr as pos_ids
iou_pos_iou_thr_ids = anchor_gt_bbox_iou >= pos_iou_thr
iou_pos_iou_thr_ids_box_inds = anchor_gt_bbox_inds[iou_pos_iou_thr_ids]
labels[iou_pos_iou_thr_ids] = gt_labels[iou_pos_iou_thr_ids_box_inds]
return anchor_gt_bbox_inds, anchor_gt_bbox_iou, labels
def __call__(self, anchors, gt_bboxes, gt_labels, is_crowd):
assert anchors.ndim == 2
assert anchors.shape[1] == 5
assert gt_bboxes.ndim == 2
assert gt_bboxes.shape[1] == 5
pos_iou_thr = self.pos_iou_thr
neg_iou_thr = self.neg_iou_thr
min_iou_thr = self.min_iou_thr
ignore_iof_thr = self.ignore_iof_thr
anchor_num = anchors.shape[0]
gt_bboxes = gt_bboxes
is_crowd_slice = is_crowd
not_crowd_inds = np.where(is_crowd_slice == 0)
# Step1: match anchor and gt_bbox
anchor_gt_bbox_inds, anchor_gt_bbox_iou, labels = self.assign_anchor(
anchors, gt_bboxes,
gt_labels.reshape(-1), pos_iou_thr, neg_iou_thr, min_iou_thr,
ignore_iof_thr)
# Step2: sample anchor
pos_inds = np.where(labels >= 0)[0]
neg_inds = np.where(labels == -1)[0]
# Step3: make output
anchors_num = anchors.shape[0]
bbox_targets = np.zeros_like(anchors)
bbox_weights = np.zeros_like(anchors)
bbox_gt_bboxes = np.zeros_like(anchors)
pos_labels = np.zeros(anchors_num, dtype=np.int32)
pos_labels_weights = np.zeros(anchors_num, dtype=np.float32)
pos_sampled_anchors = anchors[pos_inds]
pos_sampled_gt_boxes = gt_bboxes[anchor_gt_bbox_inds[pos_inds]]
if len(pos_inds) > 0:
pos_bbox_targets = self.rbox2delta(pos_sampled_anchors,
pos_sampled_gt_boxes)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_gt_bboxes[pos_inds, :] = pos_sampled_gt_boxes
bbox_weights[pos_inds, :] = 1.0
pos_labels[pos_inds] = labels[pos_inds]
pos_labels_weights[pos_inds] = 1.0
if len(neg_inds) > 0:
pos_labels_weights[neg_inds] = 1.0
return (pos_labels, pos_labels_weights, bbox_targets, bbox_weights,
bbox_gt_bboxes, pos_inds, neg_inds)
| PaddleDetection/ppdet/modeling/proposal_generator/target_layer.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/proposal_generator/target_layer.py",
"repo_id": "PaddleDetection",
"token_count": 8951
} | 77 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import ppdet
class TestFasterRCNN(unittest.TestCase):
def setUp(self):
self.set_config()
def set_config(self):
self.cfg_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.yml'
def test_trainer(self):
# Trainer __init__ will build model and DataLoader
# 'train' and 'eval' mode include dataset loading
# use 'test' mode to simplify tests
cfg = ppdet.core.workspace.load_config(self.cfg_file)
trainer = ppdet.engine.Trainer(cfg, mode='test')
class TestMaskRCNN(TestFasterRCNN):
def set_config(self):
self.cfg_file = 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.yml'
class TestCascadeRCNN(TestFasterRCNN):
def set_config(self):
self.cfg_file = 'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.yml'
class TestYolov3(TestFasterRCNN):
def set_config(self):
self.cfg_file = 'configs/yolov3/yolov3_darknet53_270e_coco.yml'
class TestSSD(TestFasterRCNN):
def set_config(self):
self.cfg_file = 'configs/ssd/ssd_vgg16_300_240e_voc.yml'
class TestGFL(TestFasterRCNN):
def set_config(self):
self.cfg_file = 'configs/gfl/gfl_r50_fpn_1x_coco.yml'
class TestPicoDet(TestFasterRCNN):
def set_config(self):
self.cfg_file = 'configs/picodet/picodet_s_320_coco_lcnet.yml'
if __name__ == '__main__':
unittest.main()
| PaddleDetection/ppdet/modeling/tests/test_architectures.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/tests/test_architectures.py",
"repo_id": "PaddleDetection",
"token_count": 831
} | 78 |
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified from Deformable-DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Modified from detrex (https://github.com/IDEA-Research/detrex)
# Copyright 2022 The IDEA Authors. All rights reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register
from .position_encoding import PositionEmbedding
from ..heads.detr_head import MLP
from .deformable_transformer import (DeformableTransformerEncoderLayer,
DeformableTransformerEncoder)
from .dino_transformer import (DINOTransformerDecoderLayer)
from ..initializer import (linear_init_, constant_, xavier_uniform_,
bias_init_with_prob)
from .utils import (_get_clones, get_valid_ratio, get_denoising_training_group,
get_sine_pos_embed, inverse_sigmoid, mask_to_box_coordinate)
__all__ = ['MaskDINO']
class ConvGNBlock(nn.Layer):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
groups=1,
num_groups=32,
bias=False,
act=None):
super(ConvGNBlock, self).__init__()
self.conv = nn.Conv2D(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2,
groups=groups,
bias_attr=bias)
self.norm = nn.GroupNorm(
num_groups,
out_channels,
weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))
self.act = getattr(F, act) if act is not None else None
self._init_weights()
def _init_weights(self):
xavier_uniform_(self.conv.weight)
def forward(self, x):
x = self.norm(self.conv(x))
if self.act is not None:
x = self.act(x)
return x
class MaskDINOTransformerDecoder(nn.Layer):
def __init__(self, hidden_dim, decoder_layer, num_layers):
super(MaskDINOTransformerDecoder, self).__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.hidden_dim = hidden_dim
self.num_layers = num_layers
def forward(self,
tgt,
ref_points_unact,
memory,
memory_spatial_shapes,
memory_level_start_index,
bbox_head,
query_pos_head,
dec_norm,
valid_ratios=None,
attn_mask=None,
memory_mask=None):
if valid_ratios is None:
valid_ratios = paddle.ones(
[memory.shape[0], memory_spatial_shapes.shape[0], 2])
output = tgt
intermediate = []
inter_bboxes = []
ref_points = F.sigmoid(ref_points_unact)
for i, layer in enumerate(self.layers):
reference_points_input = ref_points.detach().unsqueeze(
2) * valid_ratios.tile([1, 1, 2]).unsqueeze(1)
query_pos_embed = get_sine_pos_embed(
reference_points_input[..., 0, :], self.hidden_dim // 2)
query_pos_embed = query_pos_head(query_pos_embed)
output = layer(output, reference_points_input, memory,
memory_spatial_shapes, memory_level_start_index,
attn_mask, memory_mask, query_pos_embed)
ref_points = F.sigmoid(
bbox_head(output) + inverse_sigmoid(ref_points.detach()))
intermediate.append(dec_norm(output))
inter_bboxes.append(ref_points)
return paddle.stack(intermediate), paddle.stack(inter_bboxes)
@register
class MaskDINO(nn.Layer):
__shared__ = ['num_classes', 'hidden_dim']
def __init__(self,
num_classes=80,
hidden_dim=256,
num_queries=300,
position_embed_type='sine',
in_feats_channel=[256, 512, 1024, 2048],
num_levels=3,
num_encoder_points=4,
num_decoder_points=4,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=9,
enc_dim_feedforward=1024,
dec_dim_feedforward=2048,
dropout=0.,
activation="relu",
lr_mult=1.0,
pe_temperature=10000,
pe_offset=-0.5,
num_denoising=100,
label_noise_ratio=0.4,
box_noise_scale=0.4,
learnt_init_query=False,
mask_enhanced=True,
eps=1e-2):
super(MaskDINO, self).__init__()
assert position_embed_type in ['sine', 'learned'], \
f'ValueError: position_embed_type not supported {position_embed_type}!'
feat0_dim = in_feats_channel.pop(0)
assert len(in_feats_channel) <= num_levels
self.hidden_dim = hidden_dim
self.nhead = nhead
self.num_levels = num_levels
self.num_classes = num_classes
self.num_queries = num_queries
self.eps = eps
self.num_decoder_layers = num_decoder_layers
self.mask_enhanced = mask_enhanced
weight_attr = ParamAttr(regularizer=L2Decay(0.0))
bias_attr = ParamAttr(regularizer=L2Decay(0.0))
# backbone feature projection
self._build_input_proj_layer(in_feats_channel, weight_attr, bias_attr)
# Transformer module
encoder_layer = DeformableTransformerEncoderLayer(
hidden_dim, nhead, enc_dim_feedforward, dropout, activation,
num_levels, num_encoder_points, lr_mult, weight_attr, bias_attr)
self.encoder = DeformableTransformerEncoder(encoder_layer,
num_encoder_layers)
decoder_layer = DINOTransformerDecoderLayer(
hidden_dim, nhead, dec_dim_feedforward, dropout, activation,
num_levels, num_decoder_points, lr_mult, weight_attr, bias_attr)
self.decoder = MaskDINOTransformerDecoder(hidden_dim, decoder_layer,
num_decoder_layers)
# denoising part
self.denoising_class_embed = nn.Embedding(
num_classes,
hidden_dim,
weight_attr=ParamAttr(initializer=nn.initializer.Normal()))
self.num_denoising = num_denoising
self.label_noise_ratio = label_noise_ratio
self.box_noise_scale = box_noise_scale
# position embedding
self.position_embedding = PositionEmbedding(
hidden_dim // 2,
temperature=pe_temperature,
normalize=True if position_embed_type == 'sine' else False,
embed_type=position_embed_type,
offset=pe_offset)
self.level_embed = nn.Embedding(
num_levels,
hidden_dim,
weight_attr=ParamAttr(initializer=nn.initializer.Normal()))
# decoder embedding
self.learnt_init_query = learnt_init_query
if learnt_init_query:
self.tgt_embed = nn.Embedding(num_queries, hidden_dim)
self.query_pos_head = MLP(2 * hidden_dim,
hidden_dim,
hidden_dim,
num_layers=2)
# mask embedding
self.mask_query_head = MLP(hidden_dim,
hidden_dim,
hidden_dim,
num_layers=3)
# encoder mask head
self.enc_mask_lateral = ConvGNBlock(feat0_dim, hidden_dim, 1)
self.enc_mask_output = nn.Sequential(
ConvGNBlock(
hidden_dim, hidden_dim, 3, act=activation),
nn.Conv2D(hidden_dim, hidden_dim, 1))
# encoder head
self.enc_output = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.LayerNorm(
hidden_dim, weight_attr=weight_attr, bias_attr=bias_attr))
# decoder norm layer
self.dec_norm = nn.LayerNorm(
hidden_dim, weight_attr=weight_attr, bias_attr=bias_attr)
# shared prediction head
self.class_head = nn.Linear(hidden_dim, num_classes)
self.bbox_head = MLP(hidden_dim, hidden_dim, 4, num_layers=3)
self._reset_parameters()
def _reset_parameters(self):
# class and bbox head init
bias_cls = bias_init_with_prob(0.01)
linear_init_(self.class_head)
constant_(self.class_head.bias, bias_cls)
constant_(self.bbox_head.layers[-1].weight)
constant_(self.bbox_head.layers[-1].bias)
xavier_uniform_(self.enc_mask_output[1].weight)
linear_init_(self.enc_output[0])
xavier_uniform_(self.enc_output[0].weight)
if self.learnt_init_query:
xavier_uniform_(self.tgt_embed.weight)
xavier_uniform_(self.query_pos_head.layers[0].weight)
xavier_uniform_(self.query_pos_head.layers[1].weight)
for l in self.input_proj:
xavier_uniform_(l[0].weight)
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_feats_channel': [i.channels for i in input_shape], }
def _build_input_proj_layer(self,
in_feats_channel,
weight_attr=None,
bias_attr=None):
self.input_proj = nn.LayerList()
for in_channels in in_feats_channel:
self.input_proj.append(
nn.Sequential(
('conv', nn.Conv2D(
in_channels, self.hidden_dim, kernel_size=1)), (
'norm', nn.GroupNorm(
32,
self.hidden_dim,
weight_attr=weight_attr,
bias_attr=bias_attr))))
in_channels = in_feats_channel[-1]
for _ in range(self.num_levels - len(in_feats_channel)):
self.input_proj.append(
nn.Sequential(
('conv', nn.Conv2D(
in_channels,
self.hidden_dim,
kernel_size=3,
stride=2,
padding=1)), ('norm', nn.GroupNorm(
32,
self.hidden_dim,
weight_attr=weight_attr,
bias_attr=bias_attr))))
in_channels = self.hidden_dim
def _get_encoder_input(self, feats, pad_mask=None):
# get projection features
proj_feats = [self.input_proj[i](feat) for i, feat in enumerate(feats)]
if self.num_levels > len(proj_feats):
len_srcs = len(proj_feats)
for i in range(len_srcs, self.num_levels):
if i == len_srcs:
proj_feats.append(self.input_proj[i](feats[-1]))
else:
proj_feats.append(self.input_proj[i](proj_feats[-1]))
# get encoder inputs
feat_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
valid_ratios = []
for i, feat in enumerate(proj_feats):
bs, _, h, w = paddle.shape(feat)
spatial_shapes.append(paddle.concat([h, w]))
# [b,c,h,w] -> [b,h*w,c]
feat_flatten.append(feat.flatten(2).transpose([0, 2, 1]))
if pad_mask is not None:
mask = F.interpolate(pad_mask.unsqueeze(0), size=(h, w))[0]
else:
mask = paddle.ones([bs, h, w])
valid_ratios.append(get_valid_ratio(mask))
# [b, h*w, c]
pos_embed = self.position_embedding(mask).flatten(1, 2)
lvl_pos_embed = pos_embed + self.level_embed.weight[i]
lvl_pos_embed_flatten.append(lvl_pos_embed)
if pad_mask is not None:
# [b, h*w]
mask_flatten.append(mask.flatten(1))
# [b, l, c]
feat_flatten = paddle.concat(feat_flatten, 1)
# [b, l]
mask_flatten = None if pad_mask is None else paddle.concat(mask_flatten,
1)
# [b, l, c]
lvl_pos_embed_flatten = paddle.concat(lvl_pos_embed_flatten, 1)
# [num_levels, 2]
spatial_shapes = paddle.to_tensor(
paddle.stack(spatial_shapes).astype('int64'))
        # [l], start index of each level
level_start_index = paddle.concat([
paddle.zeros(
[1], dtype='int64'), spatial_shapes.prod(1).cumsum(0)[:-1]
])
# [b, num_levels, 2]
valid_ratios = paddle.stack(valid_ratios, 1)
return (feat_flatten, spatial_shapes, level_start_index, mask_flatten,
lvl_pos_embed_flatten, valid_ratios)
def forward(self, feats, pad_mask=None, gt_meta=None):
feat0 = feats.pop(0)
# input projection and embedding
(feat_flatten, spatial_shapes, level_start_index, mask_flatten,
lvl_pos_embed_flatten,
valid_ratios) = self._get_encoder_input(feats, pad_mask)
# encoder
memory = self.encoder(feat_flatten, spatial_shapes, level_start_index,
mask_flatten, lvl_pos_embed_flatten, valid_ratios)
mask_feat = self._get_encoder_mask_feature(feat0, memory,
spatial_shapes)
# prepare denoising training
if self.training:
denoising_class, denoising_bbox_unact, attn_mask, dn_meta = \
get_denoising_training_group(gt_meta,
self.num_classes,
self.num_queries,
self.denoising_class_embed.weight,
self.num_denoising,
self.label_noise_ratio,
self.box_noise_scale)
else:
denoising_class, denoising_bbox_unact, attn_mask, dn_meta = None, None, None, None
target, init_ref_points_unact, enc_out, init_out = \
self._get_decoder_input(
memory, mask_feat, spatial_shapes, mask_flatten, denoising_class,
denoising_bbox_unact)
# decoder
inter_feats, inter_bboxes = self.decoder(
target, init_ref_points_unact, memory, spatial_shapes,
level_start_index, self.bbox_head, self.query_pos_head,
self.dec_norm, valid_ratios, attn_mask, mask_flatten)
out_logits = []
out_bboxes = []
out_masks = []
for i in range(self.num_decoder_layers):
if self.training or i == self.num_decoder_layers - 1:
logits_, masks_ = self._get_pred_class_and_mask(inter_feats[i],
mask_feat)
else:
continue
out_logits.append(logits_)
out_masks.append(masks_)
if i == 0:
out_bboxes.append(
F.sigmoid(
self.bbox_head(inter_feats[i]) + init_ref_points_unact))
else:
out_bboxes.append(
F.sigmoid(
self.bbox_head(inter_feats[i]) + inverse_sigmoid(
inter_bboxes[i - 1])))
out_bboxes = paddle.stack(out_bboxes)
out_logits = paddle.stack(out_logits)
out_masks = paddle.stack(out_masks)
return (out_logits, out_bboxes, out_masks, enc_out, init_out, dn_meta)
def _get_encoder_mask_feature(self, in_feat, memory, spatial_shapes):
memory_feat0 = memory.split(
spatial_shapes.prod(1).split(self.num_levels), axis=1)[0]
h, w = spatial_shapes[0]
memory_feat0 = memory_feat0.reshape(
[0, h, w, self.hidden_dim]).transpose([0, 3, 1, 2])
out = self.enc_mask_lateral(in_feat) + F.interpolate(
memory_feat0,
scale_factor=2.0,
mode='bilinear',
align_corners=False)
return self.enc_mask_output(out)
def _get_encoder_output_anchors(self,
memory,
spatial_shapes,
memory_mask=None,
grid_size=0.05):
output_anchors = []
idx = 0
for lvl, (h, w) in enumerate(spatial_shapes):
if memory_mask is not None:
mask_ = memory_mask[:, idx:idx + h * w].reshape([-1, h, w])
valid_H = paddle.sum(mask_[:, :, 0], 1)
valid_W = paddle.sum(mask_[:, 0, :], 1)
else:
valid_H, valid_W = h, w
grid_y, grid_x = paddle.meshgrid(
paddle.arange(end=h), paddle.arange(end=w))
grid_xy = paddle.stack([grid_x, grid_y], -1).astype(memory.dtype)
valid_WH = paddle.stack([valid_W, valid_H], -1).reshape(
[-1, 1, 1, 2]).astype(grid_xy.dtype)
grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH
wh = paddle.ones_like(grid_xy) * grid_size * (2.0**lvl)
output_anchors.append(
paddle.concat([grid_xy, wh], -1).reshape([-1, h * w, 4]))
idx += h * w
output_anchors = paddle.concat(output_anchors, 1)
valid_mask = ((output_anchors > self.eps) *
(output_anchors < 1 - self.eps)).all(-1, keepdim=True)
output_anchors = paddle.log(output_anchors / (1 - output_anchors))
if memory_mask is not None:
valid_mask = (valid_mask * (memory_mask.unsqueeze(-1) > 0)) > 0
output_anchors = paddle.where(valid_mask, output_anchors,
paddle.to_tensor(float("inf")))
memory = paddle.where(valid_mask, memory, paddle.to_tensor(0.))
output_memory = self.enc_output(memory)
return output_memory, output_anchors
def _get_decoder_input(self,
memory,
mask_feat,
spatial_shapes,
memory_mask=None,
denoising_class=None,
denoising_bbox_unact=None):
# prepare input for decoder
bs, _, _ = memory.shape
output_memory, output_anchors = self._get_encoder_output_anchors(
memory, spatial_shapes, memory_mask)
enc_logits_unact = self.class_head(output_memory)
enc_bboxes_unact = self.bbox_head(output_memory) + output_anchors
# get topk index
_, topk_ind = paddle.topk(
enc_logits_unact.max(-1), self.num_queries, axis=1)
batch_ind = paddle.arange(end=bs).astype(topk_ind.dtype)
batch_ind = batch_ind.unsqueeze(-1).tile([1, self.num_queries])
topk_ind = paddle.stack([batch_ind, topk_ind], axis=-1)
# extract content and position query embedding
target = paddle.gather_nd(output_memory, topk_ind)
reference_points_unact = paddle.gather_nd(enc_bboxes_unact,
topk_ind) # unsigmoided.
# get encoder output: {logits, bboxes, masks}
enc_out_logits, enc_out_masks = self._get_pred_class_and_mask(target,
mask_feat)
enc_out_bboxes = F.sigmoid(reference_points_unact)
enc_out = (enc_out_logits, enc_out_bboxes, enc_out_masks)
# concat denoising query
if self.learnt_init_query:
target = self.tgt_embed.weight.unsqueeze(0).tile([bs, 1, 1])
else:
target = target.detach()
if denoising_class is not None:
target = paddle.concat([denoising_class, target], 1)
if self.mask_enhanced:
# use mask-enhanced anchor box initialization
reference_points = mask_to_box_coordinate(
enc_out_masks > 0, normalize=True, format="xywh")
reference_points_unact = inverse_sigmoid(reference_points)
if denoising_bbox_unact is not None:
reference_points_unact = paddle.concat(
[denoising_bbox_unact, reference_points_unact], 1)
        # direct prediction from the matching and denoising parts at the beginning
if self.training and denoising_class is not None:
init_out_logits, init_out_masks = self._get_pred_class_and_mask(
target, mask_feat)
init_out_bboxes = F.sigmoid(reference_points_unact)
init_out = (init_out_logits, init_out_bboxes, init_out_masks)
else:
init_out = None
return target, reference_points_unact.detach(), enc_out, init_out
def _get_pred_class_and_mask(self, query_embed, mask_feat):
out_query = self.dec_norm(query_embed)
out_logits = self.class_head(out_query)
mask_query_embed = self.mask_query_head(out_query)
_, _, h, w = paddle.shape(mask_feat)
# [b, q, c] x [b, c, h, w] -> [b, q, h, w]
out_mask = paddle.bmm(mask_query_embed, mask_feat.flatten(2)).reshape(
[0, 0, h, w])
return out_logits, out_mask
| PaddleDetection/ppdet/modeling/transformers/mask_dino_transformer.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/transformers/mask_dino_transformer.py",
"repo_id": "PaddleDetection",
"token_count": 12018
} | 79 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from paddle.utils import try_import
from ppdet.core.workspace import register, serializable
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
@register
@serializable
class QAT(object):
def __init__(self, quant_config, print_model):
super(QAT, self).__init__()
self.quant_config = quant_config
self.print_model = print_model
def __call__(self, model):
paddleslim = try_import('paddleslim')
self.quanter = paddleslim.dygraph.quant.QAT(config=self.quant_config)
if self.print_model:
logger.info("Model before quant:")
logger.info(model)
        # For PP-YOLOE, convert the model to deploy mode first.
for layer in model.sublayers():
if hasattr(layer, 'convert_to_deploy'):
layer.convert_to_deploy()
self.quanter.quantize(model)
if self.print_model:
logger.info("Quantized model:")
logger.info(model)
return model
def save_quantized_model(self, layer, path, input_spec=None, **config):
self.quanter.save_quantized_model(
model=layer, path=path, input_spec=input_spec, **config)
@register
@serializable
class PTQ(object):
def __init__(self,
ptq_config,
quant_batch_num=10,
output_dir='output_inference',
fuse=True,
fuse_list=None):
super(PTQ, self).__init__()
self.ptq_config = ptq_config
self.quant_batch_num = quant_batch_num
self.output_dir = output_dir
self.fuse = fuse
self.fuse_list = fuse_list
def __call__(self, model):
paddleslim = try_import('paddleslim')
self.ptq = paddleslim.PTQ(**self.ptq_config)
model.eval()
quant_model = self.ptq.quantize(
model, fuse=self.fuse, fuse_list=self.fuse_list)
return quant_model
def save_quantized_model(self,
quant_model,
quantize_model_path,
input_spec=None):
self.ptq.save_quantized_model(quant_model, quantize_model_path,
input_spec)
| PaddleDetection/ppdet/slim/quant.py/0 | {
"file_path": "PaddleDetection/ppdet/slim/quant.py",
"repo_id": "PaddleDetection",
"token_count": 1282
} | 80 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import re
import random
__all__ = ['create_list']
def create_list(devkit_dir, years, output_dir):
"""
create following list:
1. trainval.txt
2. test.txt
"""
trainval_list = []
test_list = []
for year in years:
trainval, test = _walk_voc_dir(devkit_dir, year, output_dir)
trainval_list.extend(trainval)
test_list.extend(test)
random.shuffle(trainval_list)
with open(osp.join(output_dir, 'trainval.txt'), 'w') as ftrainval:
for item in trainval_list:
ftrainval.write(item[0] + ' ' + item[1] + '\n')
with open(osp.join(output_dir, 'test.txt'), 'w') as fval:
ct = 0
for item in test_list:
ct += 1
fval.write(item[0] + ' ' + item[1] + '\n')
def _get_voc_dir(devkit_dir, year, type):
return osp.join(devkit_dir, 'VOC' + year, type)
def _walk_voc_dir(devkit_dir, year, output_dir):
filelist_dir = _get_voc_dir(devkit_dir, year, 'ImageSets/Main')
annotation_dir = _get_voc_dir(devkit_dir, year, 'Annotations')
img_dir = _get_voc_dir(devkit_dir, year, 'JPEGImages')
trainval_list = []
test_list = []
added = set()
for _, _, files in os.walk(filelist_dir):
for fname in files:
img_ann_list = []
if re.match(r'[a-z]+_trainval\.txt', fname):
img_ann_list = trainval_list
elif re.match(r'[a-z]+_test\.txt', fname):
img_ann_list = test_list
else:
continue
fpath = osp.join(filelist_dir, fname)
for line in open(fpath):
name_prefix = line.strip().split()[0]
if name_prefix in added:
continue
added.add(name_prefix)
ann_path = osp.join(
osp.relpath(annotation_dir, output_dir),
name_prefix + '.xml')
img_path = osp.join(
osp.relpath(img_dir, output_dir), name_prefix + '.jpg')
img_ann_list.append((img_path, ann_path))
return trainval_list, test_list
| PaddleDetection/ppdet/utils/voc_utils.py/0 | {
"file_path": "PaddleDetection/ppdet/utils/voc_utils.py",
"repo_id": "PaddleDetection",
"token_count": 1288
} | 81 |
# C++ Inference Functional Test
The main entry point of the C++ inference functional test is `test_inference_cpp.sh`, which tests model inference based on the C++ inference library.
## 1. Test Coverage Summary
Depending on whether quantization is used during training, the tested models fall into `normal models` and `quantized models`. The C++ inference coverage of the two model types is summarized below:
| Model type | Device | Batch size | TensorRT | MKL-DNN | CPU multi-threading |
| ---- | ---- | ---- | :----: | :----: | :----: |
| normal model | GPU | 1/8 | fp32/fp16 | - | - |
| normal model | CPU | 1/8 | - | fp32 | supported |
| quantized model | GPU | 1/8 | int8 | - | - |
| quantized model | CPU | 1/8 | - | int8 | supported |
## 2. Test Workflow
For environment setup, please follow the [documentation](./install.md) to configure the TIPC runtime environment.
```
# Please set the paddle_inference environment variable, e.g.:
export PADDLE_INFER_DIR=/path/to/paddle_inference
# If the paddle_inference environment variable is not set, you can also pass an argument so the script downloads paddle_inference.tgz automatically, e.g.:
bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 'https://paddle-inference-lib.bj.bcebos.com/2.3.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz'
# If you are not using the docker image paddlepaddle/paddle:latest-gpu-cuda10.1-cudnn7-gcc82-dev,
# please set the TensorRT environment variable, e.g.:
export TENSORRT_ROOT=/usr/local/TensorRT6-cuda10.1-cudnn7
```
### 2.1 Functional Test
First run `prepare.sh` to prepare the data and models, then run `test_inference_cpp.sh` to execute the tests; log files with the `cpp_infer_*.log` suffix are generated under the `test_tipc/output` directory.
```shell
bash test_tipc/prepare.sh ./test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt "cpp_infer"
# Usage 1:
bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt
# Usage 2: specify the paddle_inference.tgz download link; the second argument is the download URL
bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 'https://paddle-inference-lib.bj.bcebos.com/2.3.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz'
# Usage 3: specify both the paddle_inference.tgz download link and the GPU card used for inference; the third argument is the GPU card id
bash test_tipc/test_inference_cpp.sh test_tipc/configs/yolov3/yolov3_darknet53_270e_coco_model_linux_gpu_normal_normal_infer_cpp_linux_gpu_cpu.txt 'https://paddle-inference-lib.bj.bcebos.com/2.3.0/cxx_c/Linux/GPU/x86-64_gcc8.2_avx_mkl_cuda10.1_cudnn7.6.5_trt6.0.1.5/paddle_inference.tgz' '1'
```
After running the inference commands, the run logs are automatically saved under the `test_tipc/output` folder, including the following files:
```shell
test_tipc/output/
|- results_cpp.log # log of the status of each command
|- cpp_infer_cpu_usemkldnn_False_threads_1_precision_fluid_batchsize_1.log # inference log on CPU without MKL-DNN, 1 thread, batch_size=1
|- cpp_infer_cpu_usemkldnn_False_threads_6_precision_fluid_batchsize_1.log # inference log on CPU without MKL-DNN, 6 threads, batch_size=1
|- cpp_infer_gpu_precision_fluid_batchsize_1.log # fp32 inference log on GPU without TensorRT, batch_size=1
|- cpp_infer_gpu_precision_trt_fp16_batchsize_1.log # fp16 inference log on GPU with TensorRT, batch_size=1
......
```
results_cpp.log records the status of every command. If a command runs successfully, it prints:
```
Run successfully with command - python3.7 tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams filename=yolov3_darknet53_270e_coco --output_dir=./output_inference !
Run successfully with command - ./deploy/cpp/build/main --device=gpu --run_mode=fluid --model_dir=./output_inference/yolov3_darknet53_270e_coco --batch_size=8 --image_dir=./dataset/coco/test2017/ --run_benchmark=True > ./test_tipc/output/cpp_infer_gpu_precision_fluid_batchsize_8.log 2>&1 !
......
```
If a command fails, it prints:
```
Run failed with command - python3.7 tools/export_model.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml -o weights=https://paddledet.bj.bcebos.com/models/yolov3_darknet53_270e_coco.pdparams filename=yolov3_darknet53_270e_coco --output_dir=./output_inference !
Run failed with command - ./deploy/cpp/build/main --device=gpu --run_mode=fluid --model_dir=./output_inference/yolov3_darknet53_270e_coco --batch_size=8 --image_dir=./dataset/coco/test2017/ --run_benchmark=True > ./test_tipc/output/cpp_infer_gpu_precision_fluid_batchsize_8.log 2>&1 !
......
```
This makes it easy to tell from the contents of results_cpp.log which command failed.
### 2.2 Accuracy Test
The compare_results.py script checks whether the model predictions match the expected results. The main steps are:
- extract the predicted coordinates from the logs;
- load the previously saved reference coordinates from local files;
- compare the two results against the accuracy expectation; an error is raised when the difference exceeds the configured thresholds.
#### Usage
Run the following command:
```shell
python3.7 test_tipc/compare_results.py --gt_file=./test_tipc/results/cpp_*.txt --log_file=./test_tipc/output/cpp_*.log --atol=1e-3 --rtol=1e-3
```
Parameter description:
- gt_file: path to the previously saved reference results; *.txt files are supported and indexed automatically. By default the files are stored under the test_tipc/result/ folder.
- log_file: path to the prediction logs saved by the infer mode of the test_tipc/test_inference_cpp.sh script; the logs contain the prediction results such as text boxes, predicted text, classes, and so on. Files matching cpp_infer_*.log are also supported.
- atol: absolute tolerance
- rtol: relative tolerance (see the sketch below)
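Conceptually, the check performed by `compare_results.py` boils down to an element-wise tolerance comparison. A minimal sketch, assuming the predicted and reference boxes have already been parsed into NumPy arrays (the values below are hypothetical):
```python
import numpy as np

pred = np.array([[216.6, 195.8, 273.9, 240.3]])  # boxes parsed from cpp_infer_*.log
ref = np.array([[216.6, 195.8, 273.9, 240.2]])   # boxes loaded from test_tipc/results/cpp_*.txt

# passes when |pred - ref| <= atol + rtol * |ref| holds element-wise
assert np.allclose(pred, ref, atol=1e-3, rtol=1e-3), "prediction does not match the saved result"
```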
#### Results
A normal run looks like the following figure:
<img src="compare_cpp_right.png" width="1000">
Output when the results do not match:
<img src="compare_cpp_wrong.png" width="1000">
## 3. Further Reading
This document covers functional testing only; for a more detailed tutorial on C++ inference, please refer to [C++ inference](../../deploy/cpp/README.md).
| PaddleDetection/test_tipc/docs/test_inference_cpp.md/0 | {
"file_path": "PaddleDetection/test_tipc/docs/test_inference_cpp.md",
"repo_id": "PaddleDetection",
"token_count": 3553
} | 82 |
#!/bin/bash
source test_tipc/utils_func.sh
function readlinkf() {
perl -MCwd -e 'print Cwd::abs_path shift' "$1";
}
function func_parser_config() {
strs=$1
IFS=" "
array=(${strs})
tmp=${array[2]}
echo ${tmp}
}
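# return the parent directory of a '/'-separated path (drops the last component)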
function func_parser_dir() {
strs=$1
IFS="/"
array=(${strs})
len=${#array[*]}
dir=""
count=1
for arr in ${array[*]}; do
if [ ${len} = "${count}" ]; then
continue;
else
dir="${dir}/${arr}"
count=$((${count} + 1))
fi
done
echo "${dir}"
}
BASEDIR=$(dirname "$0")
REPO_ROOT_PATH=$(readlinkf ${BASEDIR}/../)
FILENAME=$1
# change gpu to npu in tipc txt configs
sed -i "s/use_gpu:True/use_npu:True/g" $FILENAME
sed -i "s/--device:gpu|cpu/--device:npu|cpu/g" $FILENAME
sed -i "s/--device:gpu/--device:npu/g" $FILENAME
sed -i "s/--device:cpu|gpu/--device:cpu|npu/g" $FILENAME
sed -i "s/trainer:pact_train/trainer:norm_train/g" $FILENAME
sed -i "s/trainer:fpgm_train/trainer:norm_train/g" $FILENAME
sed -i "s/--slim_config _template_pact/ /g" $FILENAME
sed -i "s/--slim_config _template_fpgm/ /g" $FILENAME
sed -i "s/--slim_config _template_kl_quant/ /g" $FILENAME
# python has been updated to version 3.9 for npu backend
sed -i "s/python3.7/python3.9/g" $FILENAME
sed -i 's/\"gpu\"/\"npu\"/g' test_tipc/test_train_inference_python.sh
# parser params
dataline=`cat $FILENAME`
IFS=$'\n'
lines=(${dataline})
# replace training config file
grep -n '.yml' $FILENAME | cut -d ":" -f 1 \
| while read line_num ; do
train_cmd=$(func_parser_value "${lines[line_num-1]}")
trainer_config=$(func_parser_config ${train_cmd})
sed -i 's/use_gpu/use_npu/g' "$REPO_ROOT_PATH/$trainer_config"
sed -i 's/aligned: True/aligned: False/g' "$REPO_ROOT_PATH/$trainer_config"
# fine use_gpu in those included yaml
sub_datalinee=`cat $REPO_ROOT_PATH/$trainer_config`
IFS=$'\n'
sub_lines=(${sub_datalinee})
grep -n '.yml' "$REPO_ROOT_PATH/$trainer_config" | cut -d ":" -f 1 \
| while read sub_line_num; do
sub_config=${sub_lines[sub_line_num-1]}
dst=${#sub_config}-5
sub_path=$(func_parser_dir "${trainer_config}")
sub_config_name=$(echo "$sub_config" | awk -F"'" '{ print $2 }')
sub_config_path="${REPO_ROOT_PATH}${sub_path}/${sub_config_name}"
sed -i 's/use_gpu/use_npu/g' "$sub_config_path"
sed -i 's/aligned: True/aligned: False/g' "$sub_config_path"
done
done
# NPU lacks operators such as deformable_conv and depthwise_conv2d_transpose,
# which affects ips. Here, we reduce the number of coco training samples
# for the npu tipc benchmark. This is a temporary hack.
# TODO(duanyanhui): add vision ops for npu
train_img_num=`cat $REPO_ROOT_PATH/dataset/coco/annotations/instances_train2017.json | grep -o file_name | wc -l`
exp_num=8
if [ ${train_img_num} != ${exp_num} ];then
echo "Replace with npu tipc coco training annotations"
mv $REPO_ROOT_PATH/dataset/coco/annotations/instances_train2017.json $REPO_ROOT_PATH/dataset/coco/annotations/instances_train2017_bak.json
wget https://paddle-device.bj.bcebos.com/tipc/instances_train2017.json
mv instances_train2017.json $REPO_ROOT_PATH/dataset/coco/annotations/
rm -f instances_train2017.json
fi
# pass parameters to test_train_inference_python.sh
cmd="bash test_tipc/test_train_inference_python.sh ${FILENAME} $2"
echo $cmd
eval $cmd
| PaddleDetection/test_tipc/test_train_inference_python_npu.sh/0 | {
"file_path": "PaddleDetection/test_tipc/test_train_inference_python_npu.sh",
"repo_id": "PaddleDetection",
"token_count": 1530
} | 83 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)
# ignore warning log
import warnings
warnings.filterwarnings('ignore')
import paddle
from ppdet.core.workspace import load_config, merge_config
from ppdet.engine import Trainer, TrainerCot, init_parallel_env, set_random_seed, init_fleet_env
from ppdet.engine.trainer_ssod import Trainer_DenseTeacher, Trainer_ARSL, Trainer_Semi_RTDETR
from ppdet.slim import build_slim_model
from ppdet.utils.cli import ArgsParser, merge_args
import ppdet.utils.check as check
from ppdet.utils.logger import setup_logger
logger = setup_logger('train')
def parse_args():
parser = ArgsParser()
parser.add_argument(
"--eval",
action='store_true',
default=False,
help="Whether to perform evaluation in train")
parser.add_argument(
"-r", "--resume", default=None, help="weights path for resume")
parser.add_argument(
"--slim_config",
default=None,
type=str,
help="Configuration file of slim method.")
parser.add_argument(
"--enable_ce",
type=bool,
default=False,
help="If set True, enable continuous evaluation job."
"This flag is only used for internal test.")
parser.add_argument(
"--amp",
action='store_true',
default=False,
help="Enable auto mixed precision training.")
parser.add_argument(
"--fleet", action='store_true', default=False, help="Use fleet or not")
parser.add_argument(
"--use_vdl",
type=bool,
default=False,
help="whether to record the data to VisualDL.")
parser.add_argument(
'--vdl_log_dir',
type=str,
default="vdl_log_dir/scalar",
help='VisualDL logging directory for scalar.')
parser.add_argument(
"--use_wandb",
type=bool,
default=False,
help="whether to record the data to wandb.")
parser.add_argument(
'--save_prediction_only',
action='store_true',
default=False,
help='Whether to save the evaluation results only')
parser.add_argument(
'--profiler_options',
type=str,
default=None,
help="The option of profiler, which should be in "
"format \"key1=value1;key2=value2;key3=value3\"."
"please see ppdet/utils/profiler.py for detail.")
parser.add_argument(
'--save_proposals',
action='store_true',
default=False,
help='Whether to save the train proposals')
parser.add_argument(
'--proposals_path',
type=str,
default="sniper/proposals.json",
help='Train proposals directory')
parser.add_argument(
"--to_static",
action='store_true',
default=False,
help="Enable dy2st to train.")
args = parser.parse_args()
return args
def run(FLAGS, cfg):
# init fleet environment
if cfg.fleet:
init_fleet_env(cfg.get('find_unused_parameters', False))
else:
# init parallel environment if nranks > 1
init_parallel_env()
if FLAGS.enable_ce:
set_random_seed(0)
# build trainer
ssod_method = cfg.get('ssod_method', None)
if ssod_method is not None:
if ssod_method == 'DenseTeacher':
trainer = Trainer_DenseTeacher(cfg, mode='train')
elif ssod_method == 'ARSL':
trainer = Trainer_ARSL(cfg, mode='train')
elif ssod_method == 'Semi_RTDETR':
trainer = Trainer_Semi_RTDETR(cfg, mode='train')
else:
raise ValueError(
"Semi-Supervised Object Detection only no support this method.")
elif cfg.get('use_cot', False):
trainer = TrainerCot(cfg, mode='train')
else:
trainer = Trainer(cfg, mode='train')
# load weights
if FLAGS.resume is not None:
trainer.resume_weights(FLAGS.resume)
elif 'pretrain_student_weights' in cfg and 'pretrain_teacher_weights' in cfg \
and cfg.pretrain_teacher_weights and cfg.pretrain_student_weights:
trainer.load_semi_weights(cfg.pretrain_teacher_weights,
cfg.pretrain_student_weights)
elif 'pretrain_weights' in cfg and cfg.pretrain_weights:
trainer.load_weights(cfg.pretrain_weights)
# training
trainer.train(FLAGS.eval)
def main():
FLAGS = parse_args()
cfg = load_config(FLAGS.config)
merge_args(cfg, FLAGS)
merge_config(FLAGS.opt)
# disable npu in config by default
if 'use_npu' not in cfg:
cfg.use_npu = False
# disable xpu in config by default
if 'use_xpu' not in cfg:
cfg.use_xpu = False
if 'use_gpu' not in cfg:
cfg.use_gpu = False
# disable mlu in config by default
if 'use_mlu' not in cfg:
cfg.use_mlu = False
if cfg.use_gpu:
place = paddle.set_device('gpu')
elif cfg.use_npu:
place = paddle.set_device('npu')
elif cfg.use_xpu:
place = paddle.set_device('xpu')
elif cfg.use_mlu:
place = paddle.set_device('mlu')
else:
place = paddle.set_device('cpu')
if FLAGS.slim_config:
cfg = build_slim_model(cfg, FLAGS.slim_config)
# FIXME: Temporarily solve the priority problem of FLAGS.opt
merge_config(FLAGS.opt)
check.check_config(cfg)
check.check_gpu(cfg.use_gpu)
check.check_npu(cfg.use_npu)
check.check_xpu(cfg.use_xpu)
check.check_mlu(cfg.use_mlu)
check.check_version()
run(FLAGS, cfg)
if __name__ == "__main__":
main()
| PaddleDetection/tools/train.py/0 | {
"file_path": "PaddleDetection/tools/train.py",
"repo_id": "PaddleDetection",
"token_count": 2681
} | 84 |
FROM tensorflow/tensorflow:latest-gpu
COPY requirements.txt /
RUN pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple \
&& pip install -r requirements.txt \ | euryale/Dockerfile/0 | {
"file_path": "euryale/Dockerfile",
"repo_id": "euryale",
"token_count": 64
} | 85 |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
# Modified by Brendan Dolan-Gavitt, 2022
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import multiprocessing
import numpy as np
from pathlib import Path
import torch
import os
import sys
from transformers import GPTJForCausalLM
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../../..")
sys.path.append(dir_path)
def get_weight_data_type(data_type):
if data_type == "fp32":
return np.float32
elif data_type == "fp16":
return np.float16
else:
assert False, f"Invalid weight data type {data_type}"
def split_and_convert_process(i, saved_dir, factor, key, val):
if key.find("input_layernorm.weight") != -1 or key.find("input_layernorm.bias") != -1 or \
key.find("attention.dense.bias") != -1 or key.find("post_attention_layernorm.weight") != -1 or \
key.find("post_attention_layernorm.bias") != -1 or key.find("mlp.dense_4h_to_h.bias") != -1 or \
key.find("final_layernorm.weight") != -1 or key.find("final_layernorm.bias") != -1:
# shared weights, only need to convert the weights of rank 0
if i == 0:
saved_path = saved_dir + "/model." + key + ".bin"
val.tofile(saved_path)
elif key.find("attention.dense.weight") != -1 or key.find("mlp.dense_4h_to_h.weight") != -1:
split_vals = np.split(val, factor, axis=0)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("mlp.dense_h_to_4h.weight") != -1 or key.find("mlp.dense_h_to_4h.bias") != -1:
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
elif key.find("attention.query_key_value.weight") != -1:
split_vals = np.split(val, factor, axis=-1)
for j in range(factor):
saved_path = saved_dir + "/model." + key + ".%d.bin" % (i * factor + j)
split_vals[j].tofile(saved_path)
else:
print("[ERROR] cannot find key '{}'".format(key))
def split_and_convert(args):
saved_dir = args.saved_dir + "/%d-gpu/" % args.infer_gpu_num
if os.path.exists(saved_dir) is False:
os.makedirs(saved_dir)
t_gpu_num = args.trained_gpu_num
i_gpu_num = args.infer_gpu_num
assert (i_gpu_num % t_gpu_num == 0)
factor = (int)(i_gpu_num / t_gpu_num)
model = GPTJForCausalLM.from_pretrained(args.in_file)
try:
config = configparser.ConfigParser()
config["gpt"] = {}
for key in vars(args):
config["gpt"][key] = f"{vars(args)[key]}"
for k, v in vars(model.config).items():
config["gpt"][k] = f"{v}"
config["gpt"]["weight_data_type"] = args.weight_data_type
with open((Path(saved_dir) / "config.ini").as_posix(), 'w') as configfile:
config.write(configfile)
except Exception:
print("Fail to save the config in config.ini.")
np_weight_data_type = get_weight_data_type(args.weight_data_type)
huggingface_model_name_pattern = [
"ln_1.bias",
"ln_1.weight",
"attn.q_proj.weight",
"attn.out_proj.weight",
"mlp.fc_in.bias",
"mlp.fc_in.weight",
"mlp.fc_out.bias",
"mlp.fc_out.weight",
]
ft_model_name_pattern = [
"input_layernorm.bias",
"input_layernorm.weight",
"attention.query_key_value.weight",
"attention.dense.weight",
"mlp.dense_h_to_4h.bias",
"mlp.dense_h_to_4h.weight",
"mlp.dense_4h_to_h.bias",
"mlp.dense_4h_to_h.weight",
]
torch.multiprocessing.set_start_method("spawn")
pool = multiprocessing.Pool(args.processes)
for name, param in model.named_parameters():
if name.find("weight") == -1 and name.find("bias") == -1:
continue
print(name)
if name == 'transformer.wte.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.wte.bin")
elif name == 'transformer.ln_f.bias':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(
saved_dir + "model.final_layernorm.bias.bin")
elif name == 'transformer.ln_f.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(
saved_dir + "model.final_layernorm.weight.bin")
elif name == 'lm_head.weight':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.lm_head.weight.bin")
elif name == 'lm_head.bias':
param.detach().cpu().numpy().astype(np_weight_data_type).tofile(saved_dir + "model.lm_head.bias.bin")
else:
for i in range(len(huggingface_model_name_pattern)):
if name.find(huggingface_model_name_pattern[i]) != -1:
# Special case for QKV weights
if name.find("attn.q_proj.weight") != -1:
layer = name.split('.')[2]
base_k = f'transformer.h.{layer}.'
w = model.state_dict()
QKV_w = torch.stack([
w[base_k + "attn.q_proj.weight"],
w[base_k + "attn.k_proj.weight"],
w[base_k + "attn.v_proj.weight"],
]) # [qkv, n_heads * dim_head, latent_space]
QKV_w = QKV_w.permute(2, 0, 1)
weights = QKV_w.detach().cpu().numpy().astype(np_weight_data_type)
else:
weights = param.detach().cpu().numpy().astype(np_weight_data_type)
# Some weights need to be transposed
if name.find("mlp.fc_in.weight") != -1 or name.find("mlp.fc_out.weight") != -1 or \
name.find("attn.out_proj.weight") != -1:
weights = weights.T
new_name = name.replace("transformer.h.", "layers.").replace(huggingface_model_name_pattern[i],
ft_model_name_pattern[i])
pool.starmap(split_and_convert_process,
[(0, saved_dir, factor, new_name,
weights)], )
pool.close()
pool.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-saved_dir', '-o', type=str, help='file name of output file', required=True)
parser.add_argument('-in_file', '-i', type=str, help='HF model name or directory', required=True)
parser.add_argument('-trained_gpu_num', '-t_g', type=int, help='How many gpus for training', default=1)
parser.add_argument('-infer_gpu_num', '-i_g', type=int, help='How many gpus for inference', required=True)
parser.add_argument("-processes", "-p", type=int, help="How many processes to spawn for conversion (default: 4)",
default=4)
parser.add_argument("-weight_data_type", type=str, default="fp32", choices=["fp32", "fp16"],
help="output weight data type")
args = parser.parse_args()
print("\n=============== Argument ===============")
for key in vars(args):
print("{}: {}".format(key, vars(args)[key]))
print("========================================")
split_and_convert(args)
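# Illustrative invocation (the model name, output path and GPU count below are placeholders):
#   python huggingface_gptj_convert.py -i EleutherAI/gpt-j-6B -o ./gptj-ft -i_g 2 -weight_data_type fp16
# The per-GPU weight shards and config.ini are then written under ./gptj-ft/2-gpu/.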
| fauxpilot/converter/huggingface_gptj_convert.py/0 | {
"file_path": "fauxpilot/converter/huggingface_gptj_convert.py",
"repo_id": "fauxpilot",
"token_count": 3931
} | 86 |
There are several ways to connect to the FauxPilot server: for example, through the OpenAI API bindings, the Copilot plugin, or plain REST calls.
## API
Once everything is up and running, you should have a server listening for requests on `http://localhost:5000`. You can now talk to it using the standard [OpenAI API](https://beta.openai.com/docs/api-reference/) (although the full API isn't implemented yet). For example, from Python, using the [OpenAI Python bindings](https://github.com/openai/openai-python):
```python
$ ipython
Python 3.8.10 (default, Mar 15 2022, 12:22:08)
Type 'copyright', 'credits' or 'license' for more information
IPython 8.2.0 -- An enhanced Interactive Python. Type '?' for help.
In [1]: import openai
In [2]: openai.api_key = 'dummy'
In [3]: openai.api_base = 'http://127.0.0.1:5000/v1'
In [4]: result = openai.Completion.create(model='codegen', prompt='def hello', max_tokens=16, temperature=0.1, stop=["\n\n"])
In [5]: result
Out[5]:
<OpenAIObject text_completion id=cmpl-6hqu8Rcaq25078IHNJNVooU4xLY6w at 0x7f602c3d2f40> JSON: {
"choices": [
{
"finish_reason": "stop",
"index": 0,
"logprobs": null,
"text": "() {\n return \"Hello, World!\";\n}"
}
],
"created": 1659492191,
"id": "cmpl-6hqu8Rcaq25078IHNJNVooU4xLY6w",
"model": "codegen",
"object": "text_completion",
"usage": {
"completion_tokens": 15,
"prompt_tokens": 2,
"total_tokens": 17
}
}
```
## Curl with RESTful APIs
```bash
$ curl -s -H "Accept: application/json" -H "Content-type: application/json" -X POST -d '{"prompt":"def hello","max_tokens":100,"temperature":0.1,"stop":["\n\n"]}' http://localhost:5000/v1/engines/codegen/completions
```
## Copilot Plugin
Perhaps more excitingly, you can configure the official [VSCode Copilot plugin](https://marketplace.visualstudio.com/items?itemName=GitHub.copilot) to use your local server. Just edit your `settings.json` to add:
```json
"github.copilot.advanced": {
"debug.overrideEngine": "codegen",
"debug.testOverrideProxyUrl": "http://localhost:5000",
"debug.overrideProxyUrl": "http://localhost:5000"
}
```
And you should be able to use Copilot with your own locally hosted suggestions! Of course, probably a lot of stuff is subtly broken. In particular, the probabilities returned by the server are partly fake. Fixing this would require changing FasterTransformer so that it can return log-probabilities for the top k tokens rather than just the chosen token.
Another issue with using the Copilot plugin is that its tokenizer (the component that turns text into a sequence of integers for the model) is slightly different from the one used by CodeGen, so the plugin will sometimes send a request that is longer than CodeGen can handle. You can work around this by replacing the `vocab.bpe` and `tokenizer.json` found in the Copilot extension (something like `.vscode/extensions/github.copilot-[version]/dist/`) with the ones found [here](https://github.com/moyix/fauxpilot/tree/main/copilot_proxy/cgtok/openai_format).
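A rough sketch of that swap on Linux/macOS (the extension version and paths below are placeholders — check your own `~/.vscode/extensions` directory):
```bash
# Paths are illustrative; adjust the Copilot extension version to match your install.
COPILOT_DIST=~/.vscode/extensions/github.copilot-<version>/dist
git clone https://github.com/moyix/fauxpilot
cp fauxpilot/copilot_proxy/cgtok/openai_format/vocab.bpe "$COPILOT_DIST/"
cp fauxpilot/copilot_proxy/cgtok/openai_format/tokenizer.json "$COPILOT_DIST/"
```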
Have fun!
| fauxpilot/documentation/client.md/0 | {
"file_path": "fauxpilot/documentation/client.md",
"repo_id": "fauxpilot",
"token_count": 1051
} | 87 |
#!/usr/bin/env bash
source .env
# On newer versions, docker-compose is docker compose
docker compose down --remove-orphans || docker-compose down --remove-orphans
| fauxpilot/shutdown.sh/0 | {
"file_path": "fauxpilot/shutdown.sh",
"repo_id": "fauxpilot",
"token_count": 48
} | 88 |
1. Go to https://github.com/settings/tokens and create a new token by clicking the `Generate New Token` button. Give it read access to public repositories.
2. Copy the access token and set the env variable via `export GH_ACCESS_TOKEN=<copied access token>`.
3. `cd dataset_creation` and run `python clone_hf_repos.py`
4. The data in the `hf_public_repos` folder in the current repo should then look like this:
```
accelerate candle datasets diffusers notebooks pytorch-image-models tokenizers trl
alignment-handbook chat-ui deep-rl-class diffusion-models-class peft text-generation-inference transformers
```
5. Download the NLTK punkt tokenizer:
```python
import nltk
nltk.download('punkt')
```
6. Run Data Pipeline on a machine with 16 CPUs:
```
python pipeline.py
```
7. Collate and push to hub:
```
python prepare_hf_dataset.py
``` | get-data/README.md/0 | {
"file_path": "get-data/README.md",
"repo_id": "get-data",
"token_count": 303
} | 89 |
# coding=utf-8
# Copyright 2024 Sourab Mangrulkar. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import re
from typing import Callable
from datatrove.pipeline.readers.base import BaseDiskReader
from datatrove.io import DataFolderLike
# Block the following formats.
IMAGE = ["png", "jpg", "jpeg", "gif"]
VIDEO = ["mp4", "jfif"]
DOC = ["key", "PDF", "pdf", "docx", "xlsx", "pptx", "csv", "tsv", "txt"]
AUDIO = ["flac", "ogg", "mid", "webm", "wav", "mp3"]
ARCHIVE = ["jar", "aar", "gz", "zip", "bz2"]
MODEL = ["onnx", "pickle", "model", "neuron"]
OTHERS = [
"npy",
"index",
"inv",
"index",
"DS_Store",
"rdb",
"pack",
"idx",
"glb",
"gltf",
"len",
"otf",
"unitypackage",
"ttf",
"xz",
"pcm",
"opus",
"package-lock.json",
"yarn.lock",
"Cargo.lock",
"poetry.lock",
"lock",
]
ANTI_FOMATS = tuple(IMAGE + VIDEO + DOC + AUDIO + ARCHIVE + OTHERS)
def segment_blocks(content):
cells = []
cell_types = []
for cell in content["cells"]:
if len(cell["source"]) > 0:
output = "_____no_output_____"
if "outputs" in cell.keys():
if len(cell["outputs"]) > 0:
if "text" in cell["outputs"][0].keys():
output = cell["outputs"][0]["text"]
cells.append(["".join(cell["source"]), "".join(output)])
cell_types.append(cell["cell_type"])
return cells, cell_types
def segment(sample):
try:
content = json.loads(sample)
if "py" in json.dumps(content["metadata"]):
cells, types = segment_blocks(content)
cell_type_groups = [list(g) for k, g in itertools.groupby(types)]
cell_types = [k for k, g in itertools.groupby(types)]
cell_groups = []
group_start = 0
for g in cell_type_groups:
cell_groups.append(cells[group_start : group_start + len(g)])
group_start += len(g)
else:
cell_groups = [[["empty"]]]
cell_types = ["empty"]
cell_type_groups = [["empty"]]
except: # noqa: E722
cell_groups = [[["empty"]]]
cell_types = ["empty"]
cell_type_groups = [["empty"]]
content = parse_data(cell_groups, cell_types)
return content
def clean_markdown(text):
text = re.sub(r"<.*?>", "", text)
text = re.sub(r"\n+", "", text)
text = text.replace("#", "")
return text
def parse_data(cells, types):
"""Parse data into markdown-code pairs"""
content = ""
if len(types) > 0:
if types[0] == "code":
# add dummy markdown
cells.insert(0, [["empty"]])
types.insert(0, "markdown")
if len(types) > 0:
if types[-1] == "markdown":
cells = cells[:-1]
types = types[:-1]
if len(cells) % 2 == 0:
inner_markdowns = [cells[j] for j in range(len(cells)) if j % 2 == 0]
inner_code_snippets = [
cells[j + 1] for j in range(len(cells) - 1) if j % 2 == 0
]
content += "<jupyter_start>"
for markdown_block, code_snippet in zip(
inner_markdowns, inner_code_snippets
):
markdown_block = " ".join(
[clean_markdown(block[0]) for block in markdown_block]
)
code = "\n".join([snippet[0] for snippet in code_snippet])
output = [snippet[1] for snippet in code_snippet][-1]
content += build_content(markdown_block, code, output)
return content
def build_content(markdown, code, output):
# Define a regex pattern to match Markdown and HTML base64 image syntax
image_pattern = re.compile(r"!\[.*?\]\(.*?\)|<img.*?>|data:image\/.*?;base64,.*?")
markdown = re.sub(image_pattern, "", markdown)
if len(output) > 1000:
output_str = output[:1000] + "[...]"
elif output == "_____no_output_____":
output_str = "<empty_output>"
else:
output_str = output
if markdown.strip() != "empty":
content = f"<jupyter_text>{markdown.strip()}<jupyter_code>{code.strip()}<jupyter_output>{output_str.strip()}"
else:
content = f"<jupyter_code>{code.strip()}<jupyter_output>{output_str.strip()}"
return content
class PersonalCopilotDatasetReader(BaseDiskReader):
name = "👾 PersonalCopilot"
def __init__(
self,
data_folder: DataFolderLike,
limit: int = -1,
progress: bool = False,
adapter: Callable = None,
text_key: str = "text",
id_key: str = "id",
default_metadata: dict = None,
recursive: bool = True,
glob_pattern: str | None = None,
):
super().__init__(
data_folder,
limit,
progress,
adapter,
text_key,
id_key,
default_metadata,
recursive,
glob_pattern,
)
self.empty_warning = False
def read_file(self, filepath: str):
try:
if filepath.endswith(ANTI_FOMATS) or any(
k in filepath for k in [".git", "__pycache__", "xcodeproj"]
):
content = ""
else:
with self.data_folder.open(filepath, "r", encoding="utf-8") as file:
content = file.read()
if filepath.endswith("ipynb"):
content = segment(content)
except Exception:
content = ""
if not content:
content = "remove"
data = {"text": content}
with self.track_time():
document = self.get_document_from_dict(data, filepath, 0)
document.metadata["file_path"] = document.metadata["file_path"].split(
self.data_folder.path
)[-1][1:]
document.metadata["repo_id"] = document.metadata["file_path"].split("/")[0]
yield document
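# Rough usage sketch with datatrove (the executor wiring below is illustrative; it assumes
# the repositories were cloned into `hf_public_repos` as described in the README):
#   from datatrove.executor import LocalPipelineExecutor
#   LocalPipelineExecutor(
#       pipeline=[PersonalCopilotDatasetReader(data_folder="hf_public_repos")],
#       tasks=1,
#   ).run()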
| get-data/reader.py/0 | {
"file_path": "get-data/reader.py",
"repo_id": "get-data",
"token_count": 3189
} | 90 |
import numpy as np
from easydict import EasyDict as edict
config = edict()
#default training/dataset config
config.num_classes = 68
config.record_img_size = 384
config.base_scale = 256
config.input_img_size = 128
config.output_label_size = 64
config.label_xfirst = False
config.losstype = 'heatmap'
config.net_coherent = False
config.multiplier = 1.0
config.gaussian = 0
# network settings
network = edict()
network.hourglass = edict()
network.hourglass.net_coherent = False
network.hourglass.net_sta = 0
network.hourglass.net_n = 3
network.hourglass.net_dcn = 0
network.hourglass.net_stacks = 2
network.hourglass.net_block = 'resnet'
network.hourglass.net_binarize = False
network.hourglass.losstype = 'heatmap'
network.sdu = edict()
network.sdu.net_coherent = False
network.sdu.net_sta = 1
network.sdu.net_n = 3
network.sdu.net_dcn = 3
network.sdu.net_stacks = 2
network.sdu.net_block = 'cab'
network.sdu.net_binarize = False
network.sdu.losstype = 'heatmap'
# dataset settings
dataset = edict()
dataset.i2d = edict()
dataset.i2d.dataset = '2D'
dataset.i2d.landmark_type = '2d'
dataset.i2d.dataset_path = './data_2d'
dataset.i2d.num_classes = 68
dataset.i2d.record_img_size = 384
dataset.i2d.base_scale = 256
dataset.i2d.input_img_size = 128
dataset.i2d.output_label_size = 64
dataset.i2d.label_xfirst = False
dataset.i2d.val_targets = ['ibug', 'cofw_testset', '300W']
dataset.i3d = edict()
dataset.i3d.dataset = '3D'
dataset.i3d.landmark_type = '3d'
dataset.i3d.dataset_path = './data_3d'
dataset.i3d.num_classes = 68
dataset.i3d.record_img_size = 384
dataset.i3d.base_scale = 256
dataset.i3d.input_img_size = 128
dataset.i3d.output_label_size = 64
dataset.i3d.label_xfirst = False
dataset.i3d.val_targets = ['AFLW2000-3D']
# default settings
default = edict()
# default network
default.network = 'hourglass'
default.pretrained = ''
default.pretrained_epoch = 0
# default dataset
default.dataset = 'i2d'
default.frequent = 20
default.verbose = 200
default.kvstore = 'device'
default.prefix = 'model/A'
default.end_epoch = 10000
default.lr = 0.00025
default.wd = 0.0
default.per_batch_size = 20
default.lr_step = '16000,24000,30000'
def generate_config(_network, _dataset):
for k, v in network[_network].items():
config[k] = v
default[k] = v
for k, v in dataset[_dataset].items():
config[k] = v
default[k] = v
config.network = _network
config.dataset = _dataset
| insightface/alignment/heatmap/sample_config.py/0 | {
"file_path": "insightface/alignment/heatmap/sample_config.py",
"repo_id": "insightface",
"token_count": 1016
} | 91 |
import sys
from torch.utils.data import Dataset, DataLoader
import os
import os.path as osp
import glob
import numpy as np
import random
import cv2
import pickle as pkl
import json
import h5py
import torch
import matplotlib.pyplot as plt
from lib.utils.misc import process_dataset_for_video
class SurrealDataset(Dataset):
def __init__(self, config, is_train=True):
self.is_train = is_train
self.frame_interval = config.DATA.FRAME_INTERVAL
# randomization will lead to inferior performance
# since diff will only be used when training
self.data_path = config.DATA.TRAIN_PATH if self.is_train else config.DATA.VALID_PATH
self.use_same_norm_2d = config.DATA.USE_SAME_NORM_2D
self.use_same_norm_3d = config.DATA.USE_SAME_NORM_2D
self.seed_set = False
self.head_root_distance = 1 / config.TRAIN.CAMERA_SKELETON_DISTANCE
# whether to use dataset adapted from k[MaÌ]inetics
self.use_gt = config.USE_GT
self.min_diff_dist = config.DATA.MIN_DIFF_DIST
self.bound_azim = config.TRAIN.BOUND_AZIM # y axis rotation
self.bound_elev = config.TRAIN.BOUND_ELEV
self._load_data_set()
def get_seqnames(self):
return self.sequence_names
def _load_data_set(self):
# self.v3d_2d_to_ours = np.arange(17)
if self.is_train:
print('start loading surreal {} data.'.format("train" if self.is_train else "test"))
key = "original_joint_2d_gt" if self.use_gt else "joint_2d_pre"
assert self.use_gt
fp = h5py.File(self.data_path, "r")
self.kp2ds = np.array(fp[key])
self.kp2ds[:, :, 0] = (self.kp2ds[:, :, 0] - 160.0) / 160.0
self.kp2ds[:, :, 1] = (self.kp2ds[:, :, 1] - 160.0) / 160.0
# locate root at the origin
# self.kp2ds[:, 12] = (self.kp2ds[:, 8] + self.kp2ds[:, 9]) / 2
self.kp2ds = self.kp2ds - self.kp2ds[:, 13:14]
self.kp2ds[:, 13] = 1e-5
# imagenames will be used to sample frames
self.imagenames = [name.decode() for name in fp['imagename'][:]]
if 'seqname' not in fp.keys():
fp.close()
print("Process corresponding dataset...")
process_dataset_for_video(self.data_path, is_surreal=True)
fp = h5py.File(self.data_path, "r")
self.sequence_lens = np.array(fp['seqlen'])
self.sequence_names = [name.decode() for name in fp['seqname'][:]]
self.indices_in_seq = np.array(fp['index_in_seq'])
# normlize again so that the mean distance of head and root is 1/c
if not self.use_same_norm_2d:
factor_gt = self.head_root_distance / (np.tile(np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 2)) + 1e-8)
else:
factor_gt = self.head_root_distance / np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).mean()
self.kp2ds = self.kp2ds * factor_gt
self.kp3ds = np.array(fp['joint_3d_gt'])
# self.kp3ds[:, 12] = (self.kp3ds[:, 8] + self.kp3ds[:, 9]) / 2
factor_3d = np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).mean()
factor_filename = "../data/surreal_{}_factor_3d.pkl".format("train" if self.is_train else "test")
if not self.use_same_norm_3d and not osp.exists(factor_filename):
factor_3d = (np.tile(np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 3)) + 1e-8)
with open(factor_filename, "wb") as f:
pkl.dump(factor_3d, f)
fp.close()
print('finished load surreal {} data, total {} samples'.format("train" if self.is_train else "test", \
self.kp2ds.shape[0]))
# get random diff1
self.diff_indices = []
for index in range(self.kp2ds.shape[0]):
index_in_seq = self.indices_in_seq[index]
seq_len = self.sequence_lens[index]
if seq_len == 1:
                diff_index = index
elif index_in_seq + self.frame_interval < seq_len:
diff_index = index + self.frame_interval
else:
diff_index = index - self.frame_interval
self.diff_indices.append(diff_index)
# generate the rotation factors
num_examples = self.kp2ds.shape[0]
np.random.seed(2019)
rotation_y = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_azim
rotation_x = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_elev
rotation_z = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_elev / 2
rotation_1 = np.concatenate((rotation_y, rotation_x, rotation_z), axis=1)
rotation_2 = rotation_1.copy()
rotation_2[:, 0] = rotation_2[:, 0] + np.pi
self.rotation = np.concatenate((rotation_1, rotation_2), axis=0)
np.random.shuffle(self.rotation)
self.rotation = torch.from_numpy(self.rotation).float()
self.kp2ds = torch.from_numpy(self.kp2ds).float()
self.kp3ds = torch.from_numpy(self.kp3ds).float()
def __len__(self):
return self.kp2ds.shape[0]
def __getitem__(self, index):
if not self.seed_set:
self.seed_set = True
random.seed(index)
np.random.seed(index)
seq_len = self.sequence_lens[index]
index_in_seq = self.indices_in_seq[index]
kps_3d = self.kp3ds[index]
rot = self.rotation[index]
# index in its sequence
kps_2d = self.kp2ds[index]
kps_3d = self.kp3ds[index]
diff1 = self.kp2ds[self.diff_indices[index]]
if seq_len == 1:
diff_dist = 0
else:
diff_dist = np.random.randint(-index_in_seq, seq_len-index_in_seq)
while abs(diff_dist) < self.min_diff_dist:
diff_dist = np.random.randint(-index_in_seq, seq_len-index_in_seq)
diff2_index = index + diff_dist
diff2 = self.kp2ds[diff2_index]
# current form: F * J * 2
# we need to swap the last two axis, so that the item will be in the form J * 2 * F where
# J is the number of keypoints and F is the number of frames
# kps_2d = kps_2d.permute(1, 2, 0).contiguous()
# diff = self.diff[all_indices].permute(1, 2, 0).contiguous()
kps_2d = self.kp2ds[index]
rot = self.rotation[index]
# the flag will always be 1 when no extra data is used
# flag = self.flags[index]
# for valdiation, simply ignore scale
scale = 0
return kps_2d, kps_3d, rot, diff1, diff2, scale
| insightface/body/human_pose/ambiguity_aware/lib/dataloader/surreal.py/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/lib/dataloader/surreal.py",
"repo_id": "insightface",
"token_count": 3253
} | 92 |
## FRVT-Implementation
<div align="left">
<img src="https://insightface.ai/assets/img/custom/logo3.jpg" width="240"/>
</div>
## Introduction
We provide a [FRVT-1:1](https://pages.nist.gov/frvt/html/frvt11.html) implementation example here. One can easily build a FRVT-1:1 submission by simply putting InsightFace-trained ONNX models into the codebase.
Coming soon.
| insightface/challenges/frvt-impl/README.md/0 | {
"file_path": "insightface/challenges/frvt-impl/README.md",
"repo_id": "insightface",
"token_count": 126
} | 93 |
cimport cython
import numpy as np
cimport numpy as np
DTYPE = np.float32
ctypedef np.float32_t DTYPE_t
def anchors_cython(int height, int width, int stride, np.ndarray[DTYPE_t, ndim=2] base_anchors):
"""
Parameters
----------
height: height of plane
width: width of plane
stride: stride ot the original image
anchors_base: (A, 4) a base set of anchors
Returns
-------
all_anchors: (height, width, A, 4) ndarray of anchors spreading over the plane
"""
cdef unsigned int A = base_anchors.shape[0]
cdef np.ndarray[DTYPE_t, ndim=4] all_anchors = np.zeros((height, width, A, 4), dtype=DTYPE)
cdef unsigned int iw, ih
cdef unsigned int k
cdef unsigned int sh
cdef unsigned int sw
for iw in range(width):
sw = iw * stride
for ih in range(height):
sh = ih * stride
for k in range(A):
all_anchors[ih, iw, k, 0] = base_anchors[k, 0] + sw
all_anchors[ih, iw, k, 1] = base_anchors[k, 1] + sh
all_anchors[ih, iw, k, 2] = base_anchors[k, 2] + sw
all_anchors[ih, iw, k, 3] = base_anchors[k, 3] + sh
return all_anchors | insightface/detection/retinaface/rcnn/cython/anchors.pyx/0 | {
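# Rough usage sketch from Python, assuming the extension has been built with cythonize
# (the import path below mirrors this file's location and is illustrative):
#   from rcnn.cython.anchors import anchors_cython
#   all_anchors = anchors_cython(40, 40, 16, base_anchors.astype(np.float32))
#   # -> ndarray of shape (40, 40, A, 4): every base anchor shifted to each stride-16 cell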
"file_path": "insightface/detection/retinaface/rcnn/cython/anchors.pyx",
"repo_id": "insightface",
"token_count": 546
} | 94 |
import numpy as np
from easydict import EasyDict as edict
config = edict()
# network related params
config.PIXEL_MEANS = np.array([103.939, 116.779, 123.68])
config.PIXEL_STDS = np.array([1.0, 1.0, 1.0])
config.PIXEL_SCALE = 1.0
config.IMAGE_STRIDE = 0
# dataset related params
config.NUM_CLASSES = 2
config.PRE_SCALES = [(1200, 1600)
] # first is scale (the shorter side); second is max size
config.SCALES = [(640, 640)
] # first is scale (the shorter side); second is max size
#config.SCALES = [(800, 800)] # first is scale (the shorter side); second is max size
config.ORIGIN_SCALE = False
_ratio = (1., )
RAC_SSH = {
'32': {
'SCALES': (32, 16),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'16': {
'SCALES': (8, 4),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'8': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
}
_ratio = (1., 1.5)
RAC_SSH2 = {
'32': {
'SCALES': (32, 16),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'16': {
'SCALES': (8, 4),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'8': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
}
_ratio = (1., 1.5)
RAC_SSH3 = {
'32': {
'SCALES': (32, 16),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'16': {
'SCALES': (8, 4),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'8': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'4': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
}
RAC_RETINA = {}
_ratios = (1.0, )
_ass = 2.0**(1.0 / 3)
_basescale = 1.0
for _stride in [4, 8, 16, 32, 64]:
key = str(_stride)
value = {'BASE_SIZE': 16, 'RATIOS': _ratios, 'ALLOWED_BORDER': 9999}
scales = []
for _ in range(3):
scales.append(_basescale)
_basescale *= _ass
value['SCALES'] = tuple(scales)
RAC_RETINA[key] = value
config.RPN_ANCHOR_CFG = RAC_SSH #default
config.NET_MODE = 2
config.HEAD_MODULE = 'SSH'
#config.HEAD_MODULE = 'RF'
config.LR_MODE = 0
config.LANDMARK_LR_MULT = 2.0
config.HEAD_FILTER_NUM = 256
config.CONTEXT_FILTER_RATIO = 1
config.max_feat_channel = 9999
config.USE_CROP = True
config.USE_FPN = True
config.USE_DCN = 0
config.FACE_LANDMARK = True
config.USE_OCCLUSION = False
config.USE_BLUR = False
config.MORE_SMALL_BOX = True
config.LAYER_FIX = False
config.CASCADE = 0
config.CASCADE_MODE = 1
#config.CASCADE_CLS_STRIDES = [16,8,4]
#config.CASCADE_BBOX_STRIDES = [64,32]
config.CASCADE_CLS_STRIDES = [64, 32, 16, 8, 4]
config.CASCADE_BBOX_STRIDES = [64, 32, 16, 8, 4]
#config.CASCADE_BBOX_STRIDES = [64,32,16,8]
config.HEAD_BOX = False
config.DENSE_ANCHOR = False
config.USE_MAXOUT = 0
config.SHARE_WEIGHT_BBOX = False
config.SHARE_WEIGHT_LANDMARK = False
config.RANDOM_FEAT_STRIDE = False
config.NUM_CPU = 4
config.MIXUP = 0.0
config.USE_3D = False
#config.BBOX_MASK_THRESH = 0
config.COLOR_MODE = 2
config.COLOR_JITTERING = 0.125
#config.COLOR_JITTERING = 0
#config.COLOR_JITTERING = 0.2
config.TRAIN = edict()
config.TRAIN.IMAGE_ALIGN = 0
config.TRAIN.MIN_BOX_SIZE = 0
config.BBOX_MASK_THRESH = config.TRAIN.MIN_BOX_SIZE
# R-CNN and RPN
# size of images for each device, 2 for rcnn, 1 for rpn and e2e
config.TRAIN.BATCH_IMAGES = 8
# e2e changes behavior of anchor loader and metric
config.TRAIN.END2END = True
# group images with similar aspect ratio
config.TRAIN.ASPECT_GROUPING = False
# RPN anchor loader
# rpn anchors batch size
config.TRAIN.RPN_ENABLE_OHEM = 2
config.TRAIN.OHEM_MODE = 1
config.TRAIN.RPN_BATCH_SIZE = 256
# rpn anchors sampling params
config.TRAIN.RPN_FG_FRACTION = 0.25
config.TRAIN.RPN_POSITIVE_OVERLAP = 0.5
config.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
if config.CASCADE > 0:
config.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
config.TRAIN.CASCADE_OVERLAP = [0.4, 0.5]
config.TRAIN.RPN_CLOBBER_POSITIVES = False
config.TRAIN.RPN_FORCE_POSITIVE = False
# rpn bounding box regression params
config.TRAIN.BBOX_STDS = (1.0, 1.0, 1.0, 1.0)
config.TRAIN.LANDMARK_STD = 1.0
config.TEST = edict()
# R-CNN testing
# use rpn to generate proposal
config.TEST.HAS_RPN = False
# size of images for each device
config.TEST.BATCH_IMAGES = 1
# RPN proposal
config.TEST.CXX_PROPOSAL = True
config.TEST.RPN_NMS_THRESH = 0.3
config.TEST.RPN_PRE_NMS_TOP_N = 1000
config.TEST.RPN_POST_NMS_TOP_N = 3000
#config.TEST.RPN_MIN_SIZE = config.RPN_FEAT_STRIDE
#config.TEST.RPN_MIN_SIZE = [0,0,0]
# RCNN nms
config.TEST.NMS = 0.3
config.TEST.SCORE_THRESH = 0.05
config.TEST.IOU_THRESH = 0.5
# network settings
network = edict()
network.ssh = edict()
network.mnet = edict()
#network.mnet.pretrained = 'model/mnasnet'
#network.mnet.pretrained = 'model/mobilenetv2_0_5'
#network.mnet.pretrained = 'model/mobilenet_0_5'
#network.mnet.MULTIPLIER = 0.5
#network.mnet.pretrained = 'model/mobilenet_0_25'
#network.mnet.pretrained_epoch = 0
#network.mnet.PIXEL_MEANS = np.array([0.406, 0.456, 0.485])
#network.mnet.PIXEL_STDS = np.array([0.225, 0.224, 0.229])
#network.mnet.PIXEL_SCALE = 255.0
network.mnet.FIXED_PARAMS = ['^stage1', '^.*upsampling']
network.mnet.BATCH_IMAGES = 16
network.mnet.HEAD_FILTER_NUM = 64
network.mnet.CONTEXT_FILTER_RATIO = 1
network.mnet.PIXEL_MEANS = np.array([0.0, 0.0, 0.0])
network.mnet.PIXEL_STDS = np.array([1.0, 1.0, 1.0])
network.mnet.PIXEL_SCALE = 1.0
#network.mnet.pretrained = 'model/mobilenetfd_0_25' #78
#network.mnet.pretrained = 'model/mobilenetfd2' #75
network.mnet.pretrained = 'model/mobilenet025fd0' #78
#network.mnet.pretrained = 'model/mobilenet025fd1' #75
#network.mnet.pretrained = 'model/mobilenet025fd2' #
network.mnet.pretrained_epoch = 0
network.mnet.max_feat_channel = 8888
network.mnet.COLOR_MODE = 1
network.mnet.USE_CROP = True
network.mnet.RPN_ANCHOR_CFG = RAC_SSH
network.mnet.LAYER_FIX = True
network.mnet.LANDMARK_LR_MULT = 2.5
network.resnet = edict()
#network.resnet.pretrained = 'model/ResNet50_v1d'
#network.resnet.pretrained = 'model/resnet-50'
network.resnet.pretrained = 'model/resnet-152'
#network.resnet.pretrained = 'model/senet154'
#network.resnet.pretrained = 'model/densenet161'
network.resnet.pretrained_epoch = 0
#network.mnet.PIXEL_MEANS = np.array([103.939, 116.779, 123.68])
#network.mnet.PIXEL_STDS = np.array([57.375, 57.12, 58.393])
#network.resnet.PIXEL_MEANS = np.array([0.406, 0.456, 0.485])
#network.resnet.PIXEL_STDS = np.array([0.225, 0.224, 0.229])
#network.resnet.PIXEL_SCALE = 255.0
network.resnet.lr_step = '1,2,3,4,5,55,68,80'
network.resnet.lr = 0.001
network.resnet.PIXEL_MEANS = np.array([0.0, 0.0, 0.0])
network.resnet.PIXEL_STDS = np.array([1.0, 1.0, 1.0])
network.resnet.PIXEL_SCALE = 1.0
network.resnet.FIXED_PARAMS = ['^stage1', '^.*upsampling']
network.resnet.BATCH_IMAGES = 8
network.resnet.HEAD_FILTER_NUM = 256
network.resnet.CONTEXT_FILTER_RATIO = 1
network.resnet.USE_DCN = 2
network.resnet.RPN_BATCH_SIZE = 256
network.resnet.RPN_ANCHOR_CFG = RAC_RETINA
network.resnet.USE_DCN = 0
network.resnet.pretrained = 'model/resnet-50'
network.resnet.RPN_ANCHOR_CFG = RAC_SSH
# dataset settings
dataset = edict()
dataset.widerface = edict()
dataset.widerface.dataset = 'widerface'
dataset.widerface.image_set = 'train'
dataset.widerface.test_image_set = 'val'
dataset.widerface.root_path = 'data'
dataset.widerface.dataset_path = 'data/widerface'
dataset.widerface.NUM_CLASSES = 2
dataset.retinaface = edict()
dataset.retinaface.dataset = 'retinaface'
dataset.retinaface.image_set = 'train'
dataset.retinaface.test_image_set = 'val'
dataset.retinaface.root_path = 'data'
dataset.retinaface.dataset_path = 'data/retinaface'
dataset.retinaface.NUM_CLASSES = 2
# default settings
default = edict()
config.FIXED_PARAMS = ['^conv1', '^conv2', '^conv3', '^.*upsampling']
#config.FIXED_PARAMS = ['^.*upsampling']
#config.FIXED_PARAMS = ['^conv1', '^conv2', '^conv3']
#config.FIXED_PARAMS = ['^conv0', '^stage1', 'gamma', 'beta'] #for resnet
# default network
default.network = 'resnet'
default.pretrained = 'model/resnet-152'
#default.network = 'resnetssh'
default.pretrained_epoch = 0
# default dataset
default.dataset = 'retinaface'
default.image_set = 'train'
default.test_image_set = 'val'
default.root_path = 'data'
default.dataset_path = 'data/retinaface'
# default training
default.frequent = 20
default.kvstore = 'device'
# default e2e
default.prefix = 'model/retinaface'
default.end_epoch = 10000
default.lr_step = '55,68,80'
default.lr = 0.01
default.wd = 0.0005
def generate_config(_network, _dataset):
for k, v in network[_network].items():
if k in config:
config[k] = v
elif k in default:
default[k] = v
if k in config.TRAIN:
config.TRAIN[k] = v
for k, v in dataset[_dataset].items():
if k in config:
config[k] = v
elif k in default:
default[k] = v
if k in config.TRAIN:
config.TRAIN[k] = v
config.network = _network
config.dataset = _dataset
config.RPN_FEAT_STRIDE = []
num_anchors = []
for k in config.RPN_ANCHOR_CFG:
config.RPN_FEAT_STRIDE.append(int(k))
_num_anchors = len(config.RPN_ANCHOR_CFG[k]['SCALES']) * len(
config.RPN_ANCHOR_CFG[k]['RATIOS'])
if config.DENSE_ANCHOR:
_num_anchors *= 2
config.RPN_ANCHOR_CFG[k]['NUM_ANCHORS'] = _num_anchors
num_anchors.append(_num_anchors)
config.RPN_FEAT_STRIDE = sorted(config.RPN_FEAT_STRIDE, reverse=True)
for j in range(1, len(num_anchors)):
assert num_anchors[0] == num_anchors[j]
config.NUM_ANCHORS = num_anchors[0]
| insightface/detection/retinaface/rcnn/sample_config.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/sample_config.py",
"repo_id": "insightface",
"token_count": 4775
} | 95 |
from .load_model import load_checkpoint
from .save_model import save_checkpoint
def combine_model(prefix1, epoch1, prefix2, epoch2, prefix_out, epoch_out):
args1, auxs1 = load_checkpoint(prefix1, epoch1)
args2, auxs2 = load_checkpoint(prefix2, epoch2)
    arg_names = list(args1.keys()) + list(args2.keys())
    aux_names = list(auxs1.keys()) + list(auxs2.keys())
args = dict()
for arg in arg_names:
if arg in args1:
args[arg] = args1[arg]
else:
args[arg] = args2[arg]
auxs = dict()
for aux in aux_names:
if aux in auxs1:
auxs[aux] = auxs1[aux]
else:
auxs[aux] = auxs2[aux]
save_checkpoint(prefix_out, epoch_out, args, auxs)
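# Illustrative usage (prefixes and epoch numbers are placeholders, not from this repo):
#   combine_model('model/detA', 10, 'model/detB', 20, 'model/combined', 0)
# Parameters present in both checkpoints are taken from the first; the rest come from the second.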
| insightface/detection/retinaface/rcnn/utils/combine_model.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/utils/combine_model.py",
"repo_id": "insightface",
"token_count": 335
} | 96 |
from __future__ import print_function
import sys
import os
import datetime
import time
import numpy as np
import mxnet as mx
from mxnet import ndarray as nd
import cv2
#from rcnn import config
#from rcnn.processing.bbox_transform import nonlinear_pred, clip_boxes, landmark_pred
from rcnn.processing.bbox_transform import clip_boxes
from rcnn.processing.generate_anchor import generate_anchors_fpn, anchors_plane
from rcnn.processing.nms import gpu_nms_wrapper, cpu_nms_wrapper
from rcnn.processing.bbox_transform import bbox_overlaps
class RetinaFaceCoV:
def __init__(self,
prefix,
epoch,
ctx_id=0,
network='net3',
nms=0.4,
nocrop=False):
self.ctx_id = ctx_id
self.network = network
self.nms_threshold = nms
self.nocrop = nocrop
        self.debug = False
        # decay4 is referenced in detect() for stride-4 anchors; default to 1.0 (no decay)
        self.decay4 = 1.0
self.fpn_keys = []
self.anchor_cfg = None
pixel_means = [0.0, 0.0, 0.0]
pixel_stds = [1.0, 1.0, 1.0]
pixel_scale = 1.0
self.bbox_stds = [1.0, 1.0, 1.0, 1.0]
self.landmark_std = 1.0
self.preprocess = False
_ratio = (1., )
fmc = 3
if network == 'ssh' or network == 'vgg':
pixel_means = [103.939, 116.779, 123.68]
self.preprocess = True
elif network == 'net3':
_ratio = (1., )
elif network == 'net3l':
_ratio = (1., )
self.landmark_std = 0.2
elif network == 'net3a':
_ratio = (1., 1.5)
elif network == 'net6': #like pyramidbox or s3fd
fmc = 6
elif network == 'net5': #retinaface
fmc = 5
elif network == 'net5a':
fmc = 5
_ratio = (1., 1.5)
elif network == 'net4':
fmc = 4
elif network == 'net4a':
fmc = 4
_ratio = (1., 1.5)
elif network == 'x5':
fmc = 5
pixel_means = [103.52, 116.28, 123.675]
pixel_stds = [57.375, 57.12, 58.395]
elif network == 'x3':
fmc = 3
pixel_means = [103.52, 116.28, 123.675]
pixel_stds = [57.375, 57.12, 58.395]
elif network == 'x3a':
fmc = 3
_ratio = (1., 1.5)
pixel_means = [103.52, 116.28, 123.675]
pixel_stds = [57.375, 57.12, 58.395]
else:
assert False, 'network setting error %s' % network
if fmc == 3:
self._feat_stride_fpn = [32, 16, 8]
self.anchor_cfg = {
'32': {
'SCALES': (32, 16),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'16': {
'SCALES': (8, 4),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'8': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
}
elif fmc == 4:
self._feat_stride_fpn = [32, 16, 8, 4]
self.anchor_cfg = {
'32': {
'SCALES': (32, 16),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'16': {
'SCALES': (8, 4),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'8': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'4': {
'SCALES': (2, 1),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
}
elif fmc == 6:
self._feat_stride_fpn = [128, 64, 32, 16, 8, 4]
self.anchor_cfg = {
'128': {
'SCALES': (32, ),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'64': {
'SCALES': (16, ),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'32': {
'SCALES': (8, ),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'16': {
'SCALES': (4, ),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'8': {
'SCALES': (2, ),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
'4': {
'SCALES': (1, ),
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
},
}
elif fmc == 5:
self._feat_stride_fpn = [64, 32, 16, 8, 4]
self.anchor_cfg = {}
_ass = 2.0**(1.0 / 3)
_basescale = 1.0
for _stride in [4, 8, 16, 32, 64]:
key = str(_stride)
value = {
'BASE_SIZE': 16,
'RATIOS': _ratio,
'ALLOWED_BORDER': 9999
}
scales = []
for _ in range(3):
scales.append(_basescale)
_basescale *= _ass
value['SCALES'] = tuple(scales)
self.anchor_cfg[key] = value
#print(self._feat_stride_fpn, self.anchor_cfg)
for s in self._feat_stride_fpn:
self.fpn_keys.append('stride%s' % s)
dense_anchor = False
#self._anchors_fpn = dict(zip(self.fpn_keys, generate_anchors_fpn(base_size=fpn_base_size, scales=self._scales, ratios=self._ratios)))
self._anchors_fpn = dict(
zip(
self.fpn_keys,
generate_anchors_fpn(dense_anchor=dense_anchor,
cfg=self.anchor_cfg)))
for k in self._anchors_fpn:
v = self._anchors_fpn[k].astype(np.float32)
self._anchors_fpn[k] = v
self._num_anchors = dict(
zip(self.fpn_keys,
[anchors.shape[0] for anchors in self._anchors_fpn.values()]))
#self._bbox_pred = nonlinear_pred
#self._landmark_pred = landmark_pred
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
if self.ctx_id >= 0:
self.ctx = mx.gpu(self.ctx_id)
self.nms = gpu_nms_wrapper(self.nms_threshold, self.ctx_id)
else:
self.ctx = mx.cpu()
self.nms = cpu_nms_wrapper(self.nms_threshold)
self.pixel_means = np.array(pixel_means, dtype=np.float32)
self.pixel_stds = np.array(pixel_stds, dtype=np.float32)
self.pixel_scale = float(pixel_scale)
#print('means', self.pixel_means)
self.use_landmarks = True
#print('use_landmarks', self.use_landmarks)
self.cascade = 0
if self.debug:
c = len(sym) // len(self._feat_stride_fpn)
sym = sym[(c * 0):]
self._feat_stride_fpn = [32, 16, 8]
#print('sym size:', len(sym))
image_size = (640, 640)
self.model = mx.mod.Module(symbol=sym,
context=self.ctx,
label_names=None)
self.model.bind(data_shapes=[('data', (1, 3, image_size[0],
image_size[1]))],
for_training=False)
self.model.set_params(arg_params, aux_params)
def get_input(self, img):
im = img.astype(np.float32)
im_tensor = np.zeros((1, 3, im.shape[0], im.shape[1]))
for i in range(3):
im_tensor[
0,
i, :, :] = (im[:, :, 2 - i] / self.pixel_scale -
self.pixel_means[2 - i]) / self.pixel_stds[2 - i]
#if self.debug:
# timeb = datetime.datetime.now()
# diff = timeb - timea
# print('X2 uses', diff.total_seconds(), 'seconds')
data = nd.array(im_tensor)
return data
def detect(self, img, threshold=0.5, scales=[1.0], do_flip=False):
#print('in_detect', threshold, scales, do_flip, do_nms)
proposals_list = []
scores_list = []
mask_scores_list = []
landmarks_list = []
strides_list = []
timea = datetime.datetime.now()
flips = [0]
if do_flip:
flips = [0, 1]
imgs = [img]
if isinstance(img, list):
imgs = img
for img in imgs:
for im_scale in scales:
for flip in flips:
if im_scale != 1.0:
im = cv2.resize(img,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR)
else:
im = img.copy()
if flip:
im = im[:, ::-1, :]
if self.nocrop:
if im.shape[0] % 32 == 0:
h = im.shape[0]
else:
h = (im.shape[0] // 32 + 1) * 32
if im.shape[1] % 32 == 0:
w = im.shape[1]
else:
w = (im.shape[1] // 32 + 1) * 32
_im = np.zeros((h, w, 3), dtype=np.float32)
_im[0:im.shape[0], 0:im.shape[1], :] = im
im = _im
else:
im = im.astype(np.float32)
if self.debug:
timeb = datetime.datetime.now()
diff = timeb - timea
print('X1 uses', diff.total_seconds(), 'seconds')
#self.model.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))], for_training=False)
#im_info = [im.shape[0], im.shape[1], im_scale]
im_info = [im.shape[0], im.shape[1]]
im_tensor = np.zeros((1, 3, im.shape[0], im.shape[1]))
for i in range(3):
im_tensor[0, i, :, :] = (
im[:, :, 2 - i] / self.pixel_scale -
self.pixel_means[2 - i]) / self.pixel_stds[2 - i]
if self.debug:
timeb = datetime.datetime.now()
diff = timeb - timea
print('X2 uses', diff.total_seconds(), 'seconds')
data = nd.array(im_tensor)
db = mx.io.DataBatch(data=(data, ),
provide_data=[('data', data.shape)])
if self.debug:
timeb = datetime.datetime.now()
diff = timeb - timea
print('X3 uses', diff.total_seconds(), 'seconds')
self.model.forward(db, is_train=False)
net_out = self.model.get_outputs()
#post_nms_topN = self._rpn_post_nms_top_n
#min_size_dict = self._rpn_min_size_fpn
sym_idx = 0
for _idx, s in enumerate(self._feat_stride_fpn):
#if len(scales)>1 and s==32 and im_scale==scales[-1]:
# continue
_key = 'stride%s' % s
stride = int(s)
is_cascade = False
#if self.vote and stride==4 and len(scales)>2 and (im_scale==scales[0]):
# continue
#print('getting', im_scale, stride, idx, len(net_out), data.shape, file=sys.stderr)
scores = net_out[sym_idx].asnumpy()
type_scores = net_out[sym_idx + 3].asnumpy()
print(scores.shape, type_scores.shape)
if self.debug:
timeb = datetime.datetime.now()
diff = timeb - timea
print('A uses', diff.total_seconds(), 'seconds')
A = self._num_anchors['stride%s' % s]
#print(scores.shape)
#print('scores',stride, scores.shape, file=sys.stderr)
scores = scores[:, A:, :, :]
mask_scores = type_scores[:, A * 2:, :, :] #x, A, x, x
bbox_deltas = net_out[sym_idx + 1].asnumpy()
#if DEBUG:
# print 'im_size: ({}, {})'.format(im_info[0], im_info[1])
# print 'scale: {}'.format(im_info[2])
#_height, _width = int(im_info[0] / stride), int(im_info[1] / stride)
height, width = bbox_deltas.shape[
2], bbox_deltas.shape[3]
K = height * width
anchors_fpn = self._anchors_fpn['stride%s' % s]
anchors = anchors_plane(height, width, stride,
anchors_fpn)
#print((height, width), (_height, _width), anchors.shape, bbox_deltas.shape, scores.shape, file=sys.stderr)
anchors = anchors.reshape((K * A, 4))
#print('num_anchors', self._num_anchors['stride%s'%s], file=sys.stderr)
#print('HW', (height, width), file=sys.stderr)
#print('anchors_fpn', anchors_fpn.shape, file=sys.stderr)
#print('anchors', anchors.shape, file=sys.stderr)
#print('bbox_deltas', bbox_deltas.shape, file=sys.stderr)
#print('scores', scores.shape, file=sys.stderr)
#scores = self._clip_pad(scores, (height, width))
scores = scores.transpose((0, 2, 3, 1)).reshape(
(-1, 1))
mask_scores = mask_scores.transpose(
(0, 2, 3, 1)).reshape((-1, 1))
#print('pre', bbox_deltas.shape, height, width)
#bbox_deltas = self._clip_pad(bbox_deltas, (height, width))
#print('after', bbox_deltas.shape, height, width)
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1))
bbox_pred_len = bbox_deltas.shape[3] // A
#print(bbox_deltas.shape)
bbox_deltas = bbox_deltas.reshape((-1, bbox_pred_len))
bbox_deltas[:,
0::4] = bbox_deltas[:, 0::
4] * self.bbox_stds[0]
bbox_deltas[:,
1::4] = bbox_deltas[:, 1::
4] * self.bbox_stds[1]
bbox_deltas[:,
2::4] = bbox_deltas[:, 2::
4] * self.bbox_stds[2]
bbox_deltas[:,
3::4] = bbox_deltas[:, 3::
4] * self.bbox_stds[3]
proposals = self.bbox_pred(anchors, bbox_deltas)
proposals = clip_boxes(proposals, im_info[:2])
#if self.vote:
# if im_scale>1.0:
# keep = self._filter_boxes2(proposals, 160*im_scale, -1)
# else:
# keep = self._filter_boxes2(proposals, -1, 100*im_scale)
# if stride==4:
# keep = self._filter_boxes2(proposals, 12*im_scale, -1)
# proposals = proposals[keep, :]
# scores = scores[keep]
#keep = self._filter_boxes(proposals, min_size_dict['stride%s'%s] * im_info[2])
#proposals = proposals[keep, :]
#scores = scores[keep]
#print('333', proposals.shape)
if stride == 4 and self.decay4 < 1.0:
scores *= self.decay4
scores_ravel = scores.ravel()
#mask_scores_ravel = mask_scores.ravel()
#print('__shapes', proposals.shape, scores_ravel.shape)
#print('max score', np.max(scores_ravel))
order = np.where(scores_ravel >= threshold)[0]
#_scores = scores_ravel[order]
#_order = _scores.argsort()[::-1]
#order = order[_order]
proposals = proposals[order, :]
scores = scores[order]
mask_scores = mask_scores[order]
if flip:
oldx1 = proposals[:, 0].copy()
oldx2 = proposals[:, 2].copy()
proposals[:, 0] = im.shape[1] - oldx2 - 1
proposals[:, 2] = im.shape[1] - oldx1 - 1
proposals[:, 0:4] /= im_scale
proposals_list.append(proposals)
scores_list.append(scores)
mask_scores_list.append(mask_scores)
landmark_deltas = net_out[sym_idx + 2].asnumpy()
#landmark_deltas = self._clip_pad(landmark_deltas, (height, width))
landmark_pred_len = landmark_deltas.shape[1] // A
landmark_deltas = landmark_deltas.transpose(
(0, 2, 3, 1)).reshape(
(-1, 5, landmark_pred_len // 5))
landmark_deltas *= self.landmark_std
#print(landmark_deltas.shape, landmark_deltas)
landmarks = self.landmark_pred(anchors,
landmark_deltas)
landmarks = landmarks[order, :]
if flip:
landmarks[:, :,
0] = im.shape[1] - landmarks[:, :, 0] - 1
#for a in range(5):
# oldx1 = landmarks[:, a].copy()
# landmarks[:,a] = im.shape[1] - oldx1 - 1
order = [1, 0, 2, 4, 3]
flandmarks = landmarks.copy()
for idx, a in enumerate(order):
flandmarks[:, idx, :] = landmarks[:, a, :]
#flandmarks[:, idx*2] = landmarks[:,a*2]
#flandmarks[:, idx*2+1] = landmarks[:,a*2+1]
landmarks = flandmarks
landmarks[:, :, 0:2] /= im_scale
#landmarks /= im_scale
#landmarks = landmarks.reshape( (-1, landmark_pred_len) )
landmarks_list.append(landmarks)
#proposals = np.hstack((proposals, landmarks))
sym_idx += 4
if self.debug:
timeb = datetime.datetime.now()
diff = timeb - timea
print('B uses', diff.total_seconds(), 'seconds')
proposals = np.vstack(proposals_list)
landmarks = None
if proposals.shape[0] == 0:
landmarks = np.zeros((0, 5, 2))
return np.zeros((0, 6)), landmarks
scores = np.vstack(scores_list)
mask_scores = np.vstack(mask_scores_list)
#print('shapes', proposals.shape, scores.shape)
scores_ravel = scores.ravel()
order = scores_ravel.argsort()[::-1]
#if config.TEST.SCORE_THRESH>0.0:
# _count = np.sum(scores_ravel>config.TEST.SCORE_THRESH)
# order = order[:_count]
proposals = proposals[order, :]
scores = scores[order]
mask_scores = mask_scores[order]
landmarks = np.vstack(landmarks_list)
landmarks = landmarks[order].astype(np.float32, copy=False)
pre_det = np.hstack((proposals[:, 0:4], scores)).astype(np.float32,
copy=False)
keep = self.nms(pre_det)
det = np.hstack((pre_det, mask_scores))
det = det[keep, :]
landmarks = landmarks[keep]
if self.debug:
timeb = datetime.datetime.now()
diff = timeb - timea
print('C uses', diff.total_seconds(), 'seconds')
return det, landmarks
def detect_center(self, img, threshold=0.5, scales=[1.0], do_flip=False):
det, landmarks = self.detect(img, threshold, scales, do_flip)
if det.shape[0] == 0:
return None, None
bindex = 0
if det.shape[0] > 1:
img_size = np.asarray(img.shape)[0:2]
bounding_box_size = (det[:, 2] - det[:, 0]) * (det[:, 3] -
det[:, 1])
img_center = img_size / 2
offsets = np.vstack([(det[:, 0] + det[:, 2]) / 2 - img_center[1],
(det[:, 1] + det[:, 3]) / 2 - img_center[0]])
offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
bindex = np.argmax(bounding_box_size - offset_dist_squared *
2.0) # some extra weight on the centering
bbox = det[bindex, :]
landmark = landmarks[bindex, :, :]
return bbox, landmark
@staticmethod
def check_large_pose(landmark, bbox):
assert landmark.shape == (5, 2)
assert len(bbox) == 4
def get_theta(base, x, y):
vx = x - base
vy = y - base
vx[1] *= -1
vy[1] *= -1
tx = np.arctan2(vx[1], vx[0])
ty = np.arctan2(vy[1], vy[0])
d = ty - tx
d = np.degrees(d)
#print(vx, tx, vy, ty, d)
#if d<-1.*math.pi:
# d+=2*math.pi
#elif d>math.pi:
# d-=2*math.pi
if d < -180.0:
d += 360.
elif d > 180.0:
d -= 360.0
return d
landmark = landmark.astype(np.float32)
theta1 = get_theta(landmark[0], landmark[3], landmark[2])
theta2 = get_theta(landmark[1], landmark[2], landmark[4])
#print(va, vb, theta2)
theta3 = get_theta(landmark[0], landmark[2], landmark[1])
theta4 = get_theta(landmark[1], landmark[0], landmark[2])
theta5 = get_theta(landmark[3], landmark[4], landmark[2])
theta6 = get_theta(landmark[4], landmark[2], landmark[3])
theta7 = get_theta(landmark[3], landmark[2], landmark[0])
theta8 = get_theta(landmark[4], landmark[1], landmark[2])
#print(theta1, theta2, theta3, theta4, theta5, theta6, theta7, theta8)
left_score = 0.0
right_score = 0.0
up_score = 0.0
down_score = 0.0
if theta1 <= 0.0:
left_score = 10.0
elif theta2 <= 0.0:
right_score = 10.0
else:
left_score = theta2 / theta1
right_score = theta1 / theta2
if theta3 <= 10.0 or theta4 <= 10.0:
up_score = 10.0
else:
up_score = max(theta1 / theta3, theta2 / theta4)
if theta5 <= 10.0 or theta6 <= 10.0:
down_score = 10.0
else:
down_score = max(theta7 / theta5, theta8 / theta6)
mleft = (landmark[0][0] + landmark[3][0]) / 2
mright = (landmark[1][0] + landmark[4][0]) / 2
box_center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
ret = 0
if left_score >= 3.0:
ret = 1
if ret == 0 and left_score >= 2.0:
if mright <= box_center[0]:
ret = 1
if ret == 0 and right_score >= 3.0:
ret = 2
if ret == 0 and right_score >= 2.0:
if mleft >= box_center[0]:
ret = 2
if ret == 0 and up_score >= 2.0:
ret = 3
if ret == 0 and down_score >= 5.0:
ret = 4
return ret, left_score, right_score, up_score, down_score
@staticmethod
def _filter_boxes(boxes, min_size):
""" Remove all boxes with any side smaller than min_size """
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
@staticmethod
def _filter_boxes2(boxes, max_size, min_size):
""" Remove all boxes with any side smaller than min_size """
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
if max_size > 0:
keep = np.where(np.minimum(ws, hs) < max_size)[0]
elif min_size > 0:
keep = np.where(np.maximum(ws, hs) > min_size)[0]
return keep
@staticmethod
def _clip_pad(tensor, pad_shape):
"""
Clip boxes of the pad area.
:param tensor: [n, c, H, W]
:param pad_shape: [h, w]
:return: [n, c, h, w]
"""
H, W = tensor.shape[2:]
h, w = pad_shape
if h < H or w < W:
tensor = tensor[:, :, :h, :w].copy()
return tensor
@staticmethod
def bbox_pred(boxes, box_deltas):
"""
Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
:param boxes: !important [N 4]
:param box_deltas: [N, 4 * num_classes]
:return: [N 4 * num_classes]
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
        boxes = boxes.astype(np.float64, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
dx = box_deltas[:, 0:1]
dy = box_deltas[:, 1:2]
dw = box_deltas[:, 2:3]
dh = box_deltas[:, 3:4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0:1] = pred_ctr_x - 0.5 * (pred_w - 1.0)
# y1
pred_boxes[:, 1:2] = pred_ctr_y - 0.5 * (pred_h - 1.0)
# x2
pred_boxes[:, 2:3] = pred_ctr_x + 0.5 * (pred_w - 1.0)
# y2
pred_boxes[:, 3:4] = pred_ctr_y + 0.5 * (pred_h - 1.0)
if box_deltas.shape[1] > 4:
pred_boxes[:, 4:] = box_deltas[:, 4:]
return pred_boxes
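    # Worked example (illustrative): a box (0, 0, 9, 9) with deltas (0, 0, log 2, log 2)
    # keeps its centre (4.5, 4.5) but doubles its width/height to 20, giving
    # (-5, -5, 14, 14) before any clipping to the image bounds.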
@staticmethod
def landmark_pred(boxes, landmark_deltas):
if boxes.shape[0] == 0:
return np.zeros((0, landmark_deltas.shape[1]))
        boxes = boxes.astype(np.float64, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * (widths - 1.0)
ctr_y = boxes[:, 1] + 0.5 * (heights - 1.0)
pred = landmark_deltas.copy()
for i in range(5):
pred[:, i, 0] = landmark_deltas[:, i, 0] * widths + ctr_x
pred[:, i, 1] = landmark_deltas[:, i, 1] * heights + ctr_y
return pred
#preds = []
#for i in range(landmark_deltas.shape[1]):
# if i%2==0:
# pred = (landmark_deltas[:,i]*widths + ctr_x)
# else:
# pred = (landmark_deltas[:,i]*heights + ctr_y)
# preds.append(pred)
#preds = np.vstack(preds).transpose()
#return preds
def vote(self, det):
#order = det[:, 4].ravel().argsort()[::-1]
#det = det[order, :]
if det.shape[0] == 0:
return np.zeros((0, 5))
#dets = np.array([[10, 10, 20, 20, 0.002]])
#det = np.empty(shape=[0, 5])
dets = None
while det.shape[0] > 0:
if dets is not None and dets.shape[0] >= 750:
break
# IOU
area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
xx1 = np.maximum(det[0, 0], det[:, 0])
yy1 = np.maximum(det[0, 1], det[:, 1])
xx2 = np.minimum(det[0, 2], det[:, 2])
yy2 = np.minimum(det[0, 3], det[:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
o = inter / (area[0] + area[:] - inter)
# nms
merge_index = np.where(o >= self.nms_threshold)[0]
det_accu = det[merge_index, :]
det = np.delete(det, merge_index, 0)
if merge_index.shape[0] <= 1:
if det.shape[0] == 0:
try:
dets = np.row_stack((dets, det_accu))
except:
dets = det_accu
continue
det_accu[:,
0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:],
(1, 4))
max_score = np.max(det_accu[:, 4])
det_accu_sum = np.zeros((1, 5))
det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4], axis=0) / np.sum(
det_accu[:, -1:])
det_accu_sum[:, 4] = max_score
if dets is None:
dets = det_accu_sum
else:
dets = np.row_stack((dets, det_accu_sum))
dets = dets[0:750, :]
return dets
| insightface/detection/retinaface_anticov/retinaface_cov.py/0 | {
"file_path": "insightface/detection/retinaface_anticov/retinaface_cov.py",
"repo_id": "insightface",
"token_count": 19079
} | 97 |
# model settings
model = dict(
type='CascadeRCNN',
pretrained='torchvision://resnet50',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
])
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5))
| insightface/detection/scrfd/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py/0 | {
"file_path": "insightface/detection/scrfd/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py",
"repo_id": "insightface",
"token_count": 4251
} | 98 |
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
'HungarianAssigner'
]
| insightface/detection/scrfd/mmdet/core/bbox/assigners/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/assigners/__init__.py",
"repo_id": "insightface",
"token_count": 224
} | 99 |
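All of the assigners re-exported above are registered in the BBOX_ASSIGNERS registry, so configs refer to them by their 'type' string. A minimal sketch, assuming mmdet and torch are installed, building the MaxIoUAssigner used in the cascade train_cfg and assigning two dummy proposals to one ground-truth box.

# A minimal sketch, assuming mmdet and torch are installed.
import torch
from mmdet.core.bbox import build_assigner

assigner = build_assigner(
    dict(
        type='MaxIoUAssigner',
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        min_pos_iou=0.5,
        ignore_iof_thr=-1))

bboxes = torch.tensor([[0., 0., 10., 10.], [40., 40., 60., 60.]])
gt_bboxes = torch.tensor([[1., 1., 10., 10.]])
gt_labels = torch.tensor([0])

# AssignResult.gt_inds: 0 marks a negative, i > 0 means matched to gt i - 1.
result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds, result.labels)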
from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
"""Pseudo bounding box coder."""
def __init__(self, **kwargs):
super(BaseBBoxCoder, self).__init__(**kwargs)
def encode(self, bboxes, gt_bboxes):
"""torch.Tensor: return the given ``bboxes``"""
return gt_bboxes
def decode(self, bboxes, pred_bboxes):
"""torch.Tensor: return the given ``pred_bboxes``"""
return pred_bboxes
| insightface/detection/scrfd/mmdet/core/bbox/coder/pseudo_bbox_coder.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/coder/pseudo_bbox_coder.py",
"repo_id": "insightface",
"token_count": 222
} | 100 |
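Since both methods above simply return their second argument, PseudoBBoxCoder is a drop-in placeholder for heads that regress boxes directly and need no delta encoding. A tiny sketch with dummy tensors, assuming mmdet and torch are installed.

# A tiny sketch with dummy tensors, assuming mmdet and torch are installed.
import torch
from mmdet.core.bbox.coder import PseudoBBoxCoder

coder = PseudoBBoxCoder()
proposals = torch.tensor([[0., 0., 10., 10.]])
gt = torch.tensor([[1., 1., 9., 9.]])

# encode() returns the ground-truth boxes, decode() returns the predictions,
# both untouched.
assert torch.equal(coder.encode(proposals, gt), gt)
assert torch.equal(coder.decode(proposals, gt), gt)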
import torch
from mmcv.ops import nms_match
from ..builder import BBOX_SAMPLERS
from ..transforms import bbox2roi
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@BBOX_SAMPLERS.register_module()
class ScoreHLRSampler(BaseSampler):
r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample
Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.
    Score hierarchical local rank (HLR) differs from RandomSampler in the
    negative-sampling part. It first computes Score-HLR in a two-step way,
    then linearly maps the Score-HLR to the loss weights.
Args:
num (int): Total number of sampled RoIs.
pos_fraction (float): Fraction of positive samples.
context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.
neg_pos_ub (int): Upper bound of the ratio of num negative to num
positive, -1 means no upper bound.
add_gt_as_proposals (bool): Whether to add ground truth as proposals.
k (float): Power of the non-linear mapping.
bias (float): Shift of the non-linear mapping.
score_thr (float): Minimum score that a negative sample is to be
considered as valid bbox.
"""
def __init__(self,
num,
pos_fraction,
context,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0,
score_thr=0.05,
iou_thr=0.5,
**kwargs):
super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
self.k = k
self.bias = bias
self.score_thr = score_thr
self.iou_thr = iou_thr
self.context = context
# context of cascade detectors is a list, so distinguish them here.
if not hasattr(context, 'num_stages'):
self.bbox_roi_extractor = context.bbox_roi_extractor
self.bbox_head = context.bbox_head
self.with_shared_head = context.with_shared_head
if self.with_shared_head:
self.shared_head = context.shared_head
else:
self.bbox_roi_extractor = context.bbox_roi_extractor[
context.current_stage]
self.bbox_head = context.bbox_head[context.current_stage]
@staticmethod
def random_choice(gallery, num):
"""Randomly select some elements from the gallery.
If `gallery` is a Tensor, the returned indices will be a Tensor;
        If `gallery` is an ndarray or list, the returned indices will be an
        ndarray.
Args:
gallery (Tensor | ndarray | list): indices pool.
num (int): expected sample num.
Returns:
Tensor or ndarray: sampled indices.
"""
assert len(gallery) >= num
is_tensor = isinstance(gallery, torch.Tensor)
if not is_tensor:
if torch.cuda.is_available():
device = torch.cuda.current_device()
else:
device = 'cpu'
gallery = torch.tensor(gallery, dtype=torch.long, device=device)
perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
rand_inds = gallery[perm]
if not is_tensor:
rand_inds = rand_inds.cpu().numpy()
return rand_inds
def _sample_pos(self, assign_result, num_expected, **kwargs):
"""Randomly sample some positive samples."""
pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
if pos_inds.numel() <= num_expected:
return pos_inds
else:
return self.random_choice(pos_inds, num_expected)
def _sample_neg(self,
assign_result,
num_expected,
bboxes,
feats=None,
img_meta=None,
**kwargs):
"""Sample negative samples.
Score-HLR sampler is done in the following steps:
        1. Take the maximum positive score prediction of each negative sample
            as s_i.
        2. Filter out negative samples whose s_i <= score_thr; the remaining
            samples are called valid samples.
        3. Use NMS-Match to divide valid samples into different groups;
            samples in the same group will greatly overlap with each other.
        4. Rank the matched samples in two steps to get Score-HLR.
(1) In the same group, rank samples with their scores.
(2) In the same score rank across different groups,
rank samples with their scores again.
5. Linearly map Score-HLR to the final label weights.
Args:
assign_result (:obj:`AssignResult`): result of assigner.
num_expected (int): Expected number of samples.
bboxes (Tensor): bbox to be sampled.
feats (Tensor): Features come from FPN.
img_meta (dict): Meta information dictionary.
"""
neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
num_neg = neg_inds.size(0)
if num_neg == 0:
return neg_inds, None
with torch.no_grad():
neg_bboxes = bboxes[neg_inds]
neg_rois = bbox2roi([neg_bboxes])
bbox_result = self.context._bbox_forward(feats, neg_rois)
cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
'bbox_pred']
ori_loss = self.bbox_head.loss(
cls_score=cls_score,
bbox_pred=None,
rois=None,
labels=neg_inds.new_full((num_neg, ),
self.bbox_head.num_classes),
label_weights=cls_score.new_ones(num_neg),
bbox_targets=None,
bbox_weights=None,
reduction_override='none')['loss_cls']
# filter out samples with the max score lower than score_thr
max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
valid_inds = (max_score > self.score_thr).nonzero().view(-1)
invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
num_valid = valid_inds.size(0)
num_invalid = invalid_inds.size(0)
num_expected = min(num_neg, num_expected)
num_hlr = min(num_valid, num_expected)
num_rand = num_expected - num_hlr
if num_valid > 0:
valid_rois = neg_rois[valid_inds]
valid_max_score = max_score[valid_inds]
valid_argmax_score = argmax_score[valid_inds]
valid_bbox_pred = bbox_pred[valid_inds]
# valid_bbox_pred shape: [num_valid, #num_classes, 4]
valid_bbox_pred = valid_bbox_pred.view(
valid_bbox_pred.size(0), -1, 4)
selected_bbox_pred = valid_bbox_pred[range(num_valid),
valid_argmax_score]
pred_bboxes = self.bbox_head.bbox_coder.decode(
valid_rois[:, 1:], selected_bbox_pred)
pred_bboxes_with_score = torch.cat(
[pred_bboxes, valid_max_score[:, None]], -1)
group = nms_match(pred_bboxes_with_score, self.iou_thr)
# imp: importance
imp = cls_score.new_zeros(num_valid)
for g in group:
g_score = valid_max_score[g]
                    # g_score is already sorted in descending order
rank = g_score.new_tensor(range(g_score.size(0)))
imp[g] = num_valid - rank + g_score
_, imp_rank_inds = imp.sort(descending=True)
_, imp_rank = imp_rank_inds.sort()
hlr_inds = imp_rank_inds[:num_expected]
if num_rand > 0:
rand_inds = torch.randperm(num_invalid)[:num_rand]
select_inds = torch.cat(
[valid_inds[hlr_inds], invalid_inds[rand_inds]])
else:
select_inds = valid_inds[hlr_inds]
neg_label_weights = cls_score.new_ones(num_expected)
up_bound = max(num_expected, num_valid)
imp_weights = (up_bound -
imp_rank[hlr_inds].float()) / up_bound
neg_label_weights[:num_hlr] = imp_weights
neg_label_weights[num_hlr:] = imp_weights.min()
neg_label_weights = (self.bias +
(1 - self.bias) * neg_label_weights).pow(
self.k)
ori_selected_loss = ori_loss[select_inds]
new_loss = ori_selected_loss * neg_label_weights
norm_ratio = ori_selected_loss.sum() / new_loss.sum()
neg_label_weights *= norm_ratio
else:
neg_label_weights = cls_score.new_ones(num_expected)
select_inds = torch.randperm(num_neg)[:num_expected]
return neg_inds[select_inds], neg_label_weights
def sample(self,
assign_result,
bboxes,
gt_bboxes,
gt_labels=None,
img_meta=None,
**kwargs):
"""Sample positive and negative bboxes.
This is a simple implementation of bbox sampling given candidates,
assigning results and ground truth bboxes.
Args:
assign_result (:obj:`AssignResult`): Bbox assigning results.
bboxes (Tensor): Boxes to be sampled from.
gt_bboxes (Tensor): Ground truth bboxes.
gt_labels (Tensor, optional): Class labels of ground truth bboxes.
Returns:
            tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative
label weights.
"""
bboxes = bboxes[:, :4]
gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
if self.add_gt_as_proposals:
bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
assign_result.add_gt_(gt_labels)
gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
gt_flags = torch.cat([gt_ones, gt_flags])
num_expected_pos = int(self.num * self.pos_fraction)
pos_inds = self.pos_sampler._sample_pos(
assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
num_sampled_pos = pos_inds.numel()
num_expected_neg = self.num - num_sampled_pos
if self.neg_pos_ub >= 0:
_pos = max(1, num_sampled_pos)
neg_upper_bound = int(self.neg_pos_ub * _pos)
if num_expected_neg > neg_upper_bound:
num_expected_neg = neg_upper_bound
neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
assign_result,
num_expected_neg,
bboxes,
img_meta=img_meta,
**kwargs)
return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags), neg_label_weights
| insightface/detection/scrfd/mmdet/core/bbox/samplers/score_hlr_sampler.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/samplers/score_hlr_sampler.py",
"repo_id": "insightface",
"token_count": 5703
} | 101 |
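The easiest part of _sample_neg to lift out is the weight mapping at the end: HLR ranks are turned into linear weights, the randomly sampled negatives get the smallest weight, and everything is sharpened by the (bias + (1 - bias) * w) ** k transform before loss-sum normalisation. A stand-alone sketch of that mapping on made-up ranks, assuming only PyTorch; the real sampler additionally needs a RoI head passed as `context`.

# Stand-alone sketch of the Score-HLR weight mapping, assuming only PyTorch;
# sizes and ranks are made up for illustration.
import torch

num_expected, num_valid = 8, 5   # sampled negatives / negatives above score_thr
bias, k = 0.0, 0.5               # the sampler's defaults

imp_rank = torch.arange(num_valid, dtype=torch.float32)    # 0 = highest Score-HLR
up_bound = max(num_expected, num_valid)

weights = torch.ones(num_expected)
weights[:num_valid] = (up_bound - imp_rank) / up_bound     # linear rank -> weight
weights[num_valid:] = weights[:num_valid].min()            # random negatives
weights = (bias + (1 - bias) * weights).pow(k)             # non-linear sharpening
print(weights)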
import mmcv
import numpy as np
import pycocotools.mask as mask_util
def split_combined_polys(polys, poly_lens, polys_per_mask):
"""Split the combined 1-D polys into masks.
A mask is represented as a list of polys, and a poly is represented as
a 1-D array. In dataset, all masks are concatenated into a single 1-D
tensor. Here we need to split the tensor into original representations.
Args:
polys (list): a list (length = image num) of 1-D tensors
poly_lens (list): a list (length = image num) of poly length
polys_per_mask (list): a list (length = image num) of poly number
of each mask
Returns:
list: a list (length = image num) of list (length = mask num) of \
list (length = poly num) of numpy array.
"""
mask_polys_list = []
for img_id in range(len(polys)):
polys_single = polys[img_id]
polys_lens_single = poly_lens[img_id].tolist()
polys_per_mask_single = polys_per_mask[img_id].tolist()
split_polys = mmcv.slice_list(polys_single, polys_lens_single)
mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
mask_polys_list.append(mask_polys)
return mask_polys_list
# TODO: move this function to more proper place
def encode_mask_results(mask_results):
"""Encode bitmap mask to RLE code.
Args:
mask_results (list | tuple[list]): bitmap mask results.
In mask scoring rcnn, mask_results is a tuple of (segm_results,
segm_cls_score).
Returns:
list | tuple: RLE encoded mask.
"""
if isinstance(mask_results, tuple): # mask scoring
cls_segms, cls_mask_scores = mask_results
else:
cls_segms = mask_results
num_classes = len(cls_segms)
encoded_mask_results = [[] for _ in range(num_classes)]
for i in range(len(cls_segms)):
for cls_segm in cls_segms[i]:
encoded_mask_results[i].append(
mask_util.encode(
np.array(
cls_segm[:, :, np.newaxis], order='F',
dtype='uint8'))[0]) # encoded with RLE
if isinstance(mask_results, tuple):
return encoded_mask_results, cls_mask_scores
else:
return encoded_mask_results
| insightface/detection/scrfd/mmdet/core/mask/utils.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/mask/utils.py",
"repo_id": "insightface",
"token_count": 1012
} | 102 |
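encode_mask_results stores each predicted binary mask as a COCO RLE dict via pycocotools. A small round-trip sketch of that per-mask conversion, assuming numpy and pycocotools are installed; the Fortran-order uint8 array with a trailing channel axis mirrors the call above.

# A small round-trip sketch, assuming numpy and pycocotools are installed.
import numpy as np
import pycocotools.mask as mask_util

mask = np.zeros((32, 32), dtype=np.uint8)
mask[8:24, 8:24] = 1

# Same conversion as in encode_mask_results: add a channel axis and use
# Fortran (column-major) order before RLE encoding.
rle = mask_util.encode(
    np.array(mask[:, :, np.newaxis], order='F', dtype='uint8'))[0]

decoded = mask_util.decode(rle)
assert (decoded == mask).all()
print(rle['size'], int(mask_util.area(rle)))   # [32, 32] 256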
import copy
import cv2
import mmcv
import numpy as np
from ..builder import PIPELINES
from .compose import Compose
_MAX_LEVEL = 10
def level_to_value(level, max_value):
"""Map from level to values based on max_value."""
return (level / _MAX_LEVEL) * max_value
def enhance_level_to_value(level, a=1.8, b=0.1):
"""Map from level to values."""
return (level / _MAX_LEVEL) * a + b
def random_negative(value, random_negative_prob):
"""Randomly negate value based on random_negative_prob."""
return -value if np.random.rand() < random_negative_prob else value
def bbox2fields():
"""The key correspondence from bboxes to labels, masks and
segmentations."""
bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
bbox2seg = {
'gt_bboxes': 'gt_semantic_seg',
}
return bbox2label, bbox2mask, bbox2seg
@PIPELINES.register_module()
class AutoAugment(object):
"""Auto augmentation.
This data augmentation is proposed in `Learning Data Augmentation
Strategies for Object Detection <https://arxiv.org/pdf/1906.11172>`_.
TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms
Args:
policies (list[list[dict]]): The policies of auto augmentation. Each
policy in ``policies`` is a specific augmentation policy, and is
composed by several augmentations (dict). When AutoAugment is
called, a random policy in ``policies`` will be selected to
augment images.
Examples:
>>> replace = (104, 116, 124)
>>> policies = [
>>> [
>>> dict(type='Sharpness', prob=0.0, level=8),
>>> dict(
>>> type='Shear',
>>> prob=0.4,
>>> level=0,
>>> replace=replace,
>>> axis='x')
>>> ],
>>> [
>>> dict(
>>> type='Rotate',
>>> prob=0.6,
>>> level=10,
>>> replace=replace),
>>> dict(type='Color', prob=1.0, level=6)
>>> ]
>>> ]
>>> augmentation = AutoAugment(policies)
        >>> img = np.ones((100, 100, 3))
        >>> gt_bboxes = np.ones((10, 4))
>>> results = dict(img=img, gt_bboxes=gt_bboxes)
>>> results = augmentation(results)
"""
def __init__(self, policies):
assert isinstance(policies, list) and len(policies) > 0, \
'Policies must be a non-empty list.'
for policy in policies:
assert isinstance(policy, list) and len(policy) > 0, \
'Each policy in policies must be a non-empty list.'
for augment in policy:
assert isinstance(augment, dict) and 'type' in augment, \
'Each specific augmentation must be a dict with key' \
' "type".'
self.policies = copy.deepcopy(policies)
self.transforms = [Compose(policy) for policy in self.policies]
def __call__(self, results):
transform = np.random.choice(self.transforms)
return transform(results)
def __repr__(self):
return f'{self.__class__.__name__}(policies={self.policies})'
@PIPELINES.register_module()
class Shear(object):
"""Apply Shear Transformation to image (and its corresponding bbox, mask,
segmentation).
Args:
level (int | float): The level should be in range [0,_MAX_LEVEL].
img_fill_val (int | float | tuple): The filled values for image border.
If float, the same fill value will be used for all the three
            channels of image. If tuple, it should have 3 elements.
seg_ignore_label (int): The fill value used for segmentation map.
Note this value must equals ``ignore_label`` in ``semantic_head``
of the corresponding config. Default 255.
prob (float): The probability for performing Shear and should be in
range [0, 1].
direction (str): The direction for shear, either "horizontal"
or "vertical".
max_shear_magnitude (float): The maximum magnitude for Shear
transformation.
random_negative_prob (float): The probability that turns the
offset negative. Should be in range [0,1]
interpolation (str): Same as in :func:`mmcv.imshear`.
"""
def __init__(self,
level,
img_fill_val=128,
seg_ignore_label=255,
prob=0.5,
direction='horizontal',
max_shear_magnitude=0.3,
random_negative_prob=0.5,
interpolation='bilinear'):
assert isinstance(level, (int, float)), 'The level must be type ' \
f'int or float, got {type(level)}.'
assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \
f'[0,{_MAX_LEVEL}], got {level}.'
if isinstance(img_fill_val, (float, int)):
img_fill_val = tuple([float(img_fill_val)] * 3)
elif isinstance(img_fill_val, tuple):
assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \
f'have 3 elements. got {len(img_fill_val)}.'
img_fill_val = tuple([float(val) for val in img_fill_val])
else:
raise ValueError(
'img_fill_val must be float or tuple with 3 elements.')
assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \
'elements of img_fill_val should between range [0,255].' \
f'got {img_fill_val}.'
assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \
f'range [0,1]. got {prob}.'
        assert direction in ('horizontal', 'vertical'), 'direction must ' \
            f'be either "horizontal" or "vertical". got {direction}.'
assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \
f'should be type float. got {type(max_shear_magnitude)}.'
        assert 0. <= max_shear_magnitude <= 1., \
            'max_shear_magnitude should be in range [0,1]. ' \
            f'got {max_shear_magnitude}.'
self.level = level
self.magnitude = level_to_value(level, max_shear_magnitude)
self.img_fill_val = img_fill_val
self.seg_ignore_label = seg_ignore_label
self.prob = prob
self.direction = direction
self.max_shear_magnitude = max_shear_magnitude
self.random_negative_prob = random_negative_prob
self.interpolation = interpolation
def _shear_img(self,
results,
magnitude,
direction='horizontal',
interpolation='bilinear'):
"""Shear the image.
Args:
results (dict): Result dict from loading pipeline.
magnitude (int | float): The magnitude used for shear.
direction (str): The direction for shear, either "horizontal"
or "vertical".
interpolation (str): Same as in :func:`mmcv.imshear`.
"""
for key in results.get('img_fields', ['img']):
img = results[key]
img_sheared = mmcv.imshear(
img,
magnitude,
direction,
border_value=self.img_fill_val,
interpolation=interpolation)
results[key] = img_sheared.astype(img.dtype)
def _shear_bboxes(self, results, magnitude):
"""Shear the bboxes."""
h, w, c = results['img_shape']
if self.direction == 'horizontal':
shear_matrix = np.stack([[1, magnitude],
[0, 1]]).astype(np.float32) # [2, 2]
else:
shear_matrix = np.stack([[1, 0], [magnitude,
1]]).astype(np.float32)
for key in results.get('bbox_fields', []):
min_x, min_y, max_x, max_y = np.split(
results[key], results[key].shape[-1], axis=-1)
coordinates = np.stack([[min_x, min_y], [max_x, min_y],
[min_x, max_y],
[max_x, max_y]]) # [4, 2, nb_box, 1]
coordinates = coordinates[..., 0].transpose(
(2, 1, 0)).astype(np.float32) # [nb_box, 2, 4]
new_coords = np.matmul(shear_matrix[None, :, :],
coordinates) # [nb_box, 2, 4]
min_x = np.min(new_coords[:, 0, :], axis=-1)
min_y = np.min(new_coords[:, 1, :], axis=-1)
max_x = np.max(new_coords[:, 0, :], axis=-1)
max_y = np.max(new_coords[:, 1, :], axis=-1)
min_x = np.clip(min_x, a_min=0, a_max=w)
min_y = np.clip(min_y, a_min=0, a_max=h)
max_x = np.clip(max_x, a_min=min_x, a_max=w)
max_y = np.clip(max_y, a_min=min_y, a_max=h)
results[key] = np.stack([min_x, min_y, max_x, max_y],
axis=-1).astype(results[key].dtype)
def _shear_masks(self,
results,
magnitude,
direction='horizontal',
fill_val=0,
interpolation='bilinear'):
"""Shear the masks."""
h, w, c = results['img_shape']
for key in results.get('mask_fields', []):
masks = results[key]
results[key] = masks.shear((h, w),
magnitude,
direction,
border_value=fill_val,
interpolation=interpolation)
def _shear_seg(self,
results,
magnitude,
direction='horizontal',
fill_val=255,
interpolation='bilinear'):
"""Shear the segmentation maps."""
for key in results.get('seg_fields', []):
seg = results[key]
results[key] = mmcv.imshear(
seg,
magnitude,
direction,
border_value=fill_val,
interpolation=interpolation).astype(seg.dtype)
def _filter_invalid(self, results, min_bbox_size=0):
"""Filter bboxes and corresponding masks too small after shear
augmentation."""
bbox2label, bbox2mask, _ = bbox2fields()
for key in results.get('bbox_fields', []):
bbox_w = results[key][:, 2] - results[key][:, 0]
bbox_h = results[key][:, 3] - results[key][:, 1]
valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
valid_inds = np.nonzero(valid_inds)[0]
results[key] = results[key][valid_inds]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][valid_inds]
def __call__(self, results):
"""Call function to shear images, bounding boxes, masks and semantic
segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Sheared results.
"""
if np.random.rand() > self.prob:
return results
magnitude = random_negative(self.magnitude, self.random_negative_prob)
self._shear_img(results, magnitude, self.direction, self.interpolation)
self._shear_bboxes(results, magnitude)
# fill_val set to 0 for background of mask.
self._shear_masks(
results,
magnitude,
self.direction,
fill_val=0,
interpolation=self.interpolation)
self._shear_seg(
results,
magnitude,
self.direction,
fill_val=self.seg_ignore_label,
interpolation=self.interpolation)
self._filter_invalid(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(level={self.level}, '
repr_str += f'img_fill_val={self.img_fill_val}, '
repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
repr_str += f'prob={self.prob}, '
repr_str += f'direction={self.direction}, '
repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, '
repr_str += f'random_negative_prob={self.random_negative_prob}, '
repr_str += f'interpolation={self.interpolation})'
return repr_str
@PIPELINES.register_module()
class Rotate(object):
"""Apply Rotate Transformation to image (and its corresponding bbox, mask,
segmentation).
Args:
        level (int | float): The level should be in range [0,_MAX_LEVEL].
scale (int | float): Isotropic scale factor. Same in
``mmcv.imrotate``.
center (int | float | tuple[float]): Center point (w, h) of the
rotation in the source image. If None, the center of the
image will be used. Same in ``mmcv.imrotate``.
img_fill_val (int | float | tuple): The fill value for image border.
If float, the same value will be used for all the three
            channels of image. If tuple, it should have 3 elements (i.e.
            equal to the number of channels of the image).
seg_ignore_label (int): The fill value used for segmentation map.
Note this value must equals ``ignore_label`` in ``semantic_head``
of the corresponding config. Default 255.
        prob (float): The probability of performing the transformation,
            which should be in range [0, 1].
max_rotate_angle (int | float): The maximum angles for rotate
transformation.
random_negative_prob (float): The probability that turns the
offset negative.
"""
def __init__(self,
level,
scale=1,
center=None,
img_fill_val=128,
seg_ignore_label=255,
prob=0.5,
max_rotate_angle=30,
random_negative_prob=0.5):
assert isinstance(level, (int, float)), \
f'The level must be type int or float. got {type(level)}.'
assert 0 <= level <= _MAX_LEVEL, \
            f'The level should be in range [0,{_MAX_LEVEL}]. got {level}.'
assert isinstance(scale, (int, float)), \
f'The scale must be type int or float. got type {type(scale)}.'
if isinstance(center, (int, float)):
center = (center, center)
elif isinstance(center, tuple):
assert len(center) == 2, 'center with type tuple must have '\
f'2 elements. got {len(center)} elements.'
else:
assert center is None, 'center must be None or type int, '\
f'float or tuple, got type {type(center)}.'
if isinstance(img_fill_val, (float, int)):
img_fill_val = tuple([float(img_fill_val)] * 3)
elif isinstance(img_fill_val, tuple):
assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\
f'have 3 elements. got {len(img_fill_val)}.'
img_fill_val = tuple([float(val) for val in img_fill_val])
else:
raise ValueError(
'img_fill_val must be float or tuple with 3 elements.')
assert np.all([0 <= val <= 255 for val in img_fill_val]), \
'all elements of img_fill_val should between range [0,255]. '\
f'got {img_fill_val}.'
        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\
            f'got {prob}.'
assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\
f'should be type int or float. got type {type(max_rotate_angle)}.'
self.level = level
self.scale = scale
# Rotation angle in degrees. Positive values mean
# clockwise rotation.
self.angle = level_to_value(level, max_rotate_angle)
self.center = center
self.img_fill_val = img_fill_val
self.seg_ignore_label = seg_ignore_label
self.prob = prob
self.max_rotate_angle = max_rotate_angle
self.random_negative_prob = random_negative_prob
def _rotate_img(self, results, angle, center=None, scale=1.0):
"""Rotate the image.
Args:
results (dict): Result dict from loading pipeline.
angle (float): Rotation angle in degrees, positive values
mean clockwise rotation. Same in ``mmcv.imrotate``.
center (tuple[float], optional): Center point (w, h) of the
rotation. Same in ``mmcv.imrotate``.
scale (int | float): Isotropic scale factor. Same in
``mmcv.imrotate``.
"""
for key in results.get('img_fields', ['img']):
img = results[key].copy()
img_rotated = mmcv.imrotate(
img, angle, center, scale, border_value=self.img_fill_val)
results[key] = img_rotated.astype(img.dtype)
def _rotate_bboxes(self, results, rotate_matrix):
"""Rotate the bboxes."""
h, w, c = results['img_shape']
for key in results.get('bbox_fields', []):
min_x, min_y, max_x, max_y = np.split(
results[key], results[key].shape[-1], axis=-1)
coordinates = np.stack([[min_x, min_y], [max_x, min_y],
[min_x, max_y],
[max_x, max_y]]) # [4, 2, nb_bbox, 1]
# pad 1 to convert from format [x, y] to homogeneous
# coordinates format [x, y, 1]
coordinates = np.concatenate(
(coordinates,
np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)),
axis=1) # [4, 3, nb_bbox, 1]
coordinates = coordinates.transpose(
(2, 0, 1, 3)) # [nb_bbox, 4, 3, 1]
rotated_coords = np.matmul(rotate_matrix,
coordinates) # [nb_bbox, 4, 2, 1]
rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2]
min_x, min_y = np.min(
rotated_coords[:, :, 0], axis=1), np.min(
rotated_coords[:, :, 1], axis=1)
max_x, max_y = np.max(
rotated_coords[:, :, 0], axis=1), np.max(
rotated_coords[:, :, 1], axis=1)
min_x, min_y = np.clip(
min_x, a_min=0, a_max=w), np.clip(
min_y, a_min=0, a_max=h)
max_x, max_y = np.clip(
max_x, a_min=min_x, a_max=w), np.clip(
max_y, a_min=min_y, a_max=h)
results[key] = np.stack([min_x, min_y, max_x, max_y],
axis=-1).astype(results[key].dtype)
def _rotate_masks(self,
results,
angle,
center=None,
scale=1.0,
fill_val=0):
"""Rotate the masks."""
h, w, c = results['img_shape']
for key in results.get('mask_fields', []):
masks = results[key]
results[key] = masks.rotate((h, w), angle, center, scale, fill_val)
def _rotate_seg(self,
results,
angle,
center=None,
scale=1.0,
fill_val=255):
"""Rotate the segmentation map."""
for key in results.get('seg_fields', []):
seg = results[key].copy()
results[key] = mmcv.imrotate(
seg, angle, center, scale,
border_value=fill_val).astype(seg.dtype)
def _filter_invalid(self, results, min_bbox_size=0):
"""Filter bboxes and corresponding masks too small after rotate
augmentation."""
bbox2label, bbox2mask, _ = bbox2fields()
for key in results.get('bbox_fields', []):
bbox_w = results[key][:, 2] - results[key][:, 0]
bbox_h = results[key][:, 3] - results[key][:, 1]
valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
valid_inds = np.nonzero(valid_inds)[0]
results[key] = results[key][valid_inds]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][valid_inds]
def __call__(self, results):
"""Call function to rotate images, bounding boxes, masks and semantic
segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Rotated results.
"""
if np.random.rand() > self.prob:
return results
h, w = results['img'].shape[:2]
center = self.center
if center is None:
center = ((w - 1) * 0.5, (h - 1) * 0.5)
angle = random_negative(self.angle, self.random_negative_prob)
self._rotate_img(results, angle, center, self.scale)
rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
self._rotate_bboxes(results, rotate_matrix)
self._rotate_masks(results, angle, center, self.scale, fill_val=0)
self._rotate_seg(
results, angle, center, self.scale, fill_val=self.seg_ignore_label)
self._filter_invalid(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(level={self.level}, '
repr_str += f'scale={self.scale}, '
repr_str += f'center={self.center}, '
repr_str += f'img_fill_val={self.img_fill_val}, '
repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
repr_str += f'prob={self.prob}, '
repr_str += f'max_rotate_angle={self.max_rotate_angle}, '
repr_str += f'random_negative_prob={self.random_negative_prob})'
return repr_str
@PIPELINES.register_module()
class Translate(object):
"""Translate the images, bboxes, masks and segmentation maps horizontally
or vertically.
Args:
        level (int | float): The level for Translate, which should be in
range [0,_MAX_LEVEL].
prob (float): The probability for performing translation and
should be in range [0, 1].
img_fill_val (int | float | tuple): The filled value for image
border. If float, the same fill value will be used for all
            the three channels of image. If tuple, it should have 3
            elements (i.e. equal to the number of channels of the image).
seg_ignore_label (int): The fill value used for segmentation map.
Note this value must equals ``ignore_label`` in ``semantic_head``
of the corresponding config. Default 255.
direction (str): The translate direction, either "horizontal"
or "vertical".
        max_translate_offset (int | float): The maximum pixel offset for
            Translate.
random_negative_prob (float): The probability that turns the
offset negative.
        min_size (int | float): The minimum pixel size used to filter out
            invalid bboxes after the translation.
"""
def __init__(self,
level,
prob=0.5,
img_fill_val=128,
seg_ignore_label=255,
direction='horizontal',
max_translate_offset=250.,
random_negative_prob=0.5,
min_size=0):
assert isinstance(level, (int, float)), \
'The level must be type int or float.'
assert 0 <= level <= _MAX_LEVEL, \
'The level used for calculating Translate\'s offset should be ' \
'in range [0,_MAX_LEVEL]'
assert 0 <= prob <= 1.0, \
'The probability of translation should be in range [0, 1].'
if isinstance(img_fill_val, (float, int)):
img_fill_val = tuple([float(img_fill_val)] * 3)
elif isinstance(img_fill_val, tuple):
assert len(img_fill_val) == 3, \
'img_fill_val as tuple must have 3 elements.'
img_fill_val = tuple([float(val) for val in img_fill_val])
else:
raise ValueError('img_fill_val must be type float or tuple.')
assert np.all([0 <= val <= 255 for val in img_fill_val]), \
'all elements of img_fill_val should between range [0,255].'
assert direction in ('horizontal', 'vertical'), \
'direction should be "horizontal" or "vertical".'
assert isinstance(max_translate_offset, (int, float)), \
'The max_translate_offset must be type int or float.'
# the offset used for translation
self.offset = int(level_to_value(level, max_translate_offset))
self.level = level
self.prob = prob
self.img_fill_val = img_fill_val
self.seg_ignore_label = seg_ignore_label
self.direction = direction
self.max_translate_offset = max_translate_offset
self.random_negative_prob = random_negative_prob
self.min_size = min_size
def _translate_img(self, results, offset, direction='horizontal'):
"""Translate the image.
Args:
results (dict): Result dict from loading pipeline.
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
"""
for key in results.get('img_fields', ['img']):
img = results[key].copy()
results[key] = mmcv.imtranslate(
img, offset, direction, self.img_fill_val).astype(img.dtype)
def _translate_bboxes(self, results, offset):
"""Shift bboxes horizontally or vertically, according to offset."""
h, w, c = results['img_shape']
for key in results.get('bbox_fields', []):
min_x, min_y, max_x, max_y = np.split(
results[key], results[key].shape[-1], axis=-1)
if self.direction == 'horizontal':
min_x = np.maximum(0, min_x + offset)
max_x = np.minimum(w, max_x + offset)
elif self.direction == 'vertical':
min_y = np.maximum(0, min_y + offset)
max_y = np.minimum(h, max_y + offset)
            # the boxes translated outside of the image will be filtered along with
# the corresponding masks, by invoking ``_filter_invalid``.
results[key] = np.concatenate([min_x, min_y, max_x, max_y],
axis=-1)
def _translate_masks(self,
results,
offset,
direction='horizontal',
fill_val=0):
"""Translate masks horizontally or vertically."""
h, w, c = results['img_shape']
for key in results.get('mask_fields', []):
masks = results[key]
results[key] = masks.translate((h, w), offset, direction, fill_val)
def _translate_seg(self,
results,
offset,
direction='horizontal',
fill_val=255):
"""Translate segmentation maps horizontally or vertically."""
for key in results.get('seg_fields', []):
seg = results[key].copy()
results[key] = mmcv.imtranslate(seg, offset, direction,
fill_val).astype(seg.dtype)
def _filter_invalid(self, results, min_size=0):
"""Filter bboxes and masks too small or translated out of image."""
bbox2label, bbox2mask, _ = bbox2fields()
for key in results.get('bbox_fields', []):
bbox_w = results[key][:, 2] - results[key][:, 0]
bbox_h = results[key][:, 3] - results[key][:, 1]
valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
valid_inds = np.nonzero(valid_inds)[0]
results[key] = results[key][valid_inds]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][valid_inds]
return results
def __call__(self, results):
"""Call function to translate images, bounding boxes, masks and
semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Translated results.
"""
if np.random.rand() > self.prob:
return results
offset = random_negative(self.offset, self.random_negative_prob)
self._translate_img(results, offset, self.direction)
self._translate_bboxes(results, offset)
        # fill_val defaults to 0 for BitmapMasks and None for PolygonMasks.
self._translate_masks(results, offset, self.direction)
# fill_val set to ``seg_ignore_label`` for the ignored value
# of segmentation map.
self._translate_seg(
results, offset, self.direction, fill_val=self.seg_ignore_label)
self._filter_invalid(results, min_size=self.min_size)
return results
@PIPELINES.register_module()
class ColorTransform(object):
"""Apply Color transformation to image. The bboxes, masks, and
segmentations are not modified.
Args:
level (int | float): Should be in range [0,_MAX_LEVEL].
prob (float): The probability for performing Color transformation.
"""
def __init__(self, level, prob=0.5):
assert isinstance(level, (int, float)), \
'The level must be type int or float.'
assert 0 <= level <= _MAX_LEVEL, \
'The level should be in range [0,_MAX_LEVEL].'
assert 0 <= prob <= 1.0, \
'The probability should be in range [0,1].'
self.level = level
self.prob = prob
self.factor = enhance_level_to_value(level)
def _adjust_color_img(self, results, factor=1.0):
"""Apply Color transformation to image."""
for key in results.get('img_fields', ['img']):
            # NOTE the image is assumed to be in BGR format by default
img = results[key]
results[key] = mmcv.adjust_color(img, factor).astype(img.dtype)
def __call__(self, results):
"""Call function for Color transformation.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Colored results.
"""
if np.random.rand() > self.prob:
return results
self._adjust_color_img(results, self.factor)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(level={self.level}, '
repr_str += f'prob={self.prob})'
return repr_str
@PIPELINES.register_module()
class EqualizeTransform(object):
"""Apply Equalize transformation to image. The bboxes, masks and
segmentations are not modified.
Args:
prob (float): The probability for performing Equalize transformation.
"""
def __init__(self, prob=0.5):
assert 0 <= prob <= 1.0, \
'The probability should be in range [0,1].'
self.prob = prob
def _imequalize(self, results):
"""Equalizes the histogram of one image."""
for key in results.get('img_fields', ['img']):
img = results[key]
results[key] = mmcv.imequalize(img).astype(img.dtype)
def __call__(self, results):
"""Call function for Equalize transformation.
Args:
results (dict): Results dict from loading pipeline.
Returns:
dict: Results after the transformation.
"""
if np.random.rand() > self.prob:
return results
self._imequalize(results)
return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(prob={self.prob})'
        return repr_str
@PIPELINES.register_module()
class BrightnessTransform(object):
"""Apply Brightness transformation to image. The bboxes, masks and
segmentations are not modified.
Args:
level (int | float): Should be in range [0,_MAX_LEVEL].
prob (float): The probability for performing Brightness transformation.
"""
def __init__(self, level, prob=0.5):
assert isinstance(level, (int, float)), \
'The level must be type int or float.'
assert 0 <= level <= _MAX_LEVEL, \
'The level should be in range [0,_MAX_LEVEL].'
assert 0 <= prob <= 1.0, \
'The probability should be in range [0,1].'
self.level = level
self.prob = prob
self.factor = enhance_level_to_value(level)
def _adjust_brightness_img(self, results, factor=1.0):
"""Adjust the brightness of image."""
for key in results.get('img_fields', ['img']):
img = results[key]
results[key] = mmcv.adjust_brightness(img,
factor).astype(img.dtype)
def __call__(self, results):
"""Call function for Brightness transformation.
Args:
results (dict): Results dict from loading pipeline.
Returns:
dict: Results after the transformation.
"""
if np.random.rand() > self.prob:
return results
self._adjust_brightness_img(results, self.factor)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(level={self.level}, '
repr_str += f'prob={self.prob})'
return repr_str
@PIPELINES.register_module()
class ContrastTransform(object):
"""Apply Contrast transformation to image. The bboxes, masks and
segmentations are not modified.
Args:
level (int | float): Should be in range [0,_MAX_LEVEL].
prob (float): The probability for performing Contrast transformation.
"""
def __init__(self, level, prob=0.5):
assert isinstance(level, (int, float)), \
'The level must be type int or float.'
assert 0 <= level <= _MAX_LEVEL, \
'The level should be in range [0,_MAX_LEVEL].'
assert 0 <= prob <= 1.0, \
'The probability should be in range [0,1].'
self.level = level
self.prob = prob
self.factor = enhance_level_to_value(level)
def _adjust_contrast_img(self, results, factor=1.0):
"""Adjust the image contrast."""
for key in results.get('img_fields', ['img']):
img = results[key]
results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)
def __call__(self, results):
"""Call function for Contrast transformation.
Args:
results (dict): Results dict from loading pipeline.
Returns:
dict: Results after the transformation.
"""
if np.random.rand() > self.prob:
return results
self._adjust_contrast_img(results, self.factor)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(level={self.level}, '
repr_str += f'prob={self.prob})'
return repr_str
| insightface/detection/scrfd/mmdet/datasets/pipelines/auto_augment.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/datasets/pipelines/auto_augment.py",
"repo_id": "insightface",
"token_count": 17497
} | 103 |
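All the transforms above share the same level conventions: level_to_value scales a level in [0, 10] linearly up to a transform-specific maximum (shear magnitude, rotation angle, translation offset), enhance_level_to_value maps it to an enhancement factor of roughly 0.1-1.9 around the identity value 1.0, and random_negative flips the sign with some probability. A stand-alone sketch of those mappings, assuming nothing beyond the Python standard library; the values are illustrative.

# Stand-alone sketch of the level-to-magnitude conventions used above.
_MAX_LEVEL = 10

def level_to_value(level, max_value):
    return (level / _MAX_LEVEL) * max_value

def enhance_level_to_value(level, a=1.8, b=0.1):
    return (level / _MAX_LEVEL) * a + b

for level in (0, 5, 10):
    print(f'level={level}: '
          f'shear={level_to_value(level, 0.3):.2f}, '       # max_shear_magnitude=0.3
          f'rotate={level_to_value(level, 30):.0f} deg, '    # max_rotate_angle=30
          f'enhance factor={enhance_level_to_value(level):.2f}')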
from .darknet import Darknet
from .detectors_resnet import DetectoRS_ResNet
from .detectors_resnext import DetectoRS_ResNeXt
from .hourglass import HourglassNet
from .hrnet import HRNet
from .regnet import RegNet
from .res2net import Res2Net
from .resnest import ResNeSt
from .resnet import ResNet, ResNetV1d, ResNetV1e
from .resnext import ResNeXt
from .ssd_vgg import SSDVGG
from .trident_resnet import TridentResNet
from .mobilenet import MobileNetV1
__all__ = [
'RegNet', 'ResNet', 'ResNetV1d', 'ResNetV1e', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net',
'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet',
'ResNeSt', 'TridentResNet', 'MobileNetV1'
]
| insightface/detection/scrfd/mmdet/models/backbones/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/backbones/__init__.py",
"repo_id": "insightface",
"token_count": 262
} | 104 |
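Each backbone listed above is registered under BACKBONES, so a detector config names it by 'type' and build_backbone constructs it. A minimal sketch, assuming mmdet and torch are installed; the ResNet-18 settings are illustrative and smaller than what the SCRFD configs actually use.

# A minimal sketch, assuming mmdet and torch are installed.
import torch
from mmdet.models import build_backbone

backbone = build_backbone(
    dict(
        type='ResNet',
        depth=18,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        norm_eval=True,
        style='pytorch'))
backbone.init_weights()
backbone.eval()

with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))

# One feature map per out_index, at strides 4 / 8 / 16 / 32.
print([tuple(f.shape) for f in feats])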
from abc import abstractmethod
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32
from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin
@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
"""Anchor-free head (FCOS, Fovea, RepPoints, etc.).
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of hidden channels. Used in child classes.
stacked_convs (int): Number of stacking convs of the head.
strides (tuple): Downsample factor of each feature map.
dcn_on_last_conv (bool): If true, use dcn in the last layer of
towers. Default: False.
conv_bias (bool | str): If specified as `auto`, it will be decided by
the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
None, otherwise False. Default: "auto".
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
train_cfg (dict): Training config of anchor head.
test_cfg (dict): Testing config of anchor head.
""" # noqa: W605
_version = 1
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
dcn_on_last_conv=False,
conv_bias='auto',
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
conv_cfg=None,
norm_cfg=None,
train_cfg=None,
test_cfg=None):
super(AnchorFreeHead, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.dcn_on_last_conv = dcn_on_last_conv
assert conv_bias == 'auto' or isinstance(conv_bias, bool)
self.conv_bias = conv_bias
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
"""Initialize layers of the head."""
self._init_cls_convs()
self._init_reg_convs()
self._init_predictor()
def _init_cls_convs(self):
"""Initialize classification conv layers of the head."""
self.cls_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
if self.dcn_on_last_conv and i == self.stacked_convs - 1:
conv_cfg = dict(type='DCNv2')
else:
conv_cfg = self.conv_cfg
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.conv_bias))
def _init_reg_convs(self):
"""Initialize bbox regression conv layers of the head."""
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
if self.dcn_on_last_conv and i == self.stacked_convs - 1:
conv_cfg = dict(type='DCNv2')
else:
conv_cfg = self.conv_cfg
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.conv_bias))
def _init_predictor(self):
"""Initialize predictor layers of the head."""
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
def init_weights(self):
"""Initialize weights of the head."""
for m in self.cls_convs:
if isinstance(m.conv, nn.Conv2d):
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
if isinstance(m.conv, nn.Conv2d):
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv_cls, std=0.01, bias=bias_cls)
normal_init(self.conv_reg, std=0.01)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
"""Hack some keys of the model state dict so that can load checkpoints
of previous version."""
version = local_metadata.get('version', None)
if version is None:
# the key is different in early versions
# for example, 'fcos_cls' become 'conv_cls' now
bbox_head_keys = [
k for k in state_dict.keys() if k.startswith(prefix)
]
ori_predictor_keys = []
new_predictor_keys = []
# e.g. 'fcos_cls' or 'fcos_reg'
for key in bbox_head_keys:
ori_predictor_keys.append(key)
key = key.split('.')
conv_name = None
if key[1].endswith('cls'):
conv_name = 'conv_cls'
elif key[1].endswith('reg'):
conv_name = 'conv_reg'
elif key[1].endswith('centerness'):
conv_name = 'conv_centerness'
else:
assert NotImplementedError
if conv_name is not None:
key[1] = conv_name
new_predictor_keys.append('.'.join(key))
else:
ori_predictor_keys.pop(-1)
for i in range(len(new_predictor_keys)):
state_dict[new_predictor_keys[i]] = state_dict.pop(
ori_predictor_keys[i])
super()._load_from_state_dict(state_dict, prefix, local_metadata,
strict, missing_keys, unexpected_keys,
error_msgs)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually contain classification scores and bbox predictions.
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * 4.
"""
return multi_apply(self.forward_single, feats)[:2]
def forward_single(self, x):
"""Forward features of a single scale levle.
Args:
x (Tensor): FPN feature maps of the specified stride.
Returns:
            tuple: Scores for each class, bbox predictions, and the features
                after the classification and regression conv layers; some
                models, such as FCOS, need these features.
"""
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.conv_cls(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
bbox_pred = self.conv_reg(reg_feat)
return cls_score, bbox_pred, cls_feat, reg_feat
@abstractmethod
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute loss of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level,
each is a 4D-tensor, the channel number is
num_points * num_classes.
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level, each is a 4D-tensor, the channel number is
num_points * 4.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
"""
raise NotImplementedError
@abstractmethod
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg=None,
rescale=None):
"""Transform network output for a batch into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_points * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_points * 4, H, W)
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used
rescale (bool): If True, return boxes in original image space
"""
raise NotImplementedError
@abstractmethod
def get_targets(self, points, gt_bboxes_list, gt_labels_list):
"""Compute regression, classification and centerss targets for points
in multiple images.
Args:
points (list[Tensor]): Points of each fpn level, each has shape
(num_points, 2).
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
each has shape (num_gt, 4).
gt_labels_list (list[Tensor]): Ground truth labels of each box,
each has shape (num_gt,).
"""
raise NotImplementedError
def _get_points_single(self,
featmap_size,
stride,
dtype,
device,
flatten=False):
"""Get points of a single scale level."""
h, w = featmap_size
x_range = torch.arange(w, dtype=dtype, device=device)
y_range = torch.arange(h, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
if flatten:
y = y.flatten()
x = x.flatten()
return y, x
def get_points(self, featmap_sizes, dtype, device, flatten=False):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self._get_points_single(featmap_sizes[i], self.strides[i],
dtype, device, flatten))
return mlvl_points
def aug_test(self, feats, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
feats (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains features for all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[ndarray]: bbox results of each class
"""
return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
| insightface/detection/scrfd/mmdet/models/dense_heads/anchor_free_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/anchor_free_head.py",
"repo_id": "insightface",
"token_count": 6891
} | 105 |
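get_points calls _get_points_single once per FPN level to build an index grid of the feature-map size; concrete heads such as FCOS then shift the grid to cell centers and scale by the stride to obtain image-plane point locations. A stand-alone sketch of that construction, assuming only PyTorch.

# Stand-alone sketch mirroring _get_points_single, assuming only PyTorch.
import torch

def points_single_level(featmap_size, stride, flatten=True):
    h, w = featmap_size
    x_range = torch.arange(w, dtype=torch.float32)
    y_range = torch.arange(h, dtype=torch.float32)
    y, x = torch.meshgrid(y_range, x_range)
    if flatten:
        y, x = y.flatten(), x.flatten()
    # FCOS-style heads place each point at the center of its stride-sized cell.
    return torch.stack((x * stride + stride // 2,
                        y * stride + stride // 2), dim=-1)

for size, stride in [((8, 8), 8), ((4, 4), 16)]:
    pts = points_single_level(size, stride)
    print(stride, tuple(pts.shape), pts[0].tolist(), pts[-1].tolist())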
import numpy as np
import torch
from mmcv.runner import force_fp32
from mmdet.core import multi_apply, multiclass_nms
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from mmdet.models import HEADS
from mmdet.models.dense_heads import ATSSHead
EPS = 1e-12
try:
import sklearn.mixture as skm
except ImportError:
skm = None
def levels_to_images(mlvl_tensor):
"""Concat multi-level feature maps by image.
[feature_level0, feature_level1...] -> [feature_image0, feature_image1...]
Convert the shape of each element in mlvl_tensor from (N, C, H, W) to
    (N, H*W, C), then split each element into N elements of shape (H*W, C), and
    concat the elements belonging to the same image across all levels along the
    first dimension.
Args:
mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from
corresponding level. Each element is of shape (N, C, H, W)
Returns:
list[torch.Tensor]: A list that contains N tensors and each tensor is
of shape (num_elements, C)
"""
batch_size = mlvl_tensor[0].size(0)
batch_list = [[] for _ in range(batch_size)]
channels = mlvl_tensor[0].size(1)
for t in mlvl_tensor:
t = t.permute(0, 2, 3, 1)
t = t.view(batch_size, -1, channels).contiguous()
for img in range(batch_size):
batch_list[img].append(t[img])
return [torch.cat(item, 0) for item in batch_list]
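# Illustrative note (shapes only, not part of the original file): with two FPN
# levels of sizes 4x4 and 2x2, C=6 channels and a batch of N=2 images,
#   mlvl_tensor = [torch.zeros(2, 6, 4, 4), torch.zeros(2, 6, 2, 2)]
#   per_image = levels_to_images(mlvl_tensor)
# gives len(per_image) == 2 and per_image[0].shape == (4 * 4 + 2 * 2, 6), i.e.
# every location of every level for one image stacked along the first dim.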
@HEADS.register_module()
class PAAHead(ATSSHead):
"""Head of PAAAssignment: Probabilistic Anchor Assignment with IoU
Prediction for Object Detection.
Code is modified from the `official github repo
<https://github.com/kkhoot/PAA/blob/master/paa_core
/modeling/rpn/paa/loss.py>`_.
More details can be found in the `paper
<https://arxiv.org/abs/2007.08103>`_ .
Args:
topk (int): Select topk samples with smallest loss in
each level.
score_voting (bool): Whether to use score voting in post-process.
covariance_type : String describing the type of covariance parameters
to be used in :class:`sklearn.mixture.GaussianMixture`.
It must be one of:
- 'full': each component has its own general covariance matrix
- 'tied': all components share the same general covariance matrix
- 'diag': each component has its own diagonal covariance matrix
- 'spherical': each component has its own single variance
Default: 'diag'. From 'full' to 'spherical', the gmm fitting
process is faster yet the performance could be influenced. For most
cases, 'diag' should be a good choice.
"""
def __init__(self,
*args,
topk=9,
score_voting=True,
covariance_type='diag',
**kwargs):
# topk used in paa reassign process
self.topk = topk
self.with_score_voting = score_voting
self.covariance_type = covariance_type
super(PAAHead, self).__init__(*args, **kwargs)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
def loss(self,
cls_scores,
bbox_preds,
iou_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
iou_preds (list[Tensor]): iou_preds for each scale
level with shape (N, num_anchors * 1, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
                boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss gmm_assignment.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
)
(labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
pos_gt_index) = cls_reg_targets
cls_scores = levels_to_images(cls_scores)
cls_scores = [
item.reshape(-1, self.cls_out_channels) for item in cls_scores
]
bbox_preds = levels_to_images(bbox_preds)
bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
iou_preds = levels_to_images(iou_preds)
iou_preds = [item.reshape(-1, 1) for item in iou_preds]
pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,
cls_scores, bbox_preds, labels,
labels_weight, bboxes_target,
bboxes_weight, pos_inds)
with torch.no_grad():
labels, label_weights, bbox_weights, num_pos = multi_apply(
self.paa_reassign,
pos_losses_list,
labels,
labels_weight,
bboxes_weight,
pos_inds,
pos_gt_index,
anchor_list,
)
num_pos = sum(num_pos)
# convert all tensor list to a flatten tensor
cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))
labels = torch.cat(labels, 0).view(-1)
flatten_anchors = torch.cat(
[torch.cat(item, 0) for item in anchor_list])
labels_weight = torch.cat(labels_weight, 0).view(-1)
bboxes_target = torch.cat(bboxes_target,
0).view(-1, bboxes_target[0].size(-1))
pos_inds_flatten = ((labels >= 0)
&
(labels < self.num_classes)).nonzero().reshape(-1)
losses_cls = self.loss_cls(
cls_scores,
labels,
labels_weight,
avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0
if num_pos:
pos_bbox_pred = self.bbox_coder.decode(
flatten_anchors[pos_inds_flatten],
bbox_preds[pos_inds_flatten])
pos_bbox_target = bboxes_target[pos_inds_flatten]
iou_target = bbox_overlaps(
pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
losses_iou = self.loss_centerness(
iou_preds[pos_inds_flatten],
iou_target.unsqueeze(-1),
avg_factor=num_pos)
losses_bbox = self.loss_bbox(
pos_bbox_pred,
pos_bbox_target,
iou_target.clamp(min=EPS),
avg_factor=iou_target.sum())
else:
losses_iou = iou_preds.sum() * 0
losses_bbox = bbox_preds.sum() * 0
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight,
bbox_target, bbox_weight, pos_inds):
"""Calculate loss of all potential positive samples obtained from first
match process.
Args:
anchors (list[Tensor]): Anchors of each scale.
cls_score (Tensor): Box scores of single image with shape
(num_anchors, num_classes)
bbox_pred (Tensor): Box energies / deltas of single image
with shape (num_anchors, 4)
label (Tensor): classification target of each anchor with
shape (num_anchors,)
label_weight (Tensor): Classification loss weight of each
anchor with shape (num_anchors).
            bbox_target (Tensor): Regression target of each anchor with
shape (num_anchors, 4).
bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
pos_inds (Tensor): Index of all positive samples got from
first assign process.
Returns:
Tensor: Losses of all positive samples in single image.
"""
if not len(pos_inds):
return cls_score.new([]),
anchors_all_level = torch.cat(anchors, 0)
pos_scores = cls_score[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_label = label[pos_inds]
pos_label_weight = label_weight[pos_inds]
pos_bbox_target = bbox_target[pos_inds]
pos_bbox_weight = bbox_weight[pos_inds]
pos_anchors = anchors_all_level[pos_inds]
pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred)
# to keep loss dimension
loss_cls = self.loss_cls(
pos_scores,
pos_label,
pos_label_weight,
avg_factor=self.loss_cls.loss_weight,
reduction_override='none')
loss_bbox = self.loss_bbox(
pos_bbox_pred,
pos_bbox_target,
pos_bbox_weight,
avg_factor=self.loss_cls.loss_weight,
reduction_override='none')
loss_cls = loss_cls.sum(-1)
pos_loss = loss_bbox + loss_cls
return pos_loss,
def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
pos_inds, pos_gt_inds, anchors):
"""Fit loss to GMM distribution and separate positive, ignore, negative
samples again with GMM model.
Args:
pos_losses (Tensor): Losses of all positive samples in
single image.
label (Tensor): classification target of each anchor with
shape (num_anchors,)
label_weight (Tensor): Classification loss weight of each
anchor with shape (num_anchors).
bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
pos_inds (Tensor): Index of all positive samples got from
first assign process.
pos_gt_inds (Tensor): Gt_index of all positive samples got
from first assign process.
anchors (list[Tensor]): Anchors of each scale.
Returns:
tuple: Usually returns a tuple containing learning targets.
- label (Tensor): classification target of each anchor after
paa assign, with shape (num_anchors,)
- label_weight (Tensor): Classification loss weight of each
anchor after paa assign, with shape (num_anchors).
- bbox_weight (Tensor): Bbox weight of each anchor with shape
(num_anchors, 4).
- num_pos (int): The number of positive samples after paa
assign.
"""
if not len(pos_inds):
return label, label_weight, bbox_weight, 0
num_gt = pos_gt_inds.max() + 1
num_level = len(anchors)
num_anchors_each_level = [item.size(0) for item in anchors]
num_anchors_each_level.insert(0, 0)
inds_level_interval = np.cumsum(num_anchors_each_level)
pos_level_mask = []
for i in range(num_level):
mask = (pos_inds >= inds_level_interval[i]) & (
pos_inds < inds_level_interval[i + 1])
pos_level_mask.append(mask)
pos_inds_after_paa = [label.new_tensor([])]
ignore_inds_after_paa = [label.new_tensor([])]
for gt_ind in range(num_gt):
pos_inds_gmm = []
pos_loss_gmm = []
gt_mask = pos_gt_inds == gt_ind
for level in range(num_level):
level_mask = pos_level_mask[level]
level_gt_mask = level_mask & gt_mask
value, topk_inds = pos_losses[level_gt_mask].topk(
min(level_gt_mask.sum(), self.topk), largest=False)
pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds])
pos_loss_gmm.append(value)
pos_inds_gmm = torch.cat(pos_inds_gmm)
pos_loss_gmm = torch.cat(pos_loss_gmm)
# fix gmm need at least two sample
if len(pos_inds_gmm) < 2:
continue
device = pos_inds_gmm.device
pos_loss_gmm, sort_inds = pos_loss_gmm.sort()
pos_inds_gmm = pos_inds_gmm[sort_inds]
pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()
min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
weights_init = np.array([0.5, 0.5])
precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full
if self.covariance_type == 'spherical':
precisions_init = precisions_init.reshape(2)
elif self.covariance_type == 'diag':
precisions_init = precisions_init.reshape(2, 1)
elif self.covariance_type == 'tied':
precisions_init = np.array([[1.0]])
if skm is None:
raise ImportError('Please run "pip install sklearn" '
'to install sklearn first.')
gmm = skm.GaussianMixture(
2,
weights_init=weights_init,
means_init=means_init,
precisions_init=precisions_init,
covariance_type=self.covariance_type)
gmm.fit(pos_loss_gmm)
gmm_assignment = gmm.predict(pos_loss_gmm)
scores = gmm.score_samples(pos_loss_gmm)
gmm_assignment = torch.from_numpy(gmm_assignment).to(device)
scores = torch.from_numpy(scores).to(device)
pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(
gmm_assignment, scores, pos_inds_gmm)
pos_inds_after_paa.append(pos_inds_temp)
ignore_inds_after_paa.append(ignore_inds_temp)
pos_inds_after_paa = torch.cat(pos_inds_after_paa)
ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)
reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)
reassign_ids = pos_inds[reassign_mask]
label[reassign_ids] = self.num_classes
label_weight[ignore_inds_after_paa] = 0
bbox_weight[reassign_ids] = 0
num_pos = len(pos_inds_after_paa)
return label, label_weight, bbox_weight, num_pos
def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
"""A general separation scheme for gmm model.
It separates a GMM distribution of candidate samples into three
parts, 0 1 and uncertain areas, and you can implement other
separation schemes by rewriting this function.
Args:
gmm_assignment (Tensor): The prediction of GMM which is of shape
(num_samples,). The 0/1 value indicates the distribution
that each sample comes from.
scores (Tensor): The probability of sample coming from the
fit GMM distribution. The tensor is of shape (num_samples,).
pos_inds_gmm (Tensor): All the indexes of samples which are used
to fit GMM model. The tensor is of shape (num_samples,)
Returns:
tuple[Tensor]: The indices of positive and ignored samples.
- pos_inds_temp (Tensor): Indices of positive samples.
- ignore_inds_temp (Tensor): Indices of ignore samples.
"""
        # The implementation is (c) in Fig.3 of the original paper instead of (b).
# You can refer to issues such as
# https://github.com/kkhoot/PAA/issues/8 and
# https://github.com/kkhoot/PAA/issues/9.
fgs = gmm_assignment == 0
pos_inds_temp = fgs.new_tensor([], dtype=torch.long)
ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)
if fgs.nonzero().numel():
_, pos_thr_ind = scores[fgs].topk(1)
pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]
ignore_inds_temp = pos_inds_gmm.new_tensor([])
return pos_inds_temp, ignore_inds_temp
def get_targets(
self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True,
):
"""Get targets for PAA head.
        This method is almost the same as `AnchorHead.get_targets()`. We
        directly return the results from ``_get_targets_single`` instead of
        mapping them to feature levels with the ``images_to_levels`` function.
Args:
anchor_list (list[list[Tensor]]): Multi level anchors of each
image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, 4).
valid_flag_list (list[list[Tensor]]): Multi level valid flags of
each image. The outer list indicates images, and the inner list
corresponds to feature levels of the image. Each element of
the inner list is a tensor of shape (num_anchors, )
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
ignored.
gt_labels_list (list[Tensor]): Ground truth labels of each box.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: Usually returns a tuple containing learning targets.
- labels (list[Tensor]): Labels of all anchors, each with
shape (num_anchors,).
                - label_weights (list[Tensor]): Label weights of all anchors,
                    each with shape (num_anchors,).
                - bbox_targets (list[Tensor]): BBox targets of all anchors,
                    each with shape (num_anchors, 4).
                - bbox_weights (list[Tensor]): BBox weights of all anchors,
                    each with shape (num_anchors, 4).
                - pos_inds (list[Tensor]): Indices of positive samples within
                    all anchors of each image.
                - gt_inds (list[Tensor]): Ground-truth indices of the positive
                    samples within all anchors of each image.
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
concat_anchor_list = []
concat_valid_flag_list = []
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
concat_anchor_list.append(torch.cat(anchor_list[i]))
concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
results = multi_apply(
self._get_targets_single,
concat_anchor_list,
concat_valid_flag_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
(labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,
valid_neg_inds, sampling_result) = results
# Due to valid flag of anchors, we have to calculate the real pos_inds
# in origin anchor set.
pos_inds = []
for i, single_labels in enumerate(labels):
pos_mask = (0 <= single_labels) & (
single_labels < self.num_classes)
pos_inds.append(pos_mask.nonzero().view(-1))
gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
gt_inds)
def _get_targets_single(self,
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression and classification targets for anchors in a
single image.
        This method is the same as `AnchorHead._get_targets_single()`.
"""
        assert unmap_outputs, 'We must map outputs back to the original ' \
            'set of anchors in PAAHead'
return super(ATSSHead, self)._get_targets_single(
flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True)
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
iou_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into labeled boxes.
This method is almost same as `ATSSHead._get_bboxes_single()`.
We use sqrt(iou_preds * cls_scores) in NMS process instead of just
cls_scores. Besides, score voting is used when `` score_voting``
is set to True.
"""
assert with_nms, 'PAA only supports "with_nms=True" now'
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
mlvl_iou_preds = []
for cls_score, bbox_pred, iou_preds, anchors in zip(
cls_scores, bbox_preds, iou_preds, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
iou_preds = iou_preds.permute(1, 2, 0).reshape(-1).sigmoid()
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * iou_preds[:, None]).sqrt().max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
iou_preds = iou_preds[topk_inds]
bboxes = self.bbox_coder.decode(
anchors, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_iou_preds.append(iou_preds)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
mlvl_iou_preds = torch.cat(mlvl_iou_preds)
mlvl_nms_scores = (mlvl_scores * mlvl_iou_preds[:, None]).sqrt()
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_nms_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=None)
if self.with_score_voting:
det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels,
mlvl_bboxes,
mlvl_nms_scores,
cfg.score_thr)
return det_bboxes, det_labels
def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,
mlvl_nms_scores, score_thr):
"""Implementation of score voting method works on each remaining boxes
after NMS procedure.
Args:
det_bboxes (Tensor): Remaining boxes after NMS procedure,
with shape (k, 5), each dimension means
(x1, y1, x2, y2, score).
            det_labels (Tensor): The labels of the remaining boxes, with shape
                (k, 1). Labels are 0-based.
            mlvl_bboxes (Tensor): All boxes before the NMS procedure,
                with shape (num_anchors, 4).
            mlvl_nms_scores (Tensor): The scores of all boxes used in the
                NMS procedure, with shape (num_anchors, num_class).
score_thr (float): The score threshold of bboxes.
Returns:
tuple: Usually returns a tuple containing voting results.
- det_bboxes_voted (Tensor): Remaining boxes after
score voting procedure, with shape (k, 5), each
dimension means (x1, y1, x2, y2, score).
- det_labels_voted (Tensor): Label of remaining bboxes
after voting, with shape (num_anchors,).
"""
candidate_mask = mlvl_nms_scores > score_thr
candidate_mask_nozeros = candidate_mask.nonzero()
candidate_inds = candidate_mask_nozeros[:, 0]
candidate_labels = candidate_mask_nozeros[:, 1]
candidate_bboxes = mlvl_bboxes[candidate_inds]
candidate_scores = mlvl_nms_scores[candidate_mask]
det_bboxes_voted = []
det_labels_voted = []
for cls in range(self.cls_out_channels):
candidate_cls_mask = candidate_labels == cls
if not candidate_cls_mask.any():
continue
candidate_cls_scores = candidate_scores[candidate_cls_mask]
candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]
det_cls_mask = det_labels == cls
det_cls_bboxes = det_bboxes[det_cls_mask].view(
-1, det_bboxes.size(-1))
det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],
candidate_cls_bboxes)
for det_ind in range(len(det_cls_bboxes)):
single_det_ious = det_candidate_ious[det_ind]
pos_ious_mask = single_det_ious > 0.01
pos_ious = single_det_ious[pos_ious_mask]
pos_bboxes = candidate_cls_bboxes[pos_ious_mask]
pos_scores = candidate_cls_scores[pos_ious_mask]
pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *
pos_scores)[:, None]
voted_box = torch.sum(
pis * pos_bboxes, dim=0) / torch.sum(
pis, dim=0)
voted_score = det_cls_bboxes[det_ind][-1:][None, :]
det_bboxes_voted.append(
torch.cat((voted_box[None, :], voted_score), dim=1))
det_labels_voted.append(cls)
det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)
det_labels_voted = det_labels.new_tensor(det_labels_voted)
return det_bboxes_voted, det_labels_voted
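# --- Illustrative sketch (not part of the original mmdet implementation) ---
# The snippet below reproduces, in isolation, the core idea behind
# ``paa_reassign``/``gmm_separation_scheme``: per-anchor losses are fitted
# with a two-component 1-D Gaussian mixture and anchors assigned to the
# low-loss component are kept as positives.  All numbers are made up.
if __name__ == '__main__':
    import sklearn.mixture as sklearn_mixture  # explicit import for the demo
    toy_losses = np.array([0.2, 0.25, 0.3, 1.4, 1.6, 1.8]).reshape(-1, 1)
    means_init = np.array([toy_losses.min(), toy_losses.max()]).reshape(2, 1)
    gmm = sklearn_mixture.GaussianMixture(
        2,
        weights_init=np.array([0.5, 0.5]),
        means_init=means_init,
        precisions_init=np.ones((2, 1, 1)),
        covariance_type='full')
    gmm.fit(toy_losses)
    assignment = gmm.predict(toy_losses)  # component 0 was initialised at the min loss
    print('kept as positives:', np.where(assignment == 0)[0])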
| insightface/detection/scrfd/mmdet/models/dense_heads/paa_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/paa_head.py",
"repo_id": "insightface",
"token_count": 14776
} | 106 |
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class ATSS(SingleStageDetector):
"""Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
| insightface/detection/scrfd/mmdet/models/detectors/atss.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/detectors/atss.py",
"repo_id": "insightface",
"token_count": 288
} | 107 |
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
| insightface/detection/scrfd/mmdet/models/detectors/paa.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/detectors/paa.py",
"repo_id": "insightface",
"token_count": 287
} | 108 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
# element-wise losses
loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(
(labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
if label_weights is None:
bin_label_weights = None
else:
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
if pred.dim() != label.dim():
label, weight = _expand_onehot_labels(label, weight, pred.size(-1))
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the
            mask's corresponding object. It is used to select the mask of
            the class the object belongs to when the mask prediction is
            not class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
Returns:
torch.Tensor: The calculated loss
"""
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
loss_weight=1.0):
"""CrossEntropyLoss.
Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                or softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Options are "none", "mean" and "sum". Defaults to 'mean'.
class_weight (list[float], optional): Weight of each class.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
"""
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
            reduction_override (str, optional): The method used to override
                the original reduction method of the loss.
                Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.class_weight is not None:
class_weight = cls_score.new_tensor(
self.class_weight, device=cls_score.device)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_cls
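# --- Usage sketch (illustrative only; all tensors below are dummy data) ---
# Minimal demonstration of the two flavours of ``CrossEntropyLoss`` defined
# above: the default softmax cross-entropy and the sigmoid (binary) variant.
if __name__ == '__main__':
    dummy_pred = torch.randn(4, 3)             # 4 samples, 3 classes
    dummy_label = torch.tensor([0, 2, 1, 2])
    softmax_loss = CrossEntropyLoss()(dummy_pred, dummy_label)
    sigmoid_loss = CrossEntropyLoss(use_sigmoid=True)(dummy_pred, dummy_label)
    print(softmax_loss.item(), sigmoid_loss.item())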
| insightface/detection/scrfd/mmdet/models/losses/cross_entropy_loss.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/losses/cross_entropy_loss.py",
"repo_id": "insightface",
"token_count": 3319
} | 109 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, caffe2_xavier_init
from torch.utils.checkpoint import checkpoint
from ..builder import NECKS
@NECKS.register_module()
class HRFPN(nn.Module):
"""HRFPN (High Resolution Feature Pyrmamids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1):
super(HRFPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def init_weights(self):
"""Initialize the weights of module."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
caffe2_xavier_init(m)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
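# --- Shape sketch (illustrative only; channel/scale numbers are made up) ---
# HRFPN upsamples every branch to the resolution of the first one, concatenates
# them along the channel axis, reduces the channels with a 1x1 conv and then
# pools the fused map to build ``num_outs`` pyramid levels.  The toy tensors
# below only trace the shapes of the fuse-and-pool part.
if __name__ == '__main__':
    branches = [torch.randn(1, c, 64 // 2**i, 64 // 2**i)
                for i, c in enumerate([18, 36, 72, 144])]
    upsampled = [branches[0]] + [
        F.interpolate(x, scale_factor=2**i, mode='bilinear')
        for i, x in enumerate(branches) if i > 0
    ]
    fused = torch.cat(upsampled, dim=1)               # (1, 270, 64, 64)
    pyramid = [fused] + [
        F.avg_pool2d(fused, kernel_size=2**i, stride=2**i) for i in range(1, 5)
    ]
    print([tuple(level.shape) for level in pyramid])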
| insightface/detection/scrfd/mmdet/models/necks/hrfpn.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/necks/hrfpn.py",
"repo_id": "insightface",
"token_count": 1752
} | 110 |
import numpy as np
import torch
from mmdet.core import bbox2roi
from mmdet.models.losses import SmoothL1Loss
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
EPS = 1e-15
@HEADS.register_module()
class DynamicRoIHead(StandardRoIHead):
"""RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_."""
def __init__(self, **kwargs):
super(DynamicRoIHead, self).__init__(**kwargs)
assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
# the IoU history of the past `update_iter_interval` iterations
self.iou_history = []
# the beta history of the past `update_iter_interval` iterations
self.beta_history = []
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""Forward function for training.
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
gt_bboxes (list[Tensor]): each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
cur_iou = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
# record the `iou_topk`-th largest IoU in an image
iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
len(assign_result.max_overlaps))
ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
cur_iou.append(ious[-1].item())
sampling_results.append(sampling_result)
# average the current IoUs over images
cur_iou = np.mean(cur_iou)
self.iou_history.append(cur_iou)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
# update IoU threshold and SmoothL1 beta
update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
if len(self.iou_history) % update_iter_interval == 0:
new_iou_thr, new_beta = self.update_hyperparameters()
return losses
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
num_imgs = len(img_metas)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, self.train_cfg)
# record the `beta_topk`-th smallest target
# `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
# and bbox_weights, respectively
pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
num_pos = len(pos_inds)
cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
num_pos)
cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
self.beta_history.append(cur_target)
loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def update_hyperparameters(self):
"""Update hyperparameters like IoU thresholds for assigner and beta for
SmoothL1 loss based on the training statistics.
Returns:
tuple[float]: the updated ``iou_thr`` and ``beta``.
"""
new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
np.mean(self.iou_history))
self.iou_history = []
self.bbox_assigner.pos_iou_thr = new_iou_thr
self.bbox_assigner.neg_iou_thr = new_iou_thr
self.bbox_assigner.min_pos_iou = new_iou_thr
if (np.median(self.beta_history) < EPS):
# avoid 0 or too small value for new_beta
new_beta = self.bbox_head.loss_bbox.beta
else:
new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
np.median(self.beta_history))
self.beta_history = []
self.bbox_head.loss_bbox.beta = new_beta
return new_iou_thr, new_beta
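# --- Numeric sketch (illustrative only; the statistics below are made up) ---
# ``update_hyperparameters`` raises the assigner IoU threshold towards the mean
# of the recently observed IoUs and lowers the SmoothL1 beta towards the median
# of the recently observed regression targets, e.g.:
if __name__ == '__main__':
    iou_history = [0.42, 0.47, 0.51, 0.55]
    beta_history = [0.08, 0.05, 0.06, 0.04]
    initial_iou, initial_beta = 0.4, 1.0
    new_iou_thr = max(initial_iou, np.mean(iou_history))   # -> 0.4875
    new_beta = min(initial_beta, np.median(beta_history))  # -> 0.055
    print(new_iou_thr, new_beta)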
| insightface/detection/scrfd/mmdet/models/roi_heads/dynamic_roi_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/dynamic_roi_head.py",
"repo_id": "insightface",
"token_count": 3479
} | 111 |
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Default: 'sum'.
pre_cfg (dict | None): Specify pre-processing modules. Default: None.
post_cfg (dict | None): Specify post-processing modules. Default: None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation='sum',
pre_cfg=None,
post_cfg=None,
**kwargs):
super(GenericRoIExtractor, self).__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
        # sometimes rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats += roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
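# --- Aggregation sketch (illustrative only; shapes are made up) ---
# With ``aggregation='sum'`` the per-level RoI features are added element-wise,
# with ``'concat'`` they are stacked along the channel axis.  The toy tensors
# below mimic RoI features pooled from three feature levels.
if __name__ == '__main__':
    import torch
    per_level = [torch.randn(8, 256, 7, 7) for _ in range(3)]  # 8 RoIs, 3 levels
    summed = per_level[0] + per_level[1] + per_level[2]        # (8, 256, 7, 7)
    concatenated = torch.cat(per_level, dim=1)                 # (8, 768, 7, 7)
    print(summed.shape, concatenated.shape)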
| insightface/detection/scrfd/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py",
"repo_id": "insightface",
"token_count": 1475
} | 112 |
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
name='',
sleep_interval=0.05,
streams: List[torch.cuda.Stream] = None):
"""Async context manager that waits for work to complete on given CUDA
streams."""
if not torch.cuda.is_available():
yield
return
stream_before_context_switch = torch.cuda.current_stream()
if not streams:
streams = [stream_before_context_switch]
else:
streams = [s if s else stream_before_context_switch for s in streams]
end_events = [
torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
]
if DEBUG_COMPLETED_TIME:
start = torch.cuda.Event(enable_timing=True)
stream_before_context_switch.record_event(start)
cpu_start = time.monotonic()
logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
grad_enabled_before = torch.is_grad_enabled()
try:
yield
finally:
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_end = time.monotonic()
for i, stream in enumerate(streams):
event = end_events[i]
stream.record_event(event)
grad_enabled_after = torch.is_grad_enabled()
# observed change of torch.is_grad_enabled() during concurrent run of
# async_test_bboxes code
assert (grad_enabled_before == grad_enabled_after
), 'Unexpected is_grad_enabled() value change'
are_done = [e.query() for e in end_events]
logger.debug('%s %s completed: %s streams: %s', trace_name, name,
are_done, streams)
with torch.cuda.stream(stream_before_context_switch):
while not all(are_done):
await asyncio.sleep(sleep_interval)
are_done = [e.query() for e in end_events]
logger.debug(
'%s %s completed: %s streams: %s',
trace_name,
name,
are_done,
streams,
)
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_time = (cpu_end - cpu_start) * 1000
stream_times_ms = ''
for i, stream in enumerate(streams):
elapsed_time = start.elapsed_time(end_events[i])
stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
trace_name='concurrent',
name='stream'):
"""Run code concurrently in different streams.
:param streamqueue: asyncio.Queue instance.
Queue tasks define the pool of streams used for concurrent execution.
"""
if not torch.cuda.is_available():
yield
return
initial_stream = torch.cuda.current_stream()
with torch.cuda.stream(initial_stream):
stream = await streamqueue.get()
assert isinstance(stream, torch.cuda.Stream)
try:
with torch.cuda.stream(stream):
logger.debug('%s %s is starting, stream: %s', trace_name, name,
stream)
yield
current = torch.cuda.current_stream()
assert current == stream
logger.debug('%s %s has finished, stream: %s', trace_name,
name, stream)
finally:
streamqueue.task_done()
streamqueue.put_nowait(stream)
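# --- Usage sketch (illustrative only) ---
# ``concurrent`` expects an asyncio.Queue pre-filled with CUDA streams and runs
# its body on one of the pooled streams; ``completed`` waits until the work
# issued on the given streams has finished.  On CPU-only hosts both context
# managers degrade to no-ops, so the sketch below is safe to run anywhere.
if __name__ == '__main__':
    async def _demo():
        streamqueue = asyncio.Queue()
        if torch.cuda.is_available():
            for _ in range(2):
                streamqueue.put_nowait(torch.cuda.Stream())
        async with concurrent(streamqueue):
            async with completed('demo', 'matmul'):
                x = torch.ones(64, 64)
                print((x @ x).sum().item())
    asyncio.run(_demo())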
| insightface/detection/scrfd/mmdet/utils/contextmanagers.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/utils/contextmanagers.py",
"repo_id": "insightface",
"token_count": 1885
} | 113 |
import os, sys
def train(group, prefix, idx, gpuid):
assert idx>=0
cmd = "CUDA_VISIBLE_DEVICES='%d' PORT=%d bash ./tools/dist_train.sh ./configs/%s/%s_%d.py 1 --no-validate"%(gpuid,29100+idx, group, prefix, idx)
print(cmd)
os.system(cmd)
gpuid = int(sys.argv[1])
idx_from = int(sys.argv[2])
idx_to = int(sys.argv[3])
group = 'scrfdgen'
if len(sys.argv)>4:
group = sys.argv[4]
for idx in range(idx_from, idx_to):
train(group, group, idx, gpuid)
| insightface/detection/scrfd/search_tools/search_train.py/0 | {
"file_path": "insightface/detection/scrfd/search_tools/search_train.py",
"repo_id": "insightface",
"token_count": 235
} | 114 |
import argparse
import os
import os.path as osp
import pickle
import numpy as np
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
from mmdet.core.evaluation import wider_evaluation, get_widerface_gts
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', default='wout', help='output folder')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--save-preds', action='store_true', help='save results')
parser.add_argument('--show-assign', action='store_true', help='show bbox assign')
parser.add_argument('--debug', action='store_true', help='debug flag')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--thr',
type=float,
default=0.02,
help='score threshold')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--mode', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
gt_path = os.path.join(os.path.dirname(cfg.data.test.ann_file), 'gt')
pipelines = cfg.data.test.pipeline
for pipeline in pipelines:
if pipeline.type=='MultiScaleFlipAug':
if args.mode==0: #640 scale
pipeline.img_scale = (640, 640)
            elif args.mode==1: #single large scale commonly used in other papers
pipeline.img_scale = (1100, 1650)
elif args.mode==2: #original scale
pipeline.img_scale = None
pipeline.scale_factor = 1.0
elif args.mode>30:
pipeline.img_scale = (args.mode, args.mode)
transforms = pipeline.transforms
for transform in transforms:
if transform.type=='Pad':
if args.mode!=2:
transform.size = pipeline.img_scale
else:
transform.size = None
transform.size_divisor = 32
print(cfg.data.test.pipeline)
distributed = False
# build the dataloader
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
cfg.test_cfg.score_thr = args.thr
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if args.show_assign:
gts_easy, gts_medium, gts_hard = get_widerface_gts(gt_path)
assign_stat = [0, 0]
gts_size = []
model = MMDataParallel(model, device_ids=[0])
model.eval()
results = {}
output_folder = args.out
if not os.path.exists(output_folder):
os.makedirs(output_folder)
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
assert len(result)==1
batch_size = 1
result = result[0][0]
img_metas = data['img_metas'][0].data[0][0]
filepath = img_metas['ori_filename']
det_scale = img_metas['scale_factor'][0]
#print(img_metas)
ori_shape = img_metas['ori_shape']
img_width = ori_shape[1]
img_height = ori_shape[0]
_vec = filepath.split('/')
pa, pb = _vec[-2], _vec[1]
if pa not in results:
results[pa] = {}
xywh = result.copy()
w = xywh[:,2] - xywh[:,0]
h = xywh[:,3] - xywh[:,1]
xywh[:,2] = w
xywh[:,3] = h
event_name = pa
img_name = pb.rstrip('.jpg')
results[event_name][img_name] = xywh
if args.save_preds:
out_dir = os.path.join(output_folder, pa)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
out_file = os.path.join(out_dir, pb.replace('jpg', 'txt'))
boxes = result
with open(out_file, 'w') as f:
name = '/'.join([pa, pb])
f.write("%s\n"%(name))
f.write("%d\n"%(boxes.shape[0]))
for b in range(boxes.shape[0]):
box = boxes[b]
f.write("%.5f %.5f %.5f %.5f %g\n"%(box[0], box[1], box[2]-box[0], box[3]-box[1], box[4]))
if args.show_assign:
assert args.mode==0
input_height, input_width = 640, 640
gt_hard = gts_hard[event_name][img_name]
#print(event_name, img_name, gt_hard.shape)
gt_bboxes = gt_hard * det_scale
bbox_width = gt_bboxes[:,2] - gt_bboxes[:,0]
bbox_height = gt_bboxes[:,3] - gt_bboxes[:,1]
bbox_area = bbox_width * bbox_height
gt_size = np.sqrt(bbox_area+0.0001)
gts_size += list(gt_size)
anchor_cxs = []
anchor_cys = []
for idx, stride in enumerate([8,16,32,64,128]):
height = input_height // stride
width = input_width // stride
anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
anchor_cx = anchor_centers[:,0]
anchor_cy = anchor_centers[:,1]
anchor_cxs += list(anchor_cx)
anchor_cys += list(anchor_cy)
anchor_cx = np.array(anchor_cxs, dtype=np.float32)
anchor_cy = np.array(anchor_cys, dtype=np.float32)
num_gts = gt_bboxes.shape[0]
num_anchors = anchor_cx.shape[0]
anchor_cx = np.broadcast_to(anchor_cx.reshape((1,-1)), (num_gts, num_anchors)).reshape(num_anchors, num_gts)
anchor_cy = np.broadcast_to(anchor_cy.reshape((1,-1)), (num_gts, num_anchors)).reshape(num_anchors, num_gts)
gt_x1 = gt_bboxes[:,0]
gt_y1 = gt_bboxes[:,1]
gt_x2 = gt_bboxes[:,2]
gt_y2 = gt_bboxes[:,3]
gt_cover = np.zeros( (gt_bboxes.shape[0], ), dtype=np.float32)
l_ = anchor_cx - gt_x1
t_ = anchor_cy - gt_y1
r_ = gt_x2 - anchor_cx
b_ = gt_y2 - anchor_cy
dist = np.stack([l_, t_, r_, b_], axis=1).min(axis=1)
gt_dist = dist.max(axis=0)
gt_dist = gt_dist / gt_size
center_thres = 0.01
#center_thres = -0.25
gt_cover_inds = np.where(gt_dist>center_thres)[0]
num_assigned = len(gt_cover_inds)
assign_stat[0] += num_gts
assign_stat[1] += num_assigned
for _ in range(batch_size):
prog_bar.update()
aps = wider_evaluation(results, gt_path, 0.5, args.debug)
with open(os.path.join(output_folder, 'aps'), 'w') as f:
f.write("%f,%f,%f\n"%(aps[0],aps[1],aps[2]))
print('APS:', aps)
if args.show_assign:
print('ASSIGN:', assign_stat)
gts_size = np.array(gts_size, dtype=np.float32)
gts_size = np.sort(gts_size)
assert len(gts_size)==assign_stat[0]
print(gts_size[assign_stat[0]//2])
if __name__ == '__main__':
main()
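# Example invocation (config/checkpoint paths are hypothetical):
#   CUDA_VISIBLE_DEVICES=0 python tools/test_widerface.py \
#       configs/scrfd/scrfd_500m.py work_dirs/scrfd_500m/latest.pth \
#       --mode 0 --out wout --thr 0.02
# ``--mode 0`` tests at the fixed 640x640 scale, ``--mode 2`` keeps the
# original image resolution; the WIDER FACE easy/medium/hard APs are written
# to ``<out>/aps``.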
| insightface/detection/scrfd/tools/test_widerface.py/0 | {
"file_path": "insightface/detection/scrfd/tools/test_widerface.py",
"repo_id": "insightface",
"token_count": 5001
} | 115 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author : Qingping Zheng
@Contact : qingpingzheng2014@gmail.com
@File : datasets.py
@Time : 10/01/21 00:00 PM
@Desc :
@License : Licensed under the Apache License, Version 2.0 (the "License");
@Copyright : Copyright 2015 The Authors. All Rights Reserved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import os.path as osp
import pickle
import random
import torch
import torchvision.transforms.functional as TF
from glob import glob
from typing import Tuple
from utils import transforms
class FaceDataSet(torch.utils.data.Dataset):
"""Face data set for model training and validating
Examples:
./CelebAMask
|---test
|---train
|---images
|---0.jpg
|---1.jpg
|---labels
|---0.png
|---1.png
|---edges
|---0.png
|---1.png
|---valid
|---label_names.txt
|---test_list.txt
|---train_list.txt
|---images/0.jpg labels/0.png
|---images/1.jpg labels/1.png
|---valid_list.txt
Args:
root: A string, training/validating dataset path, e.g. "./CelebAMask"
dataset: A string, one of `"train"`, `"test"`, `"valid"`.
        crop_size: A list of two integers.
scale_factor: A float number.
rotation_factor: An integer number.
ignore_label: An integer number, default is 255.
        transform: A function of torchvision.transforms.Compose([])
"""
def __init__(self,
root: str,
dataset: str,
crop_size: list=[473, 473],
scale_factor: float=0.25,
rotation_factor: int=30,
ignore_label: int =255,
transform=None) -> None:
self.root = root
self.dataset = dataset
self.crop_size = np.asarray(crop_size)
self.scale_factor = scale_factor
self.rotation_factor = rotation_factor
self.ignore_label = ignore_label
self.transform = transform
self.flip_prob = 0.5
self.flip_pairs = [[4, 5], [6, 7]]
self.aspect_ratio = crop_size[1] * 1.0 / crop_size[0]
self.file_list_name = osp.join(root, dataset + '_list.txt')
self.im_list = [line.split()[0][7:-4] for line in open(self.file_list_name).readlines()]
self.number_samples = len(self.im_list)
def __len__(self) -> int:
return self.number_samples
def _box2cs(self, box: list) -> tuple:
x, y, w, h = box[:4]
return self._xywh2cs(x, y, w, h)
def _xywh2cs(self, x: float, y: float, w: float, h: float) -> tuple:
center = np.zeros((2), dtype=np.float32)
center[0] = x + w * 0.5
center[1] = y + h * 0.5
if w > self.aspect_ratio * h:
h = w * 1.0 / self.aspect_ratio
elif w < self.aspect_ratio * h:
w = h * self.aspect_ratio
scale = np.array([w * 1.0, h * 1.0], dtype=np.float32)
return center, scale
def __getitem__(self, index: int) -> tuple:
# Load training image
im_name = self.im_list[index]
im_path = osp.join(self.root, self.dataset, 'images', im_name + '.jpg')
im = cv2.imread(im_path, cv2.IMREAD_COLOR)
h, w, _ = im.shape
        parsing_anno = np.zeros((h, w), dtype=np.int64)  # np.long was removed in recent NumPy
# Get center and scale
center, s = self._box2cs([0, 0, w - 1, h - 1])
r = 0
if self.dataset not in ['test', 'valid']:
edge_path = osp.join(self.root, self.dataset, 'edges', im_name + '.png')
edge = cv2.imread(edge_path, cv2.IMREAD_GRAYSCALE)
parsing_anno_path = osp.join(self.root, self.dataset, 'labels', im_name + '.png')
parsing_anno = cv2.imread(parsing_anno_path, cv2.IMREAD_GRAYSCALE)
if self.dataset in 'train':
sf = self.scale_factor
rf = self.rotation_factor
s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) \
if random.random() <= 0.6 else 0
trans = transforms.get_affine_transform(center, s, r, self.crop_size)
image = cv2.warpAffine(
im,
trans,
(int(self.crop_size[1]), int(self.crop_size[0])),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(0, 0, 0))
if self.dataset not in ['test', 'valid']:
edge = cv2.warpAffine(
edge,
trans,
(int(self.crop_size[1]), int(self.crop_size[0])),
flags=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(0, 0, 0))
if self.transform:
image = self.transform(image)
meta = {
'name': im_name,
'center': center,
'height': h,
'width': w,
'scale': s,
'rotation': r,
'origin': image
}
if self.dataset not in 'train':
return image, meta
else:
label_parsing = cv2.warpAffine(
parsing_anno,
trans,
(int(self.crop_size[1]), int(self.crop_size[0])),
flags=cv2.INTER_NEAREST,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(255))
label_parsing = torch.from_numpy(label_parsing)
return image, label_parsing, edge, meta
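# --- Usage sketch (illustrative only; dataset root and statistics are hypothetical) ---
if __name__ == '__main__':
    import torchvision.transforms as T
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]),
    ])
    train_set = FaceDataSet('./CelebAMask', 'train',
                            crop_size=[473, 473], transform=normalize)
    loader = torch.utils.data.DataLoader(train_set, batch_size=8, shuffle=True)
    images, labels, edges, meta = next(iter(loader))
    print(images.shape, labels.shape, edges.shape)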
| insightface/parsing/dml_csr/dataset/datasets.py/0 | {
"file_path": "insightface/parsing/dml_csr/dataset/datasets.py",
"repo_id": "insightface",
"token_count": 3084
} | 116 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author : Qingping Zheng
@Contact : qingpingzheng2014@gmail.com
@File : transforms.py
@Time : 10/01/21 00:00 PM
@Desc :
@License : Licensed under the Apache License, Version 2.0 (the "License");
@Copyright : Copyright 2022 The Authors. All Rights Reserved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
def flip_back(output_flipped, matched_parts):
'''
    output_flipped: numpy.ndarray(batch_size, num_joints, height, width)
'''
assert output_flipped.ndim == 4,\
'output_flipped should be [batch_size, num_joints, height, width]'
output_flipped = output_flipped[:, :, :, ::-1]
for pair in matched_parts:
tmp = output_flipped[:, pair[0], :, :].copy()
output_flipped[:, pair[0], :, :] = output_flipped[:, pair[1], :, :]
output_flipped[:, pair[1], :, :] = tmp
return output_flipped
def transform_parsing(pred, center, scale, width, height, input_size):
if center is not None:
trans = get_affine_transform(center, scale, 0, input_size, inv=1)
target_pred = cv2.warpAffine(
pred,
trans,
        (int(width), int(height)),
flags=cv2.INTER_NEAREST,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(0))
else:
target_pred = cv2.resize(pred, (int(width), int(height)), interpolation=cv2.INTER_NEAREST)
return target_pred
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
print(scale)
scale = np.array([scale, scale])
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[1]
dst_h = output_size[0]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def crop(img, center, scale, output_size, rot=0):
trans = get_affine_transform(center, scale, rot, output_size)
dst_img = cv2.warpAffine(img,
trans,
(int(output_size[1]), int(output_size[0])),
flags=cv2.INTER_LINEAR)
return dst_img
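# --- Usage sketch (illustrative only; the input image is random noise) ---
if __name__ == '__main__':
    img = np.random.randint(0, 255, (512, 768, 3), dtype=np.uint8)
    h, w = img.shape[:2]
    center = np.array([w * 0.5, h * 0.5], dtype=np.float32)
    scale = np.array([w, h], dtype=np.float32)
    # warp the whole image into a 473x473 network input
    warped = crop(img, center, scale, (473, 473))
    # map a prediction of the same size back to the original resolution
    restored = transform_parsing(warped[..., 0], center, scale, w, h, (473, 473))
    print(warped.shape, restored.shape)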
| insightface/parsing/dml_csr/utils/transforms.py/0 | {
"file_path": "insightface/parsing/dml_csr/utils/transforms.py",
"repo_id": "insightface",
"token_count": 1776
} | 117 |
import os
import os.path as osp
import zipfile
from .download import download_file
BASE_REPO_URL = 'https://github.com/deepinsight/insightface/releases/download/v0.7'
def download(sub_dir, name, force=False, root='~/.insightface'):
_root = os.path.expanduser(root)
dir_path = os.path.join(_root, sub_dir, name)
if osp.exists(dir_path) and not force:
return dir_path
print('download_path:', dir_path)
zip_file_path = os.path.join(_root, sub_dir, name + '.zip')
model_url = "%s/%s.zip"%(BASE_REPO_URL, name)
download_file(model_url,
path=zip_file_path,
overwrite=True)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(dir_path)
#os.remove(zip_file_path)
return dir_path
def ensure_available(sub_dir, name, root='~/.insightface'):
return download(sub_dir, name, force=False, root=root)
def download_onnx(sub_dir, model_file, force=False, root='~/.insightface', download_zip=False):
_root = os.path.expanduser(root)
model_root = osp.join(_root, sub_dir)
new_model_file = osp.join(model_root, model_file)
if osp.exists(new_model_file) and not force:
return new_model_file
if not osp.exists(model_root):
os.makedirs(model_root)
print('download_path:', new_model_file)
if not download_zip:
model_url = "%s/%s"%(BASE_REPO_URL, model_file)
download_file(model_url,
path=new_model_file,
overwrite=True)
else:
model_url = "%s/%s.zip"%(BASE_REPO_URL, model_file)
zip_file_path = new_model_file+".zip"
download_file(model_url,
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(model_root)
return new_model_file
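# --- Usage sketch (illustrative only) ---
# ``ensure_available`` downloads and unpacks a model pack only when it is not
# already cached under ``~/.insightface``, while ``download_onnx`` fetches a
# single ONNX file.  Requires network access on the first call.
if __name__ == '__main__':
    pack_dir = ensure_available('models', 'buffalo_l')
    print('model pack available at:', pack_dir)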
| insightface/python-package/insightface/utils/storage.py/0 | {
"file_path": "insightface/python-package/insightface/utils/storage.py",
"repo_id": "insightface",
"token_count": 885
} | 118 |
## I. CPP-Align
-
## II. Face Mask Renderer
We provide a simple tool to add masks on face images automatically.
We can use this tool to do data augmentation while training our face recognition models.
| Face Image | OP | Mask Image | Out |
| ------- | ------ | --------- | ----------- |
| <img src="https://github.com/deepinsight/insightface/blob/master/python-package/insightface/data/images/Tom_Hanks_54745.png" alt="face" height="112" /> | +F | <img src="https://github.com/nttstar/insightface-resources/blob/master/images/mask1.jpg" alt="mask" height="112" /> | <img src="https://github.com/nttstar/insightface-resources/blob/master/images/mask_out1.jpg?raw=true" alt="mask" height="112" /> |
| <img src="https://github.com/deepinsight/insightface/blob/master/python-package/insightface/data/images/Tom_Hanks_54745.png" alt="face" height="112" /> | +F | <img src="https://github.com/nttstar/insightface-resources/blob/master/images/black-mask.png" alt="mask" height="112" /> | <img src="https://github.com/nttstar/insightface-resources/blob/master/images/mask_out3.jpg?raw=true" alt="mask" height="112" /> |
| <img src="https://github.com/deepinsight/insightface/blob/master/python-package/insightface/data/images/Tom_Hanks_54745.png" alt="face" height="112" /> | +H | <img src="https://github.com/nttstar/insightface-resources/blob/master/images/mask2.jpg?raw=true" alt="mask" height="112" /> | <img src="https://github.com/nttstar/insightface-resources/blob/master/images/mask_out2h.jpg?raw=true" alt="mask" height="112" /> |
**F** means FULL while **H** means HALF.
### Prepare
- insightface package library
``pip install -U insightface``
- insightface model pack
``bash> insightface-cli model.download antelope``
- BFM models
Please follow the tutorial of [https://github.com/YadiraF/face3d/tree/master/examples/Data/BFM](https://github.com/YadiraF/face3d/tree/master/examples/Data/BFM) to generate `BFM.mat` and `BFM_UV.mat`. Put them into the insightface model pack directory, such as ``~/.insightface/models/antelope/``
- mask images
  Some mask images are included in the insightface package, such as 'mask\_blue', 'mask\_white', 'mask\_black' and 'mask\_green'.
### Add Mask to Face Image
Please refer to `make_renderer.py` for a detailed example.
(1) init renderer:
```
import insightface
from insightface.app import MaskRenderer
tool = MaskRenderer()
tool.prepare(ctx_id=0, det_size=(128,128)) #use gpu
```
(2) load face and mask images
```
from insightface.data import get_image as ins_get_image
image = ins_get_image('Tom_Hanks_54745')
mask_image = "mask_blue"
```
(3) build the necessary params for the face image; this can be done offline.
```
params = tool.build_params(image)
```
(4) render the mask; it takes about `10ms` at 224x224 UV size on a single CPU thread.
```
mask_out = tool.render_mask(image, mask_image, params)
```
(5) render a half mask.
```
mask_half_out = tool.render_mask(image, mask_image, params, positions=[0.1, 0.5, 0.9, 0.7])
```
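The rendered results are numpy image arrays; assuming they keep the BGR channel order of the input loaded by `ins_get_image`, they can be written out directly with OpenCV. A minimal sketch (output file names are only illustrative):
```
import cv2
cv2.imwrite('./Tom_Hanks_masked.jpg', mask_out)
cv2.imwrite('./Tom_Hanks_half_masked.jpg', mask_half_out)
```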
| insightface/recognition/_tools_/README.md/0 | {
"file_path": "insightface/recognition/_tools_/README.md",
"repo_id": "insightface",
"token_count": 1102
} | 119 |
"""Helper for evaluation on the Labeled Faces in the Wild dataset
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import os
import pickle
import numpy as np
import sklearn
import oneflow as flow
from scipy import interpolate
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
import cv2 as cv
import logging
class LFold:
def __init__(self, n_splits=2, shuffle=False):
self.n_splits = n_splits
if self.n_splits > 1:
self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)
def split(self, indices):
if self.n_splits > 1:
return self.k_fold.split(indices)
else:
return [(indices, indices)]
def calculate_roc(
thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, pca=0
):
assert embeddings1.shape[0] == embeddings2.shape[0]
assert embeddings1.shape[1] == embeddings2.shape[1]
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = LFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
if pca == 0:
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if pca > 0:
print("doing pca on", fold_idx)
embed1_train = embeddings1[train_set]
embed2_train = embeddings2[train_set]
_embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
pca_model = PCA(n_components=pca)
pca_model.fit(_embed_train)
embed1 = pca_model.transform(embeddings1)
embed2 = pca_model.transform(embeddings2)
embed1 = sklearn.preprocessing.normalize(embed1)
embed2 = sklearn.preprocessing.normalize(embed2)
diff = np.subtract(embed1, embed2)
dist = np.sum(np.square(diff), 1)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set]
)
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
(
tprs[fold_idx, threshold_idx],
fprs[fold_idx, threshold_idx],
_,
) = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(
thresholds[best_threshold_index], dist[test_set], actual_issame[test_set]
)
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(
np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame))
)
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
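# Worked example (illustrative): with dist = [0.5, 1.5, 0.8, 2.0],
# actual_issame = [True, True, False, False] and threshold = 1.0,
# predict_issame = [True, False, True, False], so tp = fp = tn = fn = 1,
# giving tpr = 0.5, fpr = 0.5 and acc = 0.5.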
def calculate_val(
thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10
):
assert embeddings1.shape[0] == embeddings2.shape[0]
assert embeddings1.shape[1] == embeddings2.shape[1]
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = LFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(
threshold, dist[train_set], actual_issame[train_set]
)
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind="slinear")
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(
threshold, dist[test_set], actual_issame[test_set]
)
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
# print(true_accept, false_accept)
# print(n_same, n_diff)
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy = calculate_roc(
thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
nrof_folds=nrof_folds,
pca=pca,
)
thresholds = np.arange(0, 4, 0.001)
val, val_std, far = calculate_val(
thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
1e-3,
nrof_folds=nrof_folds,
)
return tpr, fpr, accuracy, val, val_std, far
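# Usage sketch (illustrative): `embeddings` is a (2*N, dim) array with the two
# members of each pair interleaved (even rows vs. odd rows) and `actual_issame`
# has length N, e.g.:
#   tpr, fpr, accuracy, val, val_std, far = evaluate(embeddings, issame_list)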
def load_bin_cv(path, image_size):
bins, issame_list = pickle.load(open(path, "rb"), encoding="bytes")
data_list = []
for flip in [0, 1]:
data = flow.empty(len(issame_list) * 2, 3, image_size[0], image_size[1])
data_list.append(data)
for i in range(len(issame_list) * 2):
_bin = bins[i]
img_ori = cv.imdecode(_bin, cv.IMREAD_COLOR)[:, :, ::-1]
for flip in [0, 1]:
img = img_ori.copy()
if flip == 1:
img = cv.flip(img, 1)
img = np.array(img).transpose((2, 0, 1))
img = (img - 127.5) * 0.00784313725
data_list[flip][i] = flow.tensor(img, dtype=flow.float)
if i % 1000 == 0:
logging.info("loading bin:%d", i)
logging.info(data_list[0].shape)
return data_list, issame_list
@flow.no_grad()
def test(data_set, backbone, batch_size, nfolds=10, is_consistent=False):
logging.info("testing verification..")
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
if is_consistent:
placement = flow.env.all_device_placement("cpu")
sbp = flow.sbp.split(0)
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
img = data[bb - batch_size : bb]
time0 = datetime.datetime.now()
with flow.no_grad():
if is_consistent:
img = img.to_consistent(placement=placement, sbp=sbp)
net_out = backbone(img.to("cuda"))
if is_consistent:
_embeddings = net_out.to_local().numpy()
else:
_embeddings = net_out.detach().numpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(batch_size - count) :, :]
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
embeddings = embeddings_list[0].copy()
embeddings = sklearn.preprocessing.normalize(embeddings)
acc1 = 0.0
std1 = 0.0
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
logging.info(embeddings.shape)
logging.info("infer time:%f" % time_consumed)
_, _, accuracy, val, val_std, far = evaluate(
embeddings, issame_list, nrof_folds=nfolds
)
acc2, std2 = np.mean(accuracy), np.std(accuracy)
return acc1, std1, acc2, std2, _xnorm, embeddings_list
def dumpR(data_set, backbone, batch_size, name="", data_extra=None, label_shape=None):
print("dump verification embedding..")
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
            img = data[bb - batch_size : bb]
            time0 = datetime.datetime.now()
            # use the same OneFlow inference path as test(); the original MXNet
            # DataBatch / model.forward code here referenced undefined symbols
            with flow.no_grad():
                net_out = backbone(img.to("cuda"))
            _embeddings = net_out.detach().numpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(batch_size - count) :, :]
ba = bb
embeddings_list.append(embeddings)
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
actual_issame = np.asarray(issame_list)
outname = os.path.join("temp.bin")
with open(outname, "wb") as f:
pickle.dump((embeddings, issame_list), f, protocol=pickle.HIGHEST_PROTOCOL)
| insightface/recognition/arcface_oneflow/eval/verification.py/0 | {
"file_path": "insightface/recognition/arcface_oneflow/eval/verification.py",
"repo_id": "insightface",
"token_count": 5550
} | 120 |
import oneflow as flow
from utils.utils_callbacks import CallBackVerification
from backbones import get_model
from graph import TrainGraph, EvalGraph
import logging
import argparse
from utils.utils_config import get_config
def main(args):
cfg = get_config(args.config)
logging.basicConfig(level=logging.NOTSET)
logging.info(args.model_path)
backbone = get_model(cfg.network, dropout=0.0, num_features=cfg.embedding_size).to(
"cuda"
)
val_callback = CallBackVerification(
1, 0, cfg.val_targets, cfg.ofrecord_path)
state_dict = flow.load(args.model_path)
new_parameters = dict()
for key, value in state_dict.items():
if "num_batches_tracked" not in key:
if key == "fc.weight":
continue
new_key = key.replace("backbone.", "")
new_parameters[new_key] = value
backbone.load_state_dict(new_parameters)
infer_graph = EvalGraph(backbone, cfg)
val_callback(1000, backbone, infer_graph)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="OneFlow ArcFace val")
parser.add_argument("config", type=str, help="py config file")
parser.add_argument("--model_path", type=str, help="model path")
main(parser.parse_args())
| insightface/recognition/arcface_oneflow/val.py/0 | {
"file_path": "insightface/recognition/arcface_oneflow/val.py",
"repo_id": "insightface",
"token_count": 494
} | 121 |
# Service Deployment Based on PaddleServing
(Simplified Chinese | [English](./README.md))
This document describes how to deploy the Arcface dynamic-graph model as an online pipeline service with [PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md).
PaddleServing has the following advantages:
- High-concurrency and efficient communication between client and server
- Industrial-grade serving capabilities, such as model management, online loading and online A/B testing
- Clients can be developed in multiple programming languages, e.g. C++, Python and Java
For more details on the PaddleServing deployment framework and its tutorials, please refer to the [documentation](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md).
## Contents
- [Environment Preparation](#environment-preparation)
- [Model Conversion](#model-conversion)
- [Paddle Serving Pipeline Deployment](#deployment)
- [FAQ](#FAQ)
<a name="environment-preparation"></a>
## Environment Preparation
Both the Arcface runtime environment and the PaddleServing runtime environment need to be prepared.
- Prepare the Arcface runtime environment: [link](../../README_cn.md)
Download the paddle whl package matching your environment; version 2.2+ is recommended.
- Prepare the PaddleServing runtime environment, following the steps below
1. Install serving, which is used to start the service
```
pip3 install paddle-serving-server==0.6.3 # for CPU
pip3 install paddle-serving-server-gpu==0.6.3 # for GPU
# For other GPU environments, confirm the CUDA/TensorRT versions first, then choose one of the commands below
pip3 install paddle-serving-server-gpu==0.6.3.post101 # GPU with CUDA10.1 + TensorRT6
pip3 install paddle-serving-server-gpu==0.6.3.post11 # GPU with CUDA11 + TensorRT7
```
2. Install the client, which is used to send requests to the service
```
pip3 install paddle_serving_client==0.6.3
```
3. Install serving-app
```
pip3 install paddle-serving-app==0.6.3
```
**Note:** To install the latest version of PaddleServing, please refer to this [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md).
<a name="model-conversion"></a>
## Model Conversion
When deploying with PaddleServing, the saved inference model needs to be converted into a model format that serving can deploy easily.
First, download the Arcface inference model
```
# Download and extract the Arcface model
wget -nc -P ./inference https://paddle-model-ecology.bj.bcebos.com/model/insight-face/mobileface_v1.0_infer.tar
tar xf inference/mobileface_v1.0_infer.tar --strip-components 1 -C inference
```
Next, use the installed paddle_serving_client to convert the downloaded inference model into a format that is easy for the server to deploy.
```
python3 -m paddle_serving_client.convert --dirname ./inference/ \
--model_filename inference.pdmodel \
--params_filename inference.pdiparams \
--serving_server ./MobileFaceNet_128_serving/ \
--serving_client ./MobileFaceNet_128_client/
```
After the model conversion is finished, two new folders, `MobileFaceNet_128_serving/` and `MobileFaceNet_128_client/`, appear in the current directory with the following layout:
```
MobileFaceNet_128_serving
├── __model__
├── __params__
├── serving_server_conf.prototxt
└── serving_server_conf.stream.prototxt
MobileFaceNet_128_client/
├── serving_client_conf.prototxt
└── serving_client_conf.stream.prototxt
```
<a name="deployment"></a>
## Paddle Serving Pipeline Deployment
1. Clone the insightface code (skip this step if it has already been downloaded)
```
git clone https://github.com/deepinsight/insightface
# enter the working directory
cd recognition/arcface_paddle/deploy/pdserving
```
The pdserving directory contains the code for starting the pipeline service and sending prediction requests, including:
```
__init__.py
config.yml # configuration file for starting the service
pipeline_http_client.py # script for sending pipeline prediction requests over HTTP
pipeline_rpc_client.py # script for sending pipeline prediction requests over RPC
web_service.py # script for starting the pipeline server
```
2. Start the service with the following command:
```
# start the service; the running log is saved in log.txt
python3 web_service.py &>log.txt &
```
After the service starts successfully, a log similar to the following is printed in log.txt

3. Send a service request:
```
python3 pipeline_http_client.py
```
After it runs successfully, the model prediction results are printed in the terminal window; an example result is:

Adjust the concurrency settings in config.yml to obtain the maximum QPS; in general the detection-to-recognition concurrency ratio is 2:1
```
ArcFace:
  # concurrency; threads when is_thread_op=True, otherwise processes
concurrency: 8
...
```
Multiple service requests can be sent at the same time if needed.
Prediction performance statistics are automatically written to the `PipelineServingLogs/pipeline.tracer` file.
Tested on 700 real images, the average QPS on a V100 GPU can reach about 57:
```
2021-11-04 13:38:52,507 Op(ArcFace):
2021-11-04 13:38:52,507 in[135.4579597902098 ms]
2021-11-04 13:38:52,507 prep[0.9921311188811189 ms]
2021-11-04 13:38:52,507 midp[3.9232132867132865 ms]
2021-11-04 13:38:52,507 postp[0.12166258741258741 ms]
2021-11-04 13:38:52,507 out[0.9898286713286714 ms]
2021-11-04 13:38:52,508 idle[0.9643989520087675]
2021-11-04 13:38:52,508 DAGExecutor:
2021-11-04 13:38:52,508 Query count[573]
2021-11-04 13:38:52,508 QPS[57.3 q/s]
2021-11-04 13:38:52,509 Succ[0.9982547993019197]
2021-11-04 13:38:52,509 Error req[394]
2021-11-04 13:38:52,509 Latency:
2021-11-04 13:38:52,509 ave[11.52941186736475 ms]
2021-11-04 13:38:52,509 .50[11.492 ms]
2021-11-04 13:38:52,509 .60[11.658 ms]
2021-11-04 13:38:52,509 .70[11.95 ms]
2021-11-04 13:38:52,509 .80[12.251 ms]
2021-11-04 13:38:52,509 .90[12.736 ms]
2021-11-04 13:38:52,509 .95[13.21 ms]
2021-11-04 13:38:52,509 .99[13.987 ms]
2021-11-04 13:38:52,510 Channel (server worker num[10]):
2021-11-04 13:38:52,510 chl0(In: ['@DAGExecutor'], Out: ['ArcFace']) size[0/0]
2021-11-04 13:38:52,510 chl1(In: ['ArcFace'], Out: ['@DAGExecutor']) size[0/0]
```
<a name="FAQ"></a>
## FAQ
**Q1**: No result is returned after sending a request, or an output decoding error is reported.
**A1**: Do not use a proxy when starting the service or sending requests. You can disable the proxy before starting the service and before sending requests with the following commands:
```
unset https_proxy
unset http_proxy
```
| insightface/recognition/arcface_paddle/deploy/pdserving/README_CN.md/0 | {
"file_path": "insightface/recognition/arcface_paddle/deploy/pdserving/README_CN.md",
"repo_id": "insightface",
"token_count": 3699
} | 122 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import sys
import numpy as np
import logging
import paddle
from visualdl import LogWriter
from utils.logging import AverageMeter, init_logging, CallBackLogging
from datasets import CommonDataset, SyntheticDataset
from utils import losses
from .utils.verification import CallBackVerification
from .utils.io import Checkpoint
from .utils.amp import LSCGradScaler
from . import classifiers
from . import backbones
RELATED_FLAGS_SETTING = {
'FLAGS_cudnn_exhaustive_search': 1,
'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
'FLAGS_max_inplace_grad_add': 8,
'FLAGS_fraction_of_gpu_memory_to_use': 0.9999,
}
paddle.fluid.set_flags(RELATED_FLAGS_SETTING)
def train(args):
writer = LogWriter(logdir=args.logdir)
rank = int(os.getenv("PADDLE_TRAINER_ID", 0))
world_size = int(os.getenv("PADDLE_TRAINERS_NUM", 1))
gpu_id = int(os.getenv("FLAGS_selected_gpus", 0))
place = paddle.CUDAPlace(gpu_id)
if world_size > 1:
import paddle.distributed.fleet as fleet
from .utils.data_parallel import sync_gradients, sync_params
strategy = fleet.DistributedStrategy()
strategy.without_graph_optimization = True
fleet.init(is_collective=True, strategy=strategy)
if args.use_synthetic_dataset:
trainset = SyntheticDataset(args.num_classes, fp16=args.fp16)
else:
trainset = CommonDataset(
root_dir=args.data_dir,
label_file=args.label_file,
fp16=args.fp16,
is_bin=args.is_bin)
num_image = len(trainset)
total_batch_size = args.batch_size * world_size
steps_per_epoch = num_image // total_batch_size
if args.train_unit == 'epoch':
warmup_steps = steps_per_epoch * args.warmup_num
total_steps = steps_per_epoch * args.train_num
decay_steps = [x * steps_per_epoch for x in args.decay_boundaries]
total_epoch = args.train_num
else:
warmup_steps = args.warmup_num
total_steps = args.train_num
decay_steps = [x for x in args.decay_boundaries]
total_epoch = (total_steps + steps_per_epoch - 1) // steps_per_epoch
if rank == 0:
logging.info('world_size: {}'.format(world_size))
logging.info('total_batch_size: {}'.format(total_batch_size))
logging.info('warmup_steps: {}'.format(warmup_steps))
logging.info('steps_per_epoch: {}'.format(steps_per_epoch))
logging.info('total_steps: {}'.format(total_steps))
logging.info('total_epoch: {}'.format(total_epoch))
logging.info('decay_steps: {}'.format(decay_steps))
base_lr = total_batch_size * args.lr / 512
lr_scheduler = paddle.optimizer.lr.PiecewiseDecay(
boundaries=decay_steps,
values=[
base_lr * (args.lr_decay**i) for i in range(len(decay_steps) + 1)
])
if warmup_steps > 0:
lr_scheduler = paddle.optimizer.lr.LinearWarmup(
lr_scheduler, warmup_steps, 0, base_lr)
if args.fp16:
paddle.set_default_dtype("float16")
margin_loss_params = eval("losses.{}".format(args.loss))()
backbone = eval("backbones.{}".format(args.backbone))(
num_features=args.embedding_size, dropout=args.dropout)
classifier = eval("classifiers.{}".format(args.classifier))(
rank=rank,
world_size=world_size,
num_classes=args.num_classes,
margin1=margin_loss_params.margin1,
margin2=margin_loss_params.margin2,
margin3=margin_loss_params.margin3,
scale=margin_loss_params.scale,
sample_ratio=args.sample_ratio,
embedding_size=args.embedding_size,
fp16=args.fp16)
backbone.train()
classifier.train()
optimizer = paddle.optimizer.Momentum(
parameters=[{
'params': backbone.parameters(),
}, {
'params': classifier.parameters(),
}],
learning_rate=lr_scheduler,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.fp16:
optimizer._dtype = 'float32'
if world_size > 1:
# sync backbone params for data parallel
sync_params(backbone.parameters())
if args.do_validation_while_train:
callback_verification = CallBackVerification(
args.validation_interval_step,
rank,
args.batch_size,
args.val_targets,
args.data_dir,
fp16=args.fp16, )
callback_logging = CallBackLogging(args.log_interval_step, rank,
world_size, total_steps,
args.batch_size, writer)
checkpoint = Checkpoint(
rank=rank,
world_size=world_size,
embedding_size=args.embedding_size,
num_classes=args.num_classes,
model_save_dir=os.path.join(args.output, args.backbone),
checkpoint_dir=args.checkpoint_dir,
max_num_last_checkpoint=args.max_num_last_checkpoint)
start_epoch = 0
global_step = 0
loss_avg = AverageMeter()
if args.resume:
extra_info = checkpoint.load(
backbone, classifier, optimizer, for_train=True)
start_epoch = extra_info['epoch'] + 1
lr_state = extra_info['lr_state']
        # here last_epoch actually means last_step for PiecewiseDecay,
        # since we always use a step-style lr_scheduler
global_step = lr_state['last_epoch']
lr_scheduler.set_state_dict(lr_state)
train_loader = paddle.io.DataLoader(
trainset,
places=place,
num_workers=args.num_workers,
batch_sampler=paddle.io.DistributedBatchSampler(
dataset=trainset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True))
scaler = LSCGradScaler(
enable=args.fp16,
init_loss_scaling=args.init_loss_scaling,
incr_ratio=args.incr_ratio,
decr_ratio=args.decr_ratio,
incr_every_n_steps=args.incr_every_n_steps,
decr_every_n_nan_or_inf=args.decr_every_n_nan_or_inf,
use_dynamic_loss_scaling=args.use_dynamic_loss_scaling)
for epoch in range(start_epoch, total_epoch):
train_reader_cost = 0.0
train_run_cost = 0.0
total_samples = 0
reader_start = time.time()
for step, (img, label) in enumerate(train_loader):
train_reader_cost += time.time() - reader_start
global_step += 1
train_start = time.time()
with paddle.amp.auto_cast(enable=args.fp16):
features = backbone(img)
loss_v = classifier(features, label)
scaler.scale(loss_v).backward()
if world_size > 1:
# data parallel sync backbone gradients
sync_gradients(backbone.parameters())
scaler.step(optimizer)
classifier.step(optimizer)
optimizer.clear_grad()
classifier.clear_grad()
train_run_cost += time.time() - train_start
total_samples += len(img)
lr_value = optimizer.get_lr()
loss_avg.update(loss_v.item(), 1)
callback_logging(
global_step,
loss_avg,
epoch,
lr_value,
avg_reader_cost=train_reader_cost / args.log_interval_step,
avg_batch_cost=(train_reader_cost + train_run_cost) / args.log_interval_step,
avg_samples=total_samples / args.log_interval_step,
ips=total_samples / (train_reader_cost + train_run_cost))
if args.do_validation_while_train:
callback_verification(global_step, backbone)
lr_scheduler.step()
if global_step >= total_steps:
break
sys.stdout.flush()
            if rank == 0 and global_step > 0 and global_step % args.log_interval_step == 0:
train_reader_cost = 0.0
train_run_cost = 0.0
total_samples = 0
reader_start = time.time()
checkpoint.save(
backbone, classifier, optimizer, epoch=epoch, for_train=True)
writer.close()
| insightface/recognition/arcface_paddle/dynamic/train.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/dynamic/train.py",
"repo_id": "insightface",
"token_count": 4048
} | 123 |
#!/bin/bash
source test_tipc/common_func.sh
FILENAME=$1
# MODE should be one of ['lite_train_lite_infer', 'serving_infer']
MODE=$2
dataline=$(cat ${FILENAME})
lines=(${dataline})
IFS=$'\n'
# The training params
model_name=$(func_parser_value "${lines[1]}")
trainer_list=$(func_parser_value "${lines[14]}")
MODE=$2
if [ ${MODE} = "lite_train_lite_infer" ];then
rm -rf MS1M_v2; mkdir MS1M_v2
# pretrain lite train data
tar xf test_tipc/data/small_dataset.tar --strip-components 1 -C MS1M_v2
# wget -nc -P ./MS1M_v2/ https://paddle-model-ecology.bj.bcebos.com/whole_chain/insight-face/lfw.bin
cp test_tipc/data/small_lfw.bin MS1M_v2/lfw.bin
elif [ ${MODE} = "serving_infer" ];then
# prepare serving env
python_name=$(func_parser_value "${lines[2]}")
rm paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
wget https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
    ${python_name} -m pip install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
${python_name} -m pip install paddle_serving_client==0.6.3
${python_name} -m pip install paddle-serving-app==0.6.3
${python_name} -m pip install werkzeug==2.0.2
rm -rf ./inference
wget -nc -P ./inference https://paddle-model-ecology.bj.bcebos.com/model/insight-face/mobileface_v1.0_infer.tar
tar xf inference/mobileface_v1.0_infer.tar --strip-components 1 -C inference
fi
| insightface/recognition/arcface_paddle/test_tipc/prepare.sh/0 | {
"file_path": "insightface/recognition/arcface_paddle/test_tipc/prepare.sh",
"repo_id": "insightface",
"token_count": 634
} | 124 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import time
class AverageMeter(object):
"""Computes and stores the average and current value
"""
def __init__(self):
self.val = None
self.avg = None
self.sum = None
self.count = None
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
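# Usage sketch (illustrative): m = AverageMeter(); m.update(2.0); m.update(4.0)
# leaves m.val == 4.0, m.sum == 6.0, m.count == 2 and m.avg == 3.0.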
def init_logging(rank, models_root):
    if rank == 0:
log_root = logging.getLogger()
log_root.setLevel(logging.INFO)
formatter = logging.Formatter("Training: %(asctime)s - %(message)s")
handler_file = logging.FileHandler(
os.path.join(models_root, "training.log"))
handler_stream = logging.StreamHandler(sys.stdout)
handler_file.setFormatter(formatter)
handler_stream.setFormatter(formatter)
log_root.addHandler(handler_file)
log_root.addHandler(handler_stream)
log_root.info('rank: %d' % rank)
class CallBackLogging(object):
def __init__(self,
frequent,
rank,
world_size,
total_step,
batch_size,
writer=None):
self.frequent: int = frequent
self.rank: int = rank
self.world_size: int = world_size
self.time_start = time.time()
self.total_step: int = total_step
self.batch_size: int = batch_size
self.writer = writer
self.tic = time.time()
def __call__(self, global_step, loss: AverageMeter, epoch: int, lr_value, avg_reader_cost, avg_batch_cost, avg_samples, ips):
        if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0:
time_now = (time.time() - self.time_start) / 3600
time_total = time_now / ((global_step + 1) / self.total_step)
time_for_end = time_total - time_now
if self.writer is not None:
self.writer.add_scalar('time_for_end', time_for_end,
global_step)
self.writer.add_scalar('loss', loss.avg, global_step)
# ips is throughput
msg = "loss %.4f, lr: %f, epoch: %d, step: %d, eta: %1.2f hours, avg_reader_cost: %.5f sec, avg_batch_cost: %.5f sec, avg_samples: %.5f, ips: %.5f images/sec" % (
loss.avg, lr_value, epoch, global_step, time_for_end,avg_reader_cost, avg_batch_cost, avg_samples, ips * self.world_size)
logging.info(msg)
loss.reset()
self.tic = time.time()
| insightface/recognition/arcface_paddle/utils/logging.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/utils/logging.py",
"repo_id": "insightface",
"token_count": 1489
} | 125 |
import logging
import os
import time
from typing import List
import torch
from eval import verification
from utils.utils_logging import AverageMeter
from torch.utils.tensorboard import SummaryWriter
from torch import distributed
class CallBackVerification(object):
def __init__(self, val_targets, rec_prefix, summary_writer=None, image_size=(112, 112), wandb_logger=None):
self.rank: int = distributed.get_rank()
self.highest_acc: float = 0.0
self.highest_acc_list: List[float] = [0.0] * len(val_targets)
self.ver_list: List[object] = []
self.ver_name_list: List[str] = []
        if self.rank == 0:
self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size)
self.summary_writer = summary_writer
self.wandb_logger = wandb_logger
def ver_test(self, backbone: torch.nn.Module, global_step: int):
results = []
for i in range(len(self.ver_list)):
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(
self.ver_list[i], backbone, 10, 10)
logging.info('[%s][%d]XNorm: %f' % (self.ver_name_list[i], global_step, xnorm))
logging.info('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (self.ver_name_list[i], global_step, acc2, std2))
self.summary_writer: SummaryWriter
self.summary_writer.add_scalar(tag=self.ver_name_list[i], scalar_value=acc2, global_step=global_step, )
if self.wandb_logger:
import wandb
self.wandb_logger.log({
f'Acc/val-Acc1 {self.ver_name_list[i]}': acc1,
f'Acc/val-Acc2 {self.ver_name_list[i]}': acc2,
# f'Acc/val-std1 {self.ver_name_list[i]}': std1,
# f'Acc/val-std2 {self.ver_name_list[i]}': acc2,
})
if acc2 > self.highest_acc_list[i]:
self.highest_acc_list[i] = acc2
logging.info(
'[%s][%d]Accuracy-Highest: %1.5f' % (self.ver_name_list[i], global_step, self.highest_acc_list[i]))
results.append(acc2)
def init_dataset(self, val_targets, data_dir, image_size):
for name in val_targets:
path = os.path.join(data_dir, name + ".bin")
if os.path.exists(path):
data_set = verification.load_bin(path, image_size)
self.ver_list.append(data_set)
self.ver_name_list.append(name)
def __call__(self, num_update, backbone: torch.nn.Module):
        if self.rank == 0 and num_update > 0:
backbone.eval()
self.ver_test(backbone, num_update)
backbone.train()
class CallBackLogging(object):
def __init__(self, frequent, total_step, batch_size, start_step=0,writer=None):
self.frequent: int = frequent
self.rank: int = distributed.get_rank()
self.world_size: int = distributed.get_world_size()
self.time_start = time.time()
self.total_step: int = total_step
self.start_step: int = start_step
self.batch_size: int = batch_size
self.writer = writer
self.init = False
self.tic = 0
def __call__(self,
global_step: int,
loss: AverageMeter,
epoch: int,
fp16: bool,
learning_rate: float,
grad_scaler: torch.cuda.amp.GradScaler):
if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0:
if self.init:
try:
speed: float = self.frequent * self.batch_size / (time.time() - self.tic)
speed_total = speed * self.world_size
except ZeroDivisionError:
speed_total = float('inf')
#time_now = (time.time() - self.time_start) / 3600
#time_total = time_now / ((global_step + 1) / self.total_step)
#time_for_end = time_total - time_now
time_now = time.time()
time_sec = int(time_now - self.time_start)
time_sec_avg = time_sec / (global_step - self.start_step + 1)
eta_sec = time_sec_avg * (self.total_step - global_step - 1)
time_for_end = eta_sec/3600
if self.writer is not None:
self.writer.add_scalar('time_for_end', time_for_end, global_step)
self.writer.add_scalar('learning_rate', learning_rate, global_step)
self.writer.add_scalar('loss', loss.avg, global_step)
if fp16:
msg = "Speed %.2f samples/sec Loss %.4f LearningRate %.6f Epoch: %d Global Step: %d " \
"Fp16 Grad Scale: %2.f Required: %1.f hours" % (
speed_total, loss.avg, learning_rate, epoch, global_step,
grad_scaler.get_scale(), time_for_end
)
else:
msg = "Speed %.2f samples/sec Loss %.4f LearningRate %.6f Epoch: %d Global Step: %d " \
"Required: %1.f hours" % (
speed_total, loss.avg, learning_rate, epoch, global_step, time_for_end
)
logging.info(msg)
loss.reset()
self.tic = time.time()
else:
self.init = True
self.tic = time.time()
| insightface/recognition/arcface_torch/utils/utils_callbacks.py/0 | {
"file_path": "insightface/recognition/arcface_torch/utils/utils_callbacks.py",
"repo_id": "insightface",
"token_count": 2903
} | 126 |
import argparse
import os
import pickle
import timeit
import warnings
from pathlib import Path
import cv2
import matplotlib
import matplotlib.pyplot as plt
import mxnet as mx
import numpy as np
import pandas as pd
import sklearn
from menpo.visualize.viewmatplotlib import sample_colours_from_colourmap
from mxnet.gluon.data import Dataset, DataLoader
from prettytable import PrettyTable
from skimage import transform as trans
from sklearn import preprocessing
from sklearn.metrics import roc_curve, auc
from tqdm import tqdm
matplotlib.use('Agg')
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='do ijb test')
# general
parser.add_argument('--model-prefix', default='', help='path to load model.')
parser.add_argument('--model-epoch', default=1, type=int, help='')
parser.add_argument('--image-path', default='', type=str, help='')
parser.add_argument('--result-dir', default='.', type=str, help='')
parser.add_argument('--gpu', default='0', type=str, help='gpu id')
parser.add_argument('--batch-size', default=128, type=int, help='')
parser.add_argument('--job', default='insightface', type=str, help='job name')
parser.add_argument('-es', '--emb-size', type=int, help='embedding size')
parser.add_argument('--target',
default='IJBC',
type=str,
help='target, set to IJBC or IJBB')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
target = args.target
model_path = args.model_prefix
image_path = args.image_path
result_dir = args.result_dir
epoch = args.model_epoch
use_norm_score = True # if True, TestMode(N1)
use_detector_score = True # if True, TestMode(D1)
use_flip_test = True # if True, TestMode(F1)
job = args.job
batch_size = args.batch_size
class DatasetIJB(Dataset):
def __init__(self, root, lines, align=True):
self.src = np.array(
[[30.2946, 51.6963], [65.5318, 51.5014], [48.0252, 71.7366],
[33.5493, 92.3655], [62.7299, 92.2041]],
dtype=np.float32)
self.src[:, 0] += 8.0
self.lines = lines
self.img_root = root
self.align = align
def __len__(self):
return len(self.lines)
def __getitem__(self, idx):
each_line = self.lines[idx]
name_lmk_score = each_line.strip().split(' ') # "name lmk score"
img_name = os.path.join(self.img_root, name_lmk_score[0])
img = cv2.imread(img_name)
if self.align:
landmark = np.array([float(x) for x in name_lmk_score[1:-1]],
dtype=np.float32)
landmark = landmark.reshape((5, 2))
#
assert landmark.shape[0] == 68 or landmark.shape[0] == 5
assert landmark.shape[1] == 2
if landmark.shape[0] == 68:
landmark5 = np.zeros((5, 2), dtype=np.float32)
landmark5[0] = (landmark[36] + landmark[39]) / 2
landmark5[1] = (landmark[42] + landmark[45]) / 2
landmark5[2] = landmark[30]
landmark5[3] = landmark[48]
landmark5[4] = landmark[54]
else:
landmark5 = landmark
#
tform = trans.SimilarityTransform()
tform.estimate(landmark5, self.src)
#
M = tform.params[0:2, :]
img = cv2.warpAffine(img, M, (112, 112), borderValue=0.0)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_flip = np.fliplr(img)
img = np.transpose(img, (2, 0, 1)) # 3*112*112, RGB
img_flip = np.transpose(img_flip, (2, 0, 1))
input_blob = np.zeros((2, 3, 112, 112), dtype=np.uint8)
input_blob[0] = img
input_blob[1] = img_flip
return mx.nd.array(input_blob)
def extract_parallel(prefix, epoch, dataset, batch_size, size):
# init
model_list = list()
num_ctx = len(os.environ['CUDA_VISIBLE_DEVICES'].split(","))
num_iter = 0
feat_mat = mx.nd.zeros(shape=(len(dataset), 2 * size))
def batchify_fn(data):
return mx.nd.concat(*data, dim=0)
data_loader = DataLoader(dataset,
batch_size,
last_batch='keep',
num_workers=8,
thread_pool=True,
prefetch=16,
batchify_fn=batchify_fn)
symbol, arg_params, aux_params = mx.module.module.load_checkpoint(
prefix, epoch)
all_layers = symbol.get_internals()
symbol = all_layers['fc1_output']
# init model list
for i in range(num_ctx):
model = mx.mod.Module(symbol, context=mx.gpu(i), label_names=None)
model.bind(for_training=False,
data_shapes=[('data', (2 * batch_size, 3, 112, 112))])
model.set_params(arg_params, aux_params)
model_list.append(model)
# extract parallel and async
num_model = len(model_list)
for image in tqdm(data_loader):
data_batch = mx.io.DataBatch(data=(image, ))
model_list[num_iter % num_model].forward(data_batch, is_train=False)
feat = model_list[num_iter %
num_model].get_outputs(merge_multi_context=True)[0]
feat = mx.nd.L2Normalization(feat)
feat = mx.nd.reshape(feat, (-1, size * 2))
feat_mat[batch_size * num_iter:batch_size * num_iter +
feat.shape[0], :] = feat.as_in_context(mx.cpu())
num_iter += 1
#if num_iter % 20 == 0:
# mx.nd.waitall()
return feat_mat.asnumpy()
# Divide a list into n parts as evenly as possible; the result always has n parts (len(result) == n),
# and when n exceeds the number of elements the extra parts are empty lists []
def divideIntoNstrand(listTemp, n):
twoList = [[] for i in range(n)]
for i, e in enumerate(listTemp):
twoList[i % n].append(e)
return twoList
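# Example (illustrative): divideIntoNstrand([1, 2, 3, 4, 5], 2) -> [[1, 3, 5], [2, 4]];
# divideIntoNstrand([1], 3) -> [[1], [], []].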
def read_template_media_list(path):
ijb_meta = pd.read_csv(path, sep=' ', header=None).values
    templates = ijb_meta[:, 1].astype(int)
    medias = ijb_meta[:, 2].astype(int)
return templates, medias
def read_template_pair_list(path):
pairs = pd.read_csv(path, sep=' ', header=None).values
    t1 = pairs[:, 0].astype(int)
    t2 = pairs[:, 1].astype(int)
    label = pairs[:, 2].astype(int)
return t1, t2, label
def read_image_feature(path):
with open(path, 'rb') as fid:
img_feats = pickle.load(fid)
return img_feats
def image2template_feature(img_feats=None, templates=None, medias=None):
# ==========================================================
# 1. face image feature l2 normalization. img_feats:[number_image x feats_dim]
# 2. compute media feature.
# 3. compute template feature.
# ==========================================================
unique_templates = np.unique(templates)
template_feats = np.zeros((len(unique_templates), img_feats.shape[1]))
for count_template, uqt in enumerate(unique_templates):
(ind_t, ) = np.where(templates == uqt)
face_norm_feats = img_feats[ind_t]
face_medias = medias[ind_t]
unique_medias, unique_media_counts = np.unique(face_medias,
return_counts=True)
media_norm_feats = []
for u, ct in zip(unique_medias, unique_media_counts):
(ind_m, ) = np.where(face_medias == u)
if ct == 1:
media_norm_feats += [face_norm_feats[ind_m]]
else: # image features from the same video will be aggregated into one feature
media_norm_feats += [
np.mean(face_norm_feats[ind_m], axis=0, keepdims=True)
]
media_norm_feats = np.array(media_norm_feats)
# media_norm_feats = media_norm_feats / np.sqrt(np.sum(media_norm_feats ** 2, -1, keepdims=True))
template_feats[count_template] = np.sum(media_norm_feats, axis=0)
if count_template % 2000 == 0:
print('Finish Calculating {} template features.'.format(
count_template))
# template_norm_feats = template_feats / np.sqrt(np.sum(template_feats ** 2, -1, keepdims=True))
template_norm_feats = sklearn.preprocessing.normalize(template_feats)
# print(template_norm_feats.shape)
return template_norm_feats, unique_templates
# In[ ]:
def verification(template_norm_feats=None,
unique_templates=None,
p1=None,
p2=None):
# ==========================================================
# Compute set-to-set Similarity Score.
# ==========================================================
template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
for count_template, uqt in enumerate(unique_templates):
template2id[uqt] = count_template
score = np.zeros((len(p1), )) # save cosine distance between pairs
total_pairs = np.array(range(len(p1)))
    batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limitation
sublists = [
total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
]
total_sublists = len(sublists)
for c, s in enumerate(sublists):
feat1 = template_norm_feats[template2id[p1[s]]]
feat2 = template_norm_feats[template2id[p2[s]]]
similarity_score = np.sum(feat1 * feat2, -1)
score[s] = similarity_score.flatten()
if c % 10 == 0:
print('Finish {}/{} pairs.'.format(c, total_sublists))
return score
# In[ ]:
def verification2(template_norm_feats=None,
unique_templates=None,
p1=None,
p2=None):
template2id = np.zeros((max(unique_templates) + 1, 1), dtype=int)
for count_template, uqt in enumerate(unique_templates):
template2id[uqt] = count_template
score = np.zeros((len(p1), )) # save cosine distance between pairs
total_pairs = np.array(range(len(p1)))
    batchsize = 100000 # small batchsize instead of all pairs in one batch due to the memory limitation
sublists = [
total_pairs[i:i + batchsize] for i in range(0, len(p1), batchsize)
]
total_sublists = len(sublists)
for c, s in enumerate(sublists):
feat1 = template_norm_feats[template2id[p1[s]]]
feat2 = template_norm_feats[template2id[p2[s]]]
similarity_score = np.sum(feat1 * feat2, -1)
score[s] = similarity_score.flatten()
if c % 10 == 0:
print('Finish {}/{} pairs.'.format(c, total_sublists))
return score
def read_score(path):
with open(path, 'rb') as fid:
img_feats = pickle.load(fid)
return img_feats
# # Step1: Load Meta Data
assert target == 'IJBC' or target == 'IJBB'
# =============================================================
# load image and template relationships for template feature embedding
# tid --> template id, mid --> media id
# format:
# image_name tid mid
# =============================================================
start = timeit.default_timer()
templates, medias = read_template_media_list(
os.path.join('%s/meta' % image_path,
'%s_face_tid_mid.txt' % target.lower()))
stop = timeit.default_timer()
print('Time: %.2f s. ' % (stop - start))
# =============================================================
# load template pairs for template-to-template verification
# tid : template id, label : 1/0
# format:
# tid_1 tid_2 label
# =============================================================
start = timeit.default_timer()
p1, p2, label = read_template_pair_list(
os.path.join('%s/meta' % image_path,
'%s_template_pair_label.txt' % target.lower()))
stop = timeit.default_timer()
print('Time: %.2f s. ' % (stop - start))
# # Step 2: Get Image Features
# =============================================================
# load image features
# format:
# img_feats: [image_num x feats_dim] (227630, 512)
# =============================================================
start = timeit.default_timer()
img_path = '%s/loose_crop' % image_path
img_list_path = '%s/meta/%s_name_5pts_score.txt' % (image_path, target.lower())
img_list = open(img_list_path)
files = img_list.readlines()
dataset = DatasetIJB(root=img_path, lines=files, align=True)
img_feats = extract_parallel(args.model_prefix,
args.model_epoch,
dataset,
args.batch_size,
size=args.emb_size)
faceness_scores = []
for each_line in files:
name_lmk_score = each_line.split()
faceness_scores.append(name_lmk_score[-1])
faceness_scores = np.array(faceness_scores).astype(np.float32)
stop = timeit.default_timer()
print('Time: %.2f s. ' % (stop - start))
print('Feature Shape: ({} , {}) .'.format(img_feats.shape[0],
img_feats.shape[1]))
# # Step3: Get Template Features
# In[ ]:
# =============================================================
# compute template features from image features.
# =============================================================
start = timeit.default_timer()
# ==========================================================
# Norm feature before aggregation into template feature?
# Feature norm from embedding network and faceness score are able to decrease weights for noise samples (not face).
# ==========================================================
# 1. FaceScore (Feature Norm)
# 2. FaceScore (Detector)
if use_flip_test:
# concat --- F1
# img_input_feats = img_feats
# add --- F2
img_input_feats = img_feats[:, 0:img_feats.shape[1] //
2] + img_feats[:, img_feats.shape[1] // 2:]
else:
img_input_feats = img_feats[:, 0:img_feats.shape[1] // 2]
if use_norm_score:
img_input_feats = img_input_feats
else:
# normalise features to remove norm information
img_input_feats = img_input_feats / np.sqrt(
np.sum(img_input_feats**2, -1, keepdims=True))
if use_detector_score:
print(img_input_feats.shape, faceness_scores.shape)
# img_input_feats = img_input_feats * np.matlib.repmat(faceness_scores[:,np.newaxis], 1, img_input_feats.shape[1])
img_input_feats = img_input_feats * faceness_scores[:, np.newaxis]
else:
img_input_feats = img_input_feats
template_norm_feats, unique_templates = image2template_feature(
img_input_feats, templates, medias)
stop = timeit.default_timer()
print('Time: %.2f s. ' % (stop - start))
# # Step 4: Get Template Similarity Scores
# In[ ]:
# =============================================================
# compute verification scores between template pairs.
# =============================================================
start = timeit.default_timer()
score = verification(template_norm_feats, unique_templates, p1, p2)
stop = timeit.default_timer()
print('Time: %.2f s. ' % (stop - start))
# In[ ]:
save_path = result_dir + '/%s_result' % target
if not os.path.exists(save_path):
os.makedirs(save_path)
score_save_file = os.path.join(save_path, "%s.npy" % job)
np.save(score_save_file, score)
# # Step 5: Get ROC Curves and TPR@FPR Table
# In[ ]:
files = [score_save_file]
methods = []
scores = []
for file in files:
methods.append(Path(file).stem)
scores.append(np.load(file))
methods = np.array(methods)
scores = dict(zip(methods, scores))
colours = dict(
zip(methods, sample_colours_from_colourmap(methods.shape[0], 'Set2')))
# x_labels = [1/(10**x) for x in np.linspace(6, 0, 6)]
x_labels = [10**-6, 10**-5, 10**-4, 10**-3, 10**-2, 10**-1]
tpr_fpr_table = PrettyTable(['Methods'] + [str(x) for x in x_labels])
fig = plt.figure()
for method in methods:
fpr, tpr, _ = roc_curve(label, scores[method])
roc_auc = auc(fpr, tpr)
fpr = np.flipud(fpr)
tpr = np.flipud(tpr) # select largest tpr at same fpr
plt.plot(fpr,
tpr,
color=colours[method],
lw=1,
label=('[%s (AUC = %0.4f %%)]' %
(method.split('-')[-1], roc_auc * 100)))
tpr_fpr_row = []
tpr_fpr_row.append("%s-%s" % (method, target))
for fpr_iter in np.arange(len(x_labels)):
_, min_index = min(
list(zip(abs(fpr - x_labels[fpr_iter]), range(len(fpr)))))
# tpr_fpr_row.append('%.4f' % tpr[min_index])
tpr_fpr_row.append('%.2f' % (tpr[min_index] * 100))
tpr_fpr_table.add_row(tpr_fpr_row)
plt.xlim([10**-6, 0.1])
plt.ylim([0.3, 1.0])
plt.grid(linestyle='--', linewidth=1)
plt.xticks(x_labels)
plt.yticks(np.linspace(0.3, 1.0, 8, endpoint=True))
plt.xscale('log')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC on IJB')
plt.legend(loc="lower right")
# plt.show()
fig.savefig(os.path.join(save_path, '%s.pdf' % job))
print(tpr_fpr_table)
| insightface/recognition/partial_fc/mxnet/evaluation/ijb.py/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/evaluation/ijb.py",
"repo_id": "insightface",
"token_count": 7439
} | 127 |
import os
import shutil
import datetime
import sys
from mxnet import ndarray as nd
import mxnet as mx
import random
import argparse
import numbers
import cv2
import time
import pickle
import sklearn
import sklearn.preprocessing
from easydict import EasyDict as edict
import numpy as np
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
from rec_builder import *
def get_embedding(args, imgrec, a, b, image_size, model):
ocontents = []
for idx in range(a, b):
s = imgrec.read_idx(idx)
ocontents.append(s)
embeddings = None
#print(len(ocontents))
ba = 0
rlabel = -1
imgs = []
contents = []
while True:
bb = min(ba + args.batch_size, len(ocontents))
if ba >= bb:
break
_batch_size = bb - ba
#_batch_size2 = max(_batch_size, args.ctx_num)
_batch_size2 = _batch_size
if _batch_size % args.ctx_num != 0:
_batch_size2 = ((_batch_size // args.ctx_num) + 1) * args.ctx_num
data = np.zeros((_batch_size2, 3, image_size[0], image_size[1]))
count = bb - ba
ii = 0
for i in range(ba, bb):
header, img = mx.recordio.unpack(ocontents[i])
contents.append(img)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
if rlabel < 0:
rlabel = int(label)
img = mx.image.imdecode(img)
rgb = img.asnumpy()
bgr = rgb[:, :, ::-1]
imgs.append(bgr)
img = rgb.transpose((2, 0, 1))
data[ii] = img
ii += 1
while ii < _batch_size2:
data[ii] = data[0]
ii += 1
nddata = nd.array(data)
db = mx.io.DataBatch(data=(nddata, ))
model.forward(db, is_train=False)
net_out = model.get_outputs()
net_out = net_out[0].asnumpy()
if embeddings is None:
embeddings = np.zeros((len(ocontents), net_out.shape[1]))
embeddings[ba:bb, :] = net_out[0:_batch_size, :]
ba = bb
embeddings = sklearn.preprocessing.normalize(embeddings)
return embeddings, rlabel, contents
def main(args):
print(args)
image_size = (112, 112)
print('image_size', image_size)
vec = args.model.split(',')
prefix = vec[0]
epoch = int(vec[1])
print('loading', prefix, epoch)
ctx = []
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if len(cvd) > 0:
for i in range(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx) == 0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
args.ctx_num = len(ctx)
args.batch_size *= args.ctx_num
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
W = None
i = 0
while True:
key = 'fc7_%d_weight' % i
i += 1
if key not in arg_params:
break
_W = arg_params[key].asnumpy()
#_W = _W.reshape( (-1, 10, 512) )
if W is None:
W = _W
else:
W = np.concatenate((W, _W), axis=0)
K = args.k
W = sklearn.preprocessing.normalize(W)
W = W.reshape((-1, K, 512))
all_layers = sym.get_internals()
sym = all_layers['fc1_output']
model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
model.bind(data_shapes=[('data', (args.ctx_num, 3, image_size[0],
image_size[1]))])
model.set_params(arg_params, aux_params)
print('W:', W.shape)
path_imgrec = os.path.join(args.data, 'train.rec')
path_imgidx = os.path.join(args.data, 'train.idx')
imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
id_list = []
s = imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
assert header.flag > 0
print('header0 label', header.label)
header0 = (int(header.label[0]), int(header.label[1]))
#assert(header.flag==1)
imgidx = range(1, int(header.label[0]))
id2range = {}
a, b = int(header.label[0]), int(header.label[1])
seq_identity = range(a, b)
print(len(seq_identity))
image_count = 0
pp = 0
for wid, identity in enumerate(seq_identity):
pp += 1
s = imgrec.read_idx(identity)
header, _ = mx.recordio.unpack(s)
contents = []
a, b = int(header.label[0]), int(header.label[1])
_count = b - a
id_list.append((wid, a, b, _count))
image_count += _count
pp = 0
if not os.path.exists(args.output):
os.makedirs(args.output)
ret = np.zeros((image_count, K + 1), dtype=np.float32)
output_dir = args.output
builder = SeqRecBuilder(output_dir)
print(ret.shape)
imid = 0
da = datetime.datetime.now()
label = 0
num_images = 0
cos_thresh = np.cos(np.pi * args.threshold / 180.0)
for id_item in id_list:
wid = id_item[0]
pp += 1
if pp % 40 == 0:
db = datetime.datetime.now()
print('processing id', pp, (db - da).total_seconds())
da = db
x, _, contents = get_embedding(args, imgrec, id_item[1], id_item[2],
image_size, model)
subcenters = W[wid]
        K_stat = np.zeros((K, ), dtype=int)
for i in range(x.shape[0]):
_x = x[i]
sim = np.dot(subcenters, _x) # len(sim)==K
mc = np.argmax(sim)
K_stat[mc] += 1
dominant_index = np.argmax(K_stat)
dominant_center = subcenters[dominant_index]
sim = np.dot(x, dominant_center)
idx = np.where(sim > cos_thresh)[0]
num_drop = x.shape[0] - len(idx)
if len(idx) == 0:
continue
#print("labelid %d dropped %d, from %d to %d"% (wid, num_drop, x.shape[0], len(idx)))
num_images += len(idx)
for _idx in idx:
c = contents[_idx]
builder.add(label, c, is_image=False)
label += 1
builder.close()
print('total:', num_images)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
# general
parser.add_argument('--data',
default='/bigdata/faces_ms1m_full',
type=str,
help='')
parser.add_argument('--output',
default='/bigdata/ms1m_full_k3drop075',
type=str,
help='')
parser.add_argument(
'--model',
default=
'../Evaluation/IJB/pretrained_models/r50-arcfacesc-msf-k3z/model,2',
help='path to load model.')
parser.add_argument('--batch-size', default=16, type=int, help='')
parser.add_argument('--threshold', default=75, type=float, help='')
parser.add_argument('--k', default=3, type=int, help='')
args = parser.parse_args()
main(args)
| insightface/recognition/subcenter_arcface/drop.py/0 | {
"file_path": "insightface/recognition/subcenter_arcface/drop.py",
"repo_id": "insightface",
"token_count": 3544
} | 128 |
import torch
import torch.nn as nn
from utils import rend_util
class RayTracing(nn.Module):
def __init__(
self,
object_bounding_sphere=1.0,
sdf_threshold=5.0e-5,
line_search_step=0.5,
line_step_iters=1,
sphere_tracing_iters=10,
n_steps=100,
n_secant_steps=8,
):
super().__init__()
self.object_bounding_sphere = object_bounding_sphere
self.sdf_threshold = sdf_threshold
self.sphere_tracing_iters = sphere_tracing_iters
self.line_step_iters = line_step_iters
self.line_search_step = line_search_step
self.n_steps = n_steps
self.n_secant_steps = n_secant_steps
def forward(self,
sdf,
cam_loc,
object_mask,
ray_directions
):
batch_size, num_pixels, _ = ray_directions.shape
sphere_intersections, mask_intersect = rend_util.get_sphere_intersection(cam_loc, ray_directions, r=self.object_bounding_sphere)
curr_start_points, unfinished_mask_start, acc_start_dis, acc_end_dis, min_dis, max_dis = \
self.sphere_tracing(batch_size, num_pixels, sdf, cam_loc, ray_directions, mask_intersect, sphere_intersections)
network_object_mask = (acc_start_dis < acc_end_dis)
# The non convergent rays should be handled by the sampler
sampler_mask = unfinished_mask_start
sampler_net_obj_mask = torch.zeros_like(sampler_mask).bool().cuda()
if sampler_mask.sum() > 0:
sampler_min_max = torch.zeros((batch_size, num_pixels, 2)).cuda()
sampler_min_max.reshape(-1, 2)[sampler_mask, 0] = acc_start_dis[sampler_mask]
sampler_min_max.reshape(-1, 2)[sampler_mask, 1] = acc_end_dis[sampler_mask]
sampler_pts, sampler_net_obj_mask, sampler_dists = self.ray_sampler(sdf,
cam_loc,
object_mask,
ray_directions,
sampler_min_max,
sampler_mask
)
curr_start_points[sampler_mask] = sampler_pts[sampler_mask]
acc_start_dis[sampler_mask] = sampler_dists[sampler_mask]
network_object_mask[sampler_mask] = sampler_net_obj_mask[sampler_mask]
print('----------------------------------------------------------------')
print('RayTracing: object = {0}/{1}, secant on {2}/{3}.'
.format(network_object_mask.sum(), len(network_object_mask), sampler_net_obj_mask.sum(), sampler_mask.sum()))
print('----------------------------------------------------------------')
if not self.training:
return curr_start_points, \
network_object_mask, \
acc_start_dis
ray_directions = ray_directions.reshape(-1, 3)
mask_intersect = mask_intersect.reshape(-1)
in_mask = ~network_object_mask & object_mask & ~sampler_mask
out_mask = ~object_mask & ~sampler_mask
mask_left_out = (in_mask | out_mask) & ~mask_intersect
if mask_left_out.sum() > 0: # project the origin to the not intersect points on the sphere
cam_left_out = cam_loc.unsqueeze(1).repeat(1, num_pixels, 1).reshape(-1, 3)[mask_left_out]
rays_left_out = ray_directions[mask_left_out]
acc_start_dis[mask_left_out] = -torch.bmm(rays_left_out.view(-1, 1, 3), cam_left_out.view(-1, 3, 1)).squeeze()
curr_start_points[mask_left_out] = cam_left_out + acc_start_dis[mask_left_out].unsqueeze(1) * rays_left_out
mask = (in_mask | out_mask) & mask_intersect
if mask.sum() > 0:
min_dis[network_object_mask & out_mask] = acc_start_dis[network_object_mask & out_mask]
min_mask_points, min_mask_dist = self.minimal_sdf_points(num_pixels, sdf, cam_loc, ray_directions, mask, min_dis, max_dis)
curr_start_points[mask] = min_mask_points
acc_start_dis[mask] = min_mask_dist
return curr_start_points, \
network_object_mask, \
acc_start_dis
def sphere_tracing(self, batch_size, num_pixels, sdf, cam_loc, ray_directions, mask_intersect, sphere_intersections):
''' Run sphere tracing algorithm for max iterations from both sides of unit sphere intersection '''
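        # (Added summary) Each ray is marched from both of its bounding-sphere
        # intersections; at every iteration the travelled distance is advanced by the
        # local SDF value (the largest step that cannot skip the surface), and the short
        # line search below backs off points that overshot into the negative-SDF region.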
sphere_intersections_points = cam_loc.reshape(batch_size, 1, 1, 3) + sphere_intersections.unsqueeze(-1) * ray_directions.unsqueeze(2)
unfinished_mask_start = mask_intersect.reshape(-1).clone()
unfinished_mask_end = mask_intersect.reshape(-1).clone()
        # Initialize the current points on the start (front) side of each ray
curr_start_points = torch.zeros(batch_size * num_pixels, 3).cuda().float()
curr_start_points[unfinished_mask_start] = sphere_intersections_points[:,:,0,:].reshape(-1,3)[unfinished_mask_start]
acc_start_dis = torch.zeros(batch_size * num_pixels).cuda().float()
acc_start_dis[unfinished_mask_start] = sphere_intersections.reshape(-1,2)[unfinished_mask_start,0]
        # Initialize the current points on the end (back) side of each ray
curr_end_points = torch.zeros(batch_size * num_pixels, 3).cuda().float()
curr_end_points[unfinished_mask_end] = sphere_intersections_points[:,:,1,:].reshape(-1,3)[unfinished_mask_end]
acc_end_dis = torch.zeros(batch_size * num_pixels).cuda().float()
acc_end_dis[unfinished_mask_end] = sphere_intersections.reshape(-1,2)[unfinished_mask_end,1]
        # Initialize min and max depth
min_dis = acc_start_dis.clone()
max_dis = acc_end_dis.clone()
# Iterate on the rays (from both sides) till finding a surface
iters = 0
next_sdf_start = torch.zeros_like(acc_start_dis).cuda()
next_sdf_start[unfinished_mask_start] = sdf(curr_start_points[unfinished_mask_start])
next_sdf_end = torch.zeros_like(acc_end_dis).cuda()
next_sdf_end[unfinished_mask_end] = sdf(curr_end_points[unfinished_mask_end])
while True:
# Update sdf
curr_sdf_start = torch.zeros_like(acc_start_dis).cuda()
curr_sdf_start[unfinished_mask_start] = next_sdf_start[unfinished_mask_start]
curr_sdf_start[curr_sdf_start <= self.sdf_threshold] = 0
curr_sdf_end = torch.zeros_like(acc_end_dis).cuda()
curr_sdf_end[unfinished_mask_end] = next_sdf_end[unfinished_mask_end]
curr_sdf_end[curr_sdf_end <= self.sdf_threshold] = 0
# Update masks
unfinished_mask_start = unfinished_mask_start & (curr_sdf_start > self.sdf_threshold)
unfinished_mask_end = unfinished_mask_end & (curr_sdf_end > self.sdf_threshold)
if (unfinished_mask_start.sum() == 0 and unfinished_mask_end.sum() == 0) or iters == self.sphere_tracing_iters:
break
iters += 1
# Make step
# Update distance
acc_start_dis = acc_start_dis + curr_sdf_start
acc_end_dis = acc_end_dis - curr_sdf_end
# Update points
curr_start_points = (cam_loc.unsqueeze(1) + acc_start_dis.reshape(batch_size, num_pixels, 1) * ray_directions).reshape(-1, 3)
curr_end_points = (cam_loc.unsqueeze(1) + acc_end_dis.reshape(batch_size, num_pixels, 1) * ray_directions).reshape(-1, 3)
# Fix points which wrongly crossed the surface
next_sdf_start = torch.zeros_like(acc_start_dis).cuda()
next_sdf_start[unfinished_mask_start] = sdf(curr_start_points[unfinished_mask_start])
next_sdf_end = torch.zeros_like(acc_end_dis).cuda()
next_sdf_end[unfinished_mask_end] = sdf(curr_end_points[unfinished_mask_end])
not_projected_start = next_sdf_start < 0
not_projected_end = next_sdf_end < 0
not_proj_iters = 0
while (not_projected_start.sum() > 0 or not_projected_end.sum() > 0) and not_proj_iters < self.line_step_iters:
# Step backwards
acc_start_dis[not_projected_start] -= ((1 - self.line_search_step) / (2 ** not_proj_iters)) * curr_sdf_start[not_projected_start]
curr_start_points[not_projected_start] = (cam_loc.unsqueeze(1) + acc_start_dis.reshape(batch_size, num_pixels, 1) * ray_directions).reshape(-1, 3)[not_projected_start]
acc_end_dis[not_projected_end] += ((1 - self.line_search_step) / (2 ** not_proj_iters)) * curr_sdf_end[not_projected_end]
curr_end_points[not_projected_end] = (cam_loc.unsqueeze(1) + acc_end_dis.reshape(batch_size, num_pixels, 1) * ray_directions).reshape(-1, 3)[not_projected_end]
# Calc sdf
next_sdf_start[not_projected_start] = sdf(curr_start_points[not_projected_start])
next_sdf_end[not_projected_end] = sdf(curr_end_points[not_projected_end])
# Update mask
not_projected_start = next_sdf_start < 0
not_projected_end = next_sdf_end < 0
not_proj_iters += 1
unfinished_mask_start = unfinished_mask_start & (acc_start_dis < acc_end_dis)
unfinished_mask_end = unfinished_mask_end & (acc_start_dis < acc_end_dis)
return curr_start_points, unfinished_mask_start, acc_start_dis, acc_end_dis, min_dis, max_dis
def ray_sampler(self, sdf, cam_loc, object_mask, ray_directions, sampler_min_max, sampler_mask):
        ''' Sample the ray in a given range and run the secant method on rays which have a sign transition '''
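        # (Added summary) n_steps candidate depths are placed uniformly in the per-ray
        # [min, max] interval, the first sample with negative SDF marks the surface
        # crossing, and the secant routine then refines that crossing.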
batch_size, num_pixels, _ = ray_directions.shape
n_total_pxl = batch_size * num_pixels
sampler_pts = torch.zeros(n_total_pxl, 3).cuda().float()
sampler_dists = torch.zeros(n_total_pxl).cuda().float()
intervals_dist = torch.linspace(0, 1, steps=self.n_steps).cuda().view(1, 1, -1)
pts_intervals = sampler_min_max[:, :, 0].unsqueeze(-1) + intervals_dist * (sampler_min_max[:, :, 1] - sampler_min_max[:, :, 0]).unsqueeze(-1)
points = cam_loc.reshape(batch_size, 1, 1, 3) + pts_intervals.unsqueeze(-1) * ray_directions.unsqueeze(2)
        # Get the non-convergent rays
mask_intersect_idx = torch.nonzero(sampler_mask).flatten()
points = points.reshape((-1, self.n_steps, 3))[sampler_mask, :, :]
pts_intervals = pts_intervals.reshape((-1, self.n_steps))[sampler_mask]
sdf_val_all = []
for pnts in torch.split(points.reshape(-1, 3), 100000, dim=0):
sdf_val_all.append(sdf(pnts))
sdf_val = torch.cat(sdf_val_all).reshape(-1, self.n_steps)
tmp = torch.sign(sdf_val) * torch.arange(self.n_steps, 0, -1).cuda().float().reshape((1, self.n_steps)) # Force argmin to return the first min value
sampler_pts_ind = torch.argmin(tmp, -1)
sampler_pts[mask_intersect_idx] = points[torch.arange(points.shape[0]), sampler_pts_ind, :]
sampler_dists[mask_intersect_idx] = pts_intervals[torch.arange(pts_intervals.shape[0]), sampler_pts_ind]
true_surface_pts = object_mask[sampler_mask]
net_surface_pts = (sdf_val[torch.arange(sdf_val.shape[0]), sampler_pts_ind] < 0)
# take points with minimal SDF value for P_out pixels
p_out_mask = ~(true_surface_pts & net_surface_pts)
n_p_out = p_out_mask.sum()
if n_p_out > 0:
out_pts_idx = torch.argmin(sdf_val[p_out_mask, :], -1)
sampler_pts[mask_intersect_idx[p_out_mask]] = points[p_out_mask, :, :][torch.arange(n_p_out), out_pts_idx, :]
sampler_dists[mask_intersect_idx[p_out_mask]] = pts_intervals[p_out_mask, :][torch.arange(n_p_out), out_pts_idx]
# Get Network object mask
sampler_net_obj_mask = sampler_mask.clone()
sampler_net_obj_mask[mask_intersect_idx[~net_surface_pts]] = False
# Run Secant method
secant_pts = net_surface_pts & true_surface_pts if self.training else net_surface_pts
n_secant_pts = secant_pts.sum()
if n_secant_pts > 0:
# Get secant z predictions
z_high = pts_intervals[torch.arange(pts_intervals.shape[0]), sampler_pts_ind][secant_pts]
sdf_high = sdf_val[torch.arange(sdf_val.shape[0]), sampler_pts_ind][secant_pts]
z_low = pts_intervals[secant_pts][torch.arange(n_secant_pts), sampler_pts_ind[secant_pts] - 1]
sdf_low = sdf_val[secant_pts][torch.arange(n_secant_pts), sampler_pts_ind[secant_pts] - 1]
cam_loc_secant = cam_loc.unsqueeze(1).repeat(1, num_pixels, 1).reshape((-1, 3))[mask_intersect_idx[secant_pts]]
ray_directions_secant = ray_directions.reshape((-1, 3))[mask_intersect_idx[secant_pts]]
z_pred_secant = self.secant(sdf_low, sdf_high, z_low, z_high, cam_loc_secant, ray_directions_secant, sdf)
# Get points
sampler_pts[mask_intersect_idx[secant_pts]] = cam_loc_secant + z_pred_secant.unsqueeze(-1) * ray_directions_secant
sampler_dists[mask_intersect_idx[secant_pts]] = z_pred_secant
return sampler_pts, sampler_net_obj_mask, sampler_dists
def secant(self, sdf_low, sdf_high, z_low, z_high, cam_loc, ray_directions, sdf):
''' Runs the secant method for interval [z_low, z_high] for n_secant_steps '''
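        # (Added note) z_pred is the standard secant / linear-interpolation estimate of
        # the SDF zero crossing between z_low (where sdf > 0) and z_high (where sdf < 0).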
z_pred = - sdf_low * (z_high - z_low) / (sdf_high - sdf_low) + z_low
for i in range(self.n_secant_steps):
p_mid = cam_loc + z_pred.unsqueeze(-1) * ray_directions
sdf_mid = sdf(p_mid)
ind_low = sdf_mid > 0
if ind_low.sum() > 0:
z_low[ind_low] = z_pred[ind_low]
sdf_low[ind_low] = sdf_mid[ind_low]
ind_high = sdf_mid < 0
if ind_high.sum() > 0:
z_high[ind_high] = z_pred[ind_high]
sdf_high[ind_high] = sdf_mid[ind_high]
z_pred = - sdf_low * (z_high - z_low) / (sdf_high - sdf_low) + z_low
return z_pred
def minimal_sdf_points(self, num_pixels, sdf, cam_loc, ray_directions, mask, min_dis, max_dis):
''' Find points with minimal SDF value on rays for P_out pixels '''
n_mask_points = mask.sum()
n = self.n_steps
# steps = torch.linspace(0.0, 1.0,n).cuda()
steps = torch.empty(n).uniform_(0.0, 1.0).cuda()
mask_max_dis = max_dis[mask].unsqueeze(-1)
mask_min_dis = min_dis[mask].unsqueeze(-1)
steps = steps.unsqueeze(0).repeat(n_mask_points, 1) * (mask_max_dis - mask_min_dis) + mask_min_dis
mask_points = cam_loc.unsqueeze(1).repeat(1, num_pixels, 1).reshape(-1, 3)[mask]
mask_rays = ray_directions[mask, :]
mask_points_all = mask_points.unsqueeze(1).repeat(1, n, 1) + steps.unsqueeze(-1) * mask_rays.unsqueeze(
1).repeat(1, n, 1)
points = mask_points_all.reshape(-1, 3)
mask_sdf_all = []
for pnts in torch.split(points, 100000, dim=0):
mask_sdf_all.append(sdf(pnts))
mask_sdf_all = torch.cat(mask_sdf_all).reshape(-1, n)
min_vals, min_idx = mask_sdf_all.min(-1)
min_mask_points = mask_points_all.reshape(-1, n, 3)[torch.arange(0, n_mask_points), min_idx]
min_mask_dist = steps.reshape(-1, n)[torch.arange(0, n_mask_points), min_idx]
return min_mask_points, min_mask_dist
| insightface/reconstruction/PBIDR/code/model/ray_tracing.py/0 | {
"file_path": "insightface/reconstruction/PBIDR/code/model/ray_tracing.py",
"repo_id": "insightface",
"token_count": 7753
} | 129 |
import os
import os.path as osp
import numpy as np
import menpo.io as mio
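# (Added note) The helper below lifts the vertices to homogeneous coordinates,
# applies the rigid transform R_t, the projection matrix M_proj and the matrix M1
# (presumably a viewport/image-space matrix -- this naming is an assumption, it is
# not documented here), and finally performs the perspective divide by w.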
def project_shape_in_image(verts, R_t, M_proj, M1):
verts_homo = verts
if verts_homo.shape[1] == 3:
ones = np.ones([verts_homo.shape[0], 1])
verts_homo = np.concatenate([verts_homo, ones], axis=1)
verts_out = verts_homo @ R_t @ M_proj @ M1
w_ = verts_out[:, [3]]
verts_out = verts_out / w_
return verts_out
class EyeDataset():
def __init__(self, root, load_data=True):
eyes_info = mio.import_pickle(osp.join(root,'eyes3d.pkl'))
idxs481 = eyes_info['mask481']['idxs']
tri481 = eyes_info['mask481']['trilist']
self.iris_idx_481 = eyes_info['mask481']['idxs_iris']
eyel_template = eyes_info['left_points'][idxs481]
eyer_template = eyes_info['right_points'][idxs481]
eyel_template_homo = np.append(eyel_template, np.ones((eyel_template.shape[0],1)), axis=1)
eyer_template_homo = np.append(eyer_template, np.ones((eyer_template.shape[0],1)), axis=1)
points = mio.import_pickle(osp.join(root,'eyespoints.pkl'))
self.homol = eyel_template_homo.T
self.homor = eyer_template_homo.T
if load_data:
self.worldl = {}
self.worldr = {}
#vector_norm = 0.035
for k in points:
p = k.find('/')
newk = k[p+1:]
value = points[k]
#el_inv = (value['left'] @ eyel_template_homo.T).T
#er_inv = (value['right'] @ eyer_template_homo.T).T
#print('V:', value['left'][:5,:])
#print('E:', el_inv[:5,:])
# gaze vector of left eye in world space
#gl_vector = el_inv[iris_idx_481].mean(axis=0) - el_inv[-1]
#gl_vector = (gl_vector / np.linalg.norm(gl_vector)) * vector_norm
#gl_point = el_inv[iris_idx_481].mean(axis=0) + gl_vector
## gaze vector of right eye in world space
#gr_vector = er_inv[iris_idx_481].mean(axis=0) - er_inv[-1]
#gr_vector = (gr_vector / np.linalg.norm(gr_vector)) * vector_norm
#gr_point = er_inv[iris_idx_481].mean(axis=0) + gr_vector
#self.world[newk] = (el_inv, er_inv, gl_point, gr_point)
self.worldl[newk] = value['left']
self.worldr[newk] = value['right']
#print(self.points.keys())
def get(self, key, to_homo=False):
if key not in self.worldl:
return None, None
left = self.worldl[key]
right = self.worldr[key]
if to_homo:
left = (left @ self.homol).T
right = (right @ self.homor).T
return left, right
def to_homo(self, eyel, eyer):
eyel = (eyel @ self.homol).T
eyer = (eyer @ self.homor).T
return eyel, eyer
| insightface/reconstruction/jmlr/eye_dataset.py/0 | {
"file_path": "insightface/reconstruction/jmlr/eye_dataset.py",
"repo_id": "insightface",
"token_count": 1513
} | 130 |
import torch
from PIL import Image
from torchvision.transforms import transforms
from FaceHairMask import deeplab_xception_transfer
from FaceHairMask.graphonomy_inference import inference
import numpy as np
import cv2
def preprocess(image, size=256, normalize=1):
if size is None:
image = transforms.Resize((1024, 1024))(image)
else:
image = transforms.Resize((size, size))(image)
image = transforms.ToTensor()(image)
if normalize is not None:
image = transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])(image)
return image
def postProcess(faceMask, hairMask):
hairMask = hairMask.cpu().permute(1,2,0).detach().numpy()
faceMask = faceMask.cpu().permute(1,2,0).detach().numpy()
return faceMask, hairMask
class MaskExtractor:
def __init__(self):
#? Hair Face Extractors
self.net = deeplab_xception_transfer.deeplab_xception_transfer_projection_savemem(n_classes=20, hidden_layers=128, source_classes=7)
stateDict = torch.load("models/Graphonomy/inference.pth")
self.net.load_source_model(stateDict)
self.net.to("cuda")
self.net.eval()
def processInput4(self, image):
preprocessedImage = preprocess(image, size=256, normalize=1)
preprocessedImage = preprocessedImage.unsqueeze(0).to("cuda")
return preprocessedImage
def getMask(self, image):
preprocessedImage = self.processInput4(image)
_, hairMask, faceMask = inference(net=self.net, img=preprocessedImage, device="cuda")
faceMask, hairMask = postProcess(faceMask, hairMask)
return hairMask, faceMask
def main(self, image):
image = (image.pixels_with_channels_at_back()[:, :, ::-1] * 255).astype('uint8')
hairMask, faceMask = self.getMask(Image.fromarray(image))
hairMask = transforms.Resize((Image.fromarray(image).size[1], Image.fromarray(image).size[0]))(Image.fromarray((hairMask[:,:,0]* 255).astype('uint8')))
faceMask = transforms.Resize((Image.fromarray(image).size[1], Image.fromarray(image).size[0]))(Image.fromarray((faceMask[:,:,0]* 255).astype('uint8')))
        # Additional morphology: erode the face mask, dilate the hair mask, then remove the hair region from the face mask
hairMask = np.array(hairMask) / 255
faceMask = np.array(faceMask) / 255
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
faceMask = cv2.erode(faceMask, kernel, iterations=1)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (35, 35))
hairMask = cv2.dilate(hairMask, kernel, iterations=1)
faceMask = faceMask * (1 - hairMask)
        return hairMask, faceMask
| insightface/reconstruction/ostec/external/graphonomy/FaceHairMask/MaskExtractor.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/graphonomy/FaceHairMask/MaskExtractor.py",
"repo_id": "insightface",
"token_count": 1092
} | 131 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
FROM tensorflow/tensorflow:1.15.0-gpu-py3
RUN pip install scipy==1.3.3
RUN pip install requests==2.22.0
RUN pip install Pillow==6.2.1
| insightface/reconstruction/ostec/external/stylegan2/Dockerfile/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/Dockerfile",
"repo_id": "insightface",
"token_count": 125
} | 132 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Custom TensorFlow ops for efficient bias and activation."""
import os
import numpy as np
import tensorflow as tf
from .. import custom_ops
from ...util import EasyDict
def _get_plugin():
return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu')
#----------------------------------------------------------------------------
activation_funcs = {
'linear': EasyDict(func=lambda x, **_: x, def_alpha=None, def_gain=1.0, cuda_idx=1, ref='y', zero_2nd_grad=True),
'relu': EasyDict(func=lambda x, **_: tf.nn.relu(x), def_alpha=None, def_gain=np.sqrt(2), cuda_idx=2, ref='y', zero_2nd_grad=True),
'lrelu': EasyDict(func=lambda x, alpha, **_: tf.nn.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', zero_2nd_grad=True),
'tanh': EasyDict(func=lambda x, **_: tf.nn.tanh(x), def_alpha=None, def_gain=1.0, cuda_idx=4, ref='y', zero_2nd_grad=False),
'sigmoid': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x), def_alpha=None, def_gain=1.0, cuda_idx=5, ref='y', zero_2nd_grad=False),
'elu': EasyDict(func=lambda x, **_: tf.nn.elu(x), def_alpha=None, def_gain=1.0, cuda_idx=6, ref='y', zero_2nd_grad=False),
'selu': EasyDict(func=lambda x, **_: tf.nn.selu(x), def_alpha=None, def_gain=1.0, cuda_idx=7, ref='y', zero_2nd_grad=False),
'softplus': EasyDict(func=lambda x, **_: tf.nn.softplus(x), def_alpha=None, def_gain=1.0, cuda_idx=8, ref='y', zero_2nd_grad=False),
'swish': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x) * x, def_alpha=None, def_gain=np.sqrt(2), cuda_idx=9, ref='x', zero_2nd_grad=False),
}
#----------------------------------------------------------------------------
def fused_bias_act(x, b=None, axis=1, act='linear', alpha=None, gain=None, impl='cuda'):
r"""Fused bias and activation function.
Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
and scales the result by `gain`. Each of the steps is optional. In most cases,
the fused op is considerably more efficient than performing the same calculation
using standard TensorFlow ops. It supports first and second order gradients,
but not third order gradients.
Args:
x: Input activation tensor. Can have any shape, but if `b` is defined, the
dimension corresponding to `axis`, as well as the rank, must be known.
b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
as `x`. The shape must be known, and it must match the dimension of `x`
corresponding to `axis`.
axis: The dimension in `x` corresponding to the elements of `b`.
The value of `axis` is ignored if `b` is not specified.
act: Name of the activation function to evaluate, or `"linear"` to disable.
Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
See `activation_funcs` for a full list. `None` is not allowed.
alpha: Shape parameter for the activation function, or `None` to use the default.
gain: Scaling factor for the output tensor, or `None` to use default.
See `activation_funcs` for the default scaling of each activation function.
If unsure, consider specifying `1.0`.
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the same shape and datatype as `x`.
"""
impl_dict = {
'ref': _fused_bias_act_ref,
'cuda': _fused_bias_act_cuda,
}
return impl_dict[impl](x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)
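# Minimal usage sketch (illustrative only; the tensor shapes and variable names
# below are assumptions, not taken from the original codebase):
#
#   x = tf.random.normal([8, 128, 32, 32])         # NCHW activations
#   b = tf.zeros([128])                            # per-channel bias
#   y = fused_bias_act(x, b, axis=1, act='lrelu')  # bias + leaky ReLU + sqrt(2) gain
#
# Passing impl='ref' selects _fused_bias_act_ref() instead, which avoids the custom
# CUDA plugin at the cost of speed.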
#----------------------------------------------------------------------------
def _fused_bias_act_ref(x, b, axis, act, alpha, gain):
"""Slow reference implementation of `fused_bias_act()` using standard TensorFlow ops."""
# Validate arguments.
x = tf.convert_to_tensor(x)
b = tf.convert_to_tensor(b) if b is not None else tf.constant([], dtype=x.dtype)
act_spec = activation_funcs[act]
assert b.shape.rank == 1 and (b.shape[0] == 0 or b.shape[0] == x.shape[axis])
assert b.shape[0] == 0 or 0 <= axis < x.shape.rank
if alpha is None:
alpha = act_spec.def_alpha
if gain is None:
gain = act_spec.def_gain
# Add bias.
if b.shape[0] != 0:
x += tf.reshape(b, [-1 if i == axis else 1 for i in range(x.shape.rank)])
# Evaluate activation function.
x = act_spec.func(x, alpha=alpha)
# Scale by gain.
if gain != 1:
x *= gain
return x
#----------------------------------------------------------------------------
def _fused_bias_act_cuda(x, b, axis, act, alpha, gain):
"""Fast CUDA implementation of `fused_bias_act()` using custom ops."""
# Validate arguments.
x = tf.convert_to_tensor(x)
empty_tensor = tf.constant([], dtype=x.dtype)
b = tf.convert_to_tensor(b) if b is not None else empty_tensor
act_spec = activation_funcs[act]
assert b.shape.rank == 1 and (b.shape[0] == 0 or b.shape[0] == x.shape[axis])
assert b.shape[0] == 0 or 0 <= axis < x.shape.rank
if alpha is None:
alpha = act_spec.def_alpha
if gain is None:
gain = act_spec.def_gain
# Special cases.
if act == 'linear' and b is None and gain == 1.0:
return x
if act_spec.cuda_idx is None:
return _fused_bias_act_ref(x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)
# CUDA kernel.
cuda_kernel = _get_plugin().fused_bias_act
cuda_kwargs = dict(axis=axis, act=act_spec.cuda_idx, alpha=alpha, gain=gain)
# Forward pass: y = func(x, b).
def func_y(x, b):
y = cuda_kernel(x=x, b=b, ref=empty_tensor, grad=0, **cuda_kwargs)
y.set_shape(x.shape)
return y
# Backward pass: dx, db = grad(dy, x, y)
def grad_dx(dy, x, y):
ref = {'x': x, 'y': y}[act_spec.ref]
dx = cuda_kernel(x=dy, b=empty_tensor, ref=ref, grad=1, **cuda_kwargs)
dx.set_shape(x.shape)
return dx
def grad_db(dx):
if b.shape[0] == 0:
return empty_tensor
db = dx
if axis < x.shape.rank - 1:
db = tf.reduce_sum(db, list(range(axis + 1, x.shape.rank)))
if axis > 0:
db = tf.reduce_sum(db, list(range(axis)))
db.set_shape(b.shape)
return db
# Second order gradients: d_dy, d_x = grad2(d_dx, d_db, x, y)
def grad2_d_dy(d_dx, d_db, x, y):
ref = {'x': x, 'y': y}[act_spec.ref]
d_dy = cuda_kernel(x=d_dx, b=d_db, ref=ref, grad=1, **cuda_kwargs)
d_dy.set_shape(x.shape)
return d_dy
def grad2_d_x(d_dx, d_db, x, y):
ref = {'x': x, 'y': y}[act_spec.ref]
d_x = cuda_kernel(x=d_dx, b=d_db, ref=ref, grad=2, **cuda_kwargs)
d_x.set_shape(x.shape)
return d_x
# Fast version for piecewise-linear activation funcs.
@tf.custom_gradient
def func_zero_2nd_grad(x, b):
y = func_y(x, b)
@tf.custom_gradient
def grad(dy):
dx = grad_dx(dy, x, y)
db = grad_db(dx)
def grad2(d_dx, d_db):
d_dy = grad2_d_dy(d_dx, d_db, x, y)
return d_dy
return (dx, db), grad2
return y, grad
# Slow version for general activation funcs.
@tf.custom_gradient
def func_nonzero_2nd_grad(x, b):
y = func_y(x, b)
def grad_wrap(dy):
@tf.custom_gradient
def grad_impl(dy, x):
dx = grad_dx(dy, x, y)
db = grad_db(dx)
def grad2(d_dx, d_db):
d_dy = grad2_d_dy(d_dx, d_db, x, y)
d_x = grad2_d_x(d_dx, d_db, x, y)
return d_dy, d_x
return (dx, db), grad2
return grad_impl(dy, x)
return y, grad_wrap
# Which version to use?
if act_spec.zero_2nd_grad:
return func_zero_2nd_grad(x, b)
return func_nonzero_2nd_grad(x, b)
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/fused_bias_act.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/fused_bias_act.py",
"repo_id": "insightface",
"token_count": 3974
} | 133 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Default metric definitions."""
from dnnlib import EasyDict
#----------------------------------------------------------------------------
metric_defaults = EasyDict([(args.name, args) for args in [
EasyDict(name='fid50k', func_name='metrics.frechet_inception_distance.FID', num_images=50000, minibatch_per_gpu=8),
EasyDict(name='is50k', func_name='metrics.inception_score.IS', num_images=50000, num_splits=10, minibatch_per_gpu=8),
EasyDict(name='ppl_zfull', func_name='metrics.perceptual_path_length.PPL', num_samples=50000, epsilon=1e-4, space='z', sampling='full', crop=True, minibatch_per_gpu=4, Gs_overrides=dict(dtype='float32', mapping_dtype='float32')),
EasyDict(name='ppl_wfull', func_name='metrics.perceptual_path_length.PPL', num_samples=50000, epsilon=1e-4, space='w', sampling='full', crop=True, minibatch_per_gpu=4, Gs_overrides=dict(dtype='float32', mapping_dtype='float32')),
EasyDict(name='ppl_zend', func_name='metrics.perceptual_path_length.PPL', num_samples=50000, epsilon=1e-4, space='z', sampling='end', crop=True, minibatch_per_gpu=4, Gs_overrides=dict(dtype='float32', mapping_dtype='float32')),
EasyDict(name='ppl_wend', func_name='metrics.perceptual_path_length.PPL', num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=True, minibatch_per_gpu=4, Gs_overrides=dict(dtype='float32', mapping_dtype='float32')),
EasyDict(name='ppl2_wend', func_name='metrics.perceptual_path_length.PPL', num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=False, minibatch_per_gpu=4, Gs_overrides=dict(dtype='float32', mapping_dtype='float32')),
EasyDict(name='ls', func_name='metrics.linear_separability.LS', num_samples=200000, num_keep=100000, attrib_indices=range(40), minibatch_per_gpu=4),
EasyDict(name='pr50k3', func_name='metrics.precision_recall.PR', num_images=50000, nhood_size=3, minibatch_per_gpu=8, row_batch_size=10000, col_batch_size=10000),
]])
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/metrics/metric_defaults.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/metrics/metric_defaults.py",
"repo_id": "insightface",
"token_count": 905
} | 134 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Main training script."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
from training import dataset
from training import misc
from metrics import metric_base
#----------------------------------------------------------------------------
# Just-in-time processing of training images before feeding them to the networks.
def process_reals(x, labels, lod, mirror_augment, drange_data, drange_net):
with tf.name_scope('DynamicRange'):
x = tf.cast(x, tf.float32)
x = misc.adjust_dynamic_range(x, drange_data, drange_net)
if mirror_augment:
with tf.name_scope('MirrorAugment'):
x = tf.where(tf.random_uniform([tf.shape(x)[0]]) < 0.5, x, tf.reverse(x, [3]))
with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.
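        # (Added note) The reshape / reduce_mean / tile sequence below is a 2x box
        # downsample followed by a nearest-neighbour upsample; lerping between x and
        # that blurred copy by the fractional part of lod gives the smooth crossfade.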
s = tf.shape(x)
y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])
y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
y = tf.tile(y, [1, 1, 1, 2, 1, 2])
y = tf.reshape(y, [-1, s[1], s[2], s[3]])
x = tflib.lerp(x, y, lod - tf.floor(lod))
with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.
s = tf.shape(x)
factor = tf.cast(2 ** tf.floor(lod), tf.int32)
x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
return x, labels
#----------------------------------------------------------------------------
# Evaluate time-varying training parameters.
def training_schedule(
cur_nimg,
training_set,
lod_initial_resolution = None, # Image resolution used at the beginning.
lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution.
lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers.
minibatch_size_base = 32, # Global minibatch size.
minibatch_size_dict = {}, # Resolution-specific overrides.
minibatch_gpu_base = 4, # Number of samples processed at a time by one GPU.
minibatch_gpu_dict = {}, # Resolution-specific overrides.
G_lrate_base = 0.002, # Learning rate for the generator.
G_lrate_dict = {}, # Resolution-specific overrides.
D_lrate_base = 0.002, # Learning rate for the discriminator.
D_lrate_dict = {}, # Resolution-specific overrides.
lrate_rampup_kimg = 0, # Duration of learning rate ramp-up.
tick_kimg_base = 4, # Default interval of progress snapshots.
tick_kimg_dict = {8:28, 16:24, 32:20, 64:16, 128:12, 256:8, 512:6, 1024:4}): # Resolution-specific overrides.
# Initialize result dict.
s = dnnlib.EasyDict()
s.kimg = cur_nimg / 1000.0
# Training phase.
phase_dur = lod_training_kimg + lod_transition_kimg
phase_idx = int(np.floor(s.kimg / phase_dur)) if phase_dur > 0 else 0
phase_kimg = s.kimg - phase_idx * phase_dur
# Level-of-detail and resolution.
if lod_initial_resolution is None:
s.lod = 0.0
else:
s.lod = training_set.resolution_log2
s.lod -= np.floor(np.log2(lod_initial_resolution))
s.lod -= phase_idx
if lod_transition_kimg > 0:
s.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg
s.lod = max(s.lod, 0.0)
s.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(s.lod)))
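    # (Added worked example, illustrative numbers only) For a 1024x1024 dataset
    # (resolution_log2 = 10), lod_initial_resolution = 8 and the default 600 + 600 kimg
    # phases, training starts at lod = 7 (8x8 images); the resolution doubles every
    # 1200 kimg, with the last 600 kimg of each phase spent fading in the new layers.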
# Minibatch size.
s.minibatch_size = minibatch_size_dict.get(s.resolution, minibatch_size_base)
s.minibatch_gpu = minibatch_gpu_dict.get(s.resolution, minibatch_gpu_base)
# Learning rate.
s.G_lrate = G_lrate_dict.get(s.resolution, G_lrate_base)
s.D_lrate = D_lrate_dict.get(s.resolution, D_lrate_base)
if lrate_rampup_kimg > 0:
rampup = min(s.kimg / lrate_rampup_kimg, 1.0)
s.G_lrate *= rampup
s.D_lrate *= rampup
# Other parameters.
s.tick_kimg = tick_kimg_dict.get(s.resolution, tick_kimg_base)
return s
#----------------------------------------------------------------------------
# Main training script.
def training_loop(
G_args = {}, # Options for generator network.
D_args = {}, # Options for discriminator network.
G_opt_args = {}, # Options for generator optimizer.
D_opt_args = {}, # Options for discriminator optimizer.
G_loss_args = {}, # Options for generator loss.
D_loss_args = {}, # Options for discriminator loss.
dataset_args = {}, # Options for dataset.load_dataset().
sched_args = {}, # Options for train.TrainingSchedule.
grid_args = {}, # Options for train.setup_snapshot_image_grid().
metric_arg_list = [], # Options for MetricGroup.
tf_config = {}, # Options for tflib.init_tf().
data_dir = None, # Directory to load datasets from.
G_smoothing_kimg = 10.0, # Half-life of the running average of generator weights.
minibatch_repeats = 4, # Number of minibatches to run before adjusting training parameters.
lazy_regularization = True, # Perform regularization as a separate training step?
    G_reg_interval          = 4,        # How often to perform regularization for G? Ignored if lazy_regularization=False.
    D_reg_interval          = 16,       # How often to perform regularization for D? Ignored if lazy_regularization=False.
reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced?
total_kimg = 25000, # Total length of the training, measured in thousands of real images.
mirror_augment = False, # Enable mirror augment?
drange_net = [-1,1], # Dynamic range used when feeding image data to the networks.
image_snapshot_ticks = 50, # How often to save image snapshots? None = only save 'reals.png' and 'fakes-init.png'.
network_snapshot_ticks = 50, # How often to save network snapshots? None = only save 'networks-final.pkl'.
save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file?
save_weight_histograms = False, # Include weight histograms in the tfevents file?
resume_pkl = None, # Network pickle to resume training from, None = train from scratch.
resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule.
resume_time = 0.0, # Assumed wallclock time at the beginning. Affects reporting.
resume_with_new_nets = False): # Construct new networks according to G_args and D_args before resuming training?
# Initialize dnnlib and TensorFlow.
tflib.init_tf(tf_config)
num_gpus = dnnlib.submit_config.num_gpus
# Load training set.
training_set = dataset.load_dataset(data_dir=dnnlib.convert_path(data_dir), verbose=True, **dataset_args)
grid_size, grid_reals, grid_labels = misc.setup_snapshot_image_grid(training_set, **grid_args)
misc.save_image_grid(grid_reals, dnnlib.make_run_dir_path('reals.png'), drange=training_set.dynamic_range, grid_size=grid_size)
# Construct or load networks.
with tf.device('/gpu:0'):
if resume_pkl is None or resume_with_new_nets:
print('Constructing networks...')
G = tflib.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **G_args)
D = tflib.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **D_args)
Gs = G.clone('Gs')
if resume_pkl is not None:
print('Loading networks from "%s"...' % resume_pkl)
rG, rD, rGs = misc.load_pkl(resume_pkl)
if resume_with_new_nets: G.copy_vars_from(rG); D.copy_vars_from(rD); Gs.copy_vars_from(rGs)
else: G = rG; D = rD; Gs = rGs
# Print layers and generate initial image snapshot.
G.print_layers(); D.print_layers()
sched = training_schedule(cur_nimg=total_kimg*1000, training_set=training_set, **sched_args)
grid_latents = np.random.randn(np.prod(grid_size), *G.input_shape[1:])
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch_gpu)
misc.save_image_grid(grid_fakes, dnnlib.make_run_dir_path('fakes_init.png'), drange=drange_net, grid_size=grid_size)
# Setup training inputs.
print('Building TensorFlow graph...')
with tf.name_scope('Inputs'), tf.device('/cpu:0'):
lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[])
lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[])
minibatch_size_in = tf.placeholder(tf.int32, name='minibatch_size_in', shape=[])
minibatch_gpu_in = tf.placeholder(tf.int32, name='minibatch_gpu_in', shape=[])
minibatch_multiplier = minibatch_size_in // (minibatch_gpu_in * num_gpus)
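        # (Added note) Gs_beta is the per-update EMA decay for the weight-averaged
        # generator Gs, chosen so that the running average has a half-life of
        # G_smoothing_kimg thousand images: beta = 0.5 ** (minibatch_size / (G_smoothing_kimg * 1000)).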
Gs_beta = 0.5 ** tf.div(tf.cast(minibatch_size_in, tf.float32), G_smoothing_kimg * 1000.0) if G_smoothing_kimg > 0.0 else 0.0
# Setup optimizers.
G_opt_args = dict(G_opt_args)
D_opt_args = dict(D_opt_args)
for args, reg_interval in [(G_opt_args, G_reg_interval), (D_opt_args, D_reg_interval)]:
args['minibatch_multiplier'] = minibatch_multiplier
args['learning_rate'] = lrate_in
if lazy_regularization:
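            # (Added note) With lazy regularization the main loss is optimized
            # reg_interval times for every regularization step, so the learning rate and
            # Adam betas are rescaled by reg_interval / (reg_interval + 1) to keep the
            # effective optimization dynamics roughly unchanged.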
mb_ratio = reg_interval / (reg_interval + 1)
args['learning_rate'] *= mb_ratio
if 'beta1' in args: args['beta1'] **= mb_ratio
if 'beta2' in args: args['beta2'] **= mb_ratio
G_opt = tflib.Optimizer(name='TrainG', **G_opt_args)
D_opt = tflib.Optimizer(name='TrainD', **D_opt_args)
G_reg_opt = tflib.Optimizer(name='RegG', share=G_opt, **G_opt_args)
D_reg_opt = tflib.Optimizer(name='RegD', share=D_opt, **D_opt_args)
# Build training graph for each GPU.
data_fetch_ops = []
for gpu in range(num_gpus):
with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):
# Create GPU-specific shadow copies of G and D.
G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow')
D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow')
# Fetch training data via temporary variables.
with tf.name_scope('DataFetch'):
sched = training_schedule(cur_nimg=int(resume_kimg*1000), training_set=training_set, **sched_args)
reals_var = tf.Variable(name='reals', trainable=False, initial_value=tf.zeros([sched.minibatch_gpu] + training_set.shape))
labels_var = tf.Variable(name='labels', trainable=False, initial_value=tf.zeros([sched.minibatch_gpu, training_set.label_size]))
reals_write, labels_write = training_set.get_minibatch_tf()
reals_write, labels_write = process_reals(reals_write, labels_write, lod_in, mirror_augment, training_set.dynamic_range, drange_net)
reals_write = tf.concat([reals_write, reals_var[minibatch_gpu_in:]], axis=0)
labels_write = tf.concat([labels_write, labels_var[minibatch_gpu_in:]], axis=0)
data_fetch_ops += [tf.assign(reals_var, reals_write)]
data_fetch_ops += [tf.assign(labels_var, labels_write)]
reals_read = reals_var[:minibatch_gpu_in]
labels_read = labels_var[:minibatch_gpu_in]
# Evaluate loss functions.
lod_assign_ops = []
if 'lod' in G_gpu.vars: lod_assign_ops += [tf.assign(G_gpu.vars['lod'], lod_in)]
if 'lod' in D_gpu.vars: lod_assign_ops += [tf.assign(D_gpu.vars['lod'], lod_in)]
with tf.control_dependencies(lod_assign_ops):
with tf.name_scope('G_loss'):
G_loss, G_reg = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_gpu_in, **G_loss_args)
with tf.name_scope('D_loss'):
D_loss, D_reg = dnnlib.util.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, minibatch_size=minibatch_gpu_in, reals=reals_read, labels=labels_read, **D_loss_args)
# Register gradients.
if not lazy_regularization:
if G_reg is not None: G_loss += G_reg
if D_reg is not None: D_loss += D_reg
else:
if G_reg is not None: G_reg_opt.register_gradients(tf.reduce_mean(G_reg * G_reg_interval), G_gpu.trainables)
if D_reg is not None: D_reg_opt.register_gradients(tf.reduce_mean(D_reg * D_reg_interval), D_gpu.trainables)
G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables)
D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables)
# Setup training ops.
data_fetch_op = tf.group(*data_fetch_ops)
G_train_op = G_opt.apply_updates()
D_train_op = D_opt.apply_updates()
G_reg_op = G_reg_opt.apply_updates(allow_no_op=True)
D_reg_op = D_reg_opt.apply_updates(allow_no_op=True)
Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta)
# Finalize graph.
with tf.device('/gpu:0'):
try:
peak_gpu_mem_op = tf.contrib.memory_stats.MaxBytesInUse()
except tf.errors.NotFoundError:
peak_gpu_mem_op = tf.constant(0)
tflib.init_uninitialized_vars()
print('Initializing logs...')
summary_log = tf.summary.FileWriter(dnnlib.make_run_dir_path())
if save_tf_graph:
summary_log.add_graph(tf.get_default_graph())
if save_weight_histograms:
G.setup_weight_histograms(); D.setup_weight_histograms()
metrics = metric_base.MetricGroup(metric_arg_list)
print('Training for %d kimg...\n' % total_kimg)
dnnlib.RunContext.get().update('', cur_epoch=resume_kimg, max_epoch=total_kimg)
maintenance_time = dnnlib.RunContext.get().get_last_update_interval()
cur_nimg = int(resume_kimg * 1000)
cur_tick = -1
tick_start_nimg = cur_nimg
prev_lod = -1.0
running_mb_counter = 0
while cur_nimg < total_kimg * 1000:
if dnnlib.RunContext.get().should_stop(): break
# Choose training parameters and configure training ops.
sched = training_schedule(cur_nimg=cur_nimg, training_set=training_set, **sched_args)
assert sched.minibatch_size % (sched.minibatch_gpu * num_gpus) == 0
training_set.configure(sched.minibatch_gpu, sched.lod)
if reset_opt_for_new_lod:
if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod):
G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state()
prev_lod = sched.lod
# Run training ops.
feed_dict = {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_size_in: sched.minibatch_size, minibatch_gpu_in: sched.minibatch_gpu}
for _repeat in range(minibatch_repeats):
rounds = range(0, sched.minibatch_size, sched.minibatch_gpu * num_gpus)
run_G_reg = (lazy_regularization and running_mb_counter % G_reg_interval == 0)
run_D_reg = (lazy_regularization and running_mb_counter % D_reg_interval == 0)
cur_nimg += sched.minibatch_size
running_mb_counter += 1
# Fast path without gradient accumulation.
if len(rounds) == 1:
tflib.run([G_train_op, data_fetch_op], feed_dict)
if run_G_reg:
tflib.run(G_reg_op, feed_dict)
tflib.run([D_train_op, Gs_update_op], feed_dict)
if run_D_reg:
tflib.run(D_reg_op, feed_dict)
# Slow path with gradient accumulation.
else:
for _round in rounds:
tflib.run(G_train_op, feed_dict)
if run_G_reg:
for _round in rounds:
tflib.run(G_reg_op, feed_dict)
tflib.run(Gs_update_op, feed_dict)
for _round in rounds:
tflib.run(data_fetch_op, feed_dict)
tflib.run(D_train_op, feed_dict)
if run_D_reg:
for _round in rounds:
tflib.run(D_reg_op, feed_dict)
# Perform maintenance tasks once per tick.
done = (cur_nimg >= total_kimg * 1000)
if cur_tick < 0 or cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done:
cur_tick += 1
tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0
tick_start_nimg = cur_nimg
tick_time = dnnlib.RunContext.get().get_time_since_last_update()
total_time = dnnlib.RunContext.get().get_time_since_start() + resume_time
# Report progress.
print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %-6.1f gpumem %.1f' % (
autosummary('Progress/tick', cur_tick),
autosummary('Progress/kimg', cur_nimg / 1000.0),
autosummary('Progress/lod', sched.lod),
autosummary('Progress/minibatch', sched.minibatch_size),
dnnlib.util.format_time(autosummary('Timing/total_sec', total_time)),
autosummary('Timing/sec_per_tick', tick_time),
autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),
autosummary('Timing/maintenance_sec', maintenance_time),
autosummary('Resources/peak_gpu_mem_gb', peak_gpu_mem_op.eval() / 2**30)))
autosummary('Timing/total_hours', total_time / (60.0 * 60.0))
autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))
# Save snapshots.
if image_snapshot_ticks is not None and (cur_tick % image_snapshot_ticks == 0 or done):
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=sched.minibatch_gpu)
misc.save_image_grid(grid_fakes, dnnlib.make_run_dir_path('fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size)
if network_snapshot_ticks is not None and (cur_tick % network_snapshot_ticks == 0 or done):
pkl = dnnlib.make_run_dir_path('network-snapshot-%06d.pkl' % (cur_nimg // 1000))
misc.save_pkl((G, D, Gs), pkl)
metrics.run(pkl, run_dir=dnnlib.make_run_dir_path(), data_dir=dnnlib.convert_path(data_dir), num_gpus=num_gpus, tf_config=tf_config)
# Update summaries and RunContext.
metrics.update_autosummaries()
tflib.autosummary.save_summaries(summary_log, cur_nimg)
dnnlib.RunContext.get().update('%.2f' % sched.lod, cur_epoch=cur_nimg // 1000, max_epoch=total_kimg)
maintenance_time = dnnlib.RunContext.get().get_last_update_interval() - tick_time
# Save final snapshot.
misc.save_pkl((G, D, Gs), dnnlib.make_run_dir_path('network-final.pkl'))
# All done.
summary_log.close()
training_set.close()
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/training/training_loop.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/training/training_loop.py",
"repo_id": "insightface",
"token_count": 9171
} | 135 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe import params as P
import math
import numpy as np
from ._graph import Node, Graph
from MyCaffe import Function as myf
USE_DECONV_AS_UPSAMPLE = False
def _compare(a, b, encoding="utf8"): #type: (Text, Text, Text) -> bool
if isinstance(a, bytes):
a = a.decode(encoding)
if isinstance(b, bytes):
b = b.decode(encoding)
return a == b
def make_input(input):
name = input[0]
output = input[0]
output = [output]
shape = input[2]
shape = list(shape)
input_layer = myf("Input", name, [], output, input_param=dict(shape=dict(dim=shape)))
return input_layer
def _convert_conv(node, graph, err):
weight_name = node.inputs[1]
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
node_name = node.name
W = None
if weight_name in node.input_tensors:
W = node.input_tensors[weight_name]
else:
err.missing_initializer(node,
"Weight tensor: {} not found in the graph initializer".format(weight_name,))
is_deconv = False
if node.op_type.endswith("Transpose"):
is_deconv = True
bias_flag = False
bias = None
if len(node.inputs) > 2:
bias = node.input_tensors[node.inputs[2]]
bias_flag = True
dilations = node.attrs.get("dilations", [1, 1])
# groups = 1
groups = node.attrs.get("group", 1)
kernel_shape = node.attrs["kernel_shape"]
pads = node.attrs.get("pads", [0, 0, 0, 0])
strides = node.attrs["strides"]
layer = myf("Convolution", node_name, [input_name], [output_name],
kernel_h = kernel_shape[0],kernel_w = kernel_shape[1],
stride_h=strides[0], stride_w = strides[1], group = groups,
pad_h = pads[0], pad_w = pads[1],
num_output=W.shape[0], dilation = dilations[0], bias_term = bias_flag)
graph.channel_dims[output_name] = W.shape[0]
return layer
def _convert_relu(node,graph,err):
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
name = str(node.name)
if input_name==output_name:
inplace = True
else:
inplace = False
layer = myf("ReLU",name,[input_name],[output_name],in_place=inplace)
# l_top_relu1 = L.ReLU(l_bottom, name=name, in_place=True)
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_sigmoid(node,graph,err):
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
name = str(node.name)
if input_name==output_name:
inplace = True
else:
inplace = False
layer = myf("Sigmoid",name,[input_name],[output_name],in_place=inplace)
# l_top_relu1 = L.ReLU(l_bottom, name=name, in_place=True)
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_BatchNorm(node,graph,err):
epsilon = node.attrs.get("epsilon", 1e-5)
scale = node.input_tensors[node.inputs[1]]
bias = node.input_tensors[node.inputs[2]]
mean = node.input_tensors[node.inputs[3]]
var = node.input_tensors[node.inputs[4]]
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
if input_name==output_name:
inplace = True
else:
inplace = False
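    # (Added note) Caffe has no single fused batch-norm operator: the running
    # mean/variance are handled by a BatchNorm layer and the learned scale/shift by a
    # following in-place Scale layer, so one ONNX BatchNormalization node maps to two
    # Caffe layers here.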
bn_layer = myf("BatchNorm", node_name+"_bn",[input_name],[output_name],eps = epsilon, use_global_stats = True, in_place=inplace)
scale_layer = myf("Scale", node_name, [output_name],[output_name],in_place=True,bias_term=True)
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return bn_layer,scale_layer
def _convert_Add(node,graph,err):
input_name_list = [str(i) for i in node.inputs]
output_name = str(node.outputs[0])
node_name = node.name
max_dim = 0
for name in input_name_list:
if graph.channel_dims[name]>max_dim:
max_dim = graph.channel_dims[name]
if 'broadcast' in node.attrs:
if node.attrs['broadcast'] == 1:
input_node_number = len(input_name_list)
if input_node_number !=2:
                return err.unsupported_op_configuration(node, "Broadcast Add must have 2 inputs, not {}".format(input_node_number))
axis = node.attrs['axis']
flat_layer = myf("Flatten",node_name+'_flat',[input_name_list[1]],[output_name+'_flat'])
layer = myf("Bias", node_name, [input_name_list[0],output_name+'_flat'], [output_name], axis = axis)
# layer = myf("Bias", node_name, input_name_list, [output_name], bias_term = False, axis = axis)
graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
return flat_layer,layer
layer = myf("Eltwise",node_name,input_name_list,[output_name],operation=P.Eltwise.SUM)
graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
return layer
def _convert_Mul(node,graph,err):
input_name_list = [str(i) for i in node.inputs]
output_name = str(node.outputs[0])
node_name = node.name
print('Mul:', node.name, node.attrs, input_name_list, output_name)
if len(node.attrs)==0:
assert len(node.input_tensors)==1
assert len(input_name_list)==2
inp_tensor = node.input_tensors[input_name_list[1]]
scale_value = float(inp_tensor)
print(scale_value)
layer = myf("Scale", node_name, [input_name_list[0]], [output_name], bias_term = False,
scale_param = dict(filler = dict(value=scale_value), bias_term=False))
return layer
#layer = myf("Reshape", node_name, [input_name], [output_name], reshape_param = dict(shape=dict(dim=list(shape))))
#print(len(node.input_tensors))
# max_dim = 0
# for name in input_name_list:
# if graph.channel_dims[name]>max_dim:
# max_dim = graph.channel_dims[name]
if 'broadcast' in node.attrs:
if node.attrs['broadcast'] == 1:
input_node_number = len(input_name_list)
if input_node_number !=2:
                return err.unsupported_op_configuration(node, "Broadcast Mul must have 2 inputs, not {}".format(input_node_number))
axis = node.attrs['axis']
flat_layer = myf("Flatten",node_name+'_flat',[input_name_list[1]],[output_name+'_flat'])
layer = myf("Scale", node_name, [input_name_list[0],output_name+'_flat'], [output_name], bias_term = False, axis = axis)
graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
return flat_layer,layer
layer = myf("Eltwise",node_name,input_name_list,[output_name],operation=P.Eltwise.PROD)
graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
return layer
def _convert_Reshape(node,graph,err):
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
if len(node.inputs)==1:
shape = tuple(node.attrs.get('shape', ()))
else:
shape = tuple(node.input_tensors[node.inputs[1]])
# if shape == ():
#print('reshape to', shape)
if input_name==output_name:
inplace = True
else:
inplace = False
graph.channel_dims[output_name] = shape[1]
layer = myf("Reshape", node_name, [input_name], [output_name], reshape_param = dict(shape=dict(dim=list(shape))))
return layer
#if len(shape) == 2:
# layer = myf("Flatten",node_name,[input_name],[output_name],in_place=inplace)
# graph.channel_dims[output_name] = shape[1]
# return layer
#elif len(shape) == 4:
# graph.channel_dims[output_name] = shape[1]
# layer = myf("Reshape", node_name, [input_name], [output_name], reshape_param = dict(shape=dict(dim=list(shape))))
# return layer
#else:
    # return err.unsupported_op_configuration(node, "Reshape dimension number shall be 2 or 4")
def _convert_Flatten(node,graph,err):
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
# shape = tuple(node.attrs.get('shape', ()))
if input_name==output_name:
inplace = True
else:
inplace = False
layer = myf("Flatten", node_name, [input_name], [output_name], in_place=inplace)
# graph.channel_dims[output_name] = shape[1]
return layer
def _convert_pool(node,graph,err):
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
if node.op_type.endswith("MaxPool"):
pool_type = P.Pooling.MAX
elif node.op_type.endswith("AveragePool"):
pool_type = P.Pooling.AVE
else:
return err.unsupported_op_configuration(node, "Unsupported pool type")
kernel_shape = node.attrs["kernel_shape"]
strides = node.attrs.get('strides', [1, 1])
pads = node.attrs.get('pads', [0, 0, 0, 0])
layer = myf("Pooling",node_name,[input_name],[output_name],pooling_param = dict(pool = pool_type,
kernel_h = kernel_shape[0],
kernel_w = kernel_shape[1],
stride_h = strides[0],
stride_w = strides[1],
pad_h = pads[0],
pad_w = pads[1]))
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_dropout(node,graph,err):
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
ratio = node.attrs.get('ratio', 0.5)
layer = myf("Dropout", node_name, [input_name], [output_name], dropout_ratio =ratio)
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_gemm(node,graph,err):
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
weight_name = node.inputs[1]
if weight_name in node.input_tensors:
W = node.input_tensors[weight_name]
else:
err.missing_initializer(node,
"Weight tensor: {} not found in the graph initializer".format(weight_name, ))
return
if node.attrs["broadcast"] != 1 or node.attrs["transB"] != 1:
return err.unsupported_op_configuration(node,"Gemm is supported only for inner_product layer")
b = None
bias_flag = False
if len(node.inputs) > 2:
b = node.input_tensors[node.inputs[2]]
if len(W.shape) != 2 or (b is not None and len(b.shape) != 1):
return err.unsupported_op_configuration(node, "Gemm is supported only for inner_product layer")
if b is not None:
bias_flag = True
if W.shape[0] != b.shape[0]:
return err.unsupported_op_configuration(node,
"Gemm is supported only for inner_product layer")
layer = myf("InnerProduct",node_name,[input_name],[output_name],num_output = W.shape[0],bias_term = bias_flag)
graph.channel_dims[output_name] = W.shape[0]
return layer
def _convert_upsample(node,graph,err):
factor = int(node.attrs["height_scale"])
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
# input_shape = graph.shape_dict[input_name]
# channels = input_shape[1]
channels = graph.channel_dims[input_name]
pad = int(math.ceil((factor - 1) / 2.))
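    # (Added note) e.g. factor = 2 in the bilinear branch below gives kernel_size = 4,
    # stride = 2, pad = 1 -- the usual fixed bilinear-upsampling deconvolution setup.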
# layer = myf("Deconvolution", node_name, [input_name], [output_name],
# kernel_size=2 * factor - factor % 2,
# stride=factor, group=channels,
# pad = pad, num_output=channels, bias_term = False)
mode = node.attrs["mode"]
#https://github.com/pytorch/pytorch/issues/6900
if mode=="bilinear":
layer = myf("Deconvolution", node_name, [input_name], [output_name],
convolution_param=dict(
num_output=channels,
kernel_size=2 * factor - factor % 2,
stride=factor,
pad=pad,
group=channels,
bias_term=False,
weight_filler=dict(type="bilinear_upsampling")
))
else:
layer = myf("Deconvolution", node_name, [input_name], [output_name],
convolution_param=dict(
num_output=channels,
kernel_size=factor,
stride=factor,
group=channels,
bias_term=False,
))
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_resize(node,graph,err):
if not USE_DECONV_AS_UPSAMPLE:
#print(node, graph)
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
#print(node.attrs, node_name, input_name, output_name)
layer = myf("Upsample", node_name, [input_name], [output_name],
upsample_param=dict(
scale = 2
))
graph.channel_dims[output_name] = graph.channel_dims[input_name]
else:
print('add resize deconv operator')
factor = 2
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
# input_shape = graph.shape_dict[input_name]
# channels = input_shape[1]
channels = graph.channel_dims[input_name]
pad = int(math.ceil((factor - 1) / 2.))
layer = myf("Deconvolution", node_name, [input_name], [output_name],
convolution_param=dict(
num_output=channels,
kernel_size=factor,
stride=factor,
group=channels,
bias_term=False,
))
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_transpose(node,graph,err):
#print(node, graph)
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
#print(node.attrs, node_name, input_name, output_name)
layer = myf("Permute", node_name, [input_name], [output_name],
permute_param=dict(
order = node.attrs['perm']
))
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_softmax(node,graph,err):
#print(node, graph)
node_name = node.name
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
#print(node.attrs, node_name, input_name, output_name)
layer = myf("Softmax", node_name, [input_name], [output_name],
softmax_param=dict(
axis = node.attrs['axis']
))
graph.channel_dims[output_name] = graph.channel_dims[input_name]
return layer
def _convert_concat(node,graph,err):
node_name = node.name
input_name_list = [str(i) for i in node.inputs]
output_name = str(node.outputs[0])
axis = node.attrs.get("axis", 1)
layer = myf('Concat', node_name, input_name_list, [output_name], axis = axis)
if axis == 1:
dim = 0
for name in input_name_list:
dim+=graph.channel_dims[name]
graph.channel_dims[output_name] = dim
else:
graph.channel_dims[output_name] = graph.channel_dims[input_name_list[0]]
return layer
def _convert_conv_transpose(node,graph,err):
input_name = str(node.inputs[0])
output_name = str(node.outputs[0])
node_name = node.name
weight_name = node.inputs[1]
W = None
if weight_name in node.input_tensors:
W = node.input_tensors[weight_name]
else:
err.missing_initializer(node,
"Weight tensor: {} not found in the graph initializer".format(weight_name,))
bias_flag = False
bias = None
if len(node.inputs) > 2:
bias = node.input_tensors[node.inputs[2]]
bias_flag = True
dilations = node.attrs.get("dilations", [1, 1])
# groups = 1
groups = node.attrs.get("group", 1)
kernel_shape = node.attrs["kernel_shape"]
pads = node.attrs.get("pads", [0, 0, 0, 0])
strides = node.attrs["strides"]
layer = myf('Deconvolution', node_name, [input_name], [output_name],
convolution_param=dict(
num_output=W.shape[1],
kernel_h=kernel_shape[0],kernel_w=kernel_shape[1],
stride_h=strides[0],stride_w = strides[1],
group=groups,
pad_h=pads[0], pad_w=pads[1],
bias_term=bias_flag,
))
graph.channel_dims[output_name] = W.shape[1]
return layer
# l_top = L.Deconvolution(
# l_bottom,
# name=name,
# convolution_param=dict(
# num_output=W.shape[1],
# kernel_h=kernel_h,
# kernel_w=kernel_w,
# stride_h=stride_h,
# stride_w=stride_w,
# pad_h=pad_h,
# pad_w=pad_w,
# group=groups,
# bias_term=bias_term))
_ONNX_NODE_REGISTRY = {
"Conv": _convert_conv,
"Relu": _convert_relu,
"BatchNormalization": _convert_BatchNorm,
"Add": _convert_Add,
"Mul": _convert_Mul,
"Reshape": _convert_Reshape,
"MaxPool": _convert_pool,
"AveragePool": _convert_pool,
"Dropout": _convert_dropout,
"Gemm": _convert_gemm,
"Upsample": _convert_upsample,
"Concat": _convert_concat,
"ConvTranspose": _convert_conv_transpose,
"Sigmoid": _convert_sigmoid,
"Flatten": _convert_Flatten,
"Resize": _convert_resize,
"Transpose": _convert_transpose,
"Softmax": _convert_softmax,
}
| insightface/tools/onnx2caffe/onnx2caffe/_operators.py/0 | {
"file_path": "insightface/tools/onnx2caffe/onnx2caffe/_operators.py",
"repo_id": "insightface",
"token_count": 8816
} | 136 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ExternalStorageConfigurationManager" enabled="true" />
<component name="MavenProjectsManager">
<option name="originalFiles">
<list>
<option value="$PROJECT_DIR$/pom.xml" />
</list>
</option>
</component>
<component name="ProjectRootManager" version="2" languageLevel="JDK_21" default="true" project-jdk-name="graalvm-21" project-jdk-type="JavaSDK" />
</project> | mybatis-native-demo/.idea/misc.xml/0 | {
"file_path": "mybatis-native-demo/.idea/misc.xml",
"repo_id": "mybatis-native-demo",
"token_count": 171
} | 137 |
package com.example.nativedemo;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
@SpringBootApplication(proxyBeanMethods = false)
public class NativeDemoApplication {
public static void main(String[] args) {
SpringApplication.run(NativeDemoApplication.class, args);
}
}
| mybatis-native-demo/src/main/java/com/example/nativedemo/NativeDemoApplication.java/0 | {
"file_path": "mybatis-native-demo/src/main/java/com/example/nativedemo/NativeDemoApplication.java",
"repo_id": "mybatis-native-demo",
"token_count": 106
} | 138 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/ch.qos.logback/logback-classic/1.4.1/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/ch.qos.logback/logback-classic/1.4.1/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 139 |
[
{
"condition": {
"typeReachable": "org.h2.mvstore.type.MetaType"
},
"name": "org.h2.mvstore.db.LobStorageMap$BlobMeta$Type",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "org.h2.mvstore.type.MetaType"
},
"name": "org.h2.mvstore.db.LobStorageMap$BlobReference$Type",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "org.h2.mvstore.type.MetaType"
},
"name": "org.h2.mvstore.db.NullValueDataType",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "org.h2.mvstore.tx.VersionedValueType$Factory"
},
"name": "org.h2.mvstore.db.RowDataType$Factory",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.mvstore.type.MetaType"
},
"name": "org.h2.mvstore.tx.VersionedValueType$Factory",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.mvstore.type.MetaType"
},
"name": "org.h2.mvstore.type.ByteArrayDataType",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "org.h2.mvstore.type.MetaType"
},
"name": "org.h2.mvstore.type.LongDataType",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.async.FilePathAsync",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.disk.FilePathDisk",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.mem.FilePathMem",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.mem.FilePathMemLZF",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.niomapped.FilePathNioMapped",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.niomem.FilePathNioMem",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.niomem.FilePathNioMemLZF",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.retry.FilePathRetryOnInterrupt",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.split.FilePathSplit",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.store.fs.FilePath"
},
"name": "org.h2.store.fs.zip.FilePathZip",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.util.MathUtils"
},
"name": "sun.security.provider.SHA",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.h2.util.MathUtils"
},
"name": "sun.security.provider.SecureRandom",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.h2database/h2/2.1.210/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.h2database/h2/2.1.210/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 2317
} | 140 |
[
{
"latest": true,
"override": true,
"metadata-version": "4.1.80.Final",
"module": "io.netty:netty-codec-http2",
"tested-versions": [
"4.1.80.Final"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.netty/netty-codec-http2/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.netty/netty-codec-http2/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 99
} | 141 |
[
{
"condition": {
"typeReachable": "io.opentelemetry.exporter.logging.LoggingSpanExporter"
},
"name": "io.opentelemetry.exporter.logging.LoggingSpanExporter"
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-logging/1.19.0/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-logging/1.19.0/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 83
} | 142 |
{
"resources": {
"includes": [
{
"condition": {
"typeReachable": "io.undertow.Version"
},
"pattern": "\\Qio/undertow/version.properties\\E"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"pattern": "\\Qorg/jboss/threads/Version.properties\\E"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"pattern": "\\Qorg/xnio/Version.properties\\E"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"pattern": "\\Qorg/xnio/nio/Version.properties\\E"
}
]
}
}
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.undertow/undertow-core/2.2.19.Final/resource-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.undertow/undertow-core/2.2.19.Final/resource-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 377
} | 143 |
[
{
"latest": true,
"metadata-version": "1.23.0",
"module": "org.apache.commons:commons-compress",
"tested-versions": [
"1.23.0"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.commons/commons-compress/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.commons/commons-compress/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 84
} | 144 |
[
{
"latest": true,
"metadata-version": "10.0.20",
"module": "org.apache.tomcat.embed:tomcat-embed-core",
"tested-versions": [
"10.0.20"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.tomcat.embed/tomcat-embed-core/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.tomcat.embed/tomcat-embed-core/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 87
} | 145 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jgit/org.eclipse.jgit/6.5.0.202303070854-r/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jgit/org.eclipse.jgit/6.5.0.202303070854-r/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 146 |
{
"bundles": [
{
"name": "com.sun.org.apache.xerces.internal.impl.msg.SAXMessages",
"locales": [
"und",
"zh-CN"
]
},
{
"name": "com.sun.org.apache.xerces.internal.impl.xpath.regex.message",
"locales": [
"und",
"zh-CN"
]
},
{
"name": "com.sun.org.apache.xml.internal.serializer.XMLEntities",
"locales": [
"und"
]
}
],
"resources": {
"includes": [
{
"condition": {
"typeReachable": "org.ehcache.config.builders.UserManagedCacheBuilder"
},
"pattern": "\\QMETA-INF/services/org.ehcache.core.spi.service.ServiceFactory\\E"
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"pattern": "\\QMETA-INF/services/org.ehcache.core.spi.service.ServiceFactory\\E"
},
{
"condition": {
"typeReachable": "org.ehcache.core.spi.ServiceLocator$DependencySet"
},
"pattern": "\\QMETA-INF/services/org.ehcache.core.spi.service.ServiceFactory\\E"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlUtil"
},
"pattern": "\\QMETA-INF/services/org.ehcache.xml.CacheManagerServiceConfigurationParser\\E"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlUtil"
},
"pattern": "\\QMETA-INF/services/org.ehcache.xml.CacheServiceConfigurationParser\\E"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.internal.Jsr107Parser"
},
"pattern": "\\Qehcache-107-ext.xsd\\E"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"pattern": "\\Qehcache-core.xsd\\E"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"pattern": "\\Qehcache-multi.xsd\\E"
}
]
}
}
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/resource-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/resource-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 1059
} | 147 |
[
{
"condition": {
"typeReachable": "java.util.ServiceLoader"
},
"name": "java.util.ServiceLoader$Provider",
"allPublicMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "[Ljakarta.xml.bind.annotation.XmlElement;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "[Ljakarta.xml.bind.annotation.XmlElementRef;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.model.Column"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversService"
},
"name": "org.hibernate.envers.boot.internal.LegacyModifiedColumnNamingStrategy",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversService"
},
"name": "org.hibernate.envers.boot.internal.ImprovedModifiedColumnNamingStrategy",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversService"
},
"name": "org.hibernate.envers.strategy.DefaultAuditStrategy",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversService"
},
"name": " org.hibernate.envers.strategy.internal.DefaultAuditStrategy",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversService"
},
"name": "org.hibernate.envers.strategy.ValidityAuditStrategy",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversService"
},
"name": " org.hibernate.envers.strategy.internal.ValidityAuditStrategy",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.Configuration"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AbstractCollectionMetadataGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AuditEntityNameRegister"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AuditMetadataGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.CollectionMappedByResolver"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.IdMetadataGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.JoinColumnCollectionMetadataGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.MiddleTableCollectionMetadataGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.enhanced.OrderedSequenceStructure$OrderedSequence"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPostInsertEventListenerImpl"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.exception.RevisionDoesNotExistException"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.id.QueryParameterData"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.id.VirtualEntitySingleIdMapper"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.MiddleMapKeyEnumeratedComponentMapper"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.component.MiddleEmbeddableComponentMapper"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.component.MiddleMapElementNotKeyComponentMapper"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.component.MiddleSimpleComponentMapper"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.CollectionProxy"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.query.AbstractRelationQueryGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.query.OneAuditEntityQueryGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.query.OneEntityQueryGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.query.ThreeEntityQueryGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.query.TwoEntityOneAuditedQueryGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.query.TwoEntityQueryGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.DefaultRevisionInfoGenerator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoQueryCreator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionTimestampValueResolver"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.AuditProcess"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.ArgumentsTools"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.MappingTools"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.query.Parameters"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.query.QueryBuilder"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.AuditQueryCreator"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.criteria.AggregatedAuditExpression"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.criteria.MatchMode$4"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.criteria.internal.CriteriaTools"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.criteria.internal.SimpleAuditExpression"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditAssociationQuery"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.DefaultAuditStrategy"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.ValidityAuditStrategy"
},
"name": "[Ljava.lang.Class;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "[Ljava.lang.Object;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "[Ljava.lang.annotation.Annotation;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.enhanced.OrderedSequenceStructure$OrderedSequence"
},
"name": "[Ljava.lang.invoke.LambdaForm$Name;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.DefaultAuditStrategy"
},
"name": "[Ljava.lang.invoke.LambdaForm$Name;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.CollectionMappedByResolver"
},
"name": "[Ljava.util.Iterator;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.PersistentPropertiesSource$1"
},
"name": "[Ljava.util.Iterator;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "[Ljava.util.Map$Entry;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "[Ljava.util.Map$Entry;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "[Lorg.antlr.v4.runtime.atn.ATNConfig;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoQueryCreator"
},
"name": "[Lorg.antlr.v4.runtime.atn.ATNConfig;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.query.QueryBuilder"
},
"name": "[Lorg.antlr.v4.runtime.atn.ATNConfig;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "[Lorg.antlr.v4.runtime.atn.ATNConfig;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "[Lorg.antlr.v4.runtime.atn.ATNConfig;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "[Lorg.antlr.v4.runtime.atn.PredictionContext;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoQueryCreator"
},
"name": "[Lorg.antlr.v4.runtime.atn.PredictionContext;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.query.QueryBuilder"
},
"name": "[Lorg.antlr.v4.runtime.atn.PredictionContext;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "[Lorg.antlr.v4.runtime.atn.PredictionContext;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "[Lorg.antlr.v4.runtime.atn.PredictionContext;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "[Lorg.antlr.v4.runtime.dfa.DFAState$PredPrediction;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "[Lorg.antlr.v4.runtime.dfa.DFAState;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoQueryCreator"
},
"name": "[Lorg.antlr.v4.runtime.dfa.DFAState;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "[Lorg.antlr.v4.runtime.dfa.DFAState;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "[Lorg.antlr.v4.runtime.dfa.DFAState;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.BaseEnversCollectionEventListener"
},
"name": "[Lorg.h2.expression.Expression;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPostInsertEventListenerImpl"
},
"name": "[Lorg.h2.expression.Expression;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPreUpdateEventListenerImpl"
},
"name": "[Lorg.h2.expression.Expression;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "[Lorg.h2.expression.Expression;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.DefaultRevisionInfoGenerator"
},
"name": "[Lorg.h2.expression.Expression;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.AuditProcess"
},
"name": "[Lorg.h2.expression.Expression;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "[Lorg.h2.expression.Expression;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.AuditProcess"
},
"name": "[Lorg.h2.table.Column;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.BaseEnversCollectionEventListener"
},
"name": "[Lorg.h2.table.TableFilter;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPostInsertEventListenerImpl"
},
"name": "[Lorg.h2.table.TableFilter;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPreUpdateEventListenerImpl"
},
"name": "[Lorg.h2.table.TableFilter;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "[Lorg.h2.table.TableFilter;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.DefaultRevisionInfoGenerator"
},
"name": "[Lorg.h2.table.TableFilter;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.AuditProcess"
},
"name": "[Lorg.h2.table.TableFilter;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "[Lorg.h2.table.TableFilter;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.ValidityAuditStrategy"
},
"name": "[Lorg.h2.value.Value;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversIntegrator"
},
"name": "[Lorg.hibernate.event.spi.PostCollectionRecreateEventListener;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversIntegrator"
},
"name": "[Lorg.hibernate.event.spi.PostDeleteEventListener;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversIntegrator"
},
"name": "[Lorg.hibernate.event.spi.PostInsertEventListener;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversIntegrator"
},
"name": "[Lorg.hibernate.event.spi.PostUpdateEventListener;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversIntegrator"
},
"name": "[Lorg.hibernate.event.spi.PreCollectionRemoveEventListener;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversIntegrator"
},
"name": "[Lorg.hibernate.event.spi.PreCollectionUpdateEventListener;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversIntegrator"
},
"name": "[Lorg.hibernate.event.spi.PreUpdateEventListener;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.BaseEnversCollectionEventListener"
},
"name": "[Lorg.hibernate.sql.ast.spi.SqlSelection;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPreUpdateEventListenerImpl"
},
"name": "[Lorg.hibernate.sql.ast.spi.SqlSelection;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "[Lorg.hibernate.sql.ast.spi.SqlSelection;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.EntityTools"
},
"name": "[Lorg.hibernate.sql.ast.spi.SqlSelection;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "[Lorg.hibernate.sql.ast.spi.SqlSelection;"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.AuditedPropertiesReader"
},
"name": "ee.estonia.entities.Child",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPostCollectionRecreateEventListenerImpl"
},
"name": "ee.estonia.entities.Child",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "id"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.CollectionProxy"
},
"name": "ee.estonia.entities.Child",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "data"
},
{
"name": "id"
}
],
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.AuditedPropertiesReader"
},
"name": "ee.estonia.entities.Parent",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "ee.estonia.entities.Parent",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "collection"
},
{
"name": "data"
}
],
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.JAXBElement"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlAccessType",
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlAccessorType",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "value",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlAttribute",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlElement",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "type",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlElement$DEFAULT"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlElementDecl",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "scope",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlElementDecl$GLOBAL"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlElementRef",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "type",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlElementRef$DEFAULT"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlElementRefs",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlElements",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "value",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlEnum",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "value",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlEnumValue",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "value",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlMixed",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlRootElement",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlSeeAlso",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "value",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlType",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "factoryClass",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlType$DEFAULT"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.XmlValue",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.adapters.XmlAdapter"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.adapters.XmlJavaTypeAdapter",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "type",
"parameterTypes": []
},
{
"name": "value",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jakarta.xml.bind.annotation.adapters.XmlJavaTypeAdapter$DEFAULT"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "jdk.internal.ValueBased",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl"
},
"name": "org.glassfish.jaxb.runtime.v2.ContextFactory",
"methods": [
{
"name": "createContext",
"parameterTypes": [
"[Ljava.lang.Class;",
"java.util.Map"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.core.v2.model.nav.ReflectionNavigator",
"methods": [
{
"name": "getInstance",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.runtime.v2.ContextFactory",
"methods": [
{
"name": "createContext",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.runtime.v2.runtime.property.ArrayElementLeafProperty",
"queryAllPublicConstructors": true,
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.glassfish.jaxb.runtime.v2.runtime.JAXBContextImpl",
"org.glassfish.jaxb.runtime.v2.model.runtime.RuntimeElementPropertyInfo"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.runtime.v2.runtime.property.ArrayElementNodeProperty",
"queryAllPublicConstructors": true,
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.glassfish.jaxb.runtime.v2.runtime.JAXBContextImpl",
"org.glassfish.jaxb.runtime.v2.model.runtime.RuntimeElementPropertyInfo"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.runtime.v2.runtime.property.ArrayReferenceNodeProperty",
"queryAllPublicConstructors": true,
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.glassfish.jaxb.runtime.v2.runtime.JAXBContextImpl",
"org.glassfish.jaxb.runtime.v2.model.runtime.RuntimeReferencePropertyInfo"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.runtime.v2.runtime.property.SingleElementLeafProperty",
"queryAllPublicConstructors": true,
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.glassfish.jaxb.runtime.v2.runtime.JAXBContextImpl",
"org.glassfish.jaxb.runtime.v2.model.runtime.RuntimeElementPropertyInfo"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.runtime.v2.runtime.property.SingleElementNodeProperty",
"queryAllPublicConstructors": true,
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.glassfish.jaxb.runtime.v2.runtime.JAXBContextImpl",
"org.glassfish.jaxb.runtime.v2.model.runtime.RuntimeElementPropertyInfo"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.runtime.v2.runtime.property.SingleMapNodeProperty",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.glassfish.jaxb.runtime.v2.runtime.property.SingleReferenceNodeProperty",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.CacheMode"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.FlushMode"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.LockMode"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter1",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter2",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter3",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter4",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter5",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter6",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter7",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter8",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.Adapter9",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmAnyAssociationType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmAnyValueMappingType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmArrayType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmAuxiliaryDatabaseObjectType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmAuxiliaryDatabaseObjectType$JaxbHbmDefinition",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmBagCollectionType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmBaseVersionAttributeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmBasicAttributeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmBasicCollectionElementType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCacheInclusionEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCacheType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmClassRenameType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCollectionIdType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmColumnType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCompositeAttributeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCompositeCollectionElementType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCompositeIdType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCompositeIndexType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCompositeKeyBasicAttributeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCompositeKeyManyToOneType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmConfigParameterContainer",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmConfigParameterType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmCustomSqlDmlType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmDialectScopeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmDiscriminatorSubclassEntityType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmDynamicComponentType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmEntityBaseDefinition",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmEntityDiscriminatorType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmFetchProfileType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmFetchProfileType$JaxbHbmFetch",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmFetchStyleEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmFetchStyleWithSubselectEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmFilterAliasMappingType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmFilterDefinitionType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmFilterParameterType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmFilterType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmGeneratorSpecificationType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmHibernateMapping",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmIdBagCollectionType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmIdentifierGeneratorDefinitionType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmIndexManyToAnyType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmIndexManyToManyType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmIndexType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmJoinedSubclassEntityType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmKeyType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmLazyEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmLazyWithExtraEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmLazyWithNoProxyEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmListIndexType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmListType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmLoaderType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmManyToAnyCollectionElementType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmManyToManyCollectionElementType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmManyToOneType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmMapKeyBasicType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmMapKeyCompositeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmMapKeyManyToManyType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmMapType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmMultiTenancyType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNamedNativeQueryType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNamedQueryType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNativeQueryCollectionLoadReturnType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNativeQueryJoinReturnType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNativeQueryPropertyReturnType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNativeQueryPropertyReturnType$JaxbHbmReturnColumn",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNativeQueryReturnType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNativeQueryReturnType$JaxbHbmReturnDiscriminator",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNativeQueryScalarReturnType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNaturalIdCacheType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNaturalIdType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNestedCompositeElementType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmNotFoundEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmOnDeleteEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmOneToManyCollectionElementType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmOneToOneType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmOuterJoinEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmParentType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmPolymorphismEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmPrimitiveArrayType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmPropertiesType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmQueryParamType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmResultSetMappingType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmRootEntityType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmSecondaryTableType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmSetType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmSimpleIdType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmSubclassEntityBaseDefinition",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmSynchronizeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmTimestampAttributeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmTimestampSourceEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmToolingHintContainer",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmToolingHintType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmTuplizerType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmTypeDefinitionType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmTypeSpecificationType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmUnionSubclassEntityType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmUnsavedValueCompositeIdEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmUnsavedValueTimestampEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmUnsavedValueVersionEnum",
"allDeclaredFields": true,
"methods": [
{
"name": "values",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.JaxbHbmVersionAttributeType",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.ObjectFactory",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.PluralAttributeInfoIdBagAdapter",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.boot.jaxb.hbm.spi.PluralAttributeInfoPrimitiveArrayAdapter",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.cache.spi.access.AccessType"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.engine.OptimisticLockStyle"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.AuditProcess"
},
"name": "org.hibernate.engine.jdbc.batch.internal.BatchBuilderImpl",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.ValidityAuditStrategy"
},
"name": "org.hibernate.engine.jdbc.batch.internal.BatchBuilderImpl",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.engine.spi.ExecuteUpdateResultCheckStyle"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.RevisionInfoConfiguration"
},
"name": "org.hibernate.envers.DefaultRevisionEntity",
"allDeclaredConstructors": true,
"allDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.RevisionInfoConfiguration"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"allDeclaredConstructors": true,
"allDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversServiceImpl$1"
},
"name": "org.hibernate.envers.DefaultRevisionEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "timestamp"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.DefaultRevisionInfoGenerator"
},
"name": "org.hibernate.envers.DefaultRevisionEntity",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoNumberReader"
},
"name": "org.hibernate.envers.DefaultRevisionEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "id"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionTimestampValueResolver"
},
"name": "org.hibernate.envers.DefaultRevisionEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "timestamp"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.EnversBootLogger"
},
"name": "org.hibernate.envers.boot.EnversBootLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.TypeContributorImpl"
},
"name": "org.hibernate.envers.boot.internal.EnversServiceImpl",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.PersistentEntityInstantiator"
},
"name": "org.hibernate.envers.boot.model.DiscriminatorPersistentEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.configuration.internal.metadata.AuditTableData",
"org.hibernate.mapping.PersistentClass"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.PersistentEntityInstantiator"
},
"name": "org.hibernate.envers.boot.model.JoinedSubclassPersistentEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.configuration.internal.metadata.AuditTableData",
"org.hibernate.mapping.PersistentClass"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.PersistentEntityInstantiator"
},
"name": "org.hibernate.envers.boot.model.RootPersistentEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.configuration.internal.metadata.AuditTableData",
"org.hibernate.mapping.PersistentClass"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.PersistentEntityInstantiator"
},
"name": "org.hibernate.envers.boot.model.UnionSubclassPersistentEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.configuration.internal.metadata.AuditTableData",
"org.hibernate.mapping.PersistentClass"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversServiceImpl$1"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "timestamp"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "setId",
"parameterTypes": [
"int"
]
},
{
"name": "setTimestamp",
"parameterTypes": [
"long"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.CrossTypeRevisionChangesReaderImpl"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"methods": [
{
"name": "setId",
"parameterTypes": [
"int"
]
},
{
"name": "setTimestamp",
"parameterTypes": [
"long"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.DefaultRevisionInfoGenerator"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "getTimestamp",
"parameterTypes": []
},
{
"name": "setId",
"parameterTypes": [
"int"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoNumberReader"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "id"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionTimestampValueResolver"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "timestamp"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.AuditProcess"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "getId",
"parameterTypes": []
},
{
"name": "getTimestamp",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.SessionCacheCleaner$1"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"methods": [
{
"name": "getId",
"parameterTypes": []
},
{
"name": "getTimestamp",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.ReflectionTools"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "timestamp"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesModifiedAtRevisionQuery"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"methods": [
{
"name": "getId",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.DefaultAuditStrategy"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"methods": [
{
"name": "getId",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.ValidityAuditStrategy"
},
"name": "org.hibernate.envers.enhanced.SequenceIdRevisionEntity",
"methods": [
{
"name": "getId",
"parameterTypes": []
},
{
"name": "getTimestamp",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversServiceImpl$1"
},
"name": "org.hibernate.envers.enhanced.SequenceIdTrackingModifiedEntitiesRevisionEntity",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.CrossTypeRevisionChangesReaderImpl"
},
"name": "org.hibernate.envers.enhanced.SequenceIdTrackingModifiedEntitiesRevisionEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "setModifiedEntityNames",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.DefaultTrackingModifiedEntitiesRevisionInfoGenerator"
},
"name": "org.hibernate.envers.enhanced.SequenceIdTrackingModifiedEntitiesRevisionEntity",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoNumberReader"
},
"name": "org.hibernate.envers.enhanced.SequenceIdTrackingModifiedEntitiesRevisionEntity",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionTimestampValueResolver"
},
"name": "org.hibernate.envers.enhanced.SequenceIdTrackingModifiedEntitiesRevisionEntity",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.AuditProcess"
},
"name": "org.hibernate.envers.enhanced.SequenceIdTrackingModifiedEntitiesRevisionEntity",
"methods": [
{
"name": "<init>",
"parameterTypes": []
},
{
"name": "getModifiedEntityNames",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.SessionCacheCleaner$1"
},
"name": "org.hibernate.envers.enhanced.SequenceIdTrackingModifiedEntitiesRevisionEntity",
"methods": [
{
"name": "getModifiedEntityNames",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.ValidityAuditStrategy"
},
"name": "org.hibernate.envers.enhanced.SequenceIdTrackingModifiedEntitiesRevisionEntity",
"methods": [
{
"name": "getModifiedEntityNames",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.ClassesAuditingData"
},
"name": "org.hibernate.envers.internal.EnversMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AuditMetadataGenerator"
},
"name": "org.hibernate.envers.internal.EnversMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.CollectionMappedByResolver"
},
"name": "org.hibernate.envers.internal.EnversMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.JoinColumnCollectionMetadataGenerator"
},
"name": "org.hibernate.envers.internal.EnversMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.MiddleTableCollectionMetadataGenerator"
},
"name": "org.hibernate.envers.internal.EnversMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.FirstLevelCache"
},
"name": "org.hibernate.envers.internal.EnversMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AbstractCollectionMetadataGenerator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AuditMetadataGenerator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.JoinColumnCollectionMetadataGenerator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.MiddleTableCollectionMetadataGenerator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.EntityInstantiator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.ComponentPropertyMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.AbstractCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.BasicCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.ListCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.BasicCollectionInitializor"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.ListCollectionInitializor"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.RevisionsOfEntityQuery"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.ListProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AuditMetadataGenerator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.EntityInstantiator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.AbstractCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.MapCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.CollectionProxy"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.RevisionsOfEntityQuery"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AuditMetadataGenerator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.JoinColumnCollectionMetadataGenerator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.MiddleTableCollectionMetadataGenerator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.EntityInstantiator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.AbstractCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.BasicCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.ToOneDelegateSessionImplementor"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.BasicCollectionInitializor"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.CollectionProxy"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SetProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.EntityInstantiator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SortedMapProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.SortedMapCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SortedMapProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.EntityInstantiator"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SortedSetProxy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.SortedSetCollectionMapper"
},
"name": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.SortedSetProxy",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"org.hibernate.envers.internal.entities.mapper.relation.lazy.initializor.Initializor"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversServiceImpl$1"
},
"name": "org.hibernate.envers.test.entities.reventity.CustomLocalDateTimeRevEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "localDateTimestamp"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.AuditedPropertiesReader"
},
"name": "org.hibernate.envers.test.entities.reventity.CustomLocalDateTimeRevEntity",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoNumberReader"
},
"name": "org.hibernate.envers.test.entities.reventity.CustomLocalDateTimeRevEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "id"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionTimestampValueResolver"
},
"name": "org.hibernate.envers.test.entities.reventity.CustomLocalDateTimeRevEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "localDateTimestamp"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.AuditedPropertiesReader"
},
"name": "org.hibernate.envers.test.integration.data.DateTestEntity",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.AbstractMapper"
},
"name": "org.hibernate.envers.test.integration.data.DateTestEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "id"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.SinglePropertyMapper"
},
"name": "org.hibernate.envers.test.integration.data.DateTestEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "dateValue"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.AuditedPropertiesReader"
},
"name": "org.hibernate.envers.test.integration.data.EnumTestEntity",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.AbstractMapper"
},
"name": "org.hibernate.envers.test.integration.data.EnumTestEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "id"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.SinglePropertyMapper"
},
"name": "org.hibernate.envers.test.integration.data.EnumTestEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "enum1"
},
{
"name": "enum2"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.AuditedPropertiesReader"
},
"name": "org.hibernate.envers.test.integration.data.LobSerializableTestEntity",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.AbstractMapper"
},
"name": "org.hibernate.envers.test.integration.data.LobSerializableTestEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "id"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.SinglePropertyMapper"
},
"name": "org.hibernate.envers.test.integration.data.LobSerializableTestEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "obj"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "org.hibernate.envers.test.integration.data.SerObject"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.AuditedPropertiesReader"
},
"name": "org.hibernate.envers.test.integration.data.SerializableTestEntity",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.AbstractMapper"
},
"name": "org.hibernate.envers.test.integration.data.SerializableTestEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "id"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.SinglePropertyMapper"
},
"name": "org.hibernate.envers.test.integration.data.SerializableTestEntity",
"queryAllDeclaredMethods": true,
"fields": [
{
"name": "obj"
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.AuditMetadataGenerator"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.ValueMetadataGenerator"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.enhanced.OrderedSequenceGenerator"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPostInsertEventListenerImpl"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPreUpdateEventListenerImpl"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.DefaultRevisionInfoGenerator"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoQueryCreator"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionTimestampValueResolver"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.AuditProcess"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.synchronization.SessionCacheCleaner$1"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.query.QueryBuilder"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.ValidityAuditStrategy"
},
"name": "org.hibernate.internal.CoreMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.CollectionProxy"
},
"name": "org.hibernate.internal.EntityManagerMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.entities.mapper.relation.lazy.proxy.MapProxy"
},
"name": "org.hibernate.internal.EntityManagerMessageLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.metamodel.RepresentationMode"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.EnversServiceImpl$1"
},
"name": "org.hibernate.property.access.internal.PropertyAccessStrategyResolverStandardImpl",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionTimestampValueResolver"
},
"name": "org.hibernate.property.access.internal.PropertyAccessStrategyResolverStandardImpl",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.ReflectionTools"
},
"name": "org.hibernate.property.access.internal.PropertyAccessStrategyResolverStandardImpl",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.query.hql.HqlLogging_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.revisioninfo.RevisionInfoQueryCreator"
},
"name": "org.hibernate.query.hql.HqlLogging_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.tools.query.QueryBuilder"
},
"name": "org.hibernate.query.hql.HqlLogging_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "org.hibernate.query.hql.HqlLogging_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "org.hibernate.query.hql.HqlLogging_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.ValidityAuditStrategy"
},
"name": "org.hibernate.query.hql.HqlLogging_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPostInsertEventListenerImpl"
},
"name": "org.hibernate.sql.exec.SqlExecLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPreUpdateEventListenerImpl"
},
"name": "org.hibernate.sql.exec.SqlExecLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.sql.exec.SqlExecLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "org.hibernate.sql.exec.SqlExecLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "org.hibernate.sql.exec.SqlExecLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPostInsertEventListenerImpl"
},
"name": "org.hibernate.sql.results.LoadingLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPreUpdateEventListenerImpl"
},
"name": "org.hibernate.sql.results.LoadingLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.sql.results.LoadingLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "org.hibernate.sql.results.LoadingLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "org.hibernate.sql.results.LoadingLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPostInsertEventListenerImpl"
},
"name": "org.hibernate.sql.results.ResultsLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.event.spi.EnversPreUpdateEventListenerImpl"
},
"name": "org.hibernate.sql.results.ResultsLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.internal.reader.AuditReaderImpl"
},
"name": "org.hibernate.sql.results.ResultsLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.AbstractAuditQuery"
},
"name": "org.hibernate.sql.results.ResultsLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.query.internal.impl.EntitiesAtRevisionQuery"
},
"name": "org.hibernate.sql.results.ResultsLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"name": "org.hibernate.tuple.GenerationTiming"
},
{
"condition": {
"typeReachable": "org.hibernate.envers.strategy.internal.ValidityAuditStrategy$QueryParameterBinding"
},
"name": "sun.util.resources.provider.NonBaseLocaleDataMetaInfo",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate.orm/hibernate-envers/6.1.1.Final/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate.orm/hibernate-envers/6.1.1.Final/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 56381
} | 148 |
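The reflect-config.json above follows the GraalVM reachability-metadata format: each entry names a class to register for reflection, and the optional "condition" / "typeReachable" pair defers that registration until the named type becomes reachable in the native image, which keeps the image small. Flags such as "fields" and "allDeclaredFields" register field lookups, "methods" registers methods for invocation, and "queriedMethods" / "queryAllDeclaredMethods" permit query-style lookups only. As a rough sketch (assuming hibernate-envers is on the classpath; this is not part of the configuration itself), the entries for org.hibernate.envers.DefaultRevisionEntity are what keep reflective code like the following working after native compilation:

// Illustrative sketch only: the reflective lookups below succeed in a GraalVM
// native image only because the metadata above registers DefaultRevisionEntity
// (and its "timestamp" field) under the listed reachability conditions.
import java.lang.reflect.Field;

public class ReflectConfigSketch {
    public static void main(String[] args) throws Exception {
        // Lookup by name requires the class to be registered in reflect-config.json.
        Class<?> type = Class.forName("org.hibernate.envers.DefaultRevisionEntity");
        // Permitted by the "fields": [{"name": "timestamp"}] registrations above.
        Field timestamp = type.getDeclaredField("timestamp");
        System.out.println(timestamp.getName() + ": " + timestamp.getType().getSimpleName());
    }
}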
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jboss.logging/jboss-logging/3.5.0.Final/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jboss.logging/jboss-logging/3.5.0.Final/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 149 |
[
{
"condition": {
"typeReachable": "org.jline.terminal.impl.jansi.JansiNativePty"
},
"name": "java.io.FileDescriptor",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": [
"int"
]
}
]
},
{
"condition": {
"typeReachable": "org.jline.terminal.TerminalBuilder"
},
"methods": [
{
"name": "current",
"parameterTypes": []
},
{
"name": "info",
"parameterTypes": []
},
{
"name": "parent",
"parameterTypes": []
}
],
"name": "java.lang.ProcessHandle"
},
{
"condition": {
"typeReachable": "org.jline.terminal.TerminalBuilder"
},
"methods": [
{
"name": "command",
"parameterTypes": []
}
],
"name": "java.lang.ProcessHandle$Info"
},
{
"condition": {
"typeReachable": "org.jline.builtins.Styles"
},
"methods": [
{
"name": "get",
"parameterTypes": []
}
],
"name": "org.jline.console.SystemRegistry"
},
{
"condition": {
"typeReachable": "org.jline.utils.Signals"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.String"
]
},
{
"name": "handle",
"parameterTypes": [
"sun.misc.Signal",
"sun.misc.SignalHandler"
]
}
],
"name": "sun.misc.Signal"
},
{
"condition": {
"typeReachable": "org.jline.utils.Signals"
},
"fields": [
{
"name": "SIG_DFL"
}
],
"name": "sun.misc.SignalHandler"
},
{
"allDeclaredClasses": true,
"allDeclaredConstructors": true,
"allDeclaredMethods": true,
"allPublicClasses": true,
"allPublicConstructors": true,
"allPublicMethods": true,
"condition": {
"typeReachable": "org.jline.terminal.TerminalBuilder"
},
"name": "sun.misc.SignalHandler"
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jline/jline/3.21.0/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jline/jline/3.21.0/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 1045
} | 150 |
[
{
"condition": {
"typeReachable": "org.mariadb.jdbc.Configuration"
},
"name": "org.mariadb.jdbc.Configuration",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.mariadb.jdbc.Configuration"
},
"name": "org.mariadb.jdbc.Configuration$Builder",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.mariadb.jdbc.plugin.authentication.standard.NativePasswordPlugin"
},
"name": "sun.security.provider.SHA",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.mariadb.jdbc/mariadb-java-client/3.0.6/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.mariadb.jdbc/mariadb-java-client/3.0.6/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 287
} | 151 |
[
{
"name": "org.thymeleaf.spring6.view.ThymeleafView",
"allDeclaredConstructors": true,
"condition": {
"typeReachable": "org.thymeleaf.spring6.view.ThymeleafViewResolver"
}
},
{
"name": "org.thymeleaf.spring6.view.reactive.ThymeleafReactiveView",
"allDeclaredConstructors": true,
"condition": {
"typeReachable": "org.thymeleaf.spring6.view.reactive.ThymeleafReactiveViewResolver"
}
},
{
"name": "org.thymeleaf.spring6.expression.Mvc$Spring41MvcUriComponentsBuilderDelegate",
"allDeclaredConstructors": true,
"condition": {
"typeReachable": "org.thymeleaf.spring6.expression.Mvc"
}
},
{
"name": "org.thymeleaf.spring6.expression.Fields",
"allPublicMethods": true,
"condition": {
"typeReachable": "org.thymeleaf.spring6.expression.SPELVariableExpressionEvaluator"
}
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf/thymeleaf-spring6/3.1.0.M2/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf/thymeleaf-spring6/3.1.0.M2/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 367
} | 152 |
/home/ubuntu/wensimin-work/mybatis-native-demo/src/test/java/com/example/nativedemo/NativeDemoApplicationTests.java
| mybatis-native-demo/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/inputFiles.lst/0 | {
"file_path": "mybatis-native-demo/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/inputFiles.lst",
"repo_id": "mybatis-native-demo",
"token_count": 44
} | 153 |
package com.example.nativedemo;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.context.annotation.ConfigurationClassUtils;
/**
* Bean definitions for {@link MybatisPlusConfig}.
*/
public class MybatisPlusConfig__BeanDefinitions {
/**
* Get the bean definition for 'mybatisPlusConfig'.
*/
public static BeanDefinition getMybatisPlusConfigBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(MybatisPlusConfig.class);
beanDefinition.setTargetType(MybatisPlusConfig.class);
ConfigurationClassUtils.initializeConfigurationClass(MybatisPlusConfig.class);
beanDefinition.setInstanceSupplier(MybatisPlusConfig$$SpringCGLIB$$0::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'mybatisPlusInterceptor'.
*/
private static BeanInstanceSupplier<MybatisPlusInterceptor> getMybatisPlusInterceptorInstanceSupplier(
) {
return BeanInstanceSupplier.<MybatisPlusInterceptor>forFactoryMethod(MybatisPlusConfig.class, "mybatisPlusInterceptor")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(MybatisPlusConfig.class).mybatisPlusInterceptor());
}
/**
* Get the bean definition for 'mybatisPlusInterceptor'.
*/
public static BeanDefinition getMybatisPlusInterceptorBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(MybatisPlusInterceptor.class);
beanDefinition.setInstanceSupplier(getMybatisPlusInterceptorInstanceSupplier());
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/com/example/nativedemo/MybatisPlusConfig__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/com/example/nativedemo/MybatisPlusConfig__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 543
} | 154 |
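The generated MybatisPlusConfig__BeanDefinitions above is Spring AOT's ahead-of-time replacement for runtime @Configuration parsing. As a hedged sketch (the real MybatisPlusConfig in this repository may register additional interceptors; only the factory-method shape is implied by the generated code), the source class behind it looks roughly like this:

package com.example.nativedemo;

import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Roughly the @Configuration class that Spring AOT turns into the
// BeanInstanceSupplier.forFactoryMethod(...) call shown above.
@Configuration
public class MybatisPlusConfig {

    @Bean
    public MybatisPlusInterceptor mybatisPlusInterceptor() {
        // The generated supplier delegates back to this factory method at runtime.
        return new MybatisPlusInterceptor();
    }
}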
package org.springframework.boot.autoconfigure.jdbc;
import javax.sql.DataSource;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.core.env.Environment;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
/**
* Bean definitions for {@link DataSourceTransactionManagerAutoConfiguration}.
*/
public class DataSourceTransactionManagerAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'dataSourceTransactionManagerAutoConfiguration'.
*/
public static BeanDefinition getDataSourceTransactionManagerAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceTransactionManagerAutoConfiguration.class);
beanDefinition.setInstanceSupplier(DataSourceTransactionManagerAutoConfiguration::new);
return beanDefinition;
}
/**
* Bean definitions for {@link DataSourceTransactionManagerAutoConfiguration.JdbcTransactionManagerConfiguration}.
*/
public static class JdbcTransactionManagerConfiguration {
/**
* Get the bean definition for 'jdbcTransactionManagerConfiguration'.
*/
public static BeanDefinition getJdbcTransactionManagerConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceTransactionManagerAutoConfiguration.JdbcTransactionManagerConfiguration.class);
beanDefinition.setInstanceSupplier(DataSourceTransactionManagerAutoConfiguration.JdbcTransactionManagerConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'transactionManager'.
*/
private static BeanInstanceSupplier<DataSourceTransactionManager> getTransactionManagerInstanceSupplier(
) {
return BeanInstanceSupplier.<DataSourceTransactionManager>forFactoryMethod(DataSourceTransactionManagerAutoConfiguration.JdbcTransactionManagerConfiguration.class, "transactionManager", Environment.class, DataSource.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(DataSourceTransactionManagerAutoConfiguration.JdbcTransactionManagerConfiguration.class).transactionManager(args.get(0), args.get(1), args.get(2)));
}
/**
* Get the bean definition for 'transactionManager'.
*/
public static BeanDefinition getTransactionManagerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceTransactionManager.class);
beanDefinition.setInstanceSupplier(getTransactionManagerInstanceSupplier());
return beanDefinition;
}
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceTransactionManagerAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceTransactionManagerAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 753
} | 155 |
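Classes like the one above are normally consumed by an AOT-generated ApplicationContextInitializer rather than called by hand. Purely as a hedged illustration of what that wiring amounts to (the class name ManualRegistrationSketch and the bean names used here are invented for the example), the static factory methods can be registered against a plain bean factory:

import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.boot.autoconfigure.jdbc.DataSourceTransactionManagerAutoConfiguration__BeanDefinitions;

// Illustrative only: registers the AOT-generated definitions the way Spring's
// generated initializer would.  Actually creating the transactionManager bean
// would additionally require a DataSource, Environment, etc. in the factory.
public class ManualRegistrationSketch {
    public static void main(String[] args) {
        DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory();
        beanFactory.registerBeanDefinition(
                "dataSourceTransactionManagerAutoConfiguration",
                DataSourceTransactionManagerAutoConfiguration__BeanDefinitions
                        .getDataSourceTransactionManagerAutoConfigurationBeanDefinition());
        beanFactory.registerBeanDefinition(
                "transactionManager",
                DataSourceTransactionManagerAutoConfiguration__BeanDefinitions.JdbcTransactionManagerConfiguration
                        .getTransactionManagerBeanDefinition());
    }
}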
package org.springframework.boot.autoconfigure.task;
import java.lang.SuppressWarnings;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.boot.task.SimpleAsyncTaskSchedulerBuilder;
import org.springframework.boot.task.TaskSchedulerBuilder;
import org.springframework.boot.task.ThreadPoolTaskSchedulerBuilder;
/**
* Bean definitions for {@link TaskSchedulingConfigurations}.
*/
public class TaskSchedulingConfigurations__BeanDefinitions {
/**
* Bean definitions for {@link TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration}.
*/
public static class ThreadPoolTaskSchedulerBuilderConfiguration {
/**
* Get the bean definition for 'threadPoolTaskSchedulerBuilderConfiguration'.
*/
public static BeanDefinition getThreadPoolTaskSchedulerBuilderConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration.class);
beanDefinition.setInstanceSupplier(TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'threadPoolTaskSchedulerBuilder'.
*/
private static BeanInstanceSupplier<ThreadPoolTaskSchedulerBuilder> getThreadPoolTaskSchedulerBuilderInstanceSupplier(
) {
return BeanInstanceSupplier.<ThreadPoolTaskSchedulerBuilder>forFactoryMethod(TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration.class, "threadPoolTaskSchedulerBuilder", TaskSchedulingProperties.class, ObjectProvider.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(TaskSchedulingConfigurations.ThreadPoolTaskSchedulerBuilderConfiguration.class).threadPoolTaskSchedulerBuilder(args.get(0), args.get(1), args.get(2)));
}
/**
* Get the bean definition for 'threadPoolTaskSchedulerBuilder'.
*/
public static BeanDefinition getThreadPoolTaskSchedulerBuilderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ThreadPoolTaskSchedulerBuilder.class);
beanDefinition.setInstanceSupplier(getThreadPoolTaskSchedulerBuilderInstanceSupplier());
return beanDefinition;
}
}
/**
* Bean definitions for {@link TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration}.
*/
public static class TaskSchedulerBuilderConfiguration {
/**
* Get the bean definition for 'taskSchedulerBuilderConfiguration'.
*/
public static BeanDefinition getTaskSchedulerBuilderConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration.class);
beanDefinition.setInstanceSupplier(TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'taskSchedulerBuilder'.
*/
@SuppressWarnings("removal")
private static BeanInstanceSupplier<TaskSchedulerBuilder> getTaskSchedulerBuilderInstanceSupplier(
) {
return BeanInstanceSupplier.<TaskSchedulerBuilder>forFactoryMethod(TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration.class, "taskSchedulerBuilder", TaskSchedulingProperties.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(TaskSchedulingConfigurations.TaskSchedulerBuilderConfiguration.class).taskSchedulerBuilder(args.get(0), args.get(1)));
}
/**
* Get the bean definition for 'taskSchedulerBuilder'.
*/
@SuppressWarnings("removal")
public static BeanDefinition getTaskSchedulerBuilderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulerBuilder.class);
beanDefinition.setInstanceSupplier(getTaskSchedulerBuilderInstanceSupplier());
return beanDefinition;
}
}
/**
* Bean definitions for {@link TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration}.
*/
public static class SimpleAsyncTaskSchedulerBuilderConfiguration {
/**
* Get the bean instance supplier for 'org.springframework.boot.autoconfigure.task.TaskSchedulingConfigurations$SimpleAsyncTaskSchedulerBuilderConfiguration'.
*/
private static BeanInstanceSupplier<TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration> getSimpleAsyncTaskSchedulerBuilderConfigurationInstanceSupplier(
) {
return BeanInstanceSupplier.<TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration>forConstructor(TaskSchedulingProperties.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> new TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration(args.get(0), args.get(1)));
}
/**
* Get the bean definition for 'simpleAsyncTaskSchedulerBuilderConfiguration'.
*/
public static BeanDefinition getSimpleAsyncTaskSchedulerBuilderConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration.class);
beanDefinition.setInstanceSupplier(getSimpleAsyncTaskSchedulerBuilderConfigurationInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'simpleAsyncTaskSchedulerBuilder'.
*/
private static BeanInstanceSupplier<SimpleAsyncTaskSchedulerBuilder> getSimpleAsyncTaskSchedulerBuilderInstanceSupplier(
) {
return BeanInstanceSupplier.<SimpleAsyncTaskSchedulerBuilder>forFactoryMethod(TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration.class, "simpleAsyncTaskSchedulerBuilderVirtualThreads")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(TaskSchedulingConfigurations.SimpleAsyncTaskSchedulerBuilderConfiguration.class).simpleAsyncTaskSchedulerBuilderVirtualThreads());
}
/**
* Get the bean definition for 'simpleAsyncTaskSchedulerBuilder'.
*/
public static BeanDefinition getSimpleAsyncTaskSchedulerBuilderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(SimpleAsyncTaskSchedulerBuilder.class);
beanDefinition.setInstanceSupplier(getSimpleAsyncTaskSchedulerBuilderInstanceSupplier());
return beanDefinition;
}
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/task/TaskSchedulingConfigurations__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/task/TaskSchedulingConfigurations__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 1971
} | 156 |
package org.springframework.boot.autoconfigure.web.servlet;
import java.util.List;
import org.springframework.beans.factory.aot.AutowiredMethodArgumentsResolver;
import org.springframework.beans.factory.support.RegisteredBean;
/**
* Autowiring for {@link WebMvcAutoConfiguration.EnableWebMvcConfiguration}.
*/
public class WebMvcAutoConfiguration_EnableWebMvcConfiguration__Autowiring {
/**
* Apply the autowiring.
*/
public static WebMvcAutoConfiguration.EnableWebMvcConfiguration apply(
RegisteredBean registeredBean, WebMvcAutoConfiguration.EnableWebMvcConfiguration instance) {
AutowiredMethodArgumentsResolver.forMethod("setConfigurers", List.class).resolve(registeredBean, args -> instance.setConfigurers(args.get(0)));
return instance;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration_EnableWebMvcConfiguration__Autowiring.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration_EnableWebMvcConfiguration__Autowiring.java",
"repo_id": "mybatis-native-demo",
"token_count": 233
} | 157 |
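The Autowiring class above replaces runtime discovery of an @Autowired setter with a pre-resolved call. As a hedged reference point (this is the general shape of such an injection point, not the exact Spring source), the kind of setter it resolves looks like:

import java.util.List;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;

// General shape of the injection point resolved above: the AOT code supplies
// the List<WebMvcConfigurer> argument directly instead of scanning for
// @Autowired metadata at runtime.
class ConfigurerSetterShape {

    private List<WebMvcConfigurer> configurers;

    @Autowired(required = false)
    public void setConfigurers(List<WebMvcConfigurer> configurers) {
        this.configurers = configurers;
    }
}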
package org.springframework.cloud.client;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link CommonsClientAutoConfiguration}.
*/
public class CommonsClientAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'commonsClientAutoConfiguration'.
*/
public static BeanDefinition getCommonsClientAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(CommonsClientAutoConfiguration.class);
beanDefinition.setInstanceSupplier(CommonsClientAutoConfiguration::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/client/CommonsClientAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/client/CommonsClientAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 178
} | 158 |
package org.springframework.cloud.configuration;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link CompatibilityVerifierProperties}.
*/
public class CompatibilityVerifierProperties__BeanDefinitions {
/**
* Get the bean definition for 'compatibilityVerifierProperties'.
*/
public static BeanDefinition getCompatibilityVerifierPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(CompatibilityVerifierProperties.class);
beanDefinition.setInstanceSupplier(CompatibilityVerifierProperties::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/configuration/CompatibilityVerifierProperties__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/configuration/CompatibilityVerifierProperties__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 185
} | 159 |
MYSQL_ROOT_PASSWORD=root
MYSQL_DATABASE=nacos_devtest
MYSQL_USER=nacos
MYSQL_PASSWORD=nacos
LANG=C.UTF-8
| nacos-docker/env/mysql.env/0 | {
"file_path": "nacos-docker/env/mysql.env",
"repo_id": "nacos-docker",
"token_count": 60
} | 160 |
2024-06-22 00:07:09,988 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 00:07:47,948 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 00:16:49,565 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 00:37:09,988 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 00:37:47,948 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 00:46:49,565 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 01:07:09,988 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 01:07:47,948 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 01:16:49,565 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 01:37:09,988 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 01:37:47,948 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 01:46:49,565 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 02:07:09,988 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 02:07:47,948 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 1 ms.
2024-06-22 02:16:49,566 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 02:37:09,988 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 02:37:47,948 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 02:46:49,566 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 03:07:09,989 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 03:07:47,950 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 03:16:49,567 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 03:37:09,989 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 03:37:47,949 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 03:46:49,566 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 04:07:09,989 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 04:07:47,949 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 1 ms.
2024-06-22 04:16:49,566 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 04:37:09,989 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 04:37:47,949 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 04:46:49,567 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 05:07:09,990 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 05:07:47,950 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 05:16:49,567 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 05:37:09,990 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 05:37:47,950 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 05:46:49,567 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 06:07:09,990 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 06:07:47,950 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 06:16:49,567 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 06:37:09,990 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 06:37:47,950 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 1 ms.
2024-06-22 06:46:49,568 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 07:07:09,990 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 07:07:47,951 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 07:16:49,568 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 07:37:09,990 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 07:37:47,951 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 07:46:49,568 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 08:07:09,991 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 08:07:47,951 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 08:16:49,568 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 08:37:09,990 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 08:37:47,951 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 08:46:49,568 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 09:07:09,991 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 09:07:47,951 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 09:16:49,568 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 09:37:09,993 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 09:37:47,951 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 09:46:49,568 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 10:07:09,991 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 10:07:47,952 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 10:16:49,569 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 10:37:09,991 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 10:37:47,952 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 10:46:49,569 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 11:07:09,992 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 11:07:47,953 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 11:16:49,570 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 11:37:09,993 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 11:37:47,953 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 1 ms.
2024-06-22 11:46:49,570 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 12:07:09,992 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 12:07:47,953 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 12:16:49,570 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 12:37:09,993 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 12:37:47,953 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 12:46:49,570 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 13:07:09,993 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 13:07:47,953 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 13:16:49,570 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 13:37:09,993 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 13:37:47,954 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 13:46:49,571 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 14:07:09,993 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 14:07:47,954 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 14:16:49,570 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 14:37:09,994 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 14:37:47,954 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 14:46:49,572 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 15:07:09,994 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 15:07:47,954 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 15:16:49,571 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 15:37:09,993 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 15:37:47,954 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 15:46:49,571 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 16:07:09,993 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 16:07:47,955 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 16:16:49,571 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 16:37:09,994 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 16:37:47,954 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 16:46:49,571 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 17:07:09,994 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 17:07:47,955 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 17:16:49,571 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 1 ms.
2024-06-22 17:37:09,994 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 17:37:47,955 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 17:46:49,572 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 18:07:09,994 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_instance_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 18:07:47,954 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log from log index 3 to 3, cost 0 ms.
2024-06-22 18:16:49,572 INFO Truncated prefix logs in data path: /home/nacos/data/protocol/raft/naming_service_metadata/log from log index 3 to 3, cost 0 ms.
2024-06-22 18:18:32,626 INFO Node <naming_service_metadata/04806c39e732:7848> shutdown, currTerm=2 state=STATE_LEADER.
2024-06-22 18:18:32,629 INFO Fail to find the next candidate, group naming_service_metadata.
2024-06-22 18:18:32,629 INFO onLeaderStop: status=Status[ESHUTDOWN<1007>: Raft node is going to quit.].
2024-06-22 18:18:32,649 INFO Save raft meta, path=/home/nacos/data/protocol/raft/naming_service_metadata/meta-data, term=2, votedFor=04806c39e732:7848, cost time=5 ms
2024-06-22 18:18:32,649 INFO Shutting down FSMCaller...
2024-06-22 18:18:32,650 INFO ThreadPool is terminated: JRaft-RPC-Processor, com.alipay.sofa.jraft.util.MetricThreadPoolExecutor@5515f2c9[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0].
2024-06-22 18:18:32,650 INFO ThreadPool is terminated: JRaft-Node-ScheduleThreadPool, com.alipay.sofa.jraft.util.MetricScheduledThreadPoolExecutor@7d8332a8[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0].
2024-06-22 18:18:32,650 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=5000, name='JRaft-ElectionTimer-<naming_service_metadata/04806c39e732:7848>'}.
2024-06-22 18:18:32,650 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=5000, name='JRaft-VoteTimer-<naming_service_metadata/04806c39e732:7848>'}.
2024-06-22 18:18:32,650 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=2500, name='JRaft-StepDownTimer-<naming_service_metadata/04806c39e732:7848>'}.
2024-06-22 18:18:32,650 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=1800000, name='JRaft-SnapshotTimer-<naming_service_metadata/04806c39e732:7848>'}.
2024-06-22 18:18:32,651 INFO onShutdown.
2024-06-22 18:18:32,651 INFO The number of active nodes decrement to 2.
2024-06-22 18:18:32,653 INFO Node <naming_service_metadata/04806c39e732:7848> shutdown, currTerm=2 state=STATE_SHUTTING.
2024-06-22 18:18:32,654 INFO Stop the RaftGroupService successfully.
2024-06-22 18:18:32,654 INFO Node <naming_instance_metadata/04806c39e732:7848> shutdown, currTerm=2 state=STATE_LEADER.
2024-06-22 18:18:32,655 INFO Fail to find the next candidate, group naming_instance_metadata.
2024-06-22 18:18:32,655 INFO onLeaderStop: status=Status[ESHUTDOWN<1007>: Raft node is going to quit.].
2024-06-22 18:18:32,663 INFO DB destroyed, the db path is: /home/nacos/data/protocol/raft/naming_service_metadata/log.
2024-06-22 18:18:32,675 INFO Save raft meta, path=/home/nacos/data/protocol/raft/naming_instance_metadata/meta-data, term=2, votedFor=04806c39e732:7848, cost time=3 ms
2024-06-22 18:18:32,675 INFO Shutting down FSMCaller...
2024-06-22 18:18:32,676 INFO ThreadPool is terminated: JRaft-RPC-Processor, com.alipay.sofa.jraft.util.MetricThreadPoolExecutor@7ff61598[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0].
2024-06-22 18:18:32,676 INFO ThreadPool is terminated: JRaft-Node-ScheduleThreadPool, com.alipay.sofa.jraft.util.MetricScheduledThreadPoolExecutor@1b047ebc[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0].
2024-06-22 18:18:32,676 INFO onShutdown.
2024-06-22 18:18:32,676 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=5000, name='JRaft-ElectionTimer-<naming_instance_metadata/04806c39e732:7848>'}.
2024-06-22 18:18:32,676 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=5000, name='JRaft-VoteTimer-<naming_instance_metadata/04806c39e732:7848>'}.
2024-06-22 18:18:32,676 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=2500, name='JRaft-StepDownTimer-<naming_instance_metadata/04806c39e732:7848>'}.
2024-06-22 18:18:32,676 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=1800000, name='JRaft-SnapshotTimer-<naming_instance_metadata/04806c39e732:7848>'}.
2024-06-22 18:18:32,676 INFO The number of active nodes decrement to 1.
2024-06-22 18:18:32,677 INFO Node <naming_instance_metadata/04806c39e732:7848> shutdown, currTerm=2 state=STATE_SHUTTING.
2024-06-22 18:18:32,677 INFO Stop the RaftGroupService successfully.
2024-06-22 18:18:32,677 INFO Node <naming_persistent_service_v2/04806c39e732:7848> shutdown, currTerm=2 state=STATE_LEADER.
2024-06-22 18:18:32,679 INFO Fail to find the next candidate, group naming_persistent_service_v2.
2024-06-22 18:18:32,679 INFO onLeaderStop: status=Status[ESHUTDOWN<1007>: Raft node is going to quit.].
2024-06-22 18:18:32,688 INFO DB destroyed, the db path is: /home/nacos/data/protocol/raft/naming_instance_metadata/log.
2024-06-22 18:18:32,694 INFO Save raft meta, path=/home/nacos/data/protocol/raft/naming_persistent_service_v2/meta-data, term=2, votedFor=04806c39e732:7848, cost time=3 ms
2024-06-22 18:18:32,694 INFO Shutting down FSMCaller...
2024-06-22 18:18:32,696 INFO ThreadPool is terminated: JRaft-RPC-Processor, com.alipay.sofa.jraft.util.MetricThreadPoolExecutor@2868c449[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0].
2024-06-22 18:18:32,696 INFO ThreadPool is terminated: JRaft-Node-ScheduleThreadPool, com.alipay.sofa.jraft.util.MetricScheduledThreadPoolExecutor@67fd4e09[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 0].
2024-06-22 18:18:32,696 INFO onShutdown.
2024-06-22 18:18:32,696 INFO The number of active nodes decrement to 0.
2024-06-22 18:18:32,699 INFO ThreadPool is terminated: JRaft-Global-ElectionTimer, com.alipay.sofa.jraft.util.MetricScheduledThreadPoolExecutor@1275e742[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 3].
2024-06-22 18:18:32,699 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=5000, name='JRaft-ElectionTimer-<naming_persistent_service_v2/04806c39e732:7848>'}.
2024-06-22 18:18:32,701 INFO ThreadPool is terminated: JRaft-Global-VoteTimer, com.alipay.sofa.jraft.util.MetricScheduledThreadPoolExecutor@7ece1ea6[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 3].
2024-06-22 18:18:32,701 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=5000, name='JRaft-VoteTimer-<naming_persistent_service_v2/04806c39e732:7848>'}.
2024-06-22 18:18:32,712 INFO ThreadPool is terminated: JRaft-Global-StepDownTimer, com.alipay.sofa.jraft.util.MetricScheduledThreadPoolExecutor@7576e072[Shutting down, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 1676052].
2024-06-22 18:18:32,713 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=2500, name='JRaft-StepDownTimer-<naming_persistent_service_v2/04806c39e732:7848>'}.
2024-06-22 18:18:32,722 INFO ThreadPool is terminated: JRaft-Global-SnapshotTimer, com.alipay.sofa.jraft.util.MetricScheduledThreadPoolExecutor@37a53094[Shutting down, pool size = 2, active threads = 0, queued tasks = 0, completed tasks = 2328].
2024-06-22 18:18:32,723 INFO Destroy timer: RepeatedTimer{timeout=null, stopped=true, running=false, destroyed=true, invoking=false, timeoutMs=1800000, name='JRaft-SnapshotTimer-<naming_persistent_service_v2/04806c39e732:7848>'}.
2024-06-22 18:18:32,724 INFO Node <naming_persistent_service_v2/04806c39e732:7848> shutdown, currTerm=2 state=STATE_SHUTTING.
2024-06-22 18:18:32,724 INFO Stop the RaftGroupService successfully.
2024-06-22 18:18:32,728 INFO Shutdown managed channel: 04806c39e732:7848, ManagedChannelOrphanWrapper{delegate=ManagedChannelImpl{logId=10, target=04806c39e732:7848}}.
2024-06-22 18:18:32,730 INFO The channel 04806c39e732:7848 is in state: SHUTDOWN.
2024-06-22 18:18:32,730 WARN This channel 04806c39e732:7848 has started shutting down. Any new RPCs should fail immediately.
2024-06-22 18:18:32,734 INFO DB destroyed, the db path is: /home/nacos/data/protocol/raft/naming_persistent_service_v2/log.
2024-06-22 18:18:32,736 INFO Connection disconnected: /172.27.0.2:38522
2024-06-22 18:18:32,747 INFO ThreadPool is terminated: JRaft-RPC-Processor, com.alipay.sofa.jraft.util.MetricThreadPoolExecutor@7421afc3[Shutting down, pool size = 10, active threads = 0, queued tasks = 0, completed tasks = 1114370].
| nacos-docker/example/standalone-logs/alipay-jraft.log/0 | {
"file_path": "nacos-docker/example/standalone-logs/alipay-jraft.log",
"repo_id": "nacos-docker",
"token_count": 9838
} | 161 |
2024-06-22 18:18:32,467 WARN [NamingServerHttpClientManager] Start destroying HTTP-Client
2024-06-22 18:18:32,515 WARN [NamingServerHttpClientManager] Destruction of the end
| nacos-docker/example/standalone-logs/naming-server.log/0 | {
"file_path": "nacos-docker/example/standalone-logs/naming-server.log",
"repo_id": "nacos-docker",
"token_count": 56
} | 162 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="AutoImportSettings">
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="33e5043c-5ffc-484f-9258-943e1d64b300" name="更改" comment="" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="ExternalProjectsData">
<projectState path="$PROJECT_DIR$">
<ProjectState />
</projectState>
</component>
<component name="ExternalProjectsManager">
<system id="GRADLE">
<state>
<task path="$PROJECT_DIR$">
<activation />
</task>
<projects_view>
<tree_state>
<expand>
<path>
<item name="" type="6a2764b6:ExternalProjectsStructure$RootNode" />
<item name="pgvector" type="f1a62948:ProjectNode" />
</path>
<path>
<item name="" type="6a2764b6:ExternalProjectsStructure$RootNode" />
<item name="pgvector" type="f1a62948:ProjectNode" />
<item name="Tasks" type="e4a08cd1:TasksNode" />
</path>
<path>
<item name="" type="6a2764b6:ExternalProjectsStructure$RootNode" />
<item name="pgvector" type="f1a62948:ProjectNode" />
<item name="Tasks" type="e4a08cd1:TasksNode" />
<item name="build" type="c8890929:TasksNode$1" />
</path>
</expand>
<select />
</tree_state>
</projects_view>
</state>
</system>
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Kotlin Data Class" />
<option value="Class" />
<option value="Kotlin Class" />
</list>
</option>
</component>
<component name="KubernetesApiPersistence">{}</component>
<component name="KubernetesApiProvider">{
"isMigrated": true
}</component>
<component name="ProblemsViewState">
<option name="selectedTabId" value="QODANA_PROBLEMS_VIEW_TAB" />
</component>
<component name="ProjectColorInfo">{
"associatedIndex": 6
}</component>
<component name="ProjectId" id="2fFxSAzg7SDDhajyM2fxVrPjd4M" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent">{
"keyToString": {
"Docker.depend-compose.yml.pg-vector: Compose 部署.executor": "Run",
"Docker.depend-compose.yml: Compose 部署.executor": "Run",
"Gradle.Build pgvector.executor": "Run",
"Gradle.pgvector [bootBuildImage].executor": "Run",
"Gradle.下载源代码.executor": "Run",
"Gradle.构建 pgvector.executor": "Run",
"RequestMappingsPanelOrder0": "0",
"RequestMappingsPanelOrder1": "1",
"RequestMappingsPanelWidth0": "75",
"RequestMappingsPanelWidth1": "75",
"RunOnceActivity.ShowReadmeOnStart": "true",
"Spring Boot.PgVectorApplication.executor": "Run",
"Spring Boot.PgvectorApplication.executor": "Debug",
"ignore.virus.scanning.warn.message": "true",
"kotlin-language-version-configured": "true",
"node.js.detected.package.eslint": "true",
"node.js.detected.package.tslint": "true",
"node.js.selected.package.eslint": "(autodetect)",
"node.js.selected.package.tslint": "(autodetect)",
"nodejs_package_manager_path": "npm",
"project.structure.last.edited": "Project",
"project.structure.proportion": "0.15",
"project.structure.side.proportion": "0.0",
"settings.editor.selected.configurable": "http.proxy",
"vue.rearranger.settings.migration": "true"
},
"keyToStringList": {
"DatabaseDriversLRU": [
"postgresql"
],
"kotlin-gradle-user-dirs": [
"/home/ubuntu/.gradle"
]
}
}</component>
<component name="RdControllerToolWindowsLayoutState" isNewUi="true">
<layout>
<window_info id="Bookmarks" show_stripe_button="false" side_tool="true" />
<window_info id="Merge Requests" show_stripe_button="false" />
<window_info id="Commit_Guest" show_stripe_button="false" />
<window_info id="Pull Requests" show_stripe_button="false" />
<window_info id="Persistence" side_tool="true" />
<window_info id="Learn" show_stripe_button="false" />
<window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.25182292" />
<window_info id="Commit" order="1" weight="0.25" />
<window_info id="Structure" order="2" side_tool="true" weight="0.25" />
<window_info anchor="bottom" id="Database Changes" show_stripe_button="false" />
<window_info anchor="bottom" id="TypeScript" show_stripe_button="false" />
<window_info anchor="bottom" id="Profiler" show_stripe_button="false" />
<window_info anchor="bottom" id="TODO" show_stripe_button="false" />
<window_info anchor="bottom" id="File Transfer" show_stripe_button="false" />
<window_info anchor="bottom" id="Version Control" order="0" />
<window_info anchor="bottom" id="Problems" order="1" />
<window_info anchor="bottom" id="Problems View" order="2" />
<window_info anchor="bottom" id="Terminal" order="3" weight="0.3302257" />
<window_info anchor="bottom" id="Build" order="4" weight="0.3302257" />
<window_info anchor="bottom" id="Debug" order="5" weight="0.23895976" />
<window_info active="true" anchor="bottom" id="Run" order="6" visible="true" weight="0.5255152" />
<window_info anchor="bottom" id="Services" order="7" weight="0.23895976" />
<window_info anchor="right" id="Endpoints" show_stripe_button="false" />
<window_info anchor="right" id="Coverage" show_stripe_button="false" side_tool="true" />
<window_info anchor="right" id="Beans" />
<window_info anchor="right" content_ui="combo" id="Notifications" order="0" weight="0.25" />
<window_info anchor="right" id="AIAssistant" order="1" weight="0.25" />
<window_info anchor="right" id="Database" order="2" weight="0.25" />
<window_info active="true" anchor="right" id="Gradle" order="3" visible="true" weight="0.13046876" />
<window_info anchor="right" id="Maven" order="4" weight="0.25" />
<window_info anchor="right" id="Translation.Wordbook" order="5" show_stripe_button="false" side_tool="true" />
</layout>
</component>
<component name="ReactorSettings">
<option name="notificationShown" value="true" />
</component>
<component name="RecentsManager">
<key name="MoveFile.RECENT_KEYS">
<recent name="C:\Users\hukai\IdeaProjects\pgvector\src\main\resources" />
<recent name="C:\Users\hukai\IdeaProjects\pgvector\src\main\kotlin\org\example\pgvector\entity" />
</key>
<key name="MoveKotlinTopLevelDeclarationsDialog.RECENTS_KEY">
<recent name="org.example.pgvector.pojo" />
</key>
<key name="CopyKotlinDeclarationDialog.RECENTS_KEY">
<recent name="org.example.pgvector.entity" />
</key>
</component>
<component name="RunManager" selected="Gradle.pgvector [bootBuildImage]">
<configuration name="pgvector [bootBuildImage]" type="GradleRunConfiguration" factoryName="Gradle" temporary="true">
<target name="@@@LOCAL@@@" />
<ExternalSystemSettings>
<option name="executionName" />
<option name="externalProjectPath" value="$PROJECT_DIR$" />
<option name="externalSystemIdString" value="GRADLE" />
<option name="scriptParameters" value="" />
<option name="taskDescriptions">
<list />
</option>
<option name="taskNames">
<list>
<option value="bootBuildImage" />
</list>
</option>
<option name="vmOptions" />
</ExternalSystemSettings>
<ExternalSystemDebugServerProcess>true</ExternalSystemDebugServerProcess>
<ExternalSystemReattachDebugProcess>true</ExternalSystemReattachDebugProcess>
<DebugAllEnabled>false</DebugAllEnabled>
<RunAsTest>false</RunAsTest>
<method v="2" />
</configuration>
<configuration name="PgVectorApplication" type="SpringBootApplicationConfigurationType" factoryName="Spring Boot" nameIsGenerated="true">
<module name="pgvector.main" />
<option name="SHORTEN_COMMAND_LINE" value="ARGS_FILE" />
<option name="SPRING_BOOT_MAIN_CLASS" value="org.example.pgvector.PgVectorApplication" />
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
<configuration default="true" type="SpringBootApplicationConfigurationType" factoryName="Spring Boot">
<option name="SHORTEN_COMMAND_LINE" value="ARGS_FILE" />
<method v="2">
<option name="Make" enabled="true" />
</method>
</configuration>
<configuration default="true" type="docker-deploy" factoryName="docker-compose.yml" temporary="true">
<deployment type="docker-compose.yml">
<settings />
</deployment>
<method v="2" />
</configuration>
<configuration name="depend-compose.yml: Compose 部署" type="docker-deploy" factoryName="docker-compose.yml" temporary="true" server-name="Docker">
<deployment type="docker-compose.yml">
<settings>
<option name="sourceFilePath" value="depend-compose.yml" />
</settings>
</deployment>
<method v="2" />
</configuration>
<configuration name="depend-compose.yml.pg-vector: Compose 部署" type="docker-deploy" factoryName="docker-compose.yml" temporary="true" server-name="Docker">
<deployment type="docker-compose.yml">
<settings>
<option name="services">
<list>
<option value="pg-vector" />
</list>
</option>
<option name="sourceFilePath" value="depend-compose.yml" />
</settings>
</deployment>
<method v="2" />
</configuration>
<list>
<item itemvalue="Docker.depend-compose.yml.pg-vector: Compose 部署" />
<item itemvalue="Docker.depend-compose.yml: Compose 部署" />
<item itemvalue="Gradle.pgvector [bootBuildImage]" />
<item itemvalue="Spring Boot.PgVectorApplication" />
</list>
<recent_temporary>
<list>
<item itemvalue="Gradle.pgvector [bootBuildImage]" />
<item itemvalue="Docker.depend-compose.yml: Compose 部署" />
<item itemvalue="Docker.depend-compose.yml.pg-vector: Compose 部署" />
</list>
</recent_temporary>
</component>
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-jdk-9f38398b9061-39b83d9b5494-intellij.indexing.shared.core-IU-241.15989.21" />
<option value="bundled-js-predefined-1d06a55b98c1-3d8cd37a7330-JavaScript-IU-241.15989.21" />
</set>
</attachedChunks>
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="应用程序级" UseSingleDictionary="true" transferred="true" />
<component name="TaskManager">
<task active="true" id="Default" summary="默认任务">
<changelist id="33e5043c-5ffc-484f-9258-943e1d64b300" name="更改" comment="" />
<created>1713410629103</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1713410629103</updated>
<workItem from="1713410630147" duration="25775000" />
<workItem from="1713499289681" duration="6473000" />
<workItem from="1713510716569" duration="249000" />
<workItem from="1713510992574" duration="599000" />
<workItem from="1713515107901" duration="599000" />
<workItem from="1715051191561" duration="53000" />
</task>
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="3" />
</component>
<component name="XSLT-Support.FileAssociations.UIState">
<expand />
<select />
</component>
</project> | pgvector/.idea/workspace.xml/0 | {
"file_path": "pgvector/.idea/workspace.xml",
"repo_id": "pgvector",
"token_count": 5504
} | 163 |
package org.springframework.boot.autoconfigure.context;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link LifecycleProperties}.
*/
@Generated
public class LifecycleProperties__BeanDefinitions {
/**
* Get the bean definition for 'lifecycleProperties'.
*/
public static BeanDefinition getLifecyclePropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(LifecycleProperties.class);
beanDefinition.setInstanceSupplier(LifecycleProperties::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/context/LifecycleProperties__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/context/LifecycleProperties__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 201
} | 164 |
package org.springframework.boot.autoconfigure.orm.jpa;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link JpaProperties}.
*/
@Generated
public class JpaProperties__BeanDefinitions {
/**
* Get the bean definition for 'jpaProperties'.
*/
public static BeanDefinition getJpaPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(JpaProperties.class);
beanDefinition.setInstanceSupplier(JpaProperties::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/JpaProperties__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/JpaProperties__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 201
} | 165 |
package org.springframework.boot.autoconfigure.transaction;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link TransactionProperties}.
*/
@Generated
public class TransactionProperties__BeanDefinitions {
/**
* Get the bean definition for 'transactionProperties'.
*/
public static BeanDefinition getTransactionPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TransactionProperties.class);
beanDefinition.setInstanceSupplier(TransactionProperties::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/transaction/TransactionProperties__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/transaction/TransactionProperties__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 194
} | 166 |
package org.springframework.data.jpa.repository.support;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.AutowiredMethodArgumentsResolver;
import org.springframework.beans.factory.support.RegisteredBean;
import org.springframework.data.jpa.repository.query.JpaQueryMethodFactory;
/**
* Autowiring for {@link JpaRepositoryFactoryBean}.
*/
@Generated
public class JpaRepositoryFactoryBean__Autowiring {
/**
* Apply the autowiring.
*/
public static JpaRepositoryFactoryBean apply(RegisteredBean registeredBean,
JpaRepositoryFactoryBean instance) {
AutowiredMethodArgumentsResolver.forRequiredMethod("setEntityPathResolver", ObjectProvider.class).resolve(registeredBean, args -> instance.setEntityPathResolver(args.get(0)));
AutowiredMethodArgumentsResolver.forRequiredMethod("setQueryMethodFactory", JpaQueryMethodFactory.class).resolve(registeredBean, args -> instance.setQueryMethodFactory(args.get(0)));
return instance;
}
}
| pgvector/build/generated/aotSources/org/springframework/data/jpa/repository/support/JpaRepositoryFactoryBean__Autowiring.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/data/jpa/repository/support/JpaRepositoryFactoryBean__Autowiring.java",
"repo_id": "pgvector",
"token_count": 327
} | 167 |
override=true
| pgvector/build/native-reachability-metadata/META-INF/native-image/io.netty/netty-codec-http/4.1.107.Final/reachability-metadata.properties/0 | {
"file_path": "pgvector/build/native-reachability-metadata/META-INF/native-image/io.netty/netty-codec-http/4.1.107.Final/reachability-metadata.properties",
"repo_id": "pgvector",
"token_count": 5
} | 168 |
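# GraalVM native-image build arguments (a brief note, not part of the generated file):
# -H:Class sets the main class, --report-unsupported-elements-at-runtime defers errors about
# unsupported elements to runtime, --no-fallback fails the build instead of producing a
# JVM-dependent fallback image, and --install-exit-handlers lets the binary react to
# SIGTERM/SIGINT (useful in containers).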
Args = -H:Class=org.example.pgvector.PgVectorApplicationKt \
--report-unsupported-elements-at-runtime \
--no-fallback \
--install-exit-handlers | pgvector/build/resources/aot/META-INF/native-image/org.example/pgvector/native-image.properties/0 | {
"file_path": "pgvector/build/resources/aot/META-INF/native-image/org.example/pgvector/native-image.properties",
"repo_id": "pgvector",
"token_count": 49
} | 169 |
package org.example.pgvector
import com.alibaba.fastjson.JSONObject
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.kotlin.readValue
import io.milvus.client.MilvusClient
import io.milvus.client.MilvusServiceClient
import io.milvus.grpc.DataType
import io.milvus.param.IndexType
import io.milvus.param.MetricType
import io.milvus.param.collection.*
import io.milvus.param.dml.InsertParam
import io.milvus.param.dml.SearchParam
import io.milvus.param.index.CreateIndexParam
import jakarta.annotation.PostConstruct
import org.example.pgvector.dao.DatabaseRecordDao
import org.example.pgvector.entity.DataRecord
import org.example.pgvector.entity.DatabaseRecord
import org.example.pgvector.entity.Id
import org.example.pgvector.pojo.Vector
import org.springframework.boot.autoconfigure.SpringBootApplication
import org.springframework.boot.runApplication
import org.springframework.core.io.ClassPathResource
import org.springframework.web.bind.annotation.GetMapping
import org.springframework.web.bind.annotation.RequestMapping
import org.springframework.web.bind.annotation.RestController
import kotlin.reflect.KClass
import kotlin.reflect.full.hasAnnotation
import kotlin.reflect.full.memberProperties
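/**
 * Demo application comparing vector similarity search between a Postgres/pgvector-backed
 * table and a Milvus collection.
 *
 * On startup it reads feature vectors from `result.json`, seeds the relational table via
 * [DatabaseRecordDao] and a Milvus collection derived from [DataRecord], then exposes two
 * endpoints for comparison, e.g. `GET /a?index=0` (SQL side) and `GET /b?index=0` (Milvus side).
 */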
@SpringBootApplication
@RestController
@RequestMapping
class PgVectorApplication(
objectMapper: ObjectMapper,
val databaseRecordDao: DatabaseRecordDao,
val milvusServiceClient: MilvusServiceClient
) {
private lateinit var databaseRecords: ArrayList<DatabaseRecord>
private lateinit var dataRecords: ArrayList<DataRecord>
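    // SQL-side lookup: delegates to DatabaseRecordDao.findByFeatures with the stored feature
    // vector serialized as a string (the actual similarity query lives in the DAO, not shown here).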
@GetMapping("a")
fun queryA(index: Int) = databaseRecordDao.findByFeatures(databaseRecords[index].features.toString())
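    // Milvus-side lookup: top-10 similarity search on the `features` vector field,
    // using the in-memory vector at `index` as the query vector.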
@GetMapping("b")
fun queryB(index: Int): String {
val search = SearchParam.newBuilder().withCollectionName(DataRecord::class.simpleName!!)
.withTopK(10)
.withVectorFieldName(DataRecord::features.name)
.withVectors(listOf(dataRecords[index].features))
.withOutFields(listOf("id", "features", "quality", "groupId"))
.build()
return milvusServiceClient.search(search)?.data.toString()
}
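    // Flatten result.json into parallel lists: databaseRecords for the SQL table and dataRecords
    // for Milvus. Note that ClassPathResource.file only resolves when the resource sits on the
    // filesystem; inside a jar or native image an InputStream-based read would be needed.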
init {
val readValue = objectMapper.readValue<List<Map<Int, Vector>>>(
ClassPathResource("result.json").file.inputStream()
)
val vectors = ArrayList<Vector>()
readValue.forEach {
vectors.addAll(it.values)
}
databaseRecords = ArrayList()
dataRecords = ArrayList()
vectors.forEachIndexed { vIndex, v ->
v.qualities.forEachIndexed { index, q ->
databaseRecords.add(DatabaseRecord(null, v.features[index], q, vIndex))
dataRecords.add(DataRecord(null, v.features[index], q, vIndex))
}
}
}
@PostConstruct
fun init() {
        // Seed the table only when it is empty
if (databaseRecordDao.count() == 0L) {
databaseRecordDao.saveAll(databaseRecords)
}
}
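    // Rebuild the Milvus collection on every startup: drop it if it already exists,
    // recreate the schema from DataRecord via reflection, then bulk-insert all rows.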
@PostConstruct
fun initMilvus() {
if (milvusServiceClient.hasCollection<DataRecord>()) {
milvusServiceClient.dropCollection(
DropCollectionParam.newBuilder().withCollectionName(DataRecord::class.simpleName!!).build()
)
}
milvusServiceClient.createEntity(DataRecord::class)
milvusServiceClient.batchInsert(dataRecords)
println("milvus version is ${milvusServiceClient.version}")
}
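    // Derive a Milvus collection schema from a Kotlin class via reflection: @Id properties become
    // the auto-generated primary key, Int/Long/Double/String map to scalar field types, and
    // anything else falls back to a 256-dimensional float vector. The collection then gets an
    // HNSW index (L2 metric) on `features` and is loaded into memory.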
fun MilvusServiceClient.createEntity(kClass: KClass<out Any>) {
val collectionName = kClass.simpleName!!
val fields = kClass.memberProperties.map { field ->
val ftb = FieldType.newBuilder()
.withName(field.name)
if (field.hasAnnotation<Id>()) {
ftb.withAutoID(true)
ftb.withPrimaryKey(true)
}
var datatype = when (field.returnType.classifier) {
Int::class -> DataType.Int32
Long::class -> DataType.Int64
Double::class -> DataType.Double
String::class -> DataType.String
else -> {
null
}
}
            // FIXME: the vector dimension should come from an annotation instead of being hard-coded
if (datatype == null) {
datatype = DataType.FloatVector
ftb.withDimension(256)
}
ftb.withDataType(datatype)
ftb.build()
}
val schemaBuilder = CollectionSchemaParam.newBuilder().withFieldTypes(fields)
val requestParam = CreateCollectionParam.newBuilder()
.withCollectionName(collectionName)
.withDescription("auto create")
.withSchema(schemaBuilder.build())
.build()
        // Create the collection
val createCollection = this.createCollection(requestParam)
println("createCollection : $createCollection")
        // Build the index on the vector field. FIXME: index type and parameters are hard-coded
val createIndex = this.createIndex(
CreateIndexParam.newBuilder()
.withCollectionName(collectionName)
.withFieldName(DataRecord::features.name)
.withIndexName("${DataRecord::features.name}_index")
.withIndexType(IndexType.HNSW)
.withMetricType(MetricType.L2)
.withExtraParam(
"""
{
"efConstruction": 100,
"M": 10
}
""".trimIndent()
)
.withSyncMode(true)
.build()
)
println("createIndexMsg: $createIndex")
        // Load the collection into memory so it can be searched
val loadCollection = this.loadCollection(
LoadCollectionParam.newBuilder().withCollectionName(collectionName).withSyncLoad(true).build()
)
println("loadCollection : $loadCollection")
}
}
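// Serialize each entity into a fastjson JSONObject keyed by property name (nulls are skipped,
// which keeps the auto-ID primary key unset) and insert the rows into the collection named
// after the entity class.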
private inline fun <reified T> MilvusClient.batchInsert(data: List<T>) = this.batchInsert(data, T::class)
private fun MilvusClient.batchInsert(data: List<*>, type: KClass<*>) {
val collectionName = type.simpleName!!
val props = type.memberProperties
val rows = data.map {
val row = JSONObject()
props.forEach { prop ->
val value = prop.getter.call(it)
if (value != null) {
row[prop.name] = value
}
}
row
}
val insertParam = InsertParam.newBuilder().withCollectionName(collectionName).withRows(rows).build()
this.insert(insertParam)
}
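// Reified convenience check for whether a collection named after the class already exists.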
private inline fun <reified T> MilvusClient.hasCollection(): Boolean = this.hasCollection(T::class.simpleName!!)
private fun MilvusClient.hasCollection(simpleName: String): Boolean =
this.hasCollection(HasCollectionParam.newBuilder().withCollectionName(simpleName).build()).data
fun main(args: Array<String>) {
runApplication<PgVectorApplication>(*args)
}
| pgvector/src/main/kotlin/org/example/pgvector/PgVectorApplication.kt/0 | {
"file_path": "pgvector/src/main/kotlin/org/example/pgvector/PgVectorApplication.kt",
"repo_id": "pgvector",
"token_count": 3041
} | 170 |