# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
__all__ = ['SmoothL1Loss']
@register
class SmoothL1Loss(nn.Layer):
"""Smooth L1 Loss.
Args:
beta (float): controls the smooth region; the loss degenerates to L1 loss when beta=0.0
loss_weight (float): the final loss is multiplied by this factor
"""
def __init__(self,
beta=1.0,
loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
assert beta >= 0
self.beta = beta
self.loss_weight = loss_weight
def forward(self, pred, target, reduction='none'):
"""forward function, based on fvcore.
Args:
pred (Tensor): prediction tensor
target (Tensor): target tensor, pred.shape must be the same as target.shape
reduction (str): how to reduce the loss, one of ('none', 'sum', 'mean')
"""
assert reduction in ('none', 'sum', 'mean')
target = target.detach()
if self.beta < 1e-5:
loss = paddle.abs(pred - target)
else:
n = paddle.abs(pred - target)
cond = n < self.beta
loss = paddle.where(cond, 0.5 * n ** 2 / self.beta, n - 0.5 * self.beta)
if reduction == 'mean':
loss = loss.mean() if loss.size > 0 else 0.0 * loss.sum()
elif reduction == 'sum':
loss = loss.sum()
return loss * self.loss_weight
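# Illustrative usage sketch (added for clarity, not part of the original
# file); tensor shapes below are arbitrary assumptions.
if __name__ == '__main__':
    loss_fn = SmoothL1Loss(beta=1.0, loss_weight=1.0)
    pred = paddle.rand([8, 4])     # e.g. predicted box deltas
    target = paddle.rand([8, 4])   # matching regression targets
    # 'mean' reduces the elementwise loss to a scalar
    print(loss_fn(pred, target, reduction='mean'))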
# ---- PaddleDetection/ppdet/modeling/losses/smooth_l1_loss.py ----
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The code is based on:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/yolox_pafpn.py
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from ppdet.core.workspace import register, serializable
from ..shape_spec import ShapeSpec
__all__ = ['CSPPAN']
class ConvBNLayer(nn.Layer):
def __init__(self,
in_channel=96,
out_channel=96,
kernel_size=3,
stride=1,
groups=1,
act='leaky_relu'):
super(ConvBNLayer, self).__init__()
initializer = nn.initializer.KaimingUniform()
self.conv = nn.Conv2D(
in_channels=in_channel,
out_channels=out_channel,
kernel_size=kernel_size,
groups=groups,
padding=(kernel_size - 1) // 2,
stride=stride,
weight_attr=ParamAttr(initializer=initializer),
bias_attr=False)
self.bn = nn.BatchNorm2D(out_channel)
if act == "hard_swish":
act = 'hardswish'
self.act = act
def forward(self, x):
x = self.bn(self.conv(x))
if self.act:
x = getattr(F, self.act)(x)
return x
class DPModule(nn.Layer):
"""
Depth-wise and point-wise module.
Args:
in_channel (int): The input channels of this Module.
out_channel (int): The output channels of this Module.
kernel_size (int): The conv2d kernel size of this Module.
stride (int): The conv2d's stride of this Module.
act (str): The activation function of this Module,
Now support `leaky_relu` and `hard_swish`.
"""
def __init__(self,
in_channel=96,
out_channel=96,
kernel_size=3,
stride=1,
act='leaky_relu',
use_act_in_out=True):
super(DPModule, self).__init__()
initializer = nn.initializer.KaimingUniform()
self.use_act_in_out = use_act_in_out
self.dwconv = nn.Conv2D(
in_channels=in_channel,
out_channels=out_channel,
kernel_size=kernel_size,
groups=out_channel,
padding=(kernel_size - 1) // 2,
stride=stride,
weight_attr=ParamAttr(initializer=initializer),
bias_attr=False)
self.bn1 = nn.BatchNorm2D(out_channel)
self.pwconv = nn.Conv2D(
in_channels=out_channel,
out_channels=out_channel,
kernel_size=1,
groups=1,
padding=0,
weight_attr=ParamAttr(initializer=initializer),
bias_attr=False)
self.bn2 = nn.BatchNorm2D(out_channel)
if act == "hard_swish":
act = 'hardswish'
self.act = act
def forward(self, x):
x = self.bn1(self.dwconv(x))
if self.act:
x = getattr(F, self.act)(x)
x = self.bn2(self.pwconv(x))
if self.use_act_in_out and self.act:
x = getattr(F, self.act)(x)
return x
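# Added note (illustrative, not in the original file): DPModule factors a
# dense k x k convolution into a depthwise k x k conv plus a 1 x 1 pointwise
# conv. For in_channel == out_channel == 96 and kernel_size == 3 this costs
# 96*3*3 + 96*96 = 10080 weights versus 96*96*3*3 = 82944 for a dense conv.
# A hypothetical sanity check:
#
#     m = DPModule(in_channel=96, out_channel=96, kernel_size=3, stride=1)
#     y = m(paddle.rand([1, 96, 32, 32]))   # -> shape [1, 96, 32, 32]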
class DarknetBottleneck(nn.Layer):
"""The basic bottleneck block used in Darknet.
Each Block consists of two ConvModules and the input is added to the
final output. Each ConvModule is composed of Conv, BN, and act.
The first convLayer has filter size of 1x1 and the second one has the
filter size of 3x3.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
kernel_size (int): The kernel size of the second convolution. Default: 3
expansion (float): Ratio of hidden channels to out_channels. Default: 0.5
add_identity (bool): Whether to add identity to the out.
Default: True
use_depthwise (bool): Whether to use depthwise separable convolution.
Default: False
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
expansion=0.5,
add_identity=True,
use_depthwise=False,
act="leaky_relu"):
super(DarknetBottleneck, self).__init__()
hidden_channels = int(out_channels * expansion)
conv_func = DPModule if use_depthwise else ConvBNLayer
self.conv1 = ConvBNLayer(
in_channel=in_channels,
out_channel=hidden_channels,
kernel_size=1,
act=act)
self.conv2 = conv_func(
in_channel=hidden_channels,
out_channel=out_channels,
kernel_size=kernel_size,
stride=1,
act=act)
self.add_identity = \
add_identity and in_channels == out_channels
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.add_identity:
return out + identity
else:
return out
class CSPLayer(nn.Layer):
"""Cross Stage Partial Layer.
Args:
in_channels (int): The input channels of the CSP layer.
out_channels (int): The output channels of the CSP layer.
expand_ratio (float): Ratio to adjust the number of channels of the
hidden layer. Default: 0.5
num_blocks (int): Number of blocks. Default: 1
add_identity (bool): Whether to add identity in blocks.
Default: True
use_depthwise (bool): Whether to use depthwise separable convolutions
in blocks. Default: False
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
expand_ratio=0.5,
num_blocks=1,
add_identity=True,
use_depthwise=False,
act="leaky_relu"):
super().__init__()
mid_channels = int(out_channels * expand_ratio)
self.main_conv = ConvBNLayer(in_channels, mid_channels, 1, act=act)
self.short_conv = ConvBNLayer(in_channels, mid_channels, 1, act=act)
self.final_conv = ConvBNLayer(
2 * mid_channels, out_channels, 1, act=act)
self.blocks = nn.Sequential(*[
DarknetBottleneck(
mid_channels,
mid_channels,
kernel_size,
1.0,
add_identity,
use_depthwise,
act=act) for _ in range(num_blocks)
])
def forward(self, x):
x_short = self.short_conv(x)
x_main = self.main_conv(x)
x_main = self.blocks(x_main)
x_final = paddle.concat((x_main, x_short), axis=1)
return self.final_conv(x_final)
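# Added note (illustrative): with in_channels=128, out_channels=128 and
# expand_ratio=0.5, both branches carry 64 channels, the concat restores 128,
# and final_conv maps back to out_channels:
#
#     layer = CSPLayer(128, 128, num_blocks=1)
#     y = layer(paddle.rand([1, 128, 40, 40]))   # -> [1, 128, 40, 40]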
class Channel_T(nn.Layer):
def __init__(self,
in_channels=[116, 232, 464],
out_channels=96,
act="leaky_relu"):
super(Channel_T, self).__init__()
self.convs = nn.LayerList()
for i in range(len(in_channels)):
self.convs.append(
ConvBNLayer(
in_channels[i], out_channels, 1, act=act))
def forward(self, x):
outs = [self.convs[i](x[i]) for i in range(len(x))]
return outs
@register
@serializable
class CSPPAN(nn.Layer):
"""Path Aggregation Network with CSP module.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
kernel_size (int): The conv2d kernel size of this Module.
num_features (int): Number of output features of CSPPAN module.
num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 1
use_depthwise (bool): Whether to use depthwise separable convolutions
in blocks. Default: True
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=5,
num_features=3,
num_csp_blocks=1,
use_depthwise=True,
act='hard_swish',
spatial_scales=[0.125, 0.0625, 0.03125]):
super(CSPPAN, self).__init__()
self.conv_t = Channel_T(in_channels, out_channels, act=act)
in_channels = [out_channels] * len(spatial_scales)
self.in_channels = in_channels
self.out_channels = out_channels
self.spatial_scales = spatial_scales
self.num_features = num_features
conv_func = DPModule if use_depthwise else ConvBNLayer
if self.num_features == 4:
self.first_top_conv = conv_func(
in_channels[0], in_channels[0], kernel_size, stride=2, act=act)
self.second_top_conv = conv_func(
in_channels[0], in_channels[0], kernel_size, stride=2, act=act)
self.spatial_scales.append(self.spatial_scales[-1] / 2)
# build top-down blocks
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.top_down_blocks = nn.LayerList()
for idx in range(len(in_channels) - 1, 0, -1):
self.top_down_blocks.append(
CSPLayer(
in_channels[idx - 1] * 2,
in_channels[idx - 1],
kernel_size=kernel_size,
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
act=act))
# build bottom-up blocks
self.downsamples = nn.LayerList()
self.bottom_up_blocks = nn.LayerList()
for idx in range(len(in_channels) - 1):
self.downsamples.append(
conv_func(
in_channels[idx],
in_channels[idx],
kernel_size=kernel_size,
stride=2,
act=act))
self.bottom_up_blocks.append(
CSPLayer(
in_channels[idx] * 2,
in_channels[idx + 1],
kernel_size=kernel_size,
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
act=act))
def forward(self, inputs):
"""
Args:
inputs (tuple[Tensor]): input features.
Returns:
tuple[Tensor]: CSPPAN features.
"""
assert len(inputs) == len(self.in_channels)
inputs = self.conv_t(inputs)
# top-down path
inner_outs = [inputs[-1]]
for idx in range(len(self.in_channels) - 1, 0, -1):
feat_high = inner_outs[0]
feat_low = inputs[idx - 1]
upsample_feat = self.upsample(feat_high)
inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
paddle.concat([upsample_feat, feat_low], 1))
inner_outs.insert(0, inner_out)
# bottom-up path
outs = [inner_outs[0]]
for idx in range(len(self.in_channels) - 1):
feat_low = outs[-1]
feat_high = inner_outs[idx + 1]
downsample_feat = self.downsamples[idx](feat_low)
out = self.bottom_up_blocks[idx](paddle.concat(
    [downsample_feat, feat_high], 1))
outs.append(out)
top_features = None
if self.num_features == 4:
top_features = self.first_top_conv(inputs[-1])
top_features = top_features + self.second_top_conv(outs[-1])
outs.append(top_features)
return tuple(outs)
@property
def out_shape(self):
return [
ShapeSpec(
channels=self.out_channels, stride=1. / s)
for s in self.spatial_scales
]
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape], }
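# Illustrative usage sketch (added, not part of the original file): feeding
# three backbone feature maps at strides 8/16/32 for a hypothetical 256x256
# input; all shapes are assumptions.
if __name__ == '__main__':
    pan = CSPPAN(in_channels=[116, 232, 464], out_channels=96)
    feats = [
        paddle.rand([1, 116, 32, 32]),
        paddle.rand([1, 232, 16, 16]),
        paddle.rand([1, 464, 8, 8]),
    ]
    for out in pan(feats):
        print(out.shape)   # every output map has 96 channels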
# ---- PaddleDetection/ppdet/modeling/necks/csp_pan.py ----
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from ..bbox_utils import bbox2delta, bbox_overlaps
def rpn_anchor_target(anchors,
gt_boxes,
rpn_batch_size_per_im,
rpn_positive_overlap,
rpn_negative_overlap,
rpn_fg_fraction,
use_random=True,
batch_size=1,
ignore_thresh=-1,
is_crowd=None,
weights=[1., 1., 1., 1.],
assign_on_cpu=False):
tgt_labels = []
tgt_bboxes = []
tgt_deltas = []
for i in range(batch_size):
gt_bbox = gt_boxes[i]
is_crowd_i = is_crowd[i] if is_crowd else None
# Step1: match anchor and gt_bbox
matches, match_labels = label_box(
anchors, gt_bbox, rpn_positive_overlap, rpn_negative_overlap, True,
ignore_thresh, is_crowd_i, assign_on_cpu)
# Step2: sample anchor
fg_inds, bg_inds = subsample_labels(match_labels, rpn_batch_size_per_im,
rpn_fg_fraction, 0, use_random)
# Fill with the ignore label (-1), then set positive and negative labels
labels = paddle.full(match_labels.shape, -1, dtype='int32')
if bg_inds.shape[0] > 0:
labels = paddle.scatter(labels, bg_inds, paddle.zeros_like(bg_inds))
if fg_inds.shape[0] > 0:
labels = paddle.scatter(labels, fg_inds, paddle.ones_like(fg_inds))
# Step3: make output
if gt_bbox.shape[0] == 0:
matched_gt_boxes = paddle.zeros([matches.shape[0], 4])
tgt_delta = paddle.zeros([matches.shape[0], 4])
else:
matched_gt_boxes = paddle.gather(gt_bbox, matches)
tgt_delta = bbox2delta(anchors, matched_gt_boxes, weights)
matched_gt_boxes.stop_gradient = True
tgt_delta.stop_gradient = True
labels.stop_gradient = True
tgt_labels.append(labels)
tgt_bboxes.append(matched_gt_boxes)
tgt_deltas.append(tgt_delta)
return tgt_labels, tgt_bboxes, tgt_deltas
def label_box(anchors,
gt_boxes,
positive_overlap,
negative_overlap,
allow_low_quality,
ignore_thresh,
is_crowd=None,
assign_on_cpu=False):
if assign_on_cpu:
device = paddle.device.get_device()
paddle.set_device("cpu")
iou = bbox_overlaps(gt_boxes, anchors)
paddle.set_device(device)
else:
iou = bbox_overlaps(gt_boxes, anchors)
n_gt = gt_boxes.shape[0]
if n_gt == 0 or is_crowd is None:
n_gt_crowd = 0
else:
n_gt_crowd = paddle.nonzero(is_crowd).shape[0]
if iou.shape[0] == 0 or n_gt_crowd == n_gt:
# No truth, assign everything to background
default_matches = paddle.full((iou.shape[1], ), 0, dtype='int64')
default_match_labels = paddle.full((iou.shape[1], ), 0, dtype='int32')
return default_matches, default_match_labels
# if ignore_thresh > 0, ignore the anchor if it is close to
# one of the crowd ground-truth boxes
if n_gt_crowd > 0:
N_a = anchors.shape[0]
ones = paddle.ones([N_a])
mask = is_crowd * ones
if ignore_thresh > 0:
crowd_iou = iou * mask
valid = (paddle.sum((crowd_iou > ignore_thresh).cast('int32'),
axis=0) > 0).cast('float32')
iou = iou * (1 - valid) - valid
# ignore the iou between anchor and crowded ground-truth
iou = iou * (1 - mask) - mask
matched_vals, matches = paddle.topk(iou, k=1, axis=0)
match_labels = paddle.full(matches.shape, -1, dtype='int32')
# set ignored anchor with iou = -1
neg_cond = paddle.logical_and(matched_vals > -1,
matched_vals < negative_overlap)
match_labels = paddle.where(neg_cond,
paddle.zeros_like(match_labels), match_labels)
match_labels = paddle.where(matched_vals >= positive_overlap,
paddle.ones_like(match_labels), match_labels)
if allow_low_quality:
highest_quality_foreach_gt = iou.max(axis=1, keepdim=True)
pred_inds_with_highest_quality = paddle.logical_and(
iou > 0, iou == highest_quality_foreach_gt).cast('int32').sum(
0, keepdim=True)
match_labels = paddle.where(pred_inds_with_highest_quality > 0,
paddle.ones_like(match_labels),
match_labels)
matches = matches.flatten()
match_labels = match_labels.flatten()
return matches, match_labels
def subsample_labels(labels,
num_samples,
fg_fraction,
bg_label=0,
use_random=True):
positive = paddle.nonzero(
paddle.logical_and(labels != -1, labels != bg_label))
negative = paddle.nonzero(labels == bg_label)
fg_num = int(num_samples * fg_fraction)
fg_num = min(positive.numel(), fg_num)
bg_num = num_samples - fg_num
bg_num = min(negative.numel(), bg_num)
if fg_num == 0 and bg_num == 0:
fg_inds = paddle.zeros([0], dtype='int32')
bg_inds = paddle.zeros([0], dtype='int32')
return fg_inds, bg_inds
# randomly select positive and negative examples
negative = negative.cast('int32').flatten()
bg_perm = paddle.randperm(negative.numel(), dtype='int32')
bg_perm = paddle.slice(bg_perm, axes=[0], starts=[0], ends=[bg_num])
if use_random:
bg_inds = paddle.gather(negative, bg_perm)
else:
bg_inds = paddle.slice(negative, axes=[0], starts=[0], ends=[bg_num])
if fg_num == 0:
fg_inds = paddle.zeros([0], dtype='int32')
return fg_inds, bg_inds
positive = positive.cast('int32').flatten()
fg_perm = paddle.randperm(positive.numel(), dtype='int32')
fg_perm = paddle.slice(fg_perm, axes=[0], starts=[0], ends=[fg_num])
if use_random:
fg_inds = paddle.gather(positive, fg_perm)
else:
fg_inds = paddle.slice(positive, axes=[0], starts=[0], ends=[fg_num])
return fg_inds, bg_inds
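# Added example (illustrative): subsampling from a toy label vector where
# 1 = foreground, 0 = background and -1 = ignored.
#
#     labels = paddle.to_tensor([1, 1, 0, 0, 0, -1, 0, 1], dtype='int32')
#     fg, bg = subsample_labels(labels, num_samples=4, fg_fraction=0.5)
#     # at most 2 foreground and 2 background indices are returned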
def generate_proposal_target(rpn_rois,
gt_classes,
gt_boxes,
batch_size_per_im,
fg_fraction,
fg_thresh,
bg_thresh,
num_classes,
ignore_thresh=-1.,
is_crowd=None,
use_random=True,
is_cascade=False,
cascade_iou=0.5,
assign_on_cpu=False,
add_gt_as_proposals=True):
rois_with_gt = []
tgt_labels = []
tgt_bboxes = []
tgt_gt_inds = []
new_rois_num = []
# In cascade RCNN, the foreground and background thresholds
# are taken from cascade_iou
fg_thresh = cascade_iou if is_cascade else fg_thresh
bg_thresh = cascade_iou if is_cascade else bg_thresh
for i, rpn_roi in enumerate(rpn_rois):
gt_bbox = gt_boxes[i]
is_crowd_i = is_crowd[i] if is_crowd else None
gt_class = paddle.squeeze(gt_classes[i], axis=-1)
# Concat RoIs and gt boxes, except in cascade RCNN or when there is no gt
if add_gt_as_proposals and gt_bbox.shape[0] > 0:
bbox = paddle.concat([rpn_roi, gt_bbox])
else:
bbox = rpn_roi
# Step1: label bbox
matches, match_labels = label_box(bbox, gt_bbox, fg_thresh, bg_thresh,
False, ignore_thresh, is_crowd_i,
assign_on_cpu)
# Step2: sample bbox
sampled_inds, sampled_gt_classes = sample_bbox(
matches, match_labels, gt_class, batch_size_per_im, fg_fraction,
num_classes, use_random, is_cascade)
# Step3: make output
rois_per_image = bbox if is_cascade else paddle.gather(bbox,
sampled_inds)
sampled_gt_ind = matches if is_cascade else paddle.gather(matches,
sampled_inds)
if gt_bbox.shape[0] > 0:
sampled_bbox = paddle.gather(gt_bbox, sampled_gt_ind)
else:
num = rois_per_image.shape[0]
sampled_bbox = paddle.zeros([num, 4], dtype='float32')
rois_per_image.stop_gradient = True
sampled_gt_ind.stop_gradient = True
sampled_bbox.stop_gradient = True
tgt_labels.append(sampled_gt_classes)
tgt_bboxes.append(sampled_bbox)
rois_with_gt.append(rois_per_image)
tgt_gt_inds.append(sampled_gt_ind)
new_rois_num.append(paddle.shape(sampled_inds)[0:1])
new_rois_num = paddle.concat(new_rois_num)
return rois_with_gt, tgt_labels, tgt_bboxes, tgt_gt_inds, new_rois_num
def sample_bbox(matches,
match_labels,
gt_classes,
batch_size_per_im,
fg_fraction,
num_classes,
use_random=True,
is_cascade=False):
n_gt = gt_classes.shape[0]
if n_gt == 0:
# No truth, assign everything to background
gt_classes = paddle.ones(matches.shape, dtype='int32') * num_classes
#return matches, match_labels + num_classes
else:
gt_classes = paddle.gather(gt_classes, matches)
gt_classes = paddle.where(match_labels == 0,
paddle.ones_like(gt_classes) * num_classes,
gt_classes)
gt_classes = paddle.where(match_labels == -1,
paddle.ones_like(gt_classes) * -1, gt_classes)
if is_cascade:
index = paddle.arange(matches.shape[0])
return index, gt_classes
rois_per_image = int(batch_size_per_im)
fg_inds, bg_inds = subsample_labels(gt_classes, rois_per_image, fg_fraction,
num_classes, use_random)
if fg_inds.shape[0] == 0 and bg_inds.shape[0] == 0:
# fake output labeled with -1 when all boxes are neither
# foreground nor background
sampled_inds = paddle.zeros([1], dtype='int32')
else:
sampled_inds = paddle.concat([fg_inds, bg_inds])
sampled_gt_classes = paddle.gather(gt_classes, sampled_inds)
return sampled_inds, sampled_gt_classes
def polygons_to_mask(polygons, height, width):
"""
Convert the polygons to mask format
Args:
polygons (list[ndarray]): each array has shape (Nx2,)
height (int): mask height
width (int): mask width
Returns:
ndarray: a bool mask of shape (height, width)
"""
import pycocotools.mask as mask_util
assert len(polygons) > 0, "COCOAPI does not support empty polygons"
rles = mask_util.frPyObjects(polygons, height, width)
rle = mask_util.merge(rles)
return mask_util.decode(rle).astype(np.bool_)
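# Added example (illustrative): one triangle given as flattened (x, y) pairs.
#
#     poly = [np.array([0., 0., 4., 0., 0., 4.])]
#     m = polygons_to_mask(poly, height=8, width=8)   # -> (8, 8) bool array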
def rasterize_polygons_within_box(poly, box, resolution):
w, h = box[2] - box[0], box[3] - box[1]
polygons = [np.asarray(p, dtype=np.float64) for p in poly]
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
ratio_h = resolution / max(h, 0.1)
ratio_w = resolution / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
# 3. Rasterize the polygons with coco api
mask = polygons_to_mask(polygons, resolution, resolution)
mask = paddle.to_tensor(mask, dtype='int32')
return mask
def generate_mask_target(gt_segms, rois, labels_int32, sampled_gt_inds,
num_classes, resolution):
mask_rois = []
mask_rois_num = []
tgt_masks = []
tgt_classes = []
mask_index = []
tgt_weights = []
for k in range(len(rois)):
labels_per_im = labels_int32[k]
# select rois labeled with foreground
fg_inds = paddle.nonzero(
paddle.logical_and(labels_per_im != -1, labels_per_im !=
num_classes))
has_fg = True
# generate fake roi if foreground is empty
if fg_inds.numel() == 0:
has_fg = False
fg_inds = paddle.ones([1, 1], dtype='int64')
inds_per_im = sampled_gt_inds[k]
inds_per_im = paddle.gather(inds_per_im, fg_inds)
rois_per_im = rois[k]
fg_rois = paddle.gather(rois_per_im, fg_inds)
# Copy the foreground roi to cpu
# to generate mask target with ground-truth
boxes = fg_rois.numpy()
gt_segms_per_im = gt_segms[k]
new_segm = []
inds_per_im = inds_per_im.numpy()
if len(gt_segms_per_im) > 0:
for i in inds_per_im:
new_segm.append(gt_segms_per_im[i])
fg_inds_new = fg_inds.reshape([-1]).numpy()
results = []
if len(gt_segms_per_im) > 0:
for j in range(fg_inds_new.shape[0]):
results.append(
rasterize_polygons_within_box(new_segm[j], boxes[j],
resolution))
else:
results.append(paddle.ones([resolution, resolution], dtype='int32'))
fg_classes = paddle.gather(labels_per_im, fg_inds)
weight = paddle.ones([fg_rois.shape[0]], dtype='float32')
if not has_fg:
# now all sampled classes are background
# which will cause error in loss calculation,
# make fake classes with weight of 0.
fg_classes = paddle.zeros([1], dtype='int32')
weight = weight - 1
tgt_mask = paddle.stack(results)
tgt_mask.stop_gradient = True
fg_rois.stop_gradient = True
mask_index.append(fg_inds)
mask_rois.append(fg_rois)
mask_rois_num.append(paddle.shape(fg_rois)[0:1])
tgt_classes.append(fg_classes)
tgt_masks.append(tgt_mask)
tgt_weights.append(weight)
mask_index = paddle.concat(mask_index)
mask_rois_num = paddle.concat(mask_rois_num)
tgt_classes = paddle.concat(tgt_classes, axis=0)
tgt_masks = paddle.concat(tgt_masks, axis=0)
tgt_weights = paddle.concat(tgt_weights, axis=0)
return mask_rois, mask_rois_num, tgt_classes, tgt_masks, mask_index, tgt_weights
def libra_sample_pos(max_overlaps, max_classes, pos_inds, num_expected):
if len(pos_inds) <= num_expected:
return pos_inds
else:
unique_gt_inds = np.unique(max_classes[pos_inds])
num_gts = len(unique_gt_inds)
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
sampled_inds = []
for i in unique_gt_inds:
inds = np.nonzero(max_classes == i)[0]
before_len = len(inds)
inds = list(set(inds) & set(pos_inds))
after_len = len(inds)
if len(inds) > num_per_gt:
inds = np.random.choice(inds, size=num_per_gt, replace=False)
sampled_inds.extend(list(inds)) # combine as a new sampler
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(set(pos_inds) - set(sampled_inds)))
assert len(sampled_inds) + len(extra_inds) == len(pos_inds), \
"sum of sampled_inds({}) and extra_inds({}) length must be equal with pos_inds({})!".format(
len(sampled_inds), len(extra_inds), len(pos_inds))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(
extra_inds, size=num_extra, replace=False)
sampled_inds.extend(extra_inds.tolist())
elif len(sampled_inds) > num_expected:
sampled_inds = np.random.choice(
sampled_inds, size=num_expected, replace=False)
return paddle.to_tensor(sampled_inds)
def libra_sample_via_interval(max_overlaps, full_set, num_expected, floor_thr,
num_bins, bg_thresh):
max_iou = max_overlaps.max()
iou_interval = (max_iou - floor_thr) / num_bins
per_num_expected = int(num_expected / num_bins)
sampled_inds = []
for i in range(num_bins):
start_iou = floor_thr + i * iou_interval
end_iou = floor_thr + (i + 1) * iou_interval
tmp_set = set(
np.where(
np.logical_and(max_overlaps >= start_iou, max_overlaps <
end_iou))[0])
tmp_inds = list(tmp_set & full_set)
if len(tmp_inds) > per_num_expected:
tmp_sampled_set = np.random.choice(
tmp_inds, size=per_num_expected, replace=False)
else:
tmp_sampled_set = np.array(tmp_inds, dtype=np.int32)
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(full_set - set(sampled_inds)))
assert len(sampled_inds) + len(extra_inds) == len(full_set), \
"sum of sampled_inds({}) and extra_inds({}) length must be equal with full_set({})!".format(
len(sampled_inds), len(extra_inds), len(full_set))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(extra_inds, num_extra, replace=False)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
def libra_sample_neg(max_overlaps,
max_classes,
neg_inds,
num_expected,
floor_thr=-1,
floor_fraction=0,
num_bins=3,
bg_thresh=0.5):
if len(neg_inds) <= num_expected:
return neg_inds
else:
# balance sampling for negative samples
neg_set = set(neg_inds.tolist())
if floor_thr > 0:
floor_set = set(
np.where(
np.logical_and(max_overlaps >= 0, max_overlaps < floor_thr))
[0])
iou_sampling_set = set(np.where(max_overlaps >= floor_thr)[0])
elif floor_thr == 0:
floor_set = set(np.where(max_overlaps == 0)[0])
iou_sampling_set = set(np.where(max_overlaps > floor_thr)[0])
else:
floor_set = set()
iou_sampling_set = set(np.where(max_overlaps > floor_thr)[0])
floor_thr = 0
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
num_expected_iou_sampling = int(num_expected * (1 - floor_fraction))
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
if num_bins >= 2:
iou_sampled_inds = libra_sample_via_interval(
max_overlaps,
set(iou_sampling_neg_inds), num_expected_iou_sampling,
floor_thr, num_bins, bg_thresh)
else:
iou_sampled_inds = np.random.choice(
iou_sampling_neg_inds,
size=num_expected_iou_sampling,
replace=False)
else:
iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=np.int32)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = np.random.choice(
floor_neg_inds, size=num_expected_floor, replace=False)
else:
sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int32)
sampled_inds = np.concatenate((sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(neg_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = np.random.choice(
extra_inds, size=num_extra, replace=False)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
return paddle.to_tensor(sampled_inds)
def libra_label_box(anchors, gt_boxes, gt_classes, positive_overlap,
negative_overlap, num_classes):
# TODO: use paddle API to speed up
gt_classes = gt_classes.numpy()
gt_overlaps = np.zeros((anchors.shape[0], num_classes))
matches = np.zeros((anchors.shape[0]), dtype=np.int32)
if len(gt_boxes) > 0:
proposal_to_gt_overlaps = bbox_overlaps(anchors, gt_boxes).numpy()
overlaps_argmax = proposal_to_gt_overlaps.argmax(axis=1)
overlaps_max = proposal_to_gt_overlaps.max(axis=1)
# Boxes with non-zero overlap with gt boxes
overlapped_boxes_ind = np.where(overlaps_max > 0)[0]
overlapped_boxes_gt_classes = gt_classes[overlaps_argmax[
overlapped_boxes_ind]]
for idx in range(len(overlapped_boxes_ind)):
gt_overlaps[overlapped_boxes_ind[idx], overlapped_boxes_gt_classes[
idx]] = overlaps_max[overlapped_boxes_ind[idx]]
matches[overlapped_boxes_ind[idx]] = overlaps_argmax[
overlapped_boxes_ind[idx]]
gt_overlaps = paddle.to_tensor(gt_overlaps)
matches = paddle.to_tensor(matches)
matched_vals = paddle.max(gt_overlaps, axis=1)
match_labels = paddle.full(matches.shape, -1, dtype='int32')
match_labels = paddle.where(matched_vals < negative_overlap,
paddle.zeros_like(match_labels), match_labels)
match_labels = paddle.where(matched_vals >= positive_overlap,
paddle.ones_like(match_labels), match_labels)
return matches, match_labels, matched_vals
def libra_sample_bbox(matches,
match_labels,
matched_vals,
gt_classes,
batch_size_per_im,
num_classes,
fg_fraction,
fg_thresh,
bg_thresh,
num_bins,
use_random=True,
is_cascade_rcnn=False):
rois_per_image = int(batch_size_per_im)
fg_rois_per_im = int(np.round(fg_fraction * rois_per_image))
bg_rois_per_im = rois_per_image - fg_rois_per_im
if is_cascade_rcnn:
fg_inds = paddle.nonzero(matched_vals >= fg_thresh)
bg_inds = paddle.nonzero(matched_vals < bg_thresh)
else:
matched_vals_np = matched_vals.numpy()
match_labels_np = match_labels.numpy()
# sample fg
fg_inds = paddle.nonzero(matched_vals >= fg_thresh).flatten()
fg_nums = int(np.minimum(fg_rois_per_im, fg_inds.shape[0]))
if (fg_inds.shape[0] > fg_nums) and use_random:
fg_inds = libra_sample_pos(matched_vals_np, match_labels_np,
fg_inds.numpy(), fg_rois_per_im)
fg_inds = fg_inds[:fg_nums]
# sample bg
bg_inds = paddle.nonzero(matched_vals < bg_thresh).flatten()
bg_nums = int(np.minimum(rois_per_image - fg_nums, bg_inds.shape[0]))
if (bg_inds.shape[0] > bg_nums) and use_random:
bg_inds = libra_sample_neg(
matched_vals_np,
match_labels_np,
bg_inds.numpy(),
bg_rois_per_im,
num_bins=num_bins,
bg_thresh=bg_thresh)
bg_inds = bg_inds[:bg_nums]
sampled_inds = paddle.concat([fg_inds, bg_inds])
gt_classes = paddle.gather(gt_classes, matches)
gt_classes = paddle.where(match_labels == 0,
paddle.ones_like(gt_classes) * num_classes,
gt_classes)
gt_classes = paddle.where(match_labels == -1,
paddle.ones_like(gt_classes) * -1, gt_classes)
sampled_gt_classes = paddle.gather(gt_classes, sampled_inds)
return sampled_inds, sampled_gt_classes
def libra_generate_proposal_target(rpn_rois,
gt_classes,
gt_boxes,
batch_size_per_im,
fg_fraction,
fg_thresh,
bg_thresh,
num_classes,
use_random=True,
is_cascade_rcnn=False,
max_overlaps=None,
num_bins=3):
rois_with_gt = []
tgt_labels = []
tgt_bboxes = []
sampled_max_overlaps = []
tgt_gt_inds = []
new_rois_num = []
for i, rpn_roi in enumerate(rpn_rois):
max_overlap = max_overlaps[i] if is_cascade_rcnn else None
gt_bbox = gt_boxes[i]
gt_class = paddle.squeeze(gt_classes[i], axis=-1)
if is_cascade_rcnn:
rpn_roi = filter_roi(rpn_roi, max_overlap)
bbox = paddle.concat([rpn_roi, gt_bbox])
# Step1: label bbox
matches, match_labels, matched_vals = libra_label_box(
bbox, gt_bbox, gt_class, fg_thresh, bg_thresh, num_classes)
# Step2: sample bbox
sampled_inds, sampled_gt_classes = libra_sample_bbox(
matches, match_labels, matched_vals, gt_class, batch_size_per_im,
num_classes, fg_fraction, fg_thresh, bg_thresh, num_bins,
use_random, is_cascade_rcnn)
# Step3: make output
rois_per_image = paddle.gather(bbox, sampled_inds)
sampled_gt_ind = paddle.gather(matches, sampled_inds)
sampled_bbox = paddle.gather(gt_bbox, sampled_gt_ind)
sampled_overlap = paddle.gather(matched_vals, sampled_inds)
rois_per_image.stop_gradient = True
sampled_gt_ind.stop_gradient = True
sampled_bbox.stop_gradient = True
sampled_overlap.stop_gradient = True
tgt_labels.append(sampled_gt_classes)
tgt_bboxes.append(sampled_bbox)
rois_with_gt.append(rois_per_image)
sampled_max_overlaps.append(sampled_overlap)
tgt_gt_inds.append(sampled_gt_ind)
new_rois_num.append(paddle.shape(sampled_inds)[0:1])
new_rois_num = paddle.concat(new_rois_num)
# rois_with_gt, tgt_labels, tgt_bboxes, tgt_gt_inds, new_rois_num
return rois_with_gt, tgt_labels, tgt_bboxes, tgt_gt_inds, new_rois_num
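# Illustrative sketch (added, not part of the original file): matching four
# anchors against a single ground-truth box with label_box. All values are
# toy assumptions.
if __name__ == '__main__':
    anchors = paddle.to_tensor(
        [[0., 0., 10., 10.], [0., 0., 9., 9.],
         [20., 20., 30., 30.], [5., 5., 15., 15.]])
    gt = paddle.to_tensor([[0., 0., 10., 10.]])
    matches, match_labels = label_box(
        anchors, gt, positive_overlap=0.7, negative_overlap=0.3,
        allow_low_quality=True, ignore_thresh=-1)
    # 1 = foreground, 0 = background, -1 = ignored
    print(matches.numpy(), match_labels.numpy())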
# ---- PaddleDetection/ppdet/modeling/proposal_generator/target.py ----
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register, serializable
from ppdet.modeling.ops import get_act_fn
from ..shape_spec import ShapeSpec
from ..backbones.csp_darknet import BaseConv
from ..backbones.cspresnet import RepVggBlock
from ppdet.modeling.transformers.detr_transformer import TransformerEncoder
from ..initializer import xavier_uniform_, linear_init_
from ..layers import MultiHeadAttention
from paddle import ParamAttr
from paddle.regularizer import L2Decay
__all__ = ['HybridEncoder']
class CSPRepLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
num_blocks=3,
expansion=1.0,
bias=False,
act="silu"):
super(CSPRepLayer, self).__init__()
hidden_channels = int(out_channels * expansion)
self.conv1 = BaseConv(
in_channels, hidden_channels, ksize=1, stride=1, bias=bias, act=act)
self.conv2 = BaseConv(
in_channels, hidden_channels, ksize=1, stride=1, bias=bias, act=act)
self.bottlenecks = nn.Sequential(*[
RepVggBlock(
hidden_channels, hidden_channels, act=act)
for _ in range(num_blocks)
])
if hidden_channels != out_channels:
self.conv3 = BaseConv(
hidden_channels,
out_channels,
ksize=1,
stride=1,
bias=bias,
act=act)
else:
self.conv3 = nn.Identity()
def forward(self, x):
x_1 = self.conv1(x)
x_1 = self.bottlenecks(x_1)
x_2 = self.conv2(x)
return self.conv3(x_1 + x_2)
@register
class TransformerLayer(nn.Layer):
def __init__(self,
d_model,
nhead,
dim_feedforward=1024,
dropout=0.,
activation="relu",
attn_dropout=None,
act_dropout=None,
normalize_before=False):
super(TransformerLayer, self).__init__()
attn_dropout = dropout if attn_dropout is None else attn_dropout
act_dropout = dropout if act_dropout is None else act_dropout
self.normalize_before = normalize_before
self.self_attn = MultiHeadAttention(d_model, nhead, attn_dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(act_dropout, mode="upscale_in_train")
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout, mode="upscale_in_train")
self.dropout2 = nn.Dropout(dropout, mode="upscale_in_train")
self.activation = getattr(F, activation)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.linear1)
linear_init_(self.linear2)
@staticmethod
def with_pos_embed(tensor, pos_embed):
return tensor if pos_embed is None else tensor + pos_embed
def forward(self, src, src_mask=None, pos_embed=None):
residual = src
if self.normalize_before:
src = self.norm1(src)
q = k = self.with_pos_embed(src, pos_embed)
src = self.self_attn(q, k, value=src, attn_mask=src_mask)
src = residual + self.dropout1(src)
if not self.normalize_before:
src = self.norm1(src)
residual = src
if self.normalize_before:
src = self.norm2(src)
src = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = residual + self.dropout2(src)
if not self.normalize_before:
src = self.norm2(src)
return src
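# Added shape check (illustrative): a post-norm layer preserves [B, L, C].
#
#     layer = TransformerLayer(d_model=256, nhead=8)
#     y = layer(paddle.rand([2, 100, 256]))   # -> [2, 100, 256]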
@register
@serializable
class HybridEncoder(nn.Layer):
__shared__ = ['depth_mult', 'act', 'trt', 'eval_size']
__inject__ = ['encoder_layer']
def __init__(self,
in_channels=[512, 1024, 2048],
feat_strides=[8, 16, 32],
hidden_dim=256,
use_encoder_idx=[2],
num_encoder_layers=1,
encoder_layer='TransformerLayer',
pe_temperature=10000,
expansion=1.0,
depth_mult=1.0,
act='silu',
trt=False,
eval_size=None):
super(HybridEncoder, self).__init__()
self.in_channels = in_channels
self.feat_strides = feat_strides
self.hidden_dim = hidden_dim
self.use_encoder_idx = use_encoder_idx
self.num_encoder_layers = num_encoder_layers
self.pe_temperature = pe_temperature
self.eval_size = eval_size
# channel projection
self.input_proj = nn.LayerList()
for in_channel in in_channels:
self.input_proj.append(
nn.Sequential(
nn.Conv2D(
in_channel, hidden_dim, kernel_size=1, bias_attr=False),
nn.BatchNorm2D(
hidden_dim,
weight_attr=ParamAttr(regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(regularizer=L2Decay(0.0)))))
# encoder transformer
self.encoder = nn.LayerList([
TransformerEncoder(encoder_layer, num_encoder_layers)
for _ in range(len(use_encoder_idx))
])
act = get_act_fn(
act, trt=trt) if act is None or isinstance(act,
(str, dict)) else act
# top-down fpn
self.lateral_convs = nn.LayerList()
self.fpn_blocks = nn.LayerList()
for idx in range(len(in_channels) - 1, 0, -1):
self.lateral_convs.append(
BaseConv(
hidden_dim, hidden_dim, 1, 1, act=act))
self.fpn_blocks.append(
CSPRepLayer(
hidden_dim * 2,
hidden_dim,
round(3 * depth_mult),
act=act,
expansion=expansion))
# bottom-up pan
self.downsample_convs = nn.LayerList()
self.pan_blocks = nn.LayerList()
for idx in range(len(in_channels) - 1):
self.downsample_convs.append(
BaseConv(
hidden_dim, hidden_dim, 3, stride=2, act=act))
self.pan_blocks.append(
CSPRepLayer(
hidden_dim * 2,
hidden_dim,
round(3 * depth_mult),
act=act,
expansion=expansion))
self._reset_parameters()
def _reset_parameters(self):
if self.eval_size:
for idx in self.use_encoder_idx:
stride = self.feat_strides[idx]
pos_embed = self.build_2d_sincos_position_embedding(
self.eval_size[1] // stride, self.eval_size[0] // stride,
self.hidden_dim, self.pe_temperature)
setattr(self, f'pos_embed{idx}', pos_embed)
@staticmethod
def build_2d_sincos_position_embedding(w,
h,
embed_dim=256,
temperature=10000.):
grid_w = paddle.arange(int(w), dtype=paddle.float32)
grid_h = paddle.arange(int(h), dtype=paddle.float32)
grid_w, grid_h = paddle.meshgrid(grid_w, grid_h)
assert embed_dim % 4 == 0, \
'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
pos_dim = embed_dim // 4
omega = paddle.arange(pos_dim, dtype=paddle.float32) / pos_dim
omega = 1. / (temperature**omega)
out_w = grid_w.flatten()[..., None] @ omega[None]
out_h = grid_h.flatten()[..., None] @ omega[None]
return paddle.concat(
[
paddle.sin(out_w), paddle.cos(out_w), paddle.sin(out_h),
paddle.cos(out_h)
],
axis=1)[None, :, :]
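# Added note (illustrative): for a w x h feature map the embedding has shape
# [1, w * h, embed_dim], matching the flattened token sequence:
#
#     pe = HybridEncoder.build_2d_sincos_position_embedding(20, 20, 256)
#     # pe.shape == [1, 400, 256]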
def forward(self, feats, for_mot=False, is_teacher=False):
assert len(feats) == len(self.in_channels)
# get projection features
proj_feats = [self.input_proj[i](feat) for i, feat in enumerate(feats)]
# encoder
if self.num_encoder_layers > 0:
for i, enc_ind in enumerate(self.use_encoder_idx):
h, w = proj_feats[enc_ind].shape[2:]
# flatten [B, C, H, W] to [B, HxW, C]
src_flatten = proj_feats[enc_ind].flatten(2).transpose(
[0, 2, 1])
if self.training or self.eval_size is None or is_teacher:
pos_embed = self.build_2d_sincos_position_embedding(
w, h, self.hidden_dim, self.pe_temperature)
else:
pos_embed = getattr(self, f'pos_embed{enc_ind}', None)
memory = self.encoder[i](src_flatten, pos_embed=pos_embed)
proj_feats[enc_ind] = memory.transpose([0, 2, 1]).reshape(
[-1, self.hidden_dim, h, w])
# top-down fpn
inner_outs = [proj_feats[-1]]
for idx in range(len(self.in_channels) - 1, 0, -1):
feat_high = inner_outs[0]
feat_low = proj_feats[idx - 1]
feat_high = self.lateral_convs[len(self.in_channels) - 1 - idx](
    feat_high)
inner_outs[0] = feat_high
upsample_feat = F.interpolate(
    feat_high, scale_factor=2., mode="nearest")
inner_out = self.fpn_blocks[len(self.in_channels) - 1 - idx](
paddle.concat(
[upsample_feat, feat_low], axis=1))
inner_outs.insert(0, inner_out)
# bottom-up pan
outs = [inner_outs[0]]
for idx in range(len(self.in_channels) - 1):
feat_low = outs[-1]
feat_high = inner_outs[idx + 1]
downsample_feat = self.downsample_convs[idx](feat_low)
out = self.pan_blocks[idx](paddle.concat(
    [downsample_feat, feat_high], axis=1))
outs.append(out)
return outs
@classmethod
def from_config(cls, cfg, input_shape):
return {
'in_channels': [i.channels for i in input_shape],
'feat_strides': [i.stride for i in input_shape]
}
@property
def out_shape(self):
return [
ShapeSpec(
channels=self.hidden_dim, stride=self.feat_strides[idx])
for idx in range(len(self.in_channels))
]
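# Illustrative usage sketch (added): outside the config system the encoder
# layer must be passed as an instance rather than as the registered name.
#
#     enc = HybridEncoder(
#         in_channels=[512, 1024, 2048],
#         encoder_layer=TransformerLayer(d_model=256, nhead=8))
#     feats = [paddle.rand([1, 512, 80, 80]),
#              paddle.rand([1, 1024, 40, 40]),
#              paddle.rand([1, 2048, 20, 20])]
#     outs = enc(feats)   # three maps, each with hidden_dim=256 channels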
# ---- PaddleDetection/ppdet/modeling/transformers/hybrid_encoder.py ----
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
from paddle.utils import try_import
from ppdet.core.workspace import register, serializable
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
def print_prune_params(model):
model_dict = model.state_dict()
for key in model_dict.keys():
weight_name = model_dict[key].name
logger.info('Parameter name: {}, shape: {}'.format(
weight_name, model_dict[key].shape))
@register
@serializable
class Pruner(object):
def __init__(self,
criterion,
pruned_params,
pruned_ratios,
print_params=False):
super(Pruner, self).__init__()
assert criterion in ['l1_norm', 'fpgm'], \
"unsupported prune criterion: {}".format(criterion)
self.criterion = criterion
self.pruned_params = pruned_params
self.pruned_ratios = pruned_ratios
self.print_params = print_params
def __call__(self, model):
# FIXME: adapt to network graph when Training and inference are
# inconsistent, now only supports prune inference network graph.
model.eval()
paddleslim = try_import('paddleslim')
from paddleslim.analysis import dygraph_flops as flops
input_spec = [{
"image": paddle.ones(
shape=[1, 3, 640, 640], dtype='float32'),
"im_shape": paddle.full(
[1, 2], 640, dtype='float32'),
"scale_factor": paddle.ones(
shape=[1, 2], dtype='float32')
}]
if self.print_params:
print_prune_params(model)
ori_flops = flops(model, input_spec) / (1000**3)
logger.info("FLOPs before pruning: {}GFLOPs".format(ori_flops))
if self.criterion == 'fpgm':
pruner = paddleslim.dygraph.FPGMFilterPruner(model, input_spec)
elif self.criterion == 'l1_norm':
pruner = paddleslim.dygraph.L1NormFilterPruner(model, input_spec)
logger.info("pruned params: {}".format(self.pruned_params))
pruned_ratios = [float(n) for n in self.pruned_ratios]
ratios = {}
for i, param in enumerate(self.pruned_params):
ratios[param] = pruned_ratios[i]
pruner.prune_vars(ratios, [0])
pruned_flops = flops(model, input_spec) / (1000**3)
logger.info("FLOPs after pruning: {}GFLOPs; pruned ratio: {}".format(
pruned_flops, (ori_flops - pruned_flops) / ori_flops))
return model
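# Added example (illustrative; parameter names are hypothetical): a YAML
# snippet that would drive this Pruner through the ppdet config system.
#
#   Pruner:
#     criterion: fpgm
#     pruned_params: ['conv2d_1.w_0', 'conv2d_2.w_0']
#     pruned_ratios: [0.3, 0.3]
#     print_params: False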
@register
@serializable
class PrunerQAT(object):
def __init__(self, criterion, pruned_params, pruned_ratios,
print_prune_params, quant_config, print_qat_model):
super(PrunerQAT, self).__init__()
assert criterion in ['l1_norm', 'fpgm'], \
"unsupported prune criterion: {}".format(criterion)
# Pruner hyperparameter
self.criterion = criterion
self.pruned_params = pruned_params
self.pruned_ratios = pruned_ratios
self.print_prune_params = print_prune_params
# QAT hyperparameter
self.quant_config = quant_config
self.print_qat_model = print_qat_model
def __call__(self, model):
# FIXME: adapt to network graph when Training and inference are
# inconsistent, now only supports prune inference network graph.
model.eval()
paddleslim = try_import('paddleslim')
from paddleslim.analysis import dygraph_flops as flops
input_spec = [{
"image": paddle.ones(
shape=[1, 3, 640, 640], dtype='float32'),
"im_shape": paddle.full(
[1, 2], 640, dtype='float32'),
"scale_factor": paddle.ones(
shape=[1, 2], dtype='float32')
}]
if self.print_prune_params:
print_prune_params(model)
ori_flops = flops(model, input_spec) / (1000**3)
logger.info("FLOPs before pruning: {}GFLOPs".format(ori_flops))
if self.criterion == 'fpgm':
pruner = paddleslim.dygraph.FPGMFilterPruner(model, input_spec)
elif self.criterion == 'l1_norm':
pruner = paddleslim.dygraph.L1NormFilterPruner(model, input_spec)
logger.info("pruned params: {}".format(self.pruned_params))
pruned_ratios = [float(n) for n in self.pruned_ratios]
ratios = {}
for i, param in enumerate(self.pruned_params):
ratios[param] = pruned_ratios[i]
pruner.prune_vars(ratios, [0])
pruned_flops = flops(model, input_spec) / (1000**3)
logger.info("FLOPs after pruning: {}GFLOPs; pruned ratio: {}".format(
pruned_flops, (ori_flops - pruned_flops) / ori_flops))
self.quanter = paddleslim.dygraph.quant.QAT(config=self.quant_config)
self.quanter.quantize(model)
if self.print_qat_model:
logger.info("Quantized model:")
logger.info(model)
return model
def save_quantized_model(self, layer, path, input_spec=None, **config):
self.quanter.save_quantized_model(
model=layer, path=path, input_spec=input_spec, **config)
# ---- PaddleDetection/ppdet/slim/prune.py ----
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import cv2
import math
from scipy import ndimage  # used by draw_segm when draw_box=False
from .colormap import colormap
from ppdet.utils.logger import setup_logger
from ppdet.utils.compact import imagedraw_textsize_c
from ppdet.utils.download import get_path
logger = setup_logger(__name__)
__all__ = ['visualize_results']
def visualize_results(image,
bbox_res,
mask_res,
segm_res,
keypoint_res,
pose3d_res,
im_id,
catid2name,
threshold=0.5):
"""
Visualize bbox and mask results
"""
if bbox_res is not None:
image = draw_bbox(image, im_id, catid2name, bbox_res, threshold)
if mask_res is not None:
image = draw_mask(image, im_id, mask_res, threshold)
if segm_res is not None:
image = draw_segm(image, im_id, catid2name, segm_res, threshold)
if keypoint_res is not None:
image = draw_pose(image, keypoint_res, threshold)
if pose3d_res is not None:
pose3d = np.array(pose3d_res[0]['pose3d']) * 1000
image = draw_pose3d(image, pose3d, visual_thread=threshold)
return image
def draw_mask(image, im_id, segms, threshold, alpha=0.7):
"""
Draw mask on image
"""
mask_color_id = 0
w_ratio = .4
color_list = colormap(rgb=True)
img_array = np.array(image).astype('float32')
for dt in np.array(segms):
if im_id != dt['image_id']:
continue
segm, score = dt['segmentation'], dt['score']
if score < threshold:
continue
import pycocotools.mask as mask_util
mask = mask_util.decode(segm) * 255
color_mask = color_list[mask_color_id % len(color_list), 0:3]
mask_color_id += 1
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
img_array[idx[0], idx[1], :] *= 1.0 - alpha
img_array[idx[0], idx[1], :] += alpha * color_mask
return Image.fromarray(img_array.astype('uint8'))
def draw_bbox(image, im_id, catid2name, bboxes, threshold):
"""
Draw bbox on image
"""
font_url = "https://paddledet.bj.bcebos.com/simfang.ttf"
font_path, _ = get_path(font_url, "~/.cache/paddle/")
font_size = 18
font = ImageFont.truetype(font_path, font_size, encoding="utf-8")
draw = ImageDraw.Draw(image)
catid2color = {}
color_list = colormap(rgb=True)[:40]
for dt in np.array(bboxes):
if im_id != dt['image_id']:
continue
catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
if score < threshold:
continue
if catid not in catid2color:
idx = np.random.randint(len(color_list))
catid2color[catid] = color_list[idx]
color = tuple(catid2color[catid])
# draw bbox
if len(bbox) == 4:
# draw bbox
xmin, ymin, w, h = bbox
xmax = xmin + w
ymax = ymin + h
draw.line(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),
(xmin, ymin)],
width=2,
fill=color)
elif len(bbox) == 8:
x1, y1, x2, y2, x3, y3, x4, y4 = bbox
draw.line(
[(x1, y1), (x2, y2), (x3, y3), (x4, y4), (x1, y1)],
width=2,
fill=color)
xmin = min(x1, x2, x3, x4)
ymin = min(y1, y2, y3, y4)
else:
logger.error('the shape of bbox must be [M, 4] or [M, 8]!')
# draw label
text = "{} {:.2f}".format(catid2name[catid], score)
tw, th = imagedraw_textsize_c(draw, text, font=font)
draw.rectangle(
[(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill=color)
draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255), font=font)
return image
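# Added example (illustrative): the expected record format for bbox results.
#
#     bbox_res = [{'image_id': 0, 'category_id': 1,
#                  'bbox': [50., 40., 120., 90.],   # [x, y, w, h]
#                  'score': 0.92}]
#     image = draw_bbox(image, 0, {1: 'person'}, bbox_res, threshold=0.5)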
def save_result(save_path, results, catid2name, threshold):
"""
save result as txt
"""
img_id = int(results["im_id"])
with open(save_path, 'w') as f:
if "bbox_res" in results:
for dt in results["bbox_res"]:
catid, bbox, score = dt['category_id'], dt['bbox'], dt['score']
if score < threshold:
continue
# each bbox result as a line
# for rbox: classname score x1 y1 x2 y2 x3 y3 x4 y4
# for bbox: classname score x1 y1 w h
bbox_pred = '{} {} '.format(catid2name[catid],
score) + ' '.join(
[str(e) for e in bbox])
f.write(bbox_pred + '\n')
elif "keypoint_res" in results:
for dt in results["keypoint_res"]:
kpts = dt['keypoints']
scores = dt['score']
keypoint_pred = [img_id, scores, kpts]
print(keypoint_pred, file=f)
else:
print("No valid results found, skip txt save")
def draw_segm(image,
im_id,
catid2name,
segms,
threshold,
alpha=0.7,
draw_box=True):
"""
Draw segmentation on image
"""
mask_color_id = 0
w_ratio = .4
color_list = colormap(rgb=True)
img_array = np.array(image).astype('float32')
for dt in np.array(segms):
if im_id != dt['image_id']:
continue
segm, score, catid = dt['segmentation'], dt['score'], dt['category_id']
if score < threshold:
continue
import pycocotools.mask as mask_util
mask = mask_util.decode(segm) * 255
color_mask = color_list[mask_color_id % len(color_list), 0:3]
mask_color_id += 1
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
img_array[idx[0], idx[1], :] *= 1.0 - alpha
img_array[idx[0], idx[1], :] += alpha * color_mask
if not draw_box:
center_y, center_x = ndimage.center_of_mass(mask)
label_text = "{}".format(catid2name[catid])
vis_pos = (max(int(center_x) - 10, 0), int(center_y))
cv2.putText(img_array, label_text, vis_pos,
cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 255, 255))
else:
mask = mask_util.decode(segm) * 255
sum_x = np.sum(mask, axis=0)
x = np.where(sum_x > 0.5)[0]
sum_y = np.sum(mask, axis=1)
y = np.where(sum_y > 0.5)[0]
x0, x1, y0, y1 = x[0], x[-1], y[0], y[-1]
cv2.rectangle(img_array, (x0, y0), (x1, y1),
tuple(color_mask.astype('int32').tolist()), 1)
bbox_text = '%s %.2f' % (catid2name[catid], score)
t_size = cv2.getTextSize(bbox_text, 0, 0.3, thickness=1)[0]
cv2.rectangle(img_array, (x0, y0), (x0 + t_size[0],
y0 - t_size[1] - 3),
tuple(color_mask.astype('int32').tolist()), -1)
cv2.putText(
img_array,
bbox_text, (x0, y0 - 2),
cv2.FONT_HERSHEY_SIMPLEX,
0.3, (0, 0, 0),
1,
lineType=cv2.LINE_AA)
return Image.fromarray(img_array.astype('uint8'))
def draw_pose(image,
results,
visual_thread=0.6,
save_name='pose.jpg',
save_dir='output',
returnimg=False,
ids=None):
try:
import matplotlib.pyplot as plt
import matplotlib
plt.switch_backend('agg')
except Exception as e:
logger.error('Matplotlib not found, please install matplotlib.'
'for example: `pip install matplotlib`.')
raise e
skeletons = np.array([item['keypoints'] for item in results])
kpt_nums = 17
if len(skeletons) > 0:
kpt_nums = int(skeletons.shape[1] / 3)
skeletons = skeletons.reshape(-1, kpt_nums, 3)
if kpt_nums == 17: #plot coco keypoint
EDGES = [(0, 1), (0, 2), (1, 3), (2, 4), (3, 5), (4, 6), (5, 7), (6, 8),
(7, 9), (8, 10), (5, 11), (6, 12), (11, 13), (12, 14),
(13, 15), (14, 16), (11, 12)]
else: #plot mpii keypoint
EDGES = [(0, 1), (1, 2), (3, 4), (4, 5), (2, 6), (3, 6), (6, 7), (7, 8),
(8, 9), (10, 11), (11, 12), (13, 14), (14, 15), (8, 12),
(8, 13)]
NUM_EDGES = len(EDGES)
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
cmap = matplotlib.cm.get_cmap('hsv')
plt.figure()
img = np.array(image).astype('float32')
color_set = results['colors'] if 'colors' in results else None
if 'bbox' in results and ids is None:
bboxs = results['bbox']
for j, rect in enumerate(bboxs):
xmin, ymin, xmax, ymax = rect
color = colors[0] if color_set is None else colors[color_set[j] %
len(colors)]
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 1)
canvas = img.copy()
for i in range(kpt_nums):
for j in range(len(skeletons)):
if skeletons[j][i, 2] < visual_thread:
continue
if ids is None:
color = colors[i] if color_set is None else colors[color_set[j]
%
len(colors)]
else:
color = get_color(ids[j])
cv2.circle(
canvas,
tuple(skeletons[j][i, 0:2].astype('int32')),
2,
color,
thickness=-1)
to_plot = cv2.addWeighted(img, 0.3, canvas, 0.7, 0)
fig = matplotlib.pyplot.gcf()
stickwidth = 2
for i in range(NUM_EDGES):
for j in range(len(skeletons)):
edge = EDGES[i]
if skeletons[j][edge[0], 2] < visual_thread or skeletons[j][edge[
1], 2] < visual_thread:
continue
cur_canvas = canvas.copy()
X = [skeletons[j][edge[0], 1], skeletons[j][edge[1], 1]]
Y = [skeletons[j][edge[0], 0], skeletons[j][edge[1], 0]]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY), int(mX)),
(int(length / 2), stickwidth),
int(angle), 0, 360, 1)
if ids is None:
color = colors[i] if color_set is None else colors[color_set[j]
%
len(colors)]
else:
color = get_color(ids[j])
cv2.fillConvexPoly(cur_canvas, polygon, color)
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
image = Image.fromarray(canvas.astype('uint8'))
plt.close()
return image
def draw_pose3d(image,
pose3d,
pose2d=None,
visual_thread=0.6,
save_name='pose3d.jpg',
returnimg=True):
try:
import matplotlib.pyplot as plt
import matplotlib
plt.switch_backend('agg')
except Exception as e:
logger.error('Matplotlib not found, please install matplotlib.'
'for example: `pip install matplotlib`.')
raise e
if pose3d.shape[0] == 24:
joints_connectivity_dict = [
[0, 1, 0], [1, 2, 0], [5, 4, 1], [4, 3, 1], [2, 3, 0], [2, 14, 1],
[3, 14, 1], [14, 16, 1], [15, 16, 1], [15, 12, 1], [6, 7, 0],
[7, 8, 0], [11, 10, 1], [10, 9, 1], [8, 12, 0], [9, 12, 1],
[12, 19, 1], [19, 18, 1], [19, 20, 0], [19, 21, 1], [22, 20, 0],
[23, 21, 1]
]
elif pose3d.shape[0] == 14:
joints_connectivity_dict = [
[0, 1, 0], [1, 2, 0], [5, 4, 1], [4, 3, 1], [2, 3, 0], [2, 12, 0],
[3, 12, 1], [6, 7, 0], [7, 8, 0], [11, 10, 1], [10, 9, 1],
[8, 12, 0], [9, 12, 1], [12, 13, 1]
]
else:
print(
    "undefined joint number: {}, cannot visualize because the joint connectivity is unknown".
    format(pose3d.shape[0]))
return
def draw3Dpose(pose3d,
ax,
lcolor="#3498db",
rcolor="#e74c3c",
add_labels=False):
# pose3d = orthographic_projection(pose3d, cam)
for i in joints_connectivity_dict:
x, y, z = [
np.array([pose3d[i[0], j], pose3d[i[1], j]]) for j in range(3)
]
ax.plot(-x, -z, -y, lw=2, c=lcolor if i[2] else rcolor)
RADIUS = 1000
center_xy = 2 if pose3d.shape[0] == 14 else 14
x, y, z = pose3d[center_xy, 0], pose3d[center_xy, 1], pose3d[center_xy,
2]
ax.set_xlim3d([-RADIUS + x, RADIUS + x])
ax.set_ylim3d([-RADIUS + y, RADIUS + y])
ax.set_zlim3d([-RADIUS + z, RADIUS + z])
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
def draw2Dpose(pose2d,
ax,
lcolor="#3498db",
rcolor="#e74c3c",
add_labels=False):
for i in joints_connectivity_dict:
if pose2d[i[0], 2] and pose2d[i[1], 2]:
x, y = [
np.array([pose2d[i[0], j], pose2d[i[1], j]])
for j in range(2)
]
ax.plot(x, y, 0, lw=2, c=lcolor if i[2] else rcolor)
def draw_img_pose(pose3d,
pose2d=None,
frame=None,
figsize=(12, 12),
savepath=None):
fig = plt.figure(figsize=figsize, dpi=80)
# fig.clear()
fig.tight_layout()
ax = fig.add_subplot(221)
if frame is not None:
ax.imshow(frame, interpolation='nearest')
if pose2d is not None:
draw2Dpose(pose2d, ax)
ax = fig.add_subplot(222, projection='3d')
ax.view_init(45, 45)
draw3Dpose(pose3d, ax)
ax = fig.add_subplot(223, projection='3d')
ax.view_init(0, 0)
draw3Dpose(pose3d, ax)
ax = fig.add_subplot(224, projection='3d')
ax.view_init(0, 90)
draw3Dpose(pose3d, ax)
if savepath is not None:
plt.savefig(savepath)
plt.close()
else:
return fig
def fig2data(fig):
"""
fig = plt.figure()
image = fig2data(fig)
        @brief Convert a Matplotlib figure to a 3D numpy array with RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
        buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
        buf.shape = (w, h, 4)
        # canvas.tostring_argb gives a pixmap in ARGB order; roll the alpha
        # channel to the end to get RGBA
        buf = np.roll(buf, 3, axis=2)
        image = Image.frombytes("RGBA", (w, h), buf.tobytes())
return image.convert("RGB")
fig = draw_img_pose(pose3d, pose2d, frame=image)
data = fig2data(fig)
if returnimg is False:
data.save(save_name)
else:
return data
| PaddleDetection/ppdet/utils/visualizer.py/0 | {
"file_path": "PaddleDetection/ppdet/utils/visualizer.py",
"repo_id": "PaddleDetection",
"token_count": 9441
} | 82 |
_BASE_: [
'../../../configs/datasets/spine_coco.yml',
'../../../configs/runtime.yml',
'../../../configs/rotate/s2anet/_base_/s2anet_optimizer_2x.yml',
'../../../configs/rotate/s2anet/_base_/s2anet.yml',
'../../../configs/rotate/s2anet/_base_/s2anet_reader.yml',
]
pretrain_weights: https://paddledet.bj.bcebos.com/models/pretrained/ResNet50_vd_ssld_v2_pretrained.pdparams
weights: output/s2anet_alignconv_2x_dota/model_final
S2ANetHead:
anchor_strides: [8, 16, 32, 64, 128]
anchor_scales: [4]
anchor_ratios: [1.0]
anchor_assign: RBoxAssigner
stacked_convs: 2
feat_in: 256
feat_out: 256
num_classes: 9
  align_conv_type: 'AlignConv' # one of: AlignConv, Conv
align_conv_size: 3
use_sigmoid_cls: True
reg_loss_weight: [1.0, 1.0, 1.0, 1.0, 1.1]
cls_loss_weight: [1.1, 1.05]
reg_loss_type: 'l1'
| PaddleDetection/test_tipc/configs/rotate/s2anet_alignconv_2x_spine.yml/0 | {
"file_path": "PaddleDetection/test_tipc/configs/rotate/s2anet_alignconv_2x_spine.yml",
"repo_id": "PaddleDetection",
"token_count": 393
} | 83 |
#!/bin/bash
source test_tipc/utils_func.sh
FILENAME=$1
# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer'
# 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer']
MODE=$2
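# Example invocation (the config path below is illustrative):
#   bash test_tipc/test_train_inference_python.sh \
#       test_tipc/configs/yolov3/train_infer_python.txt lite_train_lite_infer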
# parse params
dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
# The training params
model_name=$(func_parser_value "${lines[1]}")
echo "ppdet python_infer: ${model_name}"
python=$(func_parser_value "${lines[2]}")
gpu_list=$(func_parser_value "${lines[3]}")
train_use_gpu_key=$(func_parser_key "${lines[4]}")
train_use_gpu_value=$(func_parser_value "${lines[4]}")
autocast_list=$(func_parser_value "${lines[5]}")
autocast_key=$(func_parser_key "${lines[5]}")
epoch_key=$(func_parser_key "${lines[6]}")
epoch_num=$(func_parser_params "${lines[6]}")
save_model_key=$(func_parser_key "${lines[7]}")
train_batch_key=$(func_parser_key "${lines[8]}")
train_batch_value=$(func_parser_params "${lines[8]}")
pretrain_model_key=$(func_parser_key "${lines[9]}")
pretrain_model_value=$(func_parser_value "${lines[9]}")
train_model_name=$(func_parser_value "${lines[10]}")
train_infer_img_dir=$(func_parser_value "${lines[11]}")
train_param_key1=$(func_parser_key "${lines[12]}")
train_param_value1=$(func_parser_value "${lines[12]}")
trainer_list=$(func_parser_value "${lines[14]}")
norm_key=$(func_parser_key "${lines[15]}")
norm_trainer=$(func_parser_value "${lines[15]}")
pact_key=$(func_parser_key "${lines[16]}")
pact_trainer=$(func_parser_value "${lines[16]}")
fpgm_key=$(func_parser_key "${lines[17]}")
fpgm_trainer=$(func_parser_value "${lines[17]}")
distill_key=$(func_parser_key "${lines[18]}")
distill_trainer=$(func_parser_value "${lines[18]}")
trainer_key1=$(func_parser_key "${lines[19]}")
trainer_value1=$(func_parser_value "${lines[19]}")
trainer_key2=$(func_parser_key "${lines[20]}")
trainer_value2=$(func_parser_value "${lines[20]}")
# eval params
eval_py=$(func_parser_value "${lines[23]}")
eval_key1=$(func_parser_key "${lines[24]}")
eval_value1=$(func_parser_value "${lines[24]}")
# export params
save_export_key=$(func_parser_key "${lines[27]}")
save_export_value=$(func_parser_value "${lines[27]}")
export_weight_key=$(func_parser_key "${lines[28]}")
export_weight_value=$(func_parser_value "${lines[28]}")
norm_export=$(func_parser_value "${lines[29]}")
pact_export=$(func_parser_value "${lines[30]}")
fpgm_export=$(func_parser_value "${lines[31]}")
distill_export=$(func_parser_value "${lines[32]}")
export_key1=$(func_parser_key "${lines[33]}")
export_value1=$(func_parser_value "${lines[33]}")
export_onnx_key=$(func_parser_key "${lines[34]}")
export_value2=$(func_parser_value "${lines[34]}")
kl_quant_export=$(func_parser_value "${lines[35]}")
# parser inference model
infer_mode_list=$(func_parser_value "${lines[37]}")
infer_is_quant_list=$(func_parser_value "${lines[38]}")
# parser inference
inference_py=$(func_parser_value "${lines[39]}")
use_gpu_key=$(func_parser_key "${lines[40]}")
use_gpu_list=$(func_parser_value "${lines[40]}")
use_mkldnn_key=$(func_parser_key "${lines[41]}")
use_mkldnn_list=$(func_parser_value "${lines[41]}")
cpu_threads_key=$(func_parser_key "${lines[42]}")
cpu_threads_list=$(func_parser_value "${lines[42]}")
batch_size_key=$(func_parser_key "${lines[43]}")
batch_size_list=$(func_parser_value "${lines[43]}")
use_trt_key=$(func_parser_key "${lines[44]}")
use_trt_list=$(func_parser_value "${lines[44]}")
precision_key=$(func_parser_key "${lines[45]}")
precision_list=$(func_parser_value "${lines[45]}")
infer_model_key=$(func_parser_key "${lines[46]}")
image_dir_key=$(func_parser_key "${lines[47]}")
infer_img_dir=$(func_parser_value "${lines[47]}")
save_log_key=$(func_parser_key "${lines[48]}")
benchmark_key=$(func_parser_key "${lines[49]}")
benchmark_value=$(func_parser_value "${lines[49]}")
infer_key1=$(func_parser_key "${lines[50]}")
infer_value1=$(func_parser_value "${lines[50]}")
LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"
line_num=`grep -n -w "to_static_train_benchmark_params" $FILENAME | cut -d ":" -f 1`
to_static_key=$(func_parser_key "${lines[line_num]}")
to_static_trainer=$(func_parser_value "${lines[line_num]}")
function func_inference(){
IFS='|'
_python=$1
_script=$2
_model_dir=$3
_log_path=$4
_img_dir=$5
_flag_quant=$6
_gpu=$7
# inference
for use_gpu in ${use_gpu_list[*]}; do
if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
for use_mkldnn in ${use_mkldnn_list[*]}; do
if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
continue
fi
for threads in ${cpu_threads_list[*]}; do
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/python_infer_cpu_gpus_${gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
done
elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
for precision in ${precision_list[*]}; do
if [[ ${precision} != "paddle" ]]; then
if [[ ${_flag_quant} = "False" ]] && [[ ${precision} = "trt_int8" ]]; then
continue
fi
if [[ ${_flag_quant} = "True" ]] && [[ ${precision} != "trt_int8" ]]; then
continue
fi
fi
for batch_size in ${batch_size_list[*]}; do
_save_log_path="${_log_path}/python_infer_gpu_gpus_${gpu}_mode_${precision}_batchsize_${batch_size}.log"
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
set_precision=$(func_set_params "${precision_key}" "${precision}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
done
done
else
echo "Does not support hardware other than CPU and GPU Currently!"
fi
done
}
if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
# set CUDA_VISIBLE_DEVICES
GPUID=$3
if [ ${#GPUID} -le 0 ];then
env=" "
else
env="export CUDA_VISIBLE_DEVICES=${GPUID}"
fi
eval $env
Count=0
gpu=0
IFS="|"
infer_quant_flag=(${infer_is_quant_list})
for infer_mode in ${infer_mode_list[*]}; do
if [ ${infer_mode} = "null" ]; then
continue
fi
if [ ${MODE} = "klquant_whole_infer" ] && [ ${infer_mode} != "kl_quant" ]; then
continue
fi
if [ ${MODE} = "whole_infer" ] && [ ${infer_mode} = "kl_quant" ]; then
continue
fi
# run export
case ${infer_mode} in
norm) run_export=${norm_export} ;;
pact) run_export=${pact_export} ;;
fpgm) run_export=${fpgm_export} ;;
distill) run_export=${distill_export} ;;
kl_quant) run_export=${kl_quant_export} ;;
*) echo "Undefined infer_mode!"; exit 1;
esac
set_export_weight=$(func_set_params "${export_weight_key}" "${export_weight_value}")
set_save_export_dir=$(func_set_params "${save_export_key}" "${save_export_value}")
set_filename=$(func_set_params "filename" "${model_name}")
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
echo $export_cmd
eval $export_cmd
status_check $? "${export_cmd}" "${status_log}" "${model_name}"
#run inference
save_export_model_dir="${save_export_value}/${model_name}"
is_quant=${infer_quant_flag[Count]}
func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} "{gpu}"
Count=$((${Count} + 1))
done
else
IFS="|"
Count=0
for gpu in ${gpu_list[*]}; do
use_gpu=${train_use_gpu_value}
Count=$((${Count} + 1))
ips=""
if [ ${gpu} = "-1" ];then
env=""
use_gpu=False
elif [ ${#gpu} -le 1 ];then
env="export CUDA_VISIBLE_DEVICES=${gpu}"
eval ${env}
elif [ ${#gpu} -le 15 ];then
IFS=","
array=(${gpu})
env="export CUDA_VISIBLE_DEVICES=${array[0]}"
IFS="|"
else
IFS=";"
array=(${gpu})
ips=${array[0]}
gpu=${array[1]}
IFS="|"
env=" "
fi
for autocast in ${autocast_list[*]}; do
for trainer in ${trainer_list[*]}; do
flag_quant=False
set_to_static=""
if [ ${trainer} = "${norm_key}" ]; then
run_train=${norm_trainer}
run_export=${norm_export}
elif [ ${trainer} = "${pact_key}" ]; then
run_train=${pact_trainer}
run_export=${pact_export}
flag_quant=True
elif [ ${trainer} = "${fpgm_key}" ]; then
run_train=${fpgm_trainer}
run_export=${fpgm_export}
elif [ ${trainer} = "${distill_key}" ]; then
run_train=${distill_trainer}
run_export=${distill_export}
elif [ ${trainer} = "${trainer_key1}" ]; then
run_train=${trainer_value1}
run_export=${export_value1}
elif [ ${trainer} = "${trainer_key2}" ]; then
run_train=${trainer_value2}
run_export=${export_value2}
elif [ ${trainer} = "${to_static_key}" ]; then
run_train=${norm_trainer}
run_export=${norm_export}
set_to_static=${to_static_trainer}
else
continue
fi
if [ ${run_train} = "null" ]; then
continue
fi
set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}")
set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}")
set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}")
set_filename=$(func_set_params "filename" "${model_name}")
set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}")
set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}")
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
if [ ${autocast} = "amp" ]; then
set_autocast="--amp"
set_amp_level="amp_level=O1"
elif [ ${autocast} = "fp16" ]; then
set_autocast="--amp"
set_amp_level="amp_level=O2"
else
set_autocast=" "
set_amp_level=" "
fi
if [ ${MODE} = "benchmark_train" ]; then
set_shuffle="TrainReader.shuffle=False"
set_enable_ce="--enable_ce=True"
else
set_shuffle=" "
set_enable_ce=" "
fi
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
nodes="1"
if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
cmd="${python} ${run_train} LearningRate.base_lr=0.0001 log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_to_static} ${set_train_params1}"
elif [ ${#ips} -le 15 ];then # train with multi-gpu
cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_to_static} ${set_train_params1}"
else # train with multi-machine
IFS=","
ips_array=(${ips})
nodes=${#ips_array[@]}
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
IFS="|"
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} log_iter=1 ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_batchsize} ${set_filename} ${set_shuffle} ${set_amp_level} ${set_enable_ce} ${set_autocast} ${set_to_static} ${set_train_params1}"
fi
# run train
train_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}.log"
eval "timeout 5m ${cmd} > ${train_log_path} 2>&1"
last_status=$?
cat ${train_log_path}
status_check $last_status "${cmd}" "${status_log}" "${model_name}" "${train_log_path}"
set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
# run eval
if [ ${eval_py} != "null" ]; then
set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
eval_cmd="${python} ${eval_py} ${set_eval_trained_weight} ${set_use_gpu} ${set_eval_params1}"
eval "${eval_cmd} > ${eval_log_path} 2>&1"
last_status=$?
cat ${eval_log_path}
status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
fi
# run export model
if [ ${run_export} != "null" ]; then
save_export_model_dir="${save_log}/${model_name}"
set_export_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
set_save_export_dir=$(func_set_params "${save_export_key}" "${save_log}")
if [ ${export_onnx_key} = "export_onnx" ]; then
# run export onnx model for rcnn
export_log_path_onnx=${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_onnx_export.log
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} export_onnx=True ${set_save_export_dir} >${export_log_path_onnx} 2>&1"
eval $export_cmd
status_check $? "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path_onnx}"
# copy model for inference benchmark
eval "cp ${save_export_model_dir}/* ${save_log}/"
fi
# run export model
export_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_export.log"
export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
eval "${export_cmd} > ${export_log_path} 2>&1"
last_status=$?
cat ${export_log_path}
status_check $last_status "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
#run inference
if [ ${export_onnx_key} != "export_onnx" ]; then
# copy model for inference benchmark
eval "cp ${save_export_model_dir}/* ${save_log}/"
fi
eval $env
func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" "{gpu}"
eval "unset CUDA_VISIBLE_DEVICES"
fi
done # done with: for trainer in ${trainer_list[*]}; do
done # done with: for autocast in ${autocast_list[*]}; do
done # done with: for gpu in ${gpu_list[*]}; do
fi # end if [ ${MODE} = "infer" ]; then
| PaddleDetection/test_tipc/test_train_inference_python.sh/0 | {
"file_path": "PaddleDetection/test_tipc/test_train_inference_python.sh",
"repo_id": "PaddleDetection",
"token_count": 9524
} | 84 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import json
import logging
import numpy as np
from ppdet.utils.logger import setup_logger
logger = setup_logger('sniper_params_stats')
def get_default_params(architecture):
"""get_default_params"""
if architecture == "FasterRCNN":
anchor_range = np.array([64., 512.]) # for frcnn-fpn
# anchor_range = np.array([16., 373.]) # for yolov3
# anchor_range = np.array([32., 373.]) # for yolov3
default_crop_size = 1536 # mod 32 for frcnn-fpn
default_max_bbox_size = 352
elif architecture == "YOLOv3":
anchor_range = np.array([32., 373.]) # for yolov3
default_crop_size = 800 # mod 32 for yolov3
default_max_bbox_size = 352
else:
raise NotImplementedError
return anchor_range, default_crop_size, default_max_bbox_size
def get_box_ratios(anno_file):
"""
    get_box_ratios
    :param anno_file: coco anno file
:return: size_ratio: (box_long_size / pic_long_size)
"""
coco_dict = json.load(open(anno_file))
image_list = coco_dict['images']
anno_list = coco_dict['annotations']
image_id2hw = {}
for im_dict in image_list:
im_id = im_dict['id']
h, w = im_dict['height'], im_dict['width']
image_id2hw[im_id] = (h, w)
box_ratios = []
for a_dict in anno_list:
im_id = a_dict['image_id']
im_h, im_w = image_id2hw[im_id]
bbox = a_dict['bbox']
x1, y1, w, h = bbox
pic_long = max(im_h, im_w)
box_long = max(w, h)
box_ratios.append(box_long / pic_long)
return np.array(box_ratios)
def get_target_size_and_valid_box_ratios(anchor_range, box_ratio_p2, box_ratio_p98):
"""get_scale_and_ratios"""
anchor_better_low, anchor_better_high = anchor_range # (60., 512.)
anchor_center = np.sqrt(anchor_better_high * anchor_better_low)
anchor_log_range = np.log10(anchor_better_high) - np.log10(anchor_better_low)
box_ratio_log_range = np.log10(box_ratio_p98) - np.log10(box_ratio_p2)
logger.info("anchor_log_range:{}, box_ratio_log_range:{}".format(anchor_log_range, box_ratio_log_range))
box_cut_num = int(np.ceil(box_ratio_log_range / anchor_log_range))
box_ratio_log_window = box_ratio_log_range / box_cut_num
logger.info("box_cut_num:{}, box_ratio_log_window:{}".format(box_cut_num, box_ratio_log_window))
image_target_sizes = []
valid_ratios = []
for i in range(box_cut_num):
# # method1: align center
# box_ratio_log_center = np.log10(p2) + 0.5 * box_ratio_log_window + i * box_ratio_log_window
# box_ratio_center = np.power(10, box_ratio_log_center)
# scale = anchor_center / box_ratio_center
# method2: align left low
box_ratio_low = np.power(10, np.log10(box_ratio_p2) + i * box_ratio_log_window)
image_target_size = anchor_better_low / box_ratio_low
image_target_sizes.append(int(image_target_size))
valid_ratio = anchor_range / image_target_size
valid_ratios.append(valid_ratio.tolist())
logger.info("Box cut {}".format(i))
logger.info("box_ratio_low: {}".format(box_ratio_low))
logger.info("image_target_size: {}".format(image_target_size))
logger.info("valid_ratio: {}".format(valid_ratio))
return image_target_sizes, valid_ratios
def get_valid_ranges(valid_ratios):
"""
get_valid_box_ratios_range
:param valid_ratios:
:return:
"""
valid_ranges = []
if len(valid_ratios) == 1:
valid_ranges.append([-1, -1])
else:
for i, vratio in enumerate(valid_ratios):
if i == 0:
valid_ranges.append([-1, vratio[1]])
elif i == len(valid_ratios) - 1:
valid_ranges.append([vratio[0], -1])
else:
valid_ranges.append(vratio)
return valid_ranges
def get_percentile(a_array, low_percent, high_percent):
"""
get_percentile
:param low_percent:
:param high_percent:
:return:
"""
array_p0 = min(a_array)
array_p100 = max(a_array)
array_plow = np.percentile(a_array, low_percent)
array_phigh = np.percentile(a_array, high_percent)
logger.info(
"array_percentile(0): {},array_percentile low({}): {}, "
"array_percentile high({}): {}, array_percentile 100: {}".format(
array_p0, low_percent, array_plow, high_percent, array_phigh, array_p100))
return array_plow, array_phigh
def sniper_anno_stats(architecture, anno_file):
"""
sniper_anno_stats
:param anno_file:
:return:
"""
anchor_range, default_crop_size, default_max_bbox_size = get_default_params(architecture)
box_ratios = get_box_ratios(anno_file)
box_ratio_p8, box_ratio_p92 = get_percentile(box_ratios, 8, 92)
image_target_sizes, valid_box_ratios = get_target_size_and_valid_box_ratios(anchor_range, box_ratio_p8, box_ratio_p92)
valid_ranges = get_valid_ranges(valid_box_ratios)
crop_size = min(default_crop_size, min([item for item in image_target_sizes]))
crop_size = int(np.ceil(crop_size / 32.) * 32.)
crop_stride = max(min(default_max_bbox_size, crop_size), crop_size - default_max_bbox_size)
logger.info("Result".center(100, '-'))
logger.info("image_target_sizes: {}".format(image_target_sizes))
logger.info("valid_box_ratio_ranges: {}".format(valid_ranges))
logger.info("chip_target_size: {}, chip_target_stride: {}".format(crop_size, crop_stride))
return {
"image_target_sizes": image_target_sizes,
"valid_box_ratio_ranges": valid_ranges,
"chip_target_size": crop_size,
"chip_target_stride": crop_stride
}
if __name__=="__main__":
architecture, anno_file = sys.argv[1], sys.argv[2]
sniper_anno_stats(architecture, anno_file)
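    # Example invocation (the annotation path is illustrative):
    #   python sniper_params_stats.py FasterRCNN annotations/instances_train2017.json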
| PaddleDetection/tools/sniper_params_stats.py/0 | {
"file_path": "PaddleDetection/tools/sniper_params_stats.py",
"repo_id": "PaddleDetection",
"token_count": 2792
} | 85 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="AutoImportSettings">
<option name="autoReloadType" value="SELECTIVE" />
</component>
<component name="ChangeListManager">
<list default="true" id="b67e837f-8e75-492e-aa39-5ab1951885e3" name="更改" comment="" />
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
<option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
<option name="LAST_RESOLUTION" value="IGNORE" />
</component>
<component name="FileTemplateManagerImpl">
<option name="RECENT_TEMPLATES">
<list>
<option value="Python Script" />
</list>
</option>
</component>
<component name="ProjectColorInfo">{
"associatedIndex": 7
}</component>
<component name="ProjectId" id="2iMK3jdXPZ4vnVhJkfzVKi7HETy" />
<component name="ProjectViewState">
<option name="hideEmptyMiddlePackages" value="true" />
<option name="showLibraryContents" value="true" />
</component>
<component name="PropertiesComponent"><![CDATA[{
"keyToString": {
"Docker.Dockerfile.executor": "Run",
"Python.main.executor": "Run",
"RunOnceActivity.ShowReadmeOnStart": "true",
"last_opened_file_path": "/home/ubuntu/wensimin-work/euryale/Dockerfile",
"node.js.detected.package.eslint": "true",
"node.js.detected.package.tslint": "true",
"node.js.selected.package.eslint": "(autodetect)",
"node.js.selected.package.tslint": "(autodetect)",
"nodejs_package_manager_path": "npm",
"vue.rearranger.settings.migration": "true"
}
}]]></component>
<component name="RdControllerToolWindowsLayoutState" isNewUi="true">
<layout>
<window_info id="Bookmarks" show_stripe_button="false" side_tool="true" />
<window_info id="Merge Requests" show_stripe_button="false" />
<window_info id="Commit_Guest" show_stripe_button="false" />
<window_info id="Pull Requests" show_stripe_button="false" />
<window_info id="Learn" show_stripe_button="false" />
<window_info active="true" content_ui="combo" id="Project" order="0" visible="true" weight="0.3711189" />
<window_info id="Commit" order="1" weight="0.25" />
<window_info id="Structure" order="2" side_tool="true" weight="0.25" />
<window_info anchor="bottom" id="Database Changes" show_stripe_button="false" />
<window_info anchor="bottom" id="TypeScript" show_stripe_button="false" />
<window_info anchor="bottom" id="Debug" />
<window_info anchor="bottom" id="TODO" show_stripe_button="false" />
<window_info anchor="bottom" id="File Transfer" show_stripe_button="false" />
<window_info anchor="bottom" id="Find" />
<window_info anchor="bottom" id="Version Control" order="0" />
<window_info anchor="bottom" id="Problems" order="1" />
<window_info anchor="bottom" id="Problems View" order="2" />
<window_info anchor="bottom" id="Terminal" order="3" weight="0.33415115" />
<window_info anchor="bottom" id="Services" order="4" weight="0.56084394" />
<window_info anchor="bottom" id="Python Packages" order="5" weight="0.1" />
<window_info anchor="bottom" id="Python Console" order="6" weight="0.1" />
<window_info active="true" anchor="bottom" id="Run" order="7" visible="true" weight="0.45583904" />
<window_info anchor="right" id="Endpoints" show_stripe_button="false" />
<window_info anchor="right" id="Coverage" show_stripe_button="false" side_tool="true" />
<window_info anchor="right" id="SciView" show_stripe_button="false" />
<window_info anchor="right" content_ui="combo" id="Notifications" order="0" weight="0.25" />
<window_info anchor="right" id="AIAssistant" order="1" weight="0.25" />
<window_info anchor="right" id="Database" order="2" weight="0.25" />
<window_info anchor="right" id="Gradle" order="3" weight="0.25" />
<window_info anchor="right" id="Maven" order="4" weight="0.25" />
<window_info anchor="right" id="Plots" order="5" weight="0.1" />
<window_info anchor="right" id="Translation.Wordbook" order="6" show_stripe_button="false" side_tool="true" />
</layout>
</component>
<component name="RunManager" selected="Python.main">
<configuration name="main" type="PythonConfigurationType" factoryName="Python" nameIsGenerated="true">
<module name="euryale" />
<option name="ENV_FILES" value="" />
<option name="INTERPRETER_OPTIONS" value="" />
<option name="PARENT_ENVS" value="true" />
<envs>
<env name="PYTHONUNBUFFERED" value="1" />
</envs>
<option name="SDK_HOME" value="" />
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$" />
<option name="IS_MODULE_SDK" value="true" />
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<EXTENSION ID="DockerContainerSettingsRunConfigurationExtension">
<option name="envVars">
<list />
</option>
<option name="extraHosts">
<list />
</option>
<option name="links">
<list />
</option>
<option name="networkDisabled" value="false" />
<option name="networkMode" value="bridge" />
<option name="portBindings">
<list />
</option>
<option name="publishAllPorts" value="false" />
<option name="runCliOptions" value="--entrypoint= --rm" />
<option name="version" value="2" />
<option name="volumeBindings">
<list>
<DockerVolumeBindingImpl>
<option name="containerPath" value="/opt/project" />
<option name="editable" value="true" />
<option name="hostPath" value="$PROJECT_DIR$" />
<option name="readOnly" value="false" />
</DockerVolumeBindingImpl>
</list>
</option>
</EXTENSION>
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/main.py" />
<option name="PARAMETERS" value="" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
<option name="REDIRECT_INPUT" value="false" />
<option name="INPUT_FILE" value="" />
<method v="2" />
</configuration>
<configuration name="Dockerfile" type="docker-deploy" factoryName="dockerfile" temporary="true" server-name="Docker">
<deployment type="dockerfile">
<settings>
<option name="imageTag" value="tensorflow:runtime" />
<option name="buildOnly" value="true" />
<option name="sourceFilePath" value="Dockerfile" />
</settings>
</deployment>
<method v="2" />
</configuration>
<configuration default="true" type="docker-deploy" factoryName="dockerfile" temporary="true">
<deployment type="dockerfile">
<settings />
</deployment>
<method v="2" />
</configuration>
<list>
<item itemvalue="Docker.Dockerfile" />
<item itemvalue="Python.main" />
</list>
<recent_temporary>
<list>
<item itemvalue="Docker.Dockerfile" />
</list>
</recent_temporary>
</component>
<component name="SharedIndexes">
<attachedChunks>
<set>
<option value="bundled-js-predefined-1d06a55b98c1-74d2a5396914-JavaScript-PY-241.14494.241" />
<option value="bundled-python-sdk-0509580d9d50-28c9f5db9ffe-com.jetbrains.pycharm.pro.sharedIndexes.bundled-PY-241.14494.241" />
</set>
</attachedChunks>
</component>
<component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="应用程序级" UseSingleDictionary="true" transferred="true" />
<component name="TaskManager">
<task active="true" id="Default" summary="默认任务">
<changelist id="b67e837f-8e75-492e-aa39-5ab1951885e3" name="更改" comment="" />
<created>1719294809321</created>
<option name="number" value="Default" />
<option name="presentableId" value="Default" />
<updated>1719294809321</updated>
<workItem from="1719294811304" duration="1467000" />
<workItem from="1719296295003" duration="3890000" />
<workItem from="1719304537733" duration="4281000" />
</task>
<servers />
</component>
<component name="TypeScriptGeneratedFilesManager">
<option name="version" value="3" />
</component>
<component name="com.intellij.coverage.CoverageDataManagerImpl">
<SUITE FILE_PATH="coverage/euryale$main.coverage" NAME="main 覆盖结果" MODIFIED="1719307848309" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="false" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
</component>
</project> | euryale/.idea/workspace.xml/0 | {
"file_path": "euryale/.idea/workspace.xml",
"repo_id": "euryale",
"token_count": 3668
} | 86 |
#!/bin/bash
MODEL=${1}
NUM_GPUS=${2}
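# Usage sketch (illustrative): ./download_and_convert_model.sh codegen-2B-multi 1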
echo "Converting model ${MODEL} with ${NUM_GPUS} GPUs"
cp -r models/${MODEL}-${NUM_GPUS}gpu /models
python3 codegen_gptj_convert.py --code_model Salesforce/${MODEL} ${MODEL}-hf
python3 huggingface_gptj_convert.py -in_file ${MODEL}-hf -saved_dir /models/${MODEL}-${NUM_GPUS}gpu/fastertransformer/1 -infer_gpu_num ${NUM_GPUS}
rm -rf ${MODEL}-hf
| fauxpilot/converter/download_and_convert_model.sh/0 | {
"file_path": "fauxpilot/converter/download_and_convert_model.sh",
"repo_id": "fauxpilot",
"token_count": 172
} | 87 |
import logging
import os
import uvicorn
from fastapi import FastAPI, Request, Response
from fastapi.responses import JSONResponse
from sse_starlette.sse import EventSourceResponse
from config.log_config import uvicorn_logger
from models import OpenAIinput
from utils.codegen import CodeGenProxy
from utils.errors import FauxPilotException
logging.config.dictConfig(uvicorn_logger)
codegen = CodeGenProxy(
host=os.environ.get("TRITON_HOST", "triton"),
port=os.environ.get("TRITON_PORT", 8001),
verbose=os.environ.get("TRITON_VERBOSITY", False)
)
app = FastAPI(
title="FauxPilot",
description="This is an attempt to build a locally hosted version of GitHub Copilot. It uses the SalesForce CodeGen"
"models inside of NVIDIA's Triton Inference Server with the FasterTransformer backend.",
docs_url="/",
swagger_ui_parameters={"defaultModelsExpandDepth": -1}
)
@app.exception_handler(FauxPilotException)
async def fauxpilot_handler(request: Request, exc: FauxPilotException):
return JSONResponse(
status_code=400,
content=exc.json()
)
# Used to support copilot.vim
@app.get("/copilot_internal/v2/token")
def get_copilot_token():
content = {'token': '1', 'expires_at': 2600000000, 'refresh_in': 900}
return JSONResponse(
status_code=200,
content=content
)
@app.post("/v1/engines/codegen/completions")
# Used to support copilot.vim
@app.post("/v1/engines/copilot-codex/completions")
@app.post("/v1/completions")
async def completions(data: OpenAIinput):
data = data.dict()
try:
content = codegen(data=data)
except codegen.TokensExceedsMaximum as E:
raise FauxPilotException(
message=str(E),
error_type="invalid_request_error",
param=None,
code=None,
)
if data.get("stream") is not None:
return EventSourceResponse(
content=content,
status_code=200,
media_type="text/event-stream"
)
else:
return Response(
status_code=200,
content=content,
media_type="application/json"
)
if __name__ == "__main__":
uvicorn.run("app:app", host="0.0.0.0", port=5000)
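    # Example request against the running server (illustrative values):
    #   curl -s http://localhost:5000/v1/completions \
    #     -H "Content-Type: application/json" \
    #     -d '{"model": "codegen", "prompt": "def hello():", "max_tokens": 16}'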
| fauxpilot/copilot_proxy/app.py/0 | {
"file_path": "fauxpilot/copilot_proxy/app.py",
"repo_id": "fauxpilot",
"token_count": 930
} | 88 |
version: '3.3'
services:
triton:
build:
context: .
dockerfile: triton.Dockerfile
command: bash -c "CUDA_VISIBLE_DEVICES=${GPUS} mpirun -n 1 --allow-run-as-root /opt/tritonserver/bin/tritonserver --model-repository=/model"
shm_size: '2gb'
volumes:
- ${MODEL_DIR}:/model
- ${HF_CACHE_DIR}:/root/.cache/huggingface
ports:
- "8000:8000"
- "${TRITON_PORT}:8001"
- "8002:8002"
deploy:
resources:
reservations:
devices:
- driver: nvidia
count: all
capabilities: [gpu]
copilot_proxy:
# For dockerhub version
# image: moyix/copilot_proxy:latest
# For local build
build:
context: .
dockerfile: proxy.Dockerfile
command: uvicorn app:app --host 0.0.0.0 --port 5000
env_file:
# Automatically created via ./setup.sh
- .env
ports:
- "${API_EXTERNAL_PORT}:5000"
| fauxpilot/docker-compose.yaml/0 | {
"file_path": "fauxpilot/docker-compose.yaml",
"repo_id": "fauxpilot",
"token_count": 455
} | 89 |
#!/usr/bin/env bash
if [ -f .env ]; then
read -rp ".env already exists, do you want to delete .env and recreate it? [y/n] " DELETE
if [[ ${DELETE:-y} =~ ^[Yy]$ ]]
then
echo "Deleting .env"
rm .env
else
echo "Exiting"
exit 0
fi;
fi
function check_dep(){
echo "Checking for $1 ..."
which "$1" 2>/dev/null || {
echo "Please install $1."
exit 1
}
}
check_dep curl
check_dep zstd
check_dep docker
############### Common configuration ###############
# Read number of GPUs
read -rp "Enter number of GPUs [1]: " NUM_GPUS
NUM_GPUS=${NUM_GPUS:-1}
read -rp "External port for the API [5000]: " API_EXTERNAL_PORT
API_EXTERNAL_PORT=${API_EXTERNAL_PORT:-5000}
read -rp "Address for Triton [triton]: " TRITON_HOST
TRITON_HOST=${TRITON_HOST:-triton}
read -rp "Port of Triton host [8001]: " TRITON_PORT
TRITON_PORT=${TRITON_PORT:-8001}
# Read models root directory (all models go under this)
read -rp "Where do you want to save your models [$(pwd)/models]? " MODELS_ROOT_DIR
if [ -z "$MODELS_ROOT_DIR" ]; then
MODELS_ROOT_DIR="$(pwd)/models"
else
MODELS_ROOT_DIR="$(readlink -m "${MODELS_ROOT_DIR}")"
fi
mkdir -p "$MODELS_ROOT_DIR"
# Write .env
echo "NUM_GPUS=${NUM_GPUS}" >> .env
echo "GPUS=$(seq 0 $(( NUM_GPUS - 1)) | paste -s -d ',' -)" >> .env
echo "API_EXTERNAL_PORT=${API_EXTERNAL_PORT}" >> .env
echo "TRITON_HOST=${TRITON_HOST}" >> .env
echo "TRITON_PORT=${TRITON_PORT}" >> .env
############### Backend specific configuration ###############
function fastertransformer_backend(){
echo "Models available:"
echo "[1] codegen-350M-mono (2GB total VRAM required; Python-only)"
echo "[2] codegen-350M-multi (2GB total VRAM required; multi-language)"
echo "[3] codegen-2B-mono (7GB total VRAM required; Python-only)"
echo "[4] codegen-2B-multi (7GB total VRAM required; multi-language)"
echo "[5] codegen-6B-mono (13GB total VRAM required; Python-only)"
echo "[6] codegen-6B-multi (13GB total VRAM required; multi-language)"
echo "[7] codegen-16B-mono (32GB total VRAM required; Python-only)"
echo "[8] codegen-16B-multi (32GB total VRAM required; multi-language)"
# Read their choice
read -rp "Enter your choice [6]: " MODEL_NUM
# Convert model number to model name
case $MODEL_NUM in
1) MODEL="codegen-350M-mono" ;;
2) MODEL="codegen-350M-multi" ;;
3) MODEL="codegen-2B-mono" ;;
4) MODEL="codegen-2B-multi" ;;
5) MODEL="codegen-6B-mono" ;;
6) MODEL="codegen-6B-multi" ;;
7) MODEL="codegen-16B-mono" ;;
8) MODEL="codegen-16B-multi" ;;
*) MODEL="codegen-6B-multi" ;;
esac
echo "MODEL=${MODEL}" >> .env
echo "MODEL_DIR=${MODELS_ROOT_DIR}/${MODEL}-${NUM_GPUS}gpu" >> .env
if (test -d "$MODELS_ROOT_DIR"/"${MODEL}"-"${NUM_GPUS}"gpu ); then
echo "$MODELS_ROOT_DIR"/"${MODEL}"-"${NUM_GPUS}"gpu
echo "Converted model for ${MODEL}-${NUM_GPUS}gpu already exists."
read -rp "Do you want to re-use it? y/n: " REUSE_CHOICE
if [[ ${REUSE_CHOICE:-y} =~ ^[Yy]$ ]]
then
DOWNLOAD_MODEL=n
echo "Re-using model"
else
DOWNLOAD_MODEL=y
rm -rf "$MODELS_ROOT_DIR"/"${MODEL}"-"${NUM_GPUS}"gpu
fi
else
DOWNLOAD_MODEL=y
fi
if [[ ${DOWNLOAD_MODEL:-y} =~ ^[Yy]$ ]]
then
if [ "$NUM_GPUS" -le 2 ]; then
echo "Downloading the model from HuggingFace, this will take a while..."
SCRIPT_DIR="$(dirname "$(readlink -f "$0")")"
DEST="${MODEL}-${NUM_GPUS}gpu"
ARCHIVE="${MODELS_ROOT_DIR}/${DEST}.tar.zst"
cp -r "$SCRIPT_DIR"/converter/models/"$DEST" "${MODELS_ROOT_DIR}"
DOWN_URL="https://hf-mirror.com/moyix/${MODEL}-gptj/resolve/main/${MODEL}-${NUM_GPUS}gpu.tar.zst"
echo "down url : $DOWN_URL"
echo "down out $ARCHIVE"
curl -L "$DOWN_URL" \
-o "$ARCHIVE"
zstd -dc "$ARCHIVE" | tar -xf - -C "${MODELS_ROOT_DIR}"
rm -f "$ARCHIVE"
else
echo "Downloading and converting the model, this will take a while..."
docker run --rm -v "${MODELS_ROOT_DIR}":/models -e MODEL=${MODEL} -e NUM_GPUS="${NUM_GPUS}" moyix/model_converter:latest
fi
fi
# Not used for this backend but needs to be present
HF_CACHE_DIR="$(pwd)/.hf_cache"
mkdir -p "$HF_CACHE_DIR"
echo "HF_CACHE_DIR=${HF_CACHE_DIR}" >> .env
}
function python_backend(){
echo "Models available:"
echo "[1] codegen-350M-mono (1GB total VRAM required; Python-only)"
echo "[2] codegen-350M-multi (1GB total VRAM required; multi-language)"
echo "[3] codegen-2B-mono (4GB total VRAM required; Python-only)"
echo "[4] codegen-2B-multi (4GB total VRAM required; multi-language)"
read -rp "Enter your choice [4]: " MODEL_NUM
# Convert model number to model name
case $MODEL_NUM in
1) MODEL="codegen-350M-mono"; ORG="Salesforce" ;;
2) MODEL="codegen-350M-multi"; ORG="Salesforce" ;;
3) MODEL="codegen-2B-mono"; ORG="Salesforce" ;;
4) MODEL="codegen-2B-multi"; ORG="Salesforce" ;;
*) MODEL="codegen-2B-multi"; ORG="Salesforce" ;;
esac
# share huggingface cache? Should be safe to share, but permission issues may arise depending upon your docker setup
read -rp "Do you want to share your huggingface cache between host and docker container? y/n [n]: " SHARE_HF_CACHE
SHARE_HF_CACHE=${SHARE_HF_CACHE:-n}
if [[ ${SHARE_HF_CACHE:-y} =~ ^[Yy]$ ]]; then
read -rp "Enter your huggingface cache directory [$HOME/.cache/huggingface]: " HF_CACHE_DIR
HF_CACHE_DIR=${HF_CACHE_DIR:-$HOME/.cache/huggingface}
else
HF_CACHE_DIR="$(pwd)/.hf_cache"
fi
# use int8? Allows larger models to fit in GPU but might be very marginally slower
read -rp "Do you want to use int8? y/n [n]: " USE_INT8
if [[ ! $USE_INT8 =~ ^[Yy]$ ]]; then
USE_INT8="0"
else
USE_INT8="1"
fi
# Write config.env
echo "MODEL=py-${MODEL}" >> .env
echo "MODEL_DIR=${MODELS_ROOT_DIR}/py-${ORG}-${MODEL}" >> .env # different format from fastertransformer backend
echo "HF_CACHE_DIR=${HF_CACHE_DIR}" >> .env
python3 ./python_backend/init_model.py --model_name "${MODEL}" --org_name "${ORG}" --model_dir "${MODELS_ROOT_DIR}" --use_int8 "${USE_INT8}"
bash -c "source .env ; docker compose build || docker-compose build"
}
# choose backend
echo "Choose your backend:"
echo "[1] FasterTransformer backend (faster, but limited models)"
echo "[2] Python backend (slower, but more models, and allows loading with int8)"
read -rp "Enter your choice [1]: " BACKEND_NUM
if [[ "$BACKEND_NUM" -eq 2 ]]; then
python_backend
else
fastertransformer_backend
fi
read -rp "Config complete, do you want to run FauxPilot? [y/n] " RUN
if [[ ${RUN:-y} =~ ^[Yy]$ ]]
then
bash ./launch.sh
else
echo "You can run ./launch.sh to start the FauxPilot server."
exit 0
fi
| fauxpilot/setup.sh/0 | {
"file_path": "fauxpilot/setup.sh",
"repo_id": "fauxpilot",
"token_count": 3036
} | 90 |
# coding=utf-8
# Copyright 2024 Sourab Mangrulkar. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import gzip
import json
from datasets import Dataset
DATAFOLDER = "hf_stack"
HF_DATASET_NAME = "smangrul/hug_stack"
def load_gzip_jsonl(file_path):
data = []
with gzip.open(file_path, "rt", encoding="utf-8") as f:
for line in f:
data.append(json.loads(line))
return data
def create_hf_dataset():
df = None
for file in os.listdir(DATAFOLDER):
data = load_gzip_jsonl(os.path.join(DATAFOLDER, file))
if df is None:
df = pd.DataFrame(data)
else:
df = pd.concat([df, pd.DataFrame(data)])
dataset = Dataset.from_pandas(df)
dataset.push_to_hub(HF_DATASET_NAME, private=False)
if __name__ == "__main__":
create_hf_dataset()
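    # Optional sanity check (hypothetical): reload the pushed dataset.
    #   from datasets import load_dataset
    #   ds = load_dataset(HF_DATASET_NAME, split="train")
    #   print(ds)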
| get-data/prepare_hf_dataset.py/0 | {
"file_path": "get-data/prepare_hf_dataset.py",
"repo_id": "get-data",
"token_count": 526
} | 91 |
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="Remote Python 3.10.13 Docker Compose (arcface-dev)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="PyDocumentationSettings">
<option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Plain" />
</component>
</module> | insightface/.idea/insightface.iml/0 | {
"file_path": "insightface/.idea/insightface.iml",
"repo_id": "insightface",
"token_count": 180
} | 92 |
import mxnet as mx
import mxnet.optimizer as optimizer
from mxnet.ndarray import (NDArray, zeros, clip, sqrt, cast, maximum, abs as
NDabs)
#from mxnet.ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
# mp_sgd_update, mp_sgd_mom_update, square, ftrl_update)
class ONadam(optimizer.Optimizer):
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
schedule_decay=0.004,
**kwargs):
super(ONadam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.schedule_decay = schedule_decay
self.m_schedule = 1.
def create_state(self, index, weight):
return (
zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context,
dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert (isinstance(weight, NDArray))
assert (isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
# preprocess grad
        # note: `grad *= self.rescale_grad + wd * weight` would scale grad by
        # (rescale_grad + wd * weight) due to operator precedence, which is not
        # the intended weight decay; use the explicit form instead
        grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# warming momentum schedule
momentum_t = self.beta1 * (1. - 0.5 *
(pow(0.96, t * self.schedule_decay)))
momentum_t_1 = self.beta1 * (1. - 0.5 *
(pow(0.96,
(t + 1) * self.schedule_decay)))
self.m_schedule = self.m_schedule * momentum_t
m_schedule_next = self.m_schedule * momentum_t_1
# update m_t and v_t
m_t, v_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
v_t[:] = self.beta2 * v_t + (1. - self.beta2) * grad * grad
grad_prime = grad / (1. - self.m_schedule)
m_t_prime = m_t / (1. - m_schedule_next)
v_t_prime = v_t / (1. - pow(self.beta2, t))
m_t_bar = (1. - momentum_t) * grad_prime + momentum_t_1 * m_t_prime
# update weight
weight[:] -= lr * m_t_bar / (sqrt(v_t_prime) + self.epsilon)
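if __name__ == "__main__":
    # Minimal smoke test (illustrative): run a single ONadam step on random
    # data to check that create_state/update work end to end.
    opt = ONadam(learning_rate=0.001, rescale_grad=1.0)
    weight = mx.nd.random.uniform(shape=(4, 4))
    grad = mx.nd.random.uniform(shape=(4, 4))
    state = opt.create_state(0, weight)
    opt.update(0, weight, grad, state)
    print(weight)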
| insightface/alignment/heatmap/optimizer.py/0 | {
"file_path": "insightface/alignment/heatmap/optimizer.py",
"repo_id": "insightface",
"token_count": 1341
} | 93 |
# Training performance report on NVIDIA A100
[NVIDIA A100 Tensor Core GPU](https://www.nvidia.com/en-us/data-center/a100/)
## Test Server Spec
| Key | Value |
| ------------ | ------------------------------------------------ |
| System | ServMax G408-X2 Rackmountable Server |
| CPU | 2 x Intel(R) Xeon(R) Gold 5220R CPU @ 2.20GHz |
| Memory | 384GB, 12 x Samsung 32GB DDR4-2933 |
| GPU | 8 x NVIDIA A100 80GB |
| Cooling | 2x Customized GPU Kit for GPU support FAN-1909L2 |
| Hard Drive | Intel SSD S4500 1.9TB/SATA/TLC/2.5" |
| OS | Ubuntu 16.04.7 LTS |
| Installation | CUDA 11.1, cuDNN 8.0.5 |
| Installation | Python 3.7.10 |
| Installation | PyTorch 1.9.0 (conda) |
This server is donated by [AMAX](https://www.amaxchina.com/), many thanks!
## Experiments on arcface_torch
We report training speed in the following table; please also note that:

1. The training dataset is in mxnet record format and located on an SSD hard drive.
2. Embedding sizes are all set to 512.
3. We use large datasets with about 618K/2M identities to simulate real cases.
4. We test the 10K batch size on a real dataset to take full advantage of the 80GB memory.
5. We also test on huge synthetic datasets which include 50M~80M classes.
| Dataset | Classes | Backbone | Batch-size | PFC | FP16 | TF32 | Samples/sec | GPU Mem(GB) |
| ----------- | ------- | ----------- | ---------- | ---- | ---- | ---- | ----------- | ----------- |
| WebFace600K | 618K | IResNet-50 | 1024 | × | × | × | ~3670 | ~18.2 |
| WebFace600K | 618K | IResNet-50 | 1024 | × | × | √ | ~4760 | ~15.0 |
| WebFace600K | 618K | IResNet-50 | 1024 | × | √ | × | ~5170 | ~10.1 |
| WebFace600K | 618K | IResNet-50 | 1024 | × | √ | √ | ~5400 | ~10.1 |
| WebFace600K | 618K | IResNet-50 | 2048 | × | √ | √ | ~7780 | ~16.4 |
| WebFace600K | 618K | IResNet-50 | 10240 | × | √ | √ | ~9400 | ~66.7 |
| WebFace600K | 618K | IResNet-100 | 1024 | × | √ | √ | ~3700 | ~13.1 |
| WebFace600K | 618K | IResNet-180 | 1024 | × | √ | √ | ~2380 | ~17.5 |
| WebFace2M | 2M | IResNet-100 | 1024 | × | √ | √ | ~3480 | ~20.5 |
| WebFace2M | 2M | IResNet-180 | 1024 | × | √ | √ | ~2350 | ~25.0 |
| WebFace2M | 2M | IResNet-300 | 1024 | × | √ | √ | ~1541 | ~32.6 |
| Virtual | 50M | IResNet-50 | 1024 | 0.1 | √ | √ | ~2700 | ~54.1 |
| Virtual | 70M | IResNet-50 | 1024 | 0.1 | √ | √ | ~2170 | ~73.7 |
| Virtual | 80M | IResNet-50 | 1024 | 0.1 | √ | √ | ~1080 | ~79.6 |
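For reference, the FP16 and TF32 columns above map onto standard PyTorch switches. A minimal sketch of enabling both (not the exact arcface_torch training code; the model and data below are placeholders):

```python
import torch
import torch.nn as nn

# TF32 matmul/conv kernels on Ampere GPUs (the TF32 column above)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# one mixed-precision training step (the FP16 column above)
model = nn.Linear(512, 10).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler()

x = torch.randn(8, 512, device="cuda")
with torch.cuda.amp.autocast():
    loss = model(x).square().mean()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
```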
| insightface/benchmarks/train/nvidia_a100.md/0 | {
"file_path": "insightface/benchmarks/train/nvidia_a100.md",
"repo_id": "insightface",
"token_count": 1692
} | 94 |
import sys
from torch.utils.data import Dataset, DataLoader
import os
import os.path as osp
import glob
import numpy as np
import random
import cv2
import pickle as pkl
import json
import h5py
import torch
import matplotlib.pyplot as plt
from lib.utils.misc import process_dataset_for_video
class MPIINFDataset(Dataset):
def __init__(self, config, is_train=True):
self.frame_interval = config.DATA.FRAME_INTERVAL
# for mpi dataset, we convert its order to match with h36m
self.mpi2h36m = [10, 9, 8, 11, 12, 13, 4, 3, 2, 5, 6, 7, 1, 14, 15, 16, 0]
# randomization will lead to inferior performance
self.scale_path = "../data/mpi_train_scales.pkl" if config.USE_GT else "../data/mpi_train_scales_pre.pkl"
self.use_same_norm_2d = config.DATA.USE_SAME_NORM_2D
self.use_same_norm_3d = config.DATA.USE_SAME_NORM_3D
self.is_train = is_train
self.data_path = config.DATA.TRAIN_PATH if self.is_train else config.DATA.VALID_PATH
self.head_root_distance = 1 / config.TRAIN.CAMERA_SKELETON_DISTANCE
        # whether to use dataset adapted from kinetics
self.use_gt = config.USE_GT
self.use_ideal_scale = config.DATA.USE_IDEAL_SCALE
self.min_diff_dist = config.DATA.MIN_DIFF_DIST
self.use_scaler = config.TRAIN.USE_SCALER
self.bound_azim = float(config.TRAIN.BOUND_AZIM) # y axis rotation
self.bound_elev = float(config.TRAIN.BOUND_ELEV)
self._load_data_set()
def _load_data_set(self):
if self.is_train:
print('start loading mpiinf {} data.'.format("train" if self.is_train else "test"))
key = "joint_2d_gt" if self.use_gt else "joint_2d_pre"
fp = h5py.File(self.data_path, "r")
self.kp2ds = np.array(fp[key])[:, self.mpi2h36m, :2]
self.kp2ds[:, :, 0] = (self.kp2ds[..., 0] - 1024.0) / 1024.0
self.kp2ds[:, :, 1] = (self.kp2ds[..., 1] - 1024.0) / 1024.0
# self.kp2ds = np.maximum(np.minimum(self.kp2ds, 1.0), -1.0)
# locate root at the origin
self.kp2ds = self.kp2ds - self.kp2ds[:, 13:14]
self.kp2ds[:, 13] = 1e-5
# imagenames will be used to sample frames
self.imagenames = [name.decode() for name in fp['imagename'][:]]
if 'seqname' not in fp.keys():
# first we close the already opened (read-only) h5
fp.close()
print("Process corresponding dataset...")
process_dataset_for_video(self.data_path, is_mpi=True)
fp = h5py.File(self.data_path, "r")
self.sequence_lens = np.array(fp['seqlen'])
self.sequence_names = [name.decode() for name in fp['seqname'][:]]
self.indices_in_seq = np.array(fp['index_in_seq'])
        # normalize again so that the mean distance of head and root is 1/c
if not self.use_same_norm_2d:
factor_gt = self.head_root_distance / (np.tile(np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 2)) + 1e-8)
else:
factor_gt = self.head_root_distance / np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).mean()
self.kp2ds = self.kp2ds * factor_gt
self.kp3ds = np.array(fp['joint_3d_gt'])[:, self.mpi2h36m, :3] / 1000.0
# factor_3d = np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).mean())
factor_filename = "../data/mpi_{}_factor_3d.pkl".format("train" if self.is_train else "test")
if not self.use_same_norm_3d:
factor_3d = (np.tile(np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 3)) + 1e-8)
print(factor_3d.shape)
with open(factor_filename, "wb") as f:
pkl.dump(factor_3d, f)
if osp.exists(self.scale_path):
with open(self.scale_path, "rb") as f:
self.scales = pkl.load(f)['scale']
else:
if self.use_scaler:
pass
# raise Warning("You haven't generated the computed scales, if you don't need to observe the scale error during training, \njust ignore this warning because it won't affect training.")
self.scales = None
if self.use_ideal_scale:
# scales computed from projection of 3d
with open("../data/mpi_{}_scales.pkl".format("train" if self.is_train else "valid"), "rb") as f:
scales = pkl.load(f)
self.kp2ds = self.kp2ds * scales
fp.close()
        print('finished loading mpiinf {} data, total {} samples'.format("train" if self.is_train else "test", \
            self.kp2ds.shape[0]))
# generate the rotation factors
num_examples = self.kp2ds.shape[0]
rotation_y = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_azim
rotation_x = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_elev
rotation_z = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_elev / 2
rotation_1 = np.concatenate((rotation_y, rotation_x, rotation_z), axis=1)
rotation_2 = rotation_1.copy()
rotation_2[:, 0] = rotation_2[:, 0] + np.pi
self.rotation = np.concatenate((rotation_1, rotation_2), axis=0)
np.random.shuffle(self.rotation)
self.rotation = torch.from_numpy(self.rotation).float()
self.kp2ds = torch.from_numpy(self.kp2ds).float()
self.kp3ds = torch.from_numpy(self.kp3ds).float()
if self.scales is not None:
self.scales = torch.from_numpy(self.scales).float()
def get_seqnames(self):
return self.sequence_names
def __len__(self):
return self.kp2ds.shape[0]
def __getitem__(self, index):
seq_len = self.sequence_lens[index]
index_in_seq = self.indices_in_seq[index]
kps_3d = self.kp3ds[index]
rot = self.rotation[index]
if not self.is_train:
kps_2d = self.kp2ds[index]
            # diff frames are not used at validation time; fill with placeholders
diff1 = diff2 = self.kp2ds[index]
else:
kps_2d = self.kp2ds[index]
if self.frame_interval + index < seq_len:
diff1_index = index + self.frame_interval
else:
diff1_index = index - self.frame_interval
diff1 = self.kp2ds[diff1_index]
diff_dist = np.random.randint(-index_in_seq, seq_len - index_in_seq)
while abs(diff_dist) < self.min_diff_dist:
diff_dist = np.random.randint(-index_in_seq, seq_len - index_in_seq)
diff2_index = index + diff_dist
diff2 = self.kp2ds[diff2_index]
rot = self.rotation[index]
# for valdiation, simply ignore scale
if self.scales is None or not self.is_train:
scale = 0
else:
scale = self.scales[index]
return kps_2d, kps_3d, rot, diff1, diff2, scale
| insightface/body/human_pose/ambiguity_aware/lib/dataloader/mpiinf.py/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/lib/dataloader/mpiinf.py",
"repo_id": "insightface",
"token_count": 3401
} | 95 |
# 4th Face Anti-spoofing Workshop and Challenge@CVPR2023, Wild Track
<div align="left">
<img src="https://raw.githubusercontent.com/nttstar/insightface-resources/master/images/faswild_large.png" width="640"/>
</div>
## Updates
**``2023-05-03``**: We have launched the ongoing version of this challenge. You can start submitting your test results at this [new link](https://codalab.lisn.upsaclay.fr/competitions/12933). If you have not applied for the dataset yet, you need to send an application email to both ``insightface.challenge@gmail.com`` and ``wangdong@moredian.com``.
**``2023-02-15``**: The annotation format in readme is fixed:
```
- e.g: Train/spoof/2D-Display-Phone/000001/000001.txt
192 148 (bbox left top)
234 203 (bbox right bottom)
216 171 (landmark left eye)
230 168 (landmark right eye)
231 180 (landmark nose)
218 190 (landmark left mouth )
229 188 (landmark right mouth )
```
## Challenge
We host the WILD track of Face Anti-spoofing Workshop and Challenge@CVPR2023 here. The challenge will officially start together with [4th Face Anti-spoofing Workshop](https://sites.google.com/view/face-anti-spoofing-challenge/welcome/challengecvpr2023).
[Registration](https://codalab.lisn.upsaclay.fr/competitions/10670) is now open on codalab.
Our competition encompasses over 800K spoof photos and over 500K live photos. Among the spoof photos, there are three major categories and 17 subcategories.
### Rules and Regulations
1) Any extra data or pretrained model trained from extra data cannot be used in this challenge.
2) Only one DL model can be used; we cannot accept fusion results from multiple DL models. The computational cost of a single DL model should be **less than 5G FLOPs**. (FLOPs can be calculated using the `ptflops` python library; see the sketch after this list.)
3) The top-3 winners are required to submit the code for the entire method, ensuring reproducibility of the results and compliance with all contest rules, otherwise the score will be disqualified.
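As a reference for rule 2, the complexity check can be sketched with `ptflops` as below (the backbone is an illustrative placeholder; note that `ptflops` reports multiply-accumulate counts, so double-check the convention against the 5G budget):

```python
import torchvision.models as models
from ptflops import get_model_complexity_info

net = models.resnet18()  # placeholder; substitute your anti-spoofing model
macs, params = get_model_complexity_info(
    net, (3, 224, 224), as_strings=True, print_per_layer_stat=False)
print(f"Complexity: {macs}, Parameters: {params}")
```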
### Timeline
| Phase | Start Date | End Date | Intro |
|-------|------------|---------------------|---------------------------------------------------------------------|
| Dev | 2023-02-13 | 2023-03-15 | evaluate the accuracy on dev set |
| Test | 2023-03-15 | 2023-03-28 23:59:59 | evaluate the accuracy on test set, using the threshold from dev set |
### Rewards
| Rank | Prize |
|-----------|---------|
| 1st place | $ 3,000 |
| 2nd place | $ 1,500 |
| 3rd place | $ 500 |
Sponsors: ``Moredian Technology``
## Evaluation
### Evaluation Criteria
For performance evaluation, we selected the recently standardized ISO/IEC 30107-3 metrics: Attack Presentation Classification Error Rate (APCER), Normal/Bona Fide Presentation Classification Error Rate (NPCER/BPCER) and Average Classification Error Rate (ACER). APCER and BPCER/NPCER measure the error rates on fake and live samples, respectively; the ACER value is used as the final evaluation criterion.
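Given binary labels and a score threshold, these metrics reduce to simple averages. A minimal sketch (our own illustration, not the official evaluation code) is:
```python
import numpy as np

def acer_metrics(scores, labels, thr):
    """APCER/BPCER/ACER for scores = P(live); labels: 1 = live, 0 = spoof."""
    pred_live = np.asarray(scores) >= thr
    labels = np.asarray(labels).astype(bool)
    apcer = float(np.mean(pred_live[~labels]))   # spoof accepted as live
    bpcer = float(np.mean(~pred_live[labels]))   # live rejected as spoof
    return apcer, bpcer, (apcer + bpcer) / 2     # ACER = average of the two
```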
### Submission Format
**Phase1**: The training dataset is used to train the model (label: live=1, fake=0). The trained model is then used to predict scores for the samples in dev.txt. Participants can directly submit the predicted score file through the CodaLab system. Note that the order of the samples in dev.txt cannot be changed. The final submitted file contains a total of 140,058 lines. Each line in the file contains two parts separated by a space. The first part is the path of each image in dev.txt and must contain the set name (``dev/``); the second part is the prediction score given by the model (representing the probability that the sample belongs to a live face, which must be in the range [0.0, 1.0]). Such as:
```
dev/000001.jpg 0.15361 # Note: line 1 - the first row of dev.txt
......
dev/140058.jpg 0.23394 # Note: line 140,058 - the last row of dev.txt
```
The predicted file should be a ``.txt`` file and compressed into a ZIP file (do not add any folder in the ZIP).
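A sketch of a compliant writer, assuming a `predict` function returning P(live) and a local copy of the official `dev.txt` listing (both placeholders for your own pipeline):
```python
# Hypothetical: `predict(path)` returns the model's P(live) for one image,
# and dev.txt lists the relative image paths in the required order.
with open('dev.txt') as f, open('submission.txt', 'w') as out:
    for rel_path in (line.strip() for line in f if line.strip()):
        score = predict(rel_path)               # must lie in [0.0, 1.0]
        out.write(f'{rel_path} {score:.5f}\n')  # e.g. "dev/000001.jpg 0.15361"
```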
**Phase2**: To submit results in a single pass, participants need to combine the dev and test predictions into one file before submitting via the CodaLab system. Note that the order of the samples cannot be changed, and the dev sample list must be written before the test samples.
The final submission file contains a total of 895,237 lines. Each line in the file contains two parts separated by a space, such as:
```
dev/000001.jpg 0.15361 # Note: line 1 - the first row of dev.txt
......
dev/140058.jpg 0.23394 # Note: line 140,058 - the last row of dev.txt
test/000001.jpg 0.15361 # Note: line 140,059 - the first row of test.txt
......
test/755179.jpg 0.23394 # Note: line 895,237 - the last row of test.txt
```
The predicted file should be a ``.txt`` file and compressed into a ZIP file (do not add any folder in the ZIP).
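The concatenation step is then a short sketch, assuming per-set score files produced as in the Phase 1 sketch (the file names are hypothetical):
```python
# Dev predictions must come first; each file is already in its original order.
with open('phase2_submission.txt', 'w') as out:
    for part in ('dev_scores.txt', 'test_scores.txt'):
        with open(part) as f:
            out.write(f.read())
```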
## Dataset
### Rules
1. The dataset and its subsets can only be used for academic research purposes.
2. The user is not allowed to use the dataset or its subsets for any type of commercial purpose.
3. Any form of usage of the dataset in defamatory, pornographic, or any other unlawful manner, or violation of any applicable regulations or laws is forbidden. We are not responsible for any consequences caused by the above behaviors.
4. The User is not allowed to distribute, broadcast, or reproduce the dataset or its subsets in any way without official permission.
5. The user is not allowed to share, transfer, sell or resell the dataset or its subsets to any third party for any purpose. HOWEVER, providing dataset access to the user’s research associates, colleagues or team members is allowed if they agree to be bound by these usage rules.
6. All images in this dataset can be used for academic research purposes, BUT only the approved images of the dataset can be exhibited in the user’s publications (including but not limited to research papers and presentations for conferences or educational purposes). The approved images have special marks and are listed in an appendix.
7. We reserve the right to interpret and amend these rules.
8. Please cite us if the InsightFace Wild Anti-Spoofing Dataset or its subset is used in your research:
```
@misc{wang2023wild,
title={Wild Face Anti-Spoofing Challenge 2023: Benchmark and Results},
author={Dong Wang and Jia Guo and Qiqi Shao and Haochi He and Zhian Chen and Chuanbao Xiao and Ajian Liu and Sergio Escalera and Hugo Jair Escalante and Lei Zhen and Jun Wan and Jiankang Deng},
year={2023},
eprint={2304.05753},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
### Download
All users can obtain and use this dataset and its subsets only after signing the [Agreement](https://github.com/nttstar/insightface-resources/raw/master/files/License%20Agreement%20for%20InsightFace%20Wild%20Anti-Spoofing%20Dataset.pdf) and sending it to the official e-mail ``insightface.challenge#gmail.com``.
### Dataset Annotations
Please refer to the following tables for detailed information on the amount of labeled data and examples in the dataset:
#### Spoof Images, Training Set
Training Subset, live/spoof labels and categorization information are given:
<div align="left">
<img src="https://raw.githubusercontent.com/nttstar/insightface-resources/master/images/faswild_train_dataset.png" width="1024"/>
</div>
#### Spoof Images, Dev and Test Sets
Dev and Test Subsets, where the dev set is used to select the threshold.
<div align="left">
<img src="https://raw.githubusercontent.com/nttstar/insightface-resources/master/images/faswild_devtest_dataset.png" width="1024"/>
</div>
#### Live Images
There are 205,146 live images in the training set, and 51,299/273,126 images in the dev and test sets, respectively.
## Baselines
### Dev
| Backbone | Input Crop | FLOPs | APCER | BPCER | ACER |
|----------|------------|-------|--------|--------|--------|
| ResNet18 | 224x224 | 1.8G | 4.244% | 4.245% | 4.245% |
### Test
| Backbone | Input Crop | FLOPs | APCER | BPCER | ACER |
|----------|------------|-------|--------|--------|--------|
| ResNet18 | 224x224 | 1.8G | 6.145% | 8.874% | 7.509% |
## Feedback
1) If you have any questions regarding the challenge, kindly open an issue on the InsightFace GitHub repository (recommended).
2) Or you can send an e-mail to ``insightface.challenge#gmail.com``
| insightface/challenges/cvpr23-fas-wild/README.md/0 | {
"file_path": "insightface/challenges/cvpr23-fas-wild/README.md",
"repo_id": "insightface",
"token_count": 2641
} | 96 |
import logging
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
| insightface/detection/retinaface/rcnn/logger.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/logger.py",
"repo_id": "insightface",
"token_count": 38
} | 97 |
from distutils.core import setup
from Cython.Build import cythonize
from distutils.extension import Extension
import numpy as np
# To compile and install locally run "python setup.py build_ext --inplace"
# To install library to Python site-packages run "python setup.py build_ext install"
ext_modules = [
Extension(
'_mask',
sources=['maskApi.c', '_mask.pyx'],
include_dirs=[np.get_include()],
extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
)
]
setup(name='pycocotools', ext_modules=cythonize(ext_modules))
| insightface/detection/retinaface/rcnn/pycocotools/setup.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/pycocotools/setup.py",
"repo_id": "insightface",
"token_count": 218
} | 98 |
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
| insightface/detection/scrfd/configs/_base_/default_runtime.py/0 | {
"file_path": "insightface/detection/scrfd/configs/_base_/default_runtime.py",
"repo_id": "insightface",
"token_count": 137
} | 99 |
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
OHEMSampler, PseudoSampler, RandomSampler,
SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi, kps2distance,
bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
distance2bbox, distance2kps, roi2bbox)
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
'bbox2roi', 'roi2bbox', 'bbox2result',
'distance2bbox', 'bbox2distance', 'distance2kps', 'kps2distance',
'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'CenterRegionAssigner',
'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh'
]
| insightface/detection/scrfd/mmdet/core/bbox/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/__init__.py",
"repo_id": "insightface",
"token_count": 763
} | 100 |
import torch
from mmdet.utils import util_mixins
class SamplingResult(util_mixins.NiceRepr):
"""Bbox sampling result.
Example:
>>> # xdoctest: +IGNORE_WANT
>>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
>>> self = SamplingResult.random(rng=10)
>>> print(f'self = {self}')
self = <SamplingResult({
'neg_bboxes': torch.Size([12, 4]),
'neg_inds': tensor([ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
'num_gts': 4,
'pos_assigned_gt_inds': tensor([], dtype=torch.int64),
'pos_bboxes': torch.Size([0, 4]),
'pos_inds': tensor([], dtype=torch.int64),
'pos_is_gt': tensor([], dtype=torch.uint8)
})>
"""
def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.pos_bboxes = bboxes[pos_inds]
self.neg_bboxes = bboxes[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
if gt_bboxes.numel() == 0:
# hack for index error case
assert self.pos_assigned_gt_inds.numel() == 0
self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
else:
if len(gt_bboxes.shape) < 2:
gt_bboxes = gt_bboxes.view(-1, 4)
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def bboxes(self):
"""torch.Tensor: concatenated positive and negative boxes"""
return torch.cat([self.pos_bboxes, self.neg_bboxes])
def to(self, device):
"""Change the device of the data inplace.
Example:
>>> self = SamplingResult.random()
>>> print(f'self = {self.to(None)}')
>>> # xdoctest: +REQUIRES(--gpu)
>>> print(f'self = {self.to(0)}')
"""
_dict = self.__dict__
for key, value in _dict.items():
if isinstance(value, torch.Tensor):
_dict[key] = value.to(device)
return self
def __nice__(self):
data = self.info.copy()
data['pos_bboxes'] = data.pop('pos_bboxes').shape
data['neg_bboxes'] = data.pop('neg_bboxes').shape
parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
body = ' ' + ',\n '.join(parts)
return '{\n' + body + '\n}'
@property
def info(self):
"""Returns a dictionary of info about the object."""
return {
'pos_inds': self.pos_inds,
'neg_inds': self.neg_inds,
'pos_bboxes': self.pos_bboxes,
'neg_bboxes': self.neg_bboxes,
'pos_is_gt': self.pos_is_gt,
'num_gts': self.num_gts,
'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
}
@classmethod
def random(cls, rng=None, **kwargs):
"""
Args:
rng (None | int | numpy.random.RandomState): seed or state.
kwargs (keyword arguments):
- num_preds: number of predicted boxes
- num_gts: number of true boxes
- p_ignore (float): probability of a predicted box assigned to \
an ignored truth.
- p_assigned (float): probability of a predicted box not being \
assigned.
- p_use_label (float | bool): with labels or not.
Returns:
:obj:`SamplingResult`: Randomly generated sampling result.
Example:
>>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
>>> self = SamplingResult.random()
>>> print(self.__dict__)
"""
from mmdet.core.bbox.samplers.random_sampler import RandomSampler
from mmdet.core.bbox.assigners.assign_result import AssignResult
from mmdet.core.bbox import demodata
rng = demodata.ensure_rng(rng)
# make probabilistic?
num = 32
pos_fraction = 0.5
neg_pos_ub = -1
assign_result = AssignResult.random(rng=rng, **kwargs)
# Note we could just compute an assignment
bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)
gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)
if rng.rand() > 0.2:
# sometimes algorithms squeeze their data, be robust to that
gt_bboxes = gt_bboxes.squeeze()
bboxes = bboxes.squeeze()
if assign_result.labels is None:
gt_labels = None
else:
gt_labels = None # todo
if gt_labels is None:
add_gt_as_proposals = False
else:
add_gt_as_proposals = True # make probabilistic?
sampler = RandomSampler(
num,
pos_fraction,
neg_pos_ub=neg_pos_ub,
add_gt_as_proposals=add_gt_as_proposals,
rng=rng)
self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
return self
| insightface/detection/scrfd/mmdet/core/bbox/samplers/sampling_result.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/samplers/sampling_result.py",
"repo_id": "insightface",
"token_count": 2746
} | 101 |
from abc import ABCMeta, abstractmethod
import cv2
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmcv.ops.roi_align import roi_align
class BaseInstanceMasks(metaclass=ABCMeta):
"""Base class for instance masks."""
@abstractmethod
def rescale(self, scale, interpolation='nearest'):
"""Rescale masks as large as possible while keeping the aspect ratio.
For details can refer to `mmcv.imrescale`.
Args:
scale (tuple[int]): The maximum size (h, w) of rescaled mask.
interpolation (str): Same as :func:`mmcv.imrescale`.
Returns:
BaseInstanceMasks: The rescaled masks.
"""
pass
@abstractmethod
def resize(self, out_shape, interpolation='nearest'):
"""Resize masks to the given out_shape.
Args:
out_shape: Target (h, w) of resized mask.
interpolation (str): See :func:`mmcv.imresize`.
Returns:
BaseInstanceMasks: The resized masks.
"""
pass
@abstractmethod
def flip(self, flip_direction='horizontal'):
"""Flip masks along the given direction.
Args:
flip_direction (str): Either 'horizontal' or 'vertical'.
Returns:
BaseInstanceMasks: The flipped masks.
"""
pass
@abstractmethod
def pad(self, out_shape, pad_val):
"""Pad masks to the given size of (h, w).
Args:
out_shape (tuple[int]): Target (h, w) of padded mask.
pad_val (int): The padded value.
Returns:
BaseInstanceMasks: The padded masks.
"""
pass
@abstractmethod
def crop(self, bbox):
"""Crop each mask by the given bbox.
Args:
bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).
Return:
BaseInstanceMasks: The cropped masks.
"""
pass
@abstractmethod
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device,
interpolation='bilinear'):
"""Crop and resize masks by the given bboxes.
This function is mainly used in mask targets computation.
It first aligns masks to bboxes by assigned_inds, then crops the masks
with the assigned bboxes and resizes them to the size of (mask_h, mask_w)
Args:
bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
out_shape (tuple[int]): Target (h, w) of resized mask
inds (ndarray): Indexes to assign masks to each bbox
device (str): Device of bboxes
interpolation (str): See `mmcv.imresize`
Return:
BaseInstanceMasks: the cropped and resized masks.
"""
pass
@abstractmethod
def expand(self, expanded_h, expanded_w, top, left):
"""see :class:`Expand`."""
pass
@property
@abstractmethod
def areas(self):
"""ndarray: areas of each instance."""
pass
@abstractmethod
def to_ndarray(self):
"""Convert masks to the format of ndarray.
Return:
ndarray: Converted masks in the format of ndarray.
"""
pass
@abstractmethod
def to_tensor(self, dtype, device):
"""Convert masks to the format of Tensor.
Args:
dtype (str): Dtype of converted mask.
device (torch.device): Device of converted masks.
Returns:
Tensor: Converted masks in the format of Tensor.
"""
pass
@abstractmethod
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=0,
interpolation='bilinear'):
"""Translate the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
fill_val (int | float): Border value. Default 0.
interpolation (str): Same as :func:`mmcv.imtranslate`.
Returns:
Translated masks.
"""
pass
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Shear the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
magnitude (int | float): The magnitude used for shear.
direction (str): The shear direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border. Default 0.
interpolation (str): Same as in :func:`mmcv.imshear`.
Returns:
ndarray: Sheared masks.
"""
pass
@abstractmethod
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""Rotate the masks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
angle (int | float): Rotation angle in degrees. Positive values
mean counter-clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the
rotation in source image. If not specified, the center of
the image will be used.
scale (int | float): Isotropic scale factor.
fill_val (int | float): Border value. Default 0 for masks.
Returns:
Rotated masks.
"""
pass
class BitmapMasks(BaseInstanceMasks):
"""This class represents masks in the form of bitmaps.
Args:
masks (ndarray): ndarray of masks in shape (N, H, W), where N is
the number of objects.
height (int): height of masks
width (int): width of masks
"""
def __init__(self, masks, height, width):
self.height = height
self.width = width
if len(masks) == 0:
self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
else:
assert isinstance(masks, (list, np.ndarray))
if isinstance(masks, list):
assert isinstance(masks[0], np.ndarray)
assert masks[0].ndim == 2 # (H, W)
else:
assert masks.ndim == 3 # (N, H, W)
self.masks = np.stack(masks).reshape(-1, height, width)
assert self.masks.shape[1] == self.height
assert self.masks.shape[2] == self.width
def __getitem__(self, index):
"""Index the BitmapMask.
Args:
index (int | ndarray): Indices in the format of integer or ndarray.
Returns:
:obj:`BitmapMasks`: Indexed bitmap masks.
"""
masks = self.masks[index].reshape(-1, self.height, self.width)
return BitmapMasks(masks, self.height, self.width)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'num_masks={len(self.masks)}, '
s += f'height={self.height}, '
s += f'width={self.width})'
return s
def __len__(self):
"""Number of masks."""
return len(self.masks)
def rescale(self, scale, interpolation='nearest'):
"""See :func:`BaseInstanceMasks.rescale`."""
if len(self.masks) == 0:
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
else:
rescaled_masks = np.stack([
mmcv.imrescale(mask, scale, interpolation=interpolation)
for mask in self.masks
])
height, width = rescaled_masks.shape[1:]
return BitmapMasks(rescaled_masks, height, width)
def resize(self, out_shape, interpolation='nearest'):
"""See :func:`BaseInstanceMasks.resize`."""
if len(self.masks) == 0:
resized_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
resized_masks = np.stack([
mmcv.imresize(mask, out_shape, interpolation=interpolation)
for mask in self.masks
])
return BitmapMasks(resized_masks, *out_shape)
def flip(self, flip_direction='horizontal'):
"""See :func:`BaseInstanceMasks.flip`."""
assert flip_direction in ('horizontal', 'vertical', 'diagonal')
if len(self.masks) == 0:
flipped_masks = self.masks
else:
flipped_masks = np.stack([
mmcv.imflip(mask, direction=flip_direction)
for mask in self.masks
])
return BitmapMasks(flipped_masks, self.height, self.width)
def pad(self, out_shape, pad_val=0):
"""See :func:`BaseInstanceMasks.pad`."""
if len(self.masks) == 0:
padded_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
padded_masks = np.stack([
mmcv.impad(mask, shape=out_shape, pad_val=pad_val)
for mask in self.masks
])
return BitmapMasks(padded_masks, *out_shape)
def crop(self, bbox):
"""See :func:`BaseInstanceMasks.crop`."""
assert isinstance(bbox, np.ndarray)
assert bbox.ndim == 1
# clip the boundary
bbox = bbox.copy()
bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
if len(self.masks) == 0:
cropped_masks = np.empty((0, h, w), dtype=np.uint8)
else:
cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
return BitmapMasks(cropped_masks, h, w)
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device='cpu',
interpolation='bilinear'):
"""See :func:`BaseInstanceMasks.crop_and_resize`."""
if len(self.masks) == 0:
empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
return BitmapMasks(empty_masks, *out_shape)
# convert bboxes to tensor
if isinstance(bboxes, np.ndarray):
bboxes = torch.from_numpy(bboxes).to(device=device)
if isinstance(inds, np.ndarray):
inds = torch.from_numpy(inds).to(device=device)
num_bbox = bboxes.shape[0]
fake_inds = torch.arange(
num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5
rois = rois.to(device=device)
if num_bbox > 0:
gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
0, inds).to(dtype=rois.dtype)
targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
1.0, 0, 'avg', True).squeeze(1)
resized_masks = (targets >= 0.5).cpu().numpy()
else:
resized_masks = []
return BitmapMasks(resized_masks, *out_shape)
def expand(self, expanded_h, expanded_w, top, left):
"""See :func:`BaseInstanceMasks.expand`."""
if len(self.masks) == 0:
expanded_mask = np.empty((0, expanded_h, expanded_w),
dtype=np.uint8)
else:
expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
dtype=np.uint8)
expanded_mask[:, top:top + self.height,
left:left + self.width] = self.masks
return BitmapMasks(expanded_mask, expanded_h, expanded_w)
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=0,
interpolation='bilinear'):
"""Translate the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
offset (int | float): The offset for translate.
direction (str): The translate direction, either "horizontal"
or "vertical".
fill_val (int | float): Border value. Default 0 for masks.
interpolation (str): Same as :func:`mmcv.imtranslate`.
Returns:
BitmapMasks: Translated BitmapMasks.
"""
if len(self.masks) == 0:
translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
translated_masks = mmcv.imtranslate(
self.masks.transpose((1, 2, 0)),
offset,
direction,
border_value=fill_val,
interpolation=interpolation)
if translated_masks.ndim == 2:
translated_masks = translated_masks[:, :, None]
translated_masks = translated_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(translated_masks, *out_shape)
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""Shear the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
magnitude (int | float): The magnitude used for shear.
direction (str): The shear direction, either "horizontal"
or "vertical".
border_value (int | tuple[int]): Value used in case of a
constant border.
interpolation (str): Same as in :func:`mmcv.imshear`.
Returns:
BitmapMasks: The sheared masks.
"""
if len(self.masks) == 0:
sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
else:
sheared_masks = mmcv.imshear(
self.masks.transpose((1, 2, 0)),
magnitude,
direction,
border_value=border_value,
interpolation=interpolation)
if sheared_masks.ndim == 2:
sheared_masks = sheared_masks[:, :, None]
sheared_masks = sheared_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(sheared_masks, *out_shape)
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""Rotate the BitmapMasks.
Args:
out_shape (tuple[int]): Shape for output mask, format (h, w).
angle (int | float): Rotation angle in degrees. Positive values
mean counter-clockwise rotation.
center (tuple[float], optional): Center point (w, h) of the
rotation in source image. If not specified, the center of
the image will be used.
scale (int | float): Isotropic scale factor.
fill_val (int | float): Border value. Default 0 for masks.
Returns:
BitmapMasks: Rotated BitmapMasks.
"""
if len(self.masks) == 0:
rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)
else:
rotated_masks = mmcv.imrotate(
self.masks.transpose((1, 2, 0)),
angle,
center=center,
scale=scale,
border_value=fill_val)
if rotated_masks.ndim == 2:
# case when only one mask, (h, w)
rotated_masks = rotated_masks[:, :, None] # (h, w, 1)
rotated_masks = rotated_masks.transpose(
(2, 0, 1)).astype(self.masks.dtype)
return BitmapMasks(rotated_masks, *out_shape)
@property
def areas(self):
"""See :py:attr:`BaseInstanceMasks.areas`."""
return self.masks.sum((1, 2))
def to_ndarray(self):
"""See :func:`BaseInstanceMasks.to_ndarray`."""
return self.masks
def to_tensor(self, dtype, device):
"""See :func:`BaseInstanceMasks.to_tensor`."""
return torch.tensor(self.masks, dtype=dtype, device=device)
class PolygonMasks(BaseInstanceMasks):
"""This class represents masks in the form of polygons.
Each polygon mask is a list with three levels: the first level of the
list corresponds to objects, the second level to the polys that compose
each object, and the third level to the poly coordinates.
Args:
masks (list[list[ndarray]]): The first level of the list
corresponds to objects, the second level to the polys that
compose the object, the third level to the poly coordinates
height (int): height of masks
width (int): width of masks
"""
def __init__(self, masks, height, width):
assert isinstance(masks, list)
if len(masks) > 0:
assert isinstance(masks[0], list)
assert isinstance(masks[0][0], np.ndarray)
self.height = height
self.width = width
self.masks = masks
def __getitem__(self, index):
"""Index the polygon masks.
Args:
index (ndarray | List): The indices.
Returns:
:obj:`PolygonMasks`: The indexed polygon masks.
"""
if isinstance(index, np.ndarray):
index = index.tolist()
if isinstance(index, list):
masks = [self.masks[i] for i in index]
else:
try:
masks = self.masks[index]
except Exception:
raise ValueError(
f'Unsupported input of type {type(index)} for indexing!')
if len(masks) and isinstance(masks[0], np.ndarray):
masks = [masks] # ensure a list of three levels
return PolygonMasks(masks, self.height, self.width)
def __iter__(self):
return iter(self.masks)
def __repr__(self):
s = self.__class__.__name__ + '('
s += f'num_masks={len(self.masks)}, '
s += f'height={self.height}, '
s += f'width={self.width})'
return s
def __len__(self):
"""Number of masks."""
return len(self.masks)
def rescale(self, scale, interpolation=None):
"""see :func:`BaseInstanceMasks.rescale`"""
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
if len(self.masks) == 0:
rescaled_masks = PolygonMasks([], new_h, new_w)
else:
rescaled_masks = self.resize((new_h, new_w))
return rescaled_masks
def resize(self, out_shape, interpolation=None):
"""see :func:`BaseInstanceMasks.resize`"""
if len(self.masks) == 0:
resized_masks = PolygonMasks([], *out_shape)
else:
h_scale = out_shape[0] / self.height
w_scale = out_shape[1] / self.width
resized_masks = []
for poly_per_obj in self.masks:
resized_poly = []
for p in poly_per_obj:
p = p.copy()
p[0::2] *= w_scale
p[1::2] *= h_scale
resized_poly.append(p)
resized_masks.append(resized_poly)
resized_masks = PolygonMasks(resized_masks, *out_shape)
return resized_masks
def flip(self, flip_direction='horizontal'):
"""see :func:`BaseInstanceMasks.flip`"""
assert flip_direction in ('horizontal', 'vertical', 'diagonal')
if len(self.masks) == 0:
flipped_masks = PolygonMasks([], self.height, self.width)
else:
flipped_masks = []
for poly_per_obj in self.masks:
flipped_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if flip_direction == 'horizontal':
p[0::2] = self.width - p[0::2]
elif flip_direction == 'vertical':
p[1::2] = self.height - p[1::2]
else:
p[0::2] = self.width - p[0::2]
p[1::2] = self.height - p[1::2]
flipped_poly_per_obj.append(p)
flipped_masks.append(flipped_poly_per_obj)
flipped_masks = PolygonMasks(flipped_masks, self.height,
self.width)
return flipped_masks
def crop(self, bbox):
"""see :func:`BaseInstanceMasks.crop`"""
assert isinstance(bbox, np.ndarray)
assert bbox.ndim == 1
# clip the boundary
bbox = bbox.copy()
bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
if len(self.masks) == 0:
cropped_masks = PolygonMasks([], h, w)
else:
cropped_masks = []
for poly_per_obj in self.masks:
cropped_poly_per_obj = []
for p in poly_per_obj:
# pycocotools will clip the boundary
p = p.copy()
p[0::2] -= bbox[0]
p[1::2] -= bbox[1]
cropped_poly_per_obj.append(p)
cropped_masks.append(cropped_poly_per_obj)
cropped_masks = PolygonMasks(cropped_masks, h, w)
return cropped_masks
def pad(self, out_shape, pad_val=0):
"""Padding has no effect on polygons."""
return PolygonMasks(self.masks, *out_shape)
def expand(self, *args, **kwargs):
"""TODO: Add expand for polygon"""
raise NotImplementedError
def crop_and_resize(self,
bboxes,
out_shape,
inds,
device='cpu',
interpolation='bilinear'):
"""see :func:`BaseInstanceMasks.crop_and_resize`"""
out_h, out_w = out_shape
if len(self.masks) == 0:
return PolygonMasks([], out_h, out_w)
resized_masks = []
for i in range(len(bboxes)):
mask = self.masks[inds[i]]
bbox = bboxes[i, :]
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1, 1)
h = np.maximum(y2 - y1, 1)
h_scale = out_h / max(h, 0.1) # avoid too large scale
w_scale = out_w / max(w, 0.1)
resized_mask = []
for p in mask:
p = p.copy()
# crop
# pycocotools will clip the boundary
p[0::2] -= bbox[0]
p[1::2] -= bbox[1]
# resize
p[0::2] *= w_scale
p[1::2] *= h_scale
resized_mask.append(p)
resized_masks.append(resized_mask)
return PolygonMasks(resized_masks, *out_shape)
def translate(self,
out_shape,
offset,
direction='horizontal',
fill_val=None,
interpolation=None):
"""Translate the PolygonMasks."""
assert fill_val is None or fill_val == 0, 'Here fill_val is not '\
f'used, and should be None or 0 by default, got {fill_val}.'
if len(self.masks) == 0:
translated_masks = PolygonMasks([], *out_shape)
else:
translated_masks = []
for poly_per_obj in self.masks:
translated_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if direction == 'horizontal':
p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
elif direction == 'vertical':
p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
translated_poly_per_obj.append(p)
translated_masks.append(translated_poly_per_obj)
translated_masks = PolygonMasks(translated_masks, *out_shape)
return translated_masks
def shear(self,
out_shape,
magnitude,
direction='horizontal',
border_value=0,
interpolation='bilinear'):
"""See :func:`BaseInstanceMasks.shear`."""
if len(self.masks) == 0:
sheared_masks = PolygonMasks([], *out_shape)
else:
sheared_masks = []
if direction == 'horizontal':
shear_matrix = np.stack([[1, magnitude],
[0, 1]]).astype(np.float32)
elif direction == 'vertical':
shear_matrix = np.stack([[1, 0], [magnitude,
1]]).astype(np.float32)
for poly_per_obj in self.masks:
sheared_poly = []
for p in poly_per_obj:
p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]
new_coords = np.matmul(shear_matrix, p) # [2, n]
new_coords[0, :] = np.clip(new_coords[0, :], 0,
out_shape[1])
new_coords[1, :] = np.clip(new_coords[1, :], 0,
out_shape[0])
sheared_poly.append(
new_coords.transpose((1, 0)).reshape(-1))
sheared_masks.append(sheared_poly)
sheared_masks = PolygonMasks(sheared_masks, *out_shape)
return sheared_masks
def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
"""See :func:`BaseInstanceMasks.rotate`."""
if len(self.masks) == 0:
rotated_masks = PolygonMasks([], *out_shape)
else:
rotated_masks = []
rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
for poly_per_obj in self.masks:
rotated_poly = []
for p in poly_per_obj:
p = p.copy()
coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]
# pad 1 to convert from format [x, y] to homogeneous
# coordinates format [x, y, 1]
coords = np.concatenate(
(coords, np.ones((coords.shape[0], 1), coords.dtype)),
axis=1) # [n, 3]
rotated_coords = np.matmul(
rotate_matrix[None, :, :],
coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]
rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
out_shape[1])
rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
out_shape[0])
rotated_poly.append(rotated_coords.reshape(-1))
rotated_masks.append(rotated_poly)
rotated_masks = PolygonMasks(rotated_masks, *out_shape)
return rotated_masks
def to_bitmap(self):
"""convert polygon masks to bitmap masks."""
bitmap_masks = self.to_ndarray()
return BitmapMasks(bitmap_masks, self.height, self.width)
@property
def areas(self):
"""Compute areas of masks.
This func is modified from `detectron2
<https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.
The function only works for polygon masks, computing areas with the shoelace formula.
Return:
ndarray: areas of each instance
""" # noqa: W501
area = []
for polygons_per_obj in self.masks:
area_per_obj = 0
for p in polygons_per_obj:
area_per_obj += self._polygon_area(p[0::2], p[1::2])
area.append(area_per_obj)
return np.asarray(area)
def _polygon_area(self, x, y):
"""Compute the area of a component of a polygon.
Using the shoelace formula:
https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
Args:
x (ndarray): x coordinates of the component
y (ndarray): y coordinates of the component
Return:
float: the area of the component
""" # noqa: 501
return 0.5 * np.abs(
np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
def to_ndarray(self):
"""Convert masks to the format of ndarray."""
if len(self.masks) == 0:
return np.empty((0, self.height, self.width), dtype=np.uint8)
bitmap_masks = []
for poly_per_obj in self.masks:
bitmap_masks.append(
polygon_to_bitmap(poly_per_obj, self.height, self.width))
return np.stack(bitmap_masks)
def to_tensor(self, dtype, device):
"""See :func:`BaseInstanceMasks.to_tensor`."""
if len(self.masks) == 0:
return torch.empty((0, self.height, self.width),
dtype=dtype,
device=device)
ndarray_masks = self.to_ndarray()
return torch.tensor(ndarray_masks, dtype=dtype, device=device)
def polygon_to_bitmap(polygons, height, width):
"""Convert masks from the form of polygons to bitmaps.
Args:
polygons (list[ndarray]): masks in polygon representation
height (int): mask height
width (int): mask width
Return:
ndarray: the converted masks in bitmap representation
"""
rles = maskUtils.frPyObjects(polygons, height, width)
rle = maskUtils.merge(rles)
bitmap_mask = maskUtils.decode(rle).astype(bool)  # np.bool is deprecated
return bitmap_mask
| insightface/detection/scrfd/mmdet/core/mask/structures.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/mask/structures.py",
"repo_id": "insightface",
"token_count": 15691
} | 102 |
from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
ContrastTransform, EqualizeTransform, Rotate, Shear,
Translate)
from .compose import Compose
from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
ToDataContainer, ToTensor, Transpose, to_tensor)
from .instaboost import InstaBoost
from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
LoadMultiChannelImageFromFiles, LoadProposals)
from .test_time_aug import MultiScaleFlipAug
from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize,
Pad, PhotoMetricDistortion, RandomCenterCropPad,
RandomSquareCrop,
RandomCrop, RandomFlip, Resize, SegRescale)
__all__ = [
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
'LoadImageFromFile', 'LoadImageFromWebcam',
'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
'RandomSquareCrop',
'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
'ContrastTransform', 'Translate'
]
| insightface/detection/scrfd/mmdet/datasets/pipelines/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/datasets/pipelines/__init__.py",
"repo_id": "insightface",
"token_count": 609
} | 103 |
from .backbones import * # noqa: F401,F403
from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
build_detector, build_head, build_loss, build_neck,
build_roi_extractor, build_shared_head)
from .dense_heads import * # noqa: F401,F403
from .detectors import * # noqa: F401,F403
from .losses import * # noqa: F401,F403
from .necks import * # noqa: F401,F403
from .roi_heads import * # noqa: F401,F403
__all__ = [
'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
'build_shared_head', 'build_head', 'build_loss', 'build_detector'
]
| insightface/detection/scrfd/mmdet/models/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/__init__.py",
"repo_id": "insightface",
"token_count": 354
} | 104 |
from .anchor_free_head import AnchorFreeHead
from .anchor_head import AnchorHead
from .atss_head import ATSSHead
from .centripetal_head import CentripetalHead
from .corner_head import CornerHead
from .fcos_head import FCOSHead
from .fovea_head import FoveaHead
from .free_anchor_retina_head import FreeAnchorRetinaHead
from .fsaf_head import FSAFHead
from .ga_retina_head import GARetinaHead
from .ga_rpn_head import GARPNHead
from .gfl_head import GFLHead
from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
from .nasfcos_head import NASFCOSHead
from .paa_head import PAAHead
from .pisa_retinanet_head import PISARetinaHead
from .pisa_ssd_head import PISASSDHead
from .reppoints_head import RepPointsHead
from .retina_head import RetinaHead
from .retina_sepbn_head import RetinaSepBNHead
from .rpn_head import RPNHead
from .sabl_retina_head import SABLRetinaHead
from .ssd_head import SSDHead
from .transformer_head import TransformerHead
from .vfnet_head import VFNetHead
from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
from .yolo_head import YOLOV3Head
from .scrfd_head import SCRFDHead
__all__ = [
'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'TransformerHead',
'SCRFDHead'
]
| insightface/detection/scrfd/mmdet/models/dense_heads/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/__init__.py",
"repo_id": "insightface",
"token_count": 632
} | 105 |
import copy
import torch.nn as nn
from mmcv.cnn import (ConvModule, Scale, bias_init_with_prob,
caffe2_xavier_init, normal_init)
from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
"""Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.
It is quite similar to the FCOS head, except for the searched structure of
the classification branch and bbox regression branch, where a structure of
"dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
"""
def _init_layers(self):
"""Initialize layers of the head."""
dconv3x3_config = dict(
type='DCNv2',
kernel_size=3,
use_bias=True,
deform_groups=2,
padding=1)
conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
conv1x1_config = dict(type='Conv', kernel_size=1)
self.arch_config = [
dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
]
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i, op_ in enumerate(self.arch_config):
op = copy.deepcopy(op_)
chn = self.in_channels if i == 0 else self.feat_channels
assert isinstance(op, dict)
use_bias = op.pop('use_bias', False)
padding = op.pop('padding', 0)
kernel_size = op.pop('kernel_size')
module = ConvModule(
chn,
self.feat_channels,
kernel_size,
stride=1,
padding=padding,
norm_cfg=self.norm_cfg,
bias=use_bias,
conv_cfg=op)
self.cls_convs.append(copy.deepcopy(module))
self.reg_convs.append(copy.deepcopy(module))
self.conv_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
"""Initialize weights of the head."""
# retinanet_bias_init
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv_reg, std=0.01)
normal_init(self.conv_centerness, std=0.01)
normal_init(self.conv_cls, std=0.01, bias=bias_cls)
for branch in [self.cls_convs, self.reg_convs]:
for module in branch.modules():
if isinstance(module, ConvModule) \
and isinstance(module.conv, nn.Conv2d):
caffe2_xavier_init(module.conv)
| insightface/detection/scrfd/mmdet/models/dense_heads/nasfcos_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/nasfcos_head.py",
"repo_id": "insightface",
"token_count": 1414
} | 106 |
from .atss import ATSS
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .cornernet import CornerNet
from .detr import DETR
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .gfl import GFL
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .paa import PAA
from .point_rend import PointRend
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .single_stage import SingleStageDetector
from .trident_faster_rcnn import TridentFasterRCNN
from .two_stage import TwoStageDetector
from .vfnet import VFNet
from .yolact import YOLACT
from .yolo import YOLOV3
from .scrfd import SCRFD
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector',
'FOVEA', 'FSAF', 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA',
'YOLOV3', 'YOLACT', 'VFNet', 'DETR', 'TridentFasterRCNN', 'SCRFD'
]
| insightface/detection/scrfd/mmdet/models/detectors/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/detectors/__init__.py",
"repo_id": "insightface",
"token_count": 468
} | 107 |
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
"""NAS-FCOS: Fast Neural Architecture Search for Object Detection.
https://arxiv.org/abs/1906.04423
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
| insightface/detection/scrfd/mmdet/models/detectors/nasfcos.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/detectors/nasfcos.py",
"repo_id": "insightface",
"token_count": 302
} | 108 |
import numpy as np
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
assert pred.size() == target.size() and target.numel() > 0
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Args:
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
beta (float, optional): The loss is a piecewise function of prediction
and target. ``beta`` serves as a threshold for the difference
between the prediction and target. Defaults to 1.0.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
weight (torch.Tensor, optional): Sample-wise loss weight with
shape (N, ).
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
| insightface/detection/scrfd/mmdet/models/losses/balanced_l1_loss.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/losses/balanced_l1_loss.py",
"repo_id": "insightface",
"token_count": 1909
} | 109 |
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init
from mmcv.ops.carafe import CARAFEPack
from ..builder import NECKS
@NECKS.register_module()
class FPN_CARAFE(nn.Module):
"""FPN_CARAFE is a more flexible implementation of FPN. It allows more
choices of upsample methods during the top-down pathway.
It can reproduce the performance of the ICCV 2019 paper
CARAFE: Content-Aware ReAssembly of FEatures.
Please refer to https://arxiv.org/abs/1905.02188 for more details.
Args:
in_channels (list[int]): Number of channels for each input feature map.
out_channels (int): Output channels of feature pyramids.
num_outs (int): Number of output stages.
start_level (int): Start level of feature pyramids.
(Default: 0)
end_level (int): End level of feature pyramids.
(Default: -1 indicates the last level).
norm_cfg (dict): Dictionary to construct and config norm layer.
activate (str): Type of activation function in ConvModule
(Default: None indicates w/o activation).
order (dict): Order of components in ConvModule.
upsample (str): Type of upsample layer.
upsample_cfg (dict): Dictionary to construct and config upsample layer.
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1)):
super(FPN_CARAFE, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.with_bias = norm_cfg is None
self.upsample_cfg = upsample_cfg.copy()
self.upsample = self.upsample_cfg.get('type')
self.relu = nn.ReLU(inplace=False)
self.order = order
assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
assert self.upsample in [
'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
]
if self.upsample in ['deconv', 'pixel_shuffle']:
assert hasattr(
self.upsample_cfg,
'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
self.upsample_modules = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if i != self.backbone_end_level - 1:
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample == 'deconv':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsample_cfg_.update(channels=out_channels, scale_factor=2)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsample_cfg_.update(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsample_module = build_upsample_layer(upsample_cfg_)
self.upsample_modules.append(upsample_module)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_out_levels = (
num_outs - self.backbone_end_level + self.start_level)
if extra_out_levels >= 1:
for i in range(extra_out_levels):
in_channels = (
self.in_channels[self.backbone_end_level -
1] if i == 0 else out_channels)
extra_l_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if self.upsample == 'deconv':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsampler_cfg_ = dict(
channels=out_channels,
scale_factor=2,
**self.upsample_cfg)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsampler_cfg_ = dict(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsampler_cfg_['type'] = self.upsample
upsample_module = build_upsample_layer(upsampler_cfg_)
extra_fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
self.upsample_modules.append(upsample_module)
self.fpn_convs.append(extra_fpn_conv)
self.lateral_convs.append(extra_l_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
"""Initialize the weights of module."""
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
xavier_init(m, distribution='uniform')
for m in self.modules():
if isinstance(m, CARAFEPack):
m.init_weights()
def slice_as(self, src, dst):
"""Slice ``src`` as ``dst``
Note:
``src`` should have the same or larger size than ``dst``.
Args:
src (torch.Tensor): Tensors to be sliced.
dst (torch.Tensor): ``src`` will be sliced to have the same
size as ``dst``.
Returns:
torch.Tensor: Sliced tensor.
"""
assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
return src
else:
return src[:, :, :dst.size(2), :dst.size(3)]
def tensor_add(self, a, b):
"""Add tensors ``a`` and ``b`` that might have different sizes."""
if a.size() == b.size():
c = a + b
else:
c = a + self.slice_as(b, a)
return c
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = []
for i, lateral_conv in enumerate(self.lateral_convs):
if i <= self.backbone_end_level - self.start_level:
input = inputs[min(i + self.start_level, len(inputs) - 1)]
else:
input = laterals[-1]
lateral = lateral_conv(input)
laterals.append(lateral)
# build top-down path
for i in range(len(laterals) - 1, 0, -1):
if self.upsample is not None:
upsample_feat = self.upsample_modules[i - 1](laterals[i])
else:
upsample_feat = laterals[i]
laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
# build outputs
num_conv_outs = len(self.fpn_convs)
outs = []
for i in range(num_conv_outs):
out = self.fpn_convs[i](laterals[i])
outs.append(out)
return tuple(outs)
| insightface/detection/scrfd/mmdet/models/necks/fpn_carafe.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/necks/fpn_carafe.py",
"repo_id": "insightface",
"token_count": 5908
} | 110 |
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
"""RoI head for Double Head RCNN.
https://arxiv.org/abs/1904.06493
"""
def __init__(self, reg_roi_scale_factor, **kwargs):
super(DoubleHeadRoIHead, self).__init__(**kwargs)
self.reg_roi_scale_factor = reg_roi_scale_factor
def _bbox_forward(self, x, rois):
"""Box head forward function used in both training and testing."""
bbox_cls_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_reg_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs],
rois,
roi_scale_factor=self.reg_roi_scale_factor)
if self.with_shared_head:
bbox_cls_feats = self.shared_head(bbox_cls_feats)
bbox_reg_feats = self.shared_head(bbox_reg_feats)
cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
bbox_feats=bbox_cls_feats)
return bbox_results
| insightface/detection/scrfd/mmdet/models/roi_heads/double_roi_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/double_roi_head.py",
"repo_id": "insightface",
"token_count": 593
} | 111 |
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv import ops
class BaseRoIExtractor(nn.Module, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (list[int]): Strides of input feature maps.
"""
def __init__(self, roi_layer, out_channels, featmap_strides):
super(BaseRoIExtractor, self).__init__()
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self):
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def init_weights(self):
pass
def build_roi_layers(self, layer_cfg, featmap_strides):
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (dict): Dictionary to construct and config RoI layer
operation. Options are modules under ``mmcv/ops`` such as
``RoIAlign``.
            featmap_strides (List[int]): The strides of the input feature maps
                w.r.t. the original image size, which are used to scale RoI
                coordinates (original image coordinate system) to the feature
                coordinate system.
Returns:
nn.ModuleList: The RoI extractor modules for each level feature
map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois, scale_factor):
"""Scale RoI coordinates by scale factor.
Args:
rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
torch.Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
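    # Example: roi_rescale(rois=[[0, 10, 10, 20, 20]], scale_factor=2.0) keeps
    # the center (15, 15) and doubles the extent, giving [[0, 5, 5, 25, 25]].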
@abstractmethod
def forward(self, feats, rois, roi_scale_factor=None):
pass
| insightface/detection/scrfd/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py",
"repo_id": "insightface",
"token_count": 1274
} | 112 |
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmdet
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
| insightface/detection/scrfd/mmdet/utils/collect_env.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/utils/collect_env.py",
"repo_id": "insightface",
"token_count": 156
} | 113 |
#!/usr/bin/env bash
GPU=0
OUTPUT_DIR=wouts
THR=0.02
GROUP=scrfdgen2p5g
PREFIX=$GROUP
for i in {1..320}
do
TASK="$PREFIX"_"$i"
echo $TASK
CUDA_VISIBLE_DEVICES="$GPU" python -u tools/test_widerface.py ./configs/"$GROUP"/"$TASK".py ./work_dirs/"$TASK"/latest.pth --mode 0 --thr "$THR" --out "$OUTPUT_DIR"/"$GROUP"/"$TASK"
done
| insightface/detection/scrfd/search_tools/search_test.sh/0 | {
"file_path": "insightface/detection/scrfd/search_tools/search_test.sh",
"repo_id": "insightface",
"token_count": 175
} | 114 |
#!/usr/bin/env bash
GPU=1
GROUP=scrfd
TASK=scrfd_2.5g_bnkps
#CUDA_VISIBLE_DEVICES="$GPU" python -u tools/benchmark_vga.py ./configs/"$GROUP"/"$TASK".py ./work_dirs/"$TASK"/latest.pth #--cpu
CUDA_VISIBLE_DEVICES="$GPU" python -u tools/test_widerface.py ./configs/"$GROUP"/"$TASK".py ./work_dirs/"$TASK"/model.pth --mode 0 --out wouts --save-preds
| insightface/detection/scrfd/tools/test_example.sh/0 | {
"file_path": "insightface/detection/scrfd/tools/test_example.sh",
"repo_id": "insightface",
"token_count": 164
} | 115 |
# Decoupled Multi-task Learning with Cyclical Self-Regulation for Face Parsing.
The official repository of *[Decoupled Multi-task Learning with Cyclical Self-Regulation for Face Parsing. (CVPR 2022)](https://arxiv.org/abs/2203.14448)*.
## Installation
Our model is based on PyTorch 1.7.1 with Python 3.6.2.
```sh
pip install -r requirements.txt
```
## Data
You can download the original datasets:
- **Helen** : [https://www.sifeiliu.net/face-parsing](https://www.sifeiliu.net/face-parsing)
- **LaPa** : [https://github.com/JDAI-CV/lapa-dataset](https://github.com/JDAI-CV/lapa-dataset)
- **CelebAMask-HQ** : [https://github.com/switchablenorms/CelebAMask-HQ](https://github.com/switchablenorms/CelebAMask-HQ)
and put them in the ./dataset folder as below:
```
dataset/
images/
labels/
edges/
train_list.txt
test_list.txt
each line: 'images/100032540_1.jpg labels/100032540_1.png'
```
Besides, we provide the edge generation code in *generate_edge.py*. A minimal sketch of the idea is shown below.
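For reference, a boundary map can be derived from a label map with a morphological gradient, as in the sketch below (this only approximates what *generate_edge.py* does; the exact kernel size and border handling there may differ):
```python
import cv2
import numpy as np

def label_to_edge(label, radius=1):
    """Mark pixels whose neighborhood spans more than one class as edge pixels."""
    kernel = np.ones((2 * radius + 1, 2 * radius + 1), np.uint8)
    dilated = cv2.dilate(label, kernel)
    eroded = cv2.erode(label, kernel)
    edge = np.zeros_like(label, dtype=np.uint8)
    edge[dilated != eroded] = 255  # pixels on a class boundary become edge pixels
    return edge
```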
## Usage
If you need the ImageNet-pretrained ResNet-101, please download it from [baidu drive]() or [Google drive](https://drive.google.com/open?id=1rzLU-wK6rEorCNJfwrmIu5hY2wRMyKTK), and put it into the snapshot folder.
For distributed (multi-GPU) training: Inplace-ABN requires PyTorch distributed data parallel.
```
GPU=4,5,6,7
Node=4
dataset=./datasets/CelebAMask-HQ/
snapshot=./work_dirs/
CUDA_VISIBLE_DEVICES="$GPU" python -m torch.distributed.launch --nproc_per_node="$Node" --master_port=295002 train.py --data-dir "$dataset" --random-mirror --random-scale \
--gpu "$GPU" --batch-size 7 --input-size 473,473 --snapshot-dir "$snapshot" --num-classes 19 --epochs 200 --schp-start 150
```
For testing with [pretrained models](https://drive.google.com/file/d/1-PjUts1AMzXNyvw3VaJQmg43GJbfEpEQ/view?usp=sharing):
```
python test.py --data-dir "$dataset" --out-dir "$out_dir" --restore-from "$snapshot" --gpu "$GPU" --batch-size 7 --input-size 473,473 --dataset test --num-classes 19
```
## Reference
If you use our code, please consider citing our paper:
```
@inproceedings{Zheng2022DecoupledML,
title={Decoupled Multi-task Learning with Cyclical Self-Regulation for Face Parsing},
author={Qi Zheng and Jiankang Deng and Zheng Zhu and Ying Li and Stefanos Zafeiriou},
booktitle={Computer Vision and Pattern Recognition},
year={2022}
}
```
| insightface/parsing/dml_csr/README.md/0 | {
"file_path": "insightface/parsing/dml_csr/README.md",
"repo_id": "insightface",
"token_count": 869
} | 116 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author : Qingping Zheng
@Contact : qingpingzheng2014@gmail.com
@File : schp.py
@Time : 10/01/21 00:00 PM
@Desc :
@License : Licensed under the Apache License, Version 2.0 (the "License");
@Copyright : Copyright 2022 The Authors. All Rights Reserved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
def moving_average(net1, net2, alpha=1):
    # EMA-style update: net1 = (1 - alpha) * net1 + alpha * net2
    for param1, param2 in zip(net1.parameters(), net2.parameters()):
        param1.data *= (1.0 - alpha)
        param1.data += param2.data * alpha
def _check_bn(module, flag):
classname = module.__class__.__name__
if classname.find('BatchNorm') != -1 or classname.find('InPlaceABNSync') != -1:
flag[0] = True
def check_bn(model):
flag = [False]
model.apply(lambda module: _check_bn(module, flag))
return flag[0]
def reset_bn(module):
classname = module.__class__.__name__
if classname.find('BatchNorm') != -1 or classname.find('InPlaceABNSync') != -1:
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
def _get_momenta(module, momenta):
classname = module.__class__.__name__
if classname.find('BatchNorm') != -1 or classname.find('InPlaceABNSync') != -1:
momenta[module] = module.momentum
def _set_momenta(module, momenta):
classname = module.__class__.__name__
if classname.find('BatchNorm') != -1 or classname.find('InPlaceABNSync') != -1:
module.momentum = momenta[module]
def bn_re_estimate(loader, model):
    """Re-estimate BatchNorm running statistics with one pass over ``loader``."""
    if not check_bn(model):
print('No batch norm layer detected')
return
model.train()
momenta = {}
model.apply(reset_bn)
model.apply(lambda module: _get_momenta(module, momenta))
n = 0
for i_iter, batch in enumerate(loader):
# images, labels, edges, _ = batch
images = batch[0]
b = images.data.size(0)
momentum = b / (n + b)
for module in momenta.keys():
module.momentum = momentum
model(images)
n += b
model.apply(lambda module: _set_momenta(module, momenta))
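# A minimal usage sketch of one self-correction cycle (``schp_model``, ``model``,
# ``train_loader`` and ``cycle_idx`` are assumed to exist in the training script):
#   moving_average(schp_model, model, alpha=1.0 / (cycle_idx + 1))
#   bn_re_estimate(train_loader, schp_model)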
def save_schp_checkpoint(states, is_best_parsing, output_dir, filename='schp_checkpoint.pth.tar'):
save_path = os.path.join(output_dir, filename)
# if os.path.exists(save_path):
# os.remove(save_path)
torch.save(states, save_path)
if is_best_parsing and 'state_dict' in states:
best_save_path = os.path.join(output_dir, 'model_parsing_best.pth.tar')
if os.path.exists(best_save_path):
os.remove(best_save_path)
torch.save(states, best_save_path)
| insightface/parsing/dml_csr/utils/schp.py/0 | {
"file_path": "insightface/parsing/dml_csr/utils/schp.py",
"repo_id": "insightface",
"token_count": 1164
} | 117 |
"""
This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/utils/filesystem.py
"""
import os
import os.path as osp
import errno
def get_model_dir(name, root='~/.insightface'):
root = os.path.expanduser(root)
model_dir = osp.join(root, 'models', name)
return model_dir
def makedirs(path):
"""Create directory recursively if not exists.
Similar to `makedir -p`, you can skip checking existence before this function.
Parameters
----------
path : str
Path of the desired dir
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def try_import(package, message=None):
"""Try import specified package, with custom message support.
Parameters
----------
package : str
The name of the targeting package.
message : str, default is None
If not None, this function will raise customized error message when import error is found.
Returns
-------
module if found, raise ImportError otherwise
"""
try:
return __import__(package)
except ImportError as e:
if not message:
raise e
raise ImportError(message)
def try_import_cv2():
"""Try import cv2 at runtime.
Returns
-------
cv2 module if found. Raise ImportError otherwise
"""
msg = "cv2 is required, you can install by package manager, e.g. 'apt-get', \
or `pip install opencv-python --user` (note that this is unofficial PYPI package)."
return try_import('cv2', msg)
def try_import_mmcv():
"""Try import mmcv at runtime.
Returns
-------
mmcv module if found. Raise ImportError otherwise
"""
msg = "mmcv is required, you can install by first `pip install Cython --user` \
and then `pip install mmcv --user` (note that this is unofficial PYPI package)."
return try_import('mmcv', msg)
def try_import_rarfile():
"""Try import rarfile at runtime.
Returns
-------
rarfile module if found. Raise ImportError otherwise
"""
msg = "rarfile is required, you can install by first `sudo apt-get install unrar` \
and then `pip install rarfile --user` (note that this is unofficial PYPI package)."
return try_import('rarfile', msg)
def import_try_install(package, extern_url=None):
"""Try import the specified package.
If the package not installed, try use pip to install and import if success.
Parameters
----------
package : str
The name of the package trying to import.
extern_url : str or None, optional
The external url if package is not hosted on PyPI.
For example, you can install a package using:
"pip install git+http://github.com/user/repo/tarball/master/egginfo=xxx".
In this case, you can pass the url to the extern_url.
Returns
-------
<class 'Module'>
The imported python module.
"""
try:
return __import__(package)
except ImportError:
try:
from pip import main as pipmain
except ImportError:
from pip._internal import main as pipmain
# trying to install package
url = package if extern_url is None else extern_url
pipmain(['install', '--user',
url]) # will raise SystemExit Error if fails
# trying to load again
try:
return __import__(package)
except ImportError:
import sys
import site
user_site = site.getusersitepackages()
if user_site not in sys.path:
sys.path.append(user_site)
return __import__(package)
return __import__(package)
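# A minimal usage sketch (the pip fallback assumes network access):
#   rarfile = import_try_install('rarfile')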
def try_import_dali():
"""Try import NVIDIA DALI at runtime.
"""
try:
dali = __import__('nvidia.dali', fromlist=['pipeline', 'ops', 'types'])
dali.Pipeline = dali.pipeline.Pipeline
except ImportError:
class dali:
class Pipeline:
def __init__(self):
raise NotImplementedError(
"DALI not found, please check if you installed it correctly."
)
return dali
| insightface/python-package/insightface/utils/filesystem.py/0 | {
"file_path": "insightface/python-package/insightface/utils/filesystem.py",
"repo_id": "insightface",
"token_count": 1680
} | 118 |
#!/usr/bin/env bash
DEVKIT="/raid5data/dplearn/megaface/devkit/experiments"
ALGO="r100ii" #ms1mv2
ROOT=$(dirname `which $0`)
echo $ROOT
python -u gen_megaface.py --gpu 0 --algo "$ALGO" --model '../../models2/model-r100-ii/model,0'
python -u remove_noises.py --algo "$ALGO"
cd "$DEVKIT"
LD_LIBRARY_PATH="/usr/local/lib64:$LD_LIBRARY_PATH" python -u run_experiment.py "$ROOT/feature_out_clean/megaface" "$ROOT/feature_out_clean/facescrub" _"$ALGO".bin ../../mx_results/ -s 1000000 -p ../templatelists/facescrub_features_list.json
cd -
| insightface/recognition/_evaluation_/megaface/run.sh/0 | {
"file_path": "insightface/recognition/_evaluation_/megaface/run.sh",
"repo_id": "insightface",
"token_count": 229
} | 119 |
import oneflow as flow
import oneflow.nn as nn
def conv3x3(
in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1
) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class IBasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
        # `planes` is passed here as the (truthy) `inplace` argument of nn.ReLU;
        # the PyTorch reference implementation uses nn.PReLU(planes) instead
        self.prelu = nn.ReLU(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
class IResNet(nn.Module):
fc_scale = 7 * 7
def __init__(
self,
block,
layers,
dropout=0,
num_features=512,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
fp16=False,
):
super(IResNet, self).__init__()
self.fp16 = fp16
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
self.prelu = nn.ReLU(self.inplanes)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
)
self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
self.dropout = nn.Dropout(p=dropout, inplace=True)
self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
self.features = nn.BatchNorm1d(num_features, eps=1e-05)
nn.init.constant_(self.features.weight, 1.0)
self.features.weight.requires_grad = False
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, IBasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion, eps=1e-05,),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
)
)
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = flow.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x)
x = self.features(x)
return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet18(pretrained=False, progress=True, **kwargs):
return _iresnet(
"iresnet18", IBasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs
)
def iresnet34(pretrained=False, progress=True, **kwargs):
return _iresnet(
"iresnet34", IBasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs
)
def iresnet50(pretrained=False, progress=True, **kwargs):
return _iresnet(
"iresnet50", IBasicBlock, [3, 4, 14, 3], pretrained, progress, **kwargs
)
def iresnet100(pretrained=False, progress=True, **kwargs):
return _iresnet(
"iresnet100", IBasicBlock, [3, 13, 30, 3], pretrained, progress, **kwargs
)
def iresnet200(pretrained=False, progress=True, **kwargs):
return _iresnet(
"iresnet200", IBasicBlock, [6, 26, 60, 6], pretrained, progress, **kwargs
)
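# A minimal usage sketch (assumes oneflow is installed; 112x112 inputs match fc_scale):
#   net = iresnet50(num_features=512)
#   embeddings = net(flow.randn(2, 3, 112, 112))  # -> shape (2, 512)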
| insightface/recognition/arcface_oneflow/backbones/ir_resnet.py/0 | {
"file_path": "insightface/recognition/arcface_oneflow/backbones/ir_resnet.py",
"repo_id": "insightface",
"token_count": 3483
} | 120 |
# Service deployment based on PaddleServing
(English|[简体中文](./README_CN.md))
This document will introduce how to use [PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README.md) to deploy the Arcface dynamic-graph model as an online pipeline service.
Some Key Features of Paddle Serving:
- Integrate with Paddle training pipeline seamlessly, most paddle models can be deployed with one line command.
- Industrial serving features supported, such as models management, online loading, online A/B testing etc.
- Highly concurrent and efficient communication between clients and servers supported.
For an introduction to and tutorials on the Paddle Serving deployment framework, please refer to the [documentation](https://github.com/PaddlePaddle/Serving/blob/develop/README.md).
## Contents
- [Environmental preparation](#environmental-preparation)
- [Model conversion](#model-conversion)
- [Paddle Serving pipeline deployment](#paddle-serving-pipeline-deployment)
- [FAQ](#faq)
<a name="environmental-preparation"></a>
## Environmental preparation
Both the Arcface runtime environment and the Paddle Serving runtime environment are needed.
1. Please prepare the Arcface runtime environment; refer to this [link](../../README_en.md).
   Download the corresponding paddle whl package according to your environment; installing version 2.2+ is recommended.
2. The steps of PaddleServing operating environment prepare are as follows:
Install serving, which is used to start the service
```
pip3 install paddle-serving-server==0.6.3 # for CPU
pip3 install paddle-serving-server-gpu==0.6.3 # for GPU
# Other GPU environments need to confirm the environment and then choose to execute the following commands
pip3 install paddle-serving-server-gpu==0.6.3.post101 # GPU with CUDA10.1 + TensorRT6
pip3 install paddle-serving-server-gpu==0.6.3.post11 # GPU with CUDA11 + TensorRT7
```
3. Install the client to send requests to the service
Find the client installation package corresponding to your Python version at the [download link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md).
The Python 3.7 version is recommended here:
```
pip3 install paddle-serving-client==0.6.3
```
4. Install serving-app
```
pip3 install paddle-serving-app==0.6.3
```
**note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md).
<a name="model-conversion"></a>
## Model conversion
When using PaddleServing for service deployment, you need to convert the saved inference model into a serving model that is easy to deploy.
Firstly, download the inference model of Arcface
```
wget -nc -P ./inference https://paddle-model-ecology.bj.bcebos.com/model/insight-face/mobileface_v1.0_infer.tar
tar xf inference/mobileface_v1.0_infer.tar --strip-components 1 -C inference
```
Then, you can use the installed paddle_serving_client tool to convert the inference model into a serving model.
```
python3 -m paddle_serving_client.convert --dirname ./inference/ \
--model_filename inference.pdmodel \
--params_filename inference.pdiparams \
--serving_server ./MobileFaceNet_128_serving/ \
--serving_client ./MobileFaceNet_128_client/
```
After the model is converted, there will be two additional folders, `MobileFaceNet_128_serving` and `MobileFaceNet_128_client`, in the current folder, with the following format:
```
MobileFaceNet_128_serving
├── __model__
├── __params__
├── serving_server_conf.prototxt
└── serving_server_conf.stream.prototxt
MobileFaceNet_128_client/
├── serving_client_conf.prototxt
└── serving_client_conf.stream.prototxt
```
Other recognition models can be converted in the same way.
<a name="paddle-serving-pipeline-deployment"></a>
## Paddle Serving pipeline deployment
Download the InsightFace code; if you have already downloaded it, you can skip this step.
```
git clone https://github.com/deepinsight/insightface
# Enter the working directory
cd recognition/arcface_paddle/deploy/pdserving
```
The pdserving directory contains the code to start the pipeline service and send prediction requests, including:
```
__init__.py
config.yml # Start the service configuration file
ocr_reader.py # pre-processing and post-processing code implementation
pipeline_http_client.py # Script to send pipeline prediction request
web_service.py # Start the script of the pipeline server
```
2. Run the following command to start the service.
```
# Start the service and save the running log in log.txt
python3 web_service.py &>log.txt &
```
After the service is successfully started, a log similar to the following will be printed in log.txt

3. Send service request
```
python3 pipeline_http_client.py
```
After successfully running, the predicted result of the model will be printed in the cmd window. An example of the result is:

Adjust the concurrency numbers in config.yml to reach the highest QPS. Generally, the ratio of detection to recognition concurrency is 2:1:
```
det:
concurrency: 8
...
rec:
concurrency: 4
...
```
Multiple service requests can be sent at the same time if necessary.
The predicted performance data will be automatically written into the `PipelineServingLogs/pipeline.tracer` file.
Tested on 700 real pictures, the average QPS on a V100 GPU can reach around 57:
```
2021-11-04 13:38:52,507 Op(ArcFace):
2021-11-04 13:38:52,507 in[135.4579597902098 ms]
2021-11-04 13:38:52,507 prep[0.9921311188811189 ms]
2021-11-04 13:38:52,507 midp[3.9232132867132865 ms]
2021-11-04 13:38:52,507 postp[0.12166258741258741 ms]
2021-11-04 13:38:52,507 out[0.9898286713286714 ms]
2021-11-04 13:38:52,508 idle[0.9643989520087675]
2021-11-04 13:38:52,508 DAGExecutor:
2021-11-04 13:38:52,508 Query count[573]
2021-11-04 13:38:52,508 QPS[57.3 q/s]
2021-11-04 13:38:52,509 Succ[0.9982547993019197]
2021-11-04 13:38:52,509 Error req[394]
2021-11-04 13:38:52,509 Latency:
2021-11-04 13:38:52,509 ave[11.52941186736475 ms]
2021-11-04 13:38:52,509 .50[11.492 ms]
2021-11-04 13:38:52,509 .60[11.658 ms]
2021-11-04 13:38:52,509 .70[11.95 ms]
2021-11-04 13:38:52,509 .80[12.251 ms]
2021-11-04 13:38:52,509 .90[12.736 ms]
2021-11-04 13:38:52,509 .95[13.21 ms]
2021-11-04 13:38:52,509 .99[13.987 ms]
2021-11-04 13:38:52,510 Channel (server worker num[10]):
2021-11-04 13:38:52,510 chl0(In: ['@DAGExecutor'], Out: ['ArcFace']) size[0/0]
2021-11-04 13:38:52,510 chl1(In: ['ArcFace'], Out: ['@DAGExecutor']) size[0/0]
```
<a name="faq"></a>
## FAQ
**Q1**: No result is returned after sending a request.
**A1**: Do not set a proxy when starting the service or sending requests. You can disable the proxy before starting the service and before sending requests. The commands to disable the proxy are:
```
unset https_proxy
unset http_proxy
```
| insightface/recognition/arcface_paddle/deploy/pdserving/README.md/0 | {
"file_path": "insightface/recognition/arcface_paddle/deploy/pdserving/README.md",
"repo_id": "insightface",
"token_count": 2642
} | 121 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import sys
import numpy as np
import logging
import paddle
from visualdl import LogWriter
from utils.logging import AverageMeter, CallBackLogging
from datasets import CommonDataset, SyntheticDataset
from utils import losses
from .utils.verification import CallBackVerification
from .utils.io import Checkpoint
from . import classifiers
from . import backbones
from .static_model import StaticModel
RELATED_FLAGS_SETTING = {
'FLAGS_cudnn_exhaustive_search': 1,
'FLAGS_cudnn_batchnorm_spatial_persistent': 1,
'FLAGS_max_inplace_grad_add': 8,
'FLAGS_fraction_of_gpu_memory_to_use': 0.9999,
}
paddle.fluid.set_flags(RELATED_FLAGS_SETTING)
def train(args):
writer = LogWriter(logdir=args.logdir)
rank = int(os.getenv("PADDLE_TRAINER_ID", 0))
world_size = int(os.getenv("PADDLE_TRAINERS_NUM", 1))
gpu_id = int(os.getenv("FLAGS_selected_gpus", 0))
place = paddle.CUDAPlace(gpu_id)
if world_size > 1:
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.without_graph_optimization = True
fleet.init(is_collective=True, strategy=strategy)
if args.use_synthetic_dataset:
trainset = SyntheticDataset(args.num_classes, fp16=args.fp16)
else:
trainset = CommonDataset(
root_dir=args.data_dir,
label_file=args.label_file,
fp16=args.fp16,
is_bin=args.is_bin)
num_image = len(trainset)
total_batch_size = args.batch_size * world_size
steps_per_epoch = num_image // total_batch_size
if args.train_unit == 'epoch':
warmup_steps = steps_per_epoch * args.warmup_num
total_steps = steps_per_epoch * args.train_num
decay_steps = [x * steps_per_epoch for x in args.decay_boundaries]
total_epoch = args.train_num
else:
warmup_steps = args.warmup_num
total_steps = args.train_num
decay_steps = [x for x in args.decay_boundaries]
total_epoch = (total_steps + steps_per_epoch - 1) // steps_per_epoch
if rank == 0:
logging.info('world_size: {}'.format(world_size))
logging.info('total_batch_size: {}'.format(total_batch_size))
logging.info('warmup_steps: {}'.format(warmup_steps))
logging.info('steps_per_epoch: {}'.format(steps_per_epoch))
logging.info('total_steps: {}'.format(total_steps))
logging.info('total_epoch: {}'.format(total_epoch))
logging.info('decay_steps: {}'.format(decay_steps))
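    # linear scaling rule: args.lr is specified for a reference total batch size of 512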
base_lr = total_batch_size * args.lr / 512
lr_scheduler = paddle.optimizer.lr.PiecewiseDecay(
boundaries=decay_steps,
values=[
base_lr * (args.lr_decay**i) for i in range(len(decay_steps) + 1)
])
if warmup_steps > 0:
lr_scheduler = paddle.optimizer.lr.LinearWarmup(
lr_scheduler, warmup_steps, 0, base_lr)
train_program = paddle.static.Program()
test_program = paddle.static.Program()
startup_program = paddle.static.Program()
margin_loss_params = eval("losses.{}".format(args.loss))()
train_model = StaticModel(
main_program=train_program,
startup_program=startup_program,
backbone_class_name=args.backbone,
embedding_size=args.embedding_size,
classifier_class_name=args.classifier,
num_classes=args.num_classes,
sample_ratio=args.sample_ratio,
lr_scheduler=lr_scheduler,
momentum=args.momentum,
weight_decay=args.weight_decay,
dropout=args.dropout,
mode='train',
fp16=args.fp16,
fp16_configs={
'init_loss_scaling': args.init_loss_scaling,
'incr_every_n_steps': args.incr_every_n_steps,
'decr_every_n_nan_or_inf': args.decr_every_n_nan_or_inf,
'incr_ratio': args.incr_ratio,
'decr_ratio': args.decr_ratio,
'use_dynamic_loss_scaling': args.use_dynamic_loss_scaling,
'use_pure_fp16': args.fp16,
'custom_white_list': args.custom_white_list,
'custom_black_list': args.custom_black_list,
},
margin_loss_params=margin_loss_params, )
if rank == 0:
with open(os.path.join(args.output, 'main_program.txt'), 'w') as f:
f.write(str(train_program))
if rank == 0 and args.do_validation_while_train:
test_model = StaticModel(
main_program=test_program,
startup_program=startup_program,
backbone_class_name=args.backbone,
embedding_size=args.embedding_size,
dropout=args.dropout,
mode='test',
fp16=args.fp16, )
callback_verification = CallBackVerification(
args.validation_interval_step, rank, args.batch_size, test_program,
list(test_model.backbone.input_dict.values()),
list(test_model.backbone.output_dict.values()), args.val_targets,
args.data_dir)
callback_logging = CallBackLogging(args.log_interval_step, rank,
world_size, total_steps,
args.batch_size, writer)
checkpoint = Checkpoint(
rank=rank,
world_size=world_size,
embedding_size=args.embedding_size,
num_classes=args.num_classes,
model_save_dir=os.path.join(args.output, args.backbone),
checkpoint_dir=args.checkpoint_dir,
max_num_last_checkpoint=args.max_num_last_checkpoint)
exe = paddle.static.Executor(place)
exe.run(startup_program)
start_epoch = 0
global_step = 0
loss_avg = AverageMeter()
if args.resume:
extra_info = checkpoint.load(program=train_program, for_train=True)
start_epoch = extra_info['epoch'] + 1
lr_state = extra_info['lr_state']
        # here last_epoch actually means last_step for PiecewiseDecay,
        # since we always use a step-style lr_scheduler
global_step = lr_state['last_epoch']
train_model.lr_scheduler.set_state_dict(lr_state)
train_loader = paddle.io.DataLoader(
trainset,
feed_list=list(train_model.backbone.input_dict.values()),
places=place,
return_list=False,
num_workers=args.num_workers,
batch_sampler=paddle.io.DistributedBatchSampler(
dataset=trainset,
batch_size=args.batch_size,
shuffle=True,
drop_last=True))
max_loss_scaling = np.array([args.max_loss_scaling]).astype(np.float32)
for epoch in range(start_epoch, total_epoch):
train_reader_cost = 0.0
train_run_cost = 0.0
total_samples = 0
reader_start = time.time()
for step, data in enumerate(train_loader):
train_reader_cost += time.time() - reader_start
global_step += 1
train_start = time.time()
loss_v = exe.run(
train_program,
feed=data,
fetch_list=[train_model.classifier.output_dict['loss']],
use_program_cache=True)
train_run_cost += time.time() - train_start
total_samples += args.batch_size
loss_avg.update(np.array(loss_v)[0], 1)
lr_value = train_model.optimizer.get_lr()
callback_logging(
global_step,
loss_avg,
epoch,
lr_value,
avg_reader_cost=train_reader_cost / args.log_interval_step,
avg_batch_cost=(train_reader_cost + train_run_cost) / args.log_interval_step,
avg_samples=total_samples / args.log_interval_step,
ips=total_samples / (train_reader_cost + train_run_cost))
if rank == 0 and args.do_validation_while_train:
callback_verification(global_step)
train_model.lr_scheduler.step()
if global_step >= total_steps:
break
sys.stdout.flush()
            if rank == 0 and global_step > 0 and global_step % args.log_interval_step == 0:
train_reader_cost = 0.0
train_run_cost = 0.0
total_samples = 0
reader_start = time.time()
checkpoint.save(
train_program,
lr_scheduler=train_model.lr_scheduler,
epoch=epoch,
for_train=True)
writer.close()
| insightface/recognition/arcface_paddle/static/train.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/static/train.py",
"repo_id": "insightface",
"token_count": 4158
} | 122 |
# Basic training and inference functional tests on Linux
The main program of the basic Linux training and inference functional tests is `test_train_inference_python.sh`, which tests basic Python-based functions such as model training, evaluation and inference.
## 1. Summary of test conclusions
- Training:
| Algorithm | Model | Single machine, single GPU | Single machine, multi-GPU | Multi machine, multi-GPU | Model compression (single machine, multi-GPU) |
| :----: | :----: | :----: | :----: | :----: | :----: |
| ArcFace | mobileface | trains normally | trains normally | trains normally | - |
- Inference: the supported inference functions are summarized as follows,
| Model type | device | batchsize | tensorrt | mkldnn | CPU multi-threading |
| :----: | :----: | :----: | :----: | :----: | :----: |
| normal model | GPU | 1 | fp32 | - | - |
| normal model | CPU | 1 | - | fp32 | supported |
## 2. Test procedure
To configure the TIPC runtime environment, please refer to the [documentation](./install.md).
### 2.1 Install dependencies
- Install PaddlePaddle >= 2.2
- Install dependencies
```
pip3 install -r requirements.txt
```
- Install autolog (a tool for standardized log output)
```
pip3 install git+https://github.com/LDOUBLEV/AutoLog --force-reinstall
```
### 2.2 Functional tests
First run `prepare.sh` to prepare the data and models, then run `test_train_inference_python.sh` for testing; log files in the `python_infer_*.log` format are finally generated under the `test_tipc/output` directory.
`test_train_inference_python.sh` supports 5 run modes, each running on different data, used to test speed and accuracy respectively; this document only tests the lite_train_lite_infer mode:
- Mode 1: lite_train_lite_infer, which trains with a small amount of data to quickly verify that the training-to-inference pipeline runs end to end, without verifying accuracy or speed;
```shell
bash test_tipc/prepare.sh ./test_tipc/configs/ms1mv2_mobileface/train_infer_python.txt 'lite_train_lite_infer'
bash test_tipc/test_train_inference_python.sh ./test_tipc/ms1mv2_mobileface/train_infer_python.txt 'lite_train_lite_infer'
```
After running the corresponding commands, the run logs are automatically saved under the `test_tipc/output` folder. For example, in 'lite_train_lite_infer' mode, the training + inference chain is run, so the `test_tipc/output` folder contains the following files:
```
test_tipc/output/
|- results_python.log # log of the execution status of each command
|- norm_train_gpus_0_autocast_null_fp16_False/ # training logs and saved models of normal training on GPU card 0
|- norm_train_gpus_0_autocast_null_fp16_Trule/ # training logs and saved models of fp16 training on GPU card 0
......
|- python_infer_cpu_usemkldnn_True_threads_1_precision_fp32_batchsize_1.log # inference log on CPU with MKL-DNN enabled, 1 thread and batch_size=1
|- python_infer_gpu_usetrt_True_precision_fp32_batchsize_1.log # inference log on GPU with TensorRT enabled and batch_size=1
......
```
`results_python.log` contains the execution status of each command. If a run succeeds, it outputs:
```
Run successfully with command - python3.7 tools/train.py --config_file=configs/ms1mv2_mobileface.py --is_static=False --embedding_size=128 --fp16=False --dataset=MS1M_v2 --data_dir=MS1M_v2/ --label_file=MS1M_v2/label.txt --num_classes=85742 --log_interval_step=1 --output=./test_tipc/output/norm_train_gpus_0_autocast_null_fp16_Trule --train_num=1 --fp16=Trule!
Run successfully with command - python3.7 tools/validation.py --is_static=False --backbone=MobileFaceNet_128 --embedding_size=128 --data_dir=MS1M_v2 --val_targets=lfw --batch_size=128 --checkpoint_dir=./test_tipc/output/norm_train_gpus_0_autocast_null_fp16_Trule/MobileFaceNet_128/0 !
......
```
If a run fails, it outputs:
```
Run failed with command - python3.7 tools/train.py --config_file=configs/ms1mv2_mobileface.py --is_static=False --embedding_size=128 --fp16=False --dataset=MS1M_v2 --data_dir=MS1M_v2/ --label_file=MS1M_v2/label.txt --num_classes=85742 --log_interval_step=1 --output=./test_tipc/output/norm_train_gpus_0_autocast_null_fp16_Trule --train_num=1 --fp16=Trule!
Run failed with command - python3.7 tools/validation.py --is_static=False --backbone=MobileFaceNet_128 --embedding_size=128 --data_dir=MS1M_v2 --val_targets=lfw --batch_size=128 --checkpoint_dir=./test_tipc/output/norm_train_gpus_0_autocast_null_fp16_Trule/MobileFaceNet_128/0 !
......
```
From the contents of `results_python.log`, you can easily determine which command failed.
## 3. More tutorials
This document is for functional testing only. For richer training and inference tutorials, please refer to:
[Model training and inference](../../README_cn.md) | insightface/recognition/arcface_paddle/test_tipc/docs/test_train_inference_python.md/0 | {
"file_path": "insightface/recognition/arcface_paddle/test_tipc/docs/test_train_inference_python.md",
"repo_id": "insightface",
"token_count": 2496
} | 123 |
#!/bin/bash
# run `python ijb.py --help` for more information
python -u ijb.py \
--model-prefix /home/face/insightface/recognition/partial_fc/mxnet/evaluation/glint360k_r100FC_0.1/model \
--image-path /data/anxiang/IJB_release/IJBC \
--result-dir ./results/test \
--model-epoch 0 \
--gpu 0,1,2,3 \
--target IJBC \
--job partial_fc \
--batch-size 256 \
-es 512
| insightface/recognition/partial_fc/mxnet/evaluation/example.sh/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/evaluation/example.sh",
"repo_id": "insightface",
"token_count": 149
} | 124 |
#!/bin/bash
# install mpi
#wget https://www.open-mpi.org/software/ompi/v4.0/downloads/openmpi-4.0.0.tar.gz
#sudo rm -rf /usr/local/lib/openmpi /usr/local/lib/libmca* /usr/local/lib/libmpi*
#sudo rm -rf /usr/local/lib/libompitrace* /usr/local/lib/libopen* /usr/local/lib/liboshmem* /usr/local/lib/mpi_*
tar zxf openmpi-4.0.0.tar.gz
cd openmpi-4.0.0 || exit 1
sudo ./configure --enable-orterun-prefix-by-default
sudo make -j 48 all
sudo make install
ldconfig | insightface/recognition/partial_fc/mxnet/setup-utils/install-mpi.sh/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/setup-utils/install-mpi.sh",
"repo_id": "insightface",
"token_count": 201
} | 125 |
from .iresnet import iresnet18, iresnet34, iresnet50, iresnet100, iresnet200
def get_model(name, **kwargs):
if name == "r18":
return iresnet18(False, **kwargs)
elif name == "r34":
return iresnet34(False, **kwargs)
elif name == "r50":
return iresnet50(False, **kwargs)
elif name == "r100":
return iresnet100(False, **kwargs)
elif name == "r200":
return iresnet200(False, **kwargs)
elif name == "r1024":
from .iresnet1024 import iresnet1024
return iresnet1024(False, **kwargs)
else:
        raise ValueError(f"unknown network name: {name}")
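# Example usage (assuming the standard arcface-style IResNet keyword arguments):
#   backbone = get_model("r50", fp16=True, num_features=512)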
| insightface/recognition/vpl/backbones/__init__.py/0 | {
"file_path": "insightface/recognition/vpl/backbones/__init__.py",
"repo_id": "insightface",
"token_count": 270
} | 126 |
import argparse
import logging
import os
import time
import torch
import torch.distributed as dist
import torch.nn.functional as F
import torch.utils.data.distributed
from torch.nn.utils import clip_grad_norm_
import losses
from backbones import get_model
from dataset import MXFaceDataset, DataLoaderX
from torch.utils.data import DataLoader, Dataset
from vpl import VPL
from utils.utils_amp import MaxClipGradScaler
from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
from utils.utils_logging import AverageMeter, init_logging
from utils.utils_dist import concat_all_gather, batch_shuffle_ddp, batch_unshuffle_ddp
from utils.utils_config import get_config
def main(args):
cfg = get_config(args.config)
if not cfg.tf32:
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
try:
world_size = int(os.environ['WORLD_SIZE'])
rank = int(os.environ['RANK'])
dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
except KeyError:
world_size = 1
rank = 0
dist_url = "tcp://127.0.0.1:12584"
dist.init_process_group(backend='nccl', init_method=dist_url, rank=rank, world_size=world_size)
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
if not os.path.exists(cfg.output) and rank==0:
os.makedirs(cfg.output)
else:
time.sleep(2)
log_root = logging.getLogger()
init_logging(log_root, rank, cfg.output)
if rank==0:
logging.info(args)
logging.info(cfg)
train_set = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_set, shuffle=True)
train_loader = DataLoaderX(
local_rank=local_rank, dataset=train_set, batch_size=cfg.batch_size,
sampler=train_sampler, num_workers=2, pin_memory=True, drop_last=True)
dropout = 0.4 if cfg.dataset == "webface" else 0
backbone = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
backbone_onnx = get_model(cfg.network, dropout=dropout, fp16=False)
if args.resume:
try:
backbone_pth = os.path.join(cfg.output, "backbone.pth")
backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
if rank==0:
logging.info("backbone resume successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("resume fail, backbone init successfully!")
for ps in backbone.parameters():
dist.broadcast(ps, 0)
backbone = torch.nn.parallel.DistributedDataParallel(
module=backbone, broadcast_buffers=False, device_ids=[local_rank])
backbone.train()
cfg_vpl = cfg.vpl
vpl_momentum = cfg_vpl['momentum']
if vpl_momentum:
backbone_w = get_model(cfg.network, dropout=dropout, fp16=cfg.fp16).to(local_rank)
backbone_w.train()
for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()):
param_w.data.copy_(param_b.data)
param_w.requires_grad = False
margin_softmax = losses.get_loss(cfg.loss)
module_fc = VPL(
rank=rank, local_rank=local_rank, world_size=world_size, resume=args.resume,
batch_size=cfg.batch_size, margin_softmax=margin_softmax, num_classes=cfg.num_classes,
sample_rate=cfg.sample_rate, embedding_size=cfg.embedding_size, prefix=cfg.output,
cfg = cfg_vpl)
opt_backbone = torch.optim.SGD(
params=[{'params': backbone.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay)
opt_pfc = torch.optim.SGD(
params=[{'params': module_fc.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay)
scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_backbone, lr_lambda=cfg.lr_func)
scheduler_pfc = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_pfc, lr_lambda=cfg.lr_func)
start_epoch = 0
total_step = int(len(train_set) / cfg.batch_size / world_size * cfg.num_epoch)
if rank==0: logging.info("Total Step is: %d" % total_step)
#for epoch in range(start_epoch, cfg.num_epoch):
# _lr = cfg.lr_func(epoch)
# logging.info('%d:%f'%(epoch, _lr))
callback_verification = CallBackVerification(10000, rank, cfg.val_targets, cfg.rec)
callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, None)
callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
loss = AverageMeter()
global_step = 0
grad_amp = MaxClipGradScaler(cfg.batch_size, 128 * cfg.batch_size, growth_interval=100) if cfg.fp16 else None
use_batch_shuffle = True
alpha = 0.999
for epoch in range(start_epoch, cfg.num_epoch):
train_sampler.set_epoch(epoch)
for step, (img, label) in enumerate(train_loader):
global_step += 1
#img = img.to(memory_format=torch.channels_last)
features = F.normalize(backbone(img))
feature_w = None
if vpl_momentum:
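                # MoCo-style momentum (EMA) update of the slow encoder backbone_w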
with torch.no_grad():
for param_b, param_w in zip(backbone.module.parameters(), backbone_w.parameters()):
param_w.data = param_w.data * alpha + param_b.data * (1. - alpha)
if use_batch_shuffle:
img_w, idx_unshuffle = batch_shuffle_ddp(img, rank, world_size)
feature_w = F.normalize(backbone_w(img_w))
if use_batch_shuffle:
feature_w = batch_unshuffle_ddp(feature_w, idx_unshuffle, rank, world_size)
feature_w = feature_w.detach()
x_grad, loss_v = module_fc.forward_backward(label, features, opt_pfc, feature_w)
if cfg.fp16:
features.backward(grad_amp.scale(x_grad))
grad_amp.unscale_(opt_backbone)
clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
grad_amp.step(opt_backbone)
grad_amp.update()
else:
features.backward(x_grad)
clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
opt_backbone.step()
opt_pfc.step()
module_fc.update()
opt_backbone.zero_grad()
opt_pfc.zero_grad()
loss.update(loss_v, 1)
callback_logging(global_step, loss, epoch, cfg.fp16, grad_amp)
callback_verification(global_step, backbone)
callback_checkpoint(global_step, backbone, module_fc, backbone_onnx)
scheduler_backbone.step()
scheduler_pfc.step()
dist.destroy_process_group()
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='PyTorch ArcFace-VPL Training')
parser.add_argument('config', type=str, help='py config file')
parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
parser.add_argument('--resume', type=int, default=0, help='model resuming')
args_ = parser.parse_args()
main(args_)
| insightface/recognition/vpl/train.py/0 | {
"file_path": "insightface/recognition/vpl/train.py",
"repo_id": "insightface",
"token_count": 3342
} | 127 |
import torch
from torch import nn
from torch.nn import functional as F
class IFLoss(nn.Module):
def __init__(self, eikonal_weight, mask_weight, reg_weight, normal_weight, alpha):
super().__init__()
self.eikonal_weight = eikonal_weight
self.mask_weight = mask_weight
self.reg_weight = reg_weight
self.normal_weight = normal_weight
self.alpha = alpha
self.l1_loss = nn.L1Loss(reduction='sum')
self.l2_loss = nn.MSELoss(reduction='sum')
self.cosine = nn.CosineSimilarity()
    def get_rgb_loss(self, rgb_values, rgb_gt, network_object_mask, object_mask):
if (network_object_mask & object_mask).sum() == 0:
return torch.tensor(0.0).cuda().float()
rgb_values = rgb_values[network_object_mask & object_mask]
rgb_gt = rgb_gt.reshape(-1, 3)[network_object_mask & object_mask]
rgb_loss = self.l1_loss(rgb_values, rgb_gt) / float(object_mask.shape[0])
return rgb_loss
def get_eikonal_loss(self, grad_theta):
if grad_theta.shape[0] == 0:
return torch.tensor(0.0).cuda().float()
eikonal_loss = ((grad_theta.norm(2, dim=1) - 1) ** 2).mean()
return eikonal_loss
def get_mask_loss(self, sdf_output, network_object_mask, object_mask):
mask = ~(network_object_mask & object_mask)
if mask.sum() == 0:
return torch.tensor(0.0).cuda().float()
        sdf_pred = -self.alpha * sdf_output[mask]  # alpha sharpens the mask logits
gt = object_mask[mask].float()
mask_loss = (1 / self.alpha) * F.binary_cross_entropy_with_logits(sdf_pred.squeeze(), gt, reduction='sum') / float(object_mask.shape[0])
return mask_loss
def get_reg_loss(self, point_gt, point_pre):
loss = self.l2_loss(point_gt, point_pre) / len(point_pre)
return loss
def forward(self, model_outputs, ground_truth):
rgb_gt = ground_truth['rgb'].cuda()
network_object_mask = model_outputs['network_object_mask']
object_mask = model_outputs['object_mask']
rgb_loss = self.get_rgb_loss(model_outputs['rgb_values'], rgb_gt, network_object_mask, object_mask)
mask_loss = self.get_mask_loss(model_outputs['sdf_output'], network_object_mask, object_mask)
eikonal_loss = self.get_eikonal_loss(model_outputs['grad_theta'])
reg_loss = self.get_reg_loss(model_outputs['points_mesh_ray_gt'], model_outputs['points_pre'])
normal_loss = 1 - torch.mean(self.cosine(model_outputs['points_mesh_ray_normals'], model_outputs['surface_normals']))
loss = rgb_loss + \
self.eikonal_weight * eikonal_loss + \
self.mask_weight * mask_loss + \
self.reg_weight * reg_loss + \
self.normal_weight * normal_loss
return {
'loss': loss,
'rgb_loss': rgb_loss,
'eikonal_loss': eikonal_loss,
'mask_loss': mask_loss,
'reg_loss': reg_loss,
'normal_loss': normal_loss,
}
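# A minimal usage sketch (the weight values are illustrative assumptions, not
# the paper's reported settings):
#   criterion = IFLoss(eikonal_weight=0.1, mask_weight=100.0,
#                      reg_weight=1.0, normal_weight=1.0, alpha=50.0)
#   out = criterion(model_outputs, ground_truth)
#   out['loss'].backward()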
| insightface/reconstruction/PBIDR/code/model/loss.py/0 | {
"file_path": "insightface/reconstruction/PBIDR/code/model/loss.py",
"repo_id": "insightface",
"token_count": 1393
} | 128 |
# Generalizing Gaze Estimation with Weak-Supervision from Synthetic Views
The implementation of the [arXiv paper](https://arxiv.org/abs/2212.02997) for the gaze estimation task.
## Preparation
1. Download the [dataset](https://drive.google.com/file/d/1erYIoTCbXk1amofJ6yTGhbpmsovWrrva/view?usp=sharing) and put it under ``data/``
2. Download [eyes3d.pkl](https://drive.google.com/file/d/1as7_ew6kEFTHpcrlk8QKvgFJJ8cKzM3q/view?usp=sharing) and put it under ``assets/``
3. Download [pretrained checkpoint](https://drive.google.com/file/d/1cqmChXSnTwUpk3jD7JLpZKHOuBLlC3_N/view?usp=sharing) and put it under ``assets/``
4. Install libraries:
```
pip install timm pytorch-lightning==1.8.1 albumentations==1.3.0
```
## Testing with pre-trained model
After downloading the pre-trained checkpoint above,
```
python test_gaze.py assets/latest_a.ckpt
```
## Training
```
python trainer_gaze.py
```
## Results
<img src="https://github.com/nttstar/insightface-resources/blob/master/images/gaze_0.png?raw=true" width="800" alt=""/>
<img src="https://github.com/nttstar/insightface-resources/blob/master/images/gaze_1.png?raw=true" width="800" alt=""/>
<img src="https://github.com/nttstar/insightface-resources/blob/master/images/gaze_2.png?raw=true" width="800" alt=""/>
| insightface/reconstruction/gaze/README.md/0 | {
"file_path": "insightface/reconstruction/gaze/README.md",
"repo_id": "insightface",
"token_count": 507
} | 129 |
import numbers
import os
import os.path as osp
import pickle
import queue as Queue
import threading
import logging
import math
import pandas as pd
from scipy.spatial.transform import Rotation
import mxnet as mx
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from skimage import transform as sktrans
import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
from augs import *
def Rt26dof(R_t, degrees=False):
yaw_gt, pitch_gt, roll_gt = Rotation.from_matrix(R_t[:3, :3].T).as_euler('yxz', degrees=degrees)
label_euler = np.array([pitch_gt, yaw_gt, roll_gt])
label_translation = R_t[3, :3]
label_6dof = np.concatenate([label_euler, label_translation])
return label_6dof
def gen_target_pip(target, target_map, target_local_x, target_local_y):
map_channel, map_height, map_width = target_map.shape
target = target.reshape(-1, 2)
assert map_channel == target.shape[0]
for i in range(map_channel):
mu_x = int(math.floor(target[i][0] * map_width))
mu_y = int(math.floor(target[i][1] * map_height))
mu_x = max(0, mu_x)
mu_y = max(0, mu_y)
mu_x = min(mu_x, map_width-1)
mu_y = min(mu_y, map_height-1)
target_map[i, mu_y, mu_x] = 1
shift_x = target[i][0] * map_width - mu_x
shift_y = target[i][1] * map_height - mu_y
target_local_x[i, mu_y, mu_x] = shift_x
target_local_y[i, mu_y, mu_x] = shift_y
return target_map, target_local_x, target_local_y
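# A minimal usage sketch (68 landmarks on an 8x8 map; the shapes follow the
# PIPNet-style convention assumed above):
#   target = np.random.rand(68, 2).astype(np.float32)
#   tmap = np.zeros((68, 8, 8), dtype=np.float32)
#   tx, ty = np.zeros_like(tmap), np.zeros_like(tmap)
#   tmap, tx, ty = gen_target_pip(target, tmap, tx, ty)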
def get_tris(cfg):
import trimesh
data_root = Path(cfg.root_dir)
obj_path = data_root / 'resources/example.obj'
mesh = trimesh.load(obj_path, process=False)
verts_template = np.array(mesh.vertices, dtype=np.float32)
tris = np.array(mesh.faces, dtype=np.int32)
#print(verts_template.shape, tris.shape)
return tris
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, local_rank, max_prefetch=6):
super(BackgroundGenerator, self).__init__()
self.queue = Queue.Queue(max_prefetch)
self.generator = generator
self.local_rank = local_rank
self.daemon = True
self.start()
def run(self):
torch.cuda.set_device(self.local_rank)
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
def __next__(self):
return self.next()
def __iter__(self):
return self
class DataLoaderX(DataLoader):
def __init__(self, local_rank, **kwargs):
super(DataLoaderX, self).__init__(**kwargs)
self.stream = torch.cuda.Stream(local_rank)
self.local_rank = local_rank
def __iter__(self):
self.iter = super(DataLoaderX, self).__iter__()
self.iter = BackgroundGenerator(self.iter, self.local_rank)
self.preload()
return self
def preload(self):
self.batch = next(self.iter, None)
if self.batch is None:
return None
with torch.cuda.stream(self.stream):
for k in range(len(self.batch)):
self.batch[k] = self.batch[k].to(device=self.local_rank,
non_blocking=True)
def __next__(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is None:
raise StopIteration
self.preload()
return batch
class FaceDataset(Dataset):
def __init__(self, cfg, is_train=True, is_test=False, local_rank=0):
super(FaceDataset, self).__init__()
self.data_root = Path(cfg.root_dir)
self.input_size = cfg.input_size
self.transform = get_aug_transform(cfg)
self.local_rank = local_rank
self.is_test = is_test
txt_path = self.data_root / 'resources/projection_matrix.txt'
self.M_proj = np.loadtxt(txt_path, dtype=np.float32)
if is_test:
data_root = Path(cfg.root_dir)
csv_path = data_root / 'list/WCPA_track2_test.csv'
self.df = pd.read_csv(csv_path, dtype={'subject_id': str, 'facial_action': str, 'img_id': str})
else:
if is_train:
self.df = pd.read_csv(osp.join(cfg.cache_dir, 'train_list.csv'), dtype={'subject_id': str, 'facial_action': str, 'img_id': str})
else:
self.df = pd.read_csv(osp.join(cfg.cache_dir, 'val_list.csv'), dtype={'subject_id': str, 'facial_action': str, 'img_id': str})
self.label_6dof_mean = [-0.018197, -0.017891, 0.025348, -0.005368, 0.001176, -0.532206] # mean of pitch, yaw, roll, tx, ty, tz
self.label_6dof_std = [0.314015, 0.271809, 0.081881, 0.022173, 0.048839, 0.065444] # std of pitch, yaw, roll, tx, ty, tz
self.align_face = cfg.align_face
if not self.align_face:
self.dst_pts = np.float32([
[0, 0],
[0, cfg.input_size- 1],
[cfg.input_size- 1, 0]
])
else:
dst_pts = np.array([
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041] ], dtype=np.float32 )
new_size = 144
dst_pts[:,0] += ((new_size-112)//2)
dst_pts[:,1] += 8
dst_pts[:,:] *= (self.input_size/float(new_size))
self.dst_pts = dst_pts
if local_rank==0:
logging.info('data_transform_list:%s'%self.transform)
logging.info('len:%d'%len(self.df))
self.is_test_aug = False
self.eye_dataset = None
if cfg.eyes is not None:
from eye_dataset import EyeDataset
self.eye_dataset = EyeDataset(cfg.eyes['root'])
def set_test_aug(self):
if not self.is_test_aug:
from easydict import EasyDict as edict
cfg = edict()
cfg.aug_modes = ['test-aug']
cfg.input_size = self.input_size
cfg.task = 0
self.transform = get_aug_transform(cfg)
self.is_test_aug = True
def get_names(self, index):
subject_id = self.df['subject_id'][index]
facial_action = self.df['facial_action'][index]
img_id = self.df['img_id'][index]
return subject_id, facial_action, img_id
def __getitem__(self, index):
subject_id = self.df['subject_id'][index]
facial_action = self.df['facial_action'][index]
img_id = self.df['img_id'][index]
img_path = self.data_root / 'image' / subject_id / facial_action / f'{img_id}_ar.jpg'
npz_path = self.data_root / 'info' / subject_id / facial_action / f'{img_id}_info.npz'
txt_path = self.data_root / '68landmarks' / subject_id / facial_action / f'{img_id}_68landmarks.txt'
#if not osp.exists(img_path):
# continue
#print(img_path)
img_raw = cv2.imread(str(img_path))
#if img_raw is None:
# print('XXX ERR:', img_path)
img_raw = cv2.cvtColor(img_raw, cv2.COLOR_BGR2RGB)
#print(img_raw.shape)
img_h, img_w, _ = img_raw.shape
pts68 = np.loadtxt(txt_path, dtype=np.int32)
x_min, y_min = pts68.min(axis=0)
x_max, y_max = pts68.max(axis=0)
x_center = (x_min + x_max) / 2
y_center = (y_min + y_max) / 2
w, h = x_max - x_min, y_max - y_min
if not self.align_face:
size = max(w, h)
ss = np.array([0.75, 0.75, 0.85, 0.65]) # predefined expand size
left = x_center - ss[0] * size
right = x_center + ss[1] * size
top = y_center - ss[2] * size
bottom = y_center + ss[3] * size
src_pts = np.float32([
[left, top],
[left, bottom],
[right, top]
])
tform = cv2.getAffineTransform(src_pts, self.dst_pts)
else:
src_pts = np.float32([
(pts68[36] + pts68[39])/2,
(pts68[42] + pts68[45])/2,
pts68[30],
pts68[48],
pts68[54]
])
tf = sktrans.SimilarityTransform()
tf.estimate(src_pts, self.dst_pts)
tform = tf.params[0:2,:]
img_local = cv2.warpAffine(img_raw, tform, (self.input_size,)*2, flags=cv2.INTER_CUBIC)
fake_points2d = np.ones( (1,2), dtype=np.float32) * (self.input_size//2)
#tform_inv = cv2.invertAffineTransform(tform)
#img_global = cv2.warpAffine(img_local, tform_inv, (img_w, img_h), borderValue=0.0)
#img_global = cv2.resize(img_global, (self.input_size, self.input_size))
if self.transform is not None:
t = self.transform(image=img_local, keypoints=fake_points2d)
img_local = t['image']
if self.is_test_aug:
height, width = img_local.shape[:2]
for trans in t["replay"]["transforms"]:
if trans['__class_fullname__']=='ShiftScaleRotate' and trans['applied']:
param = trans['params']
dx, dy, angle, scale = param['dx'], param['dy'], param['angle'], param['scale']
center = (width / 2, height / 2)
matrix = cv2.getRotationMatrix2D(center, angle, scale)
matrix[0, 2] += dx * width
matrix[1, 2] += dy * height
new_matrix = np.identity(3)
new_matrix[:2,:3] = matrix
old_tform = np.identity(3)
old_tform[:2,:3] = tform
#new_tform = np.dot(old_tform, new_matrix)
new_tform = np.dot(new_matrix, old_tform)
#print('label_tform:')
#print(label_tform.flatten())
#print(new_matrix.flatten())
#print(new_tform.flatten())
tform = new_tform[:2,:3]
break
#print('trans param:', param)
#img_global = self.transform(image=img_global)['image']
tform_tensor = torch.tensor(tform, dtype=torch.float32)
d = {'img_local': img_local, 'tform': tform_tensor}
if self.eye_dataset is not None:
eye_key = str(Path('image') / subject_id / facial_action / f'{img_id}_ar.jpg')
#print(eye_key)
eyel, eyer = self.eye_dataset.get(eye_key, to_homo=True)
if eyel is not None:
#print(eye_key, el_inv.shape, er_inv.shape)
d['eye_world_left'] = torch.tensor(eyel, dtype=torch.float32)
d['eye_world_right'] = torch.tensor(eyer, dtype=torch.float32)
if not self.is_test:
M = np.load(npz_path)
#yaw_gt, pitch_gt, roll_gt = Rotation.from_matrix(M['R_t'][:3, :3].T).as_euler('yxz', degrees=False)
#label_euler = np.array([pitch_gt, yaw_gt, roll_gt])
#label_translation = M['R_t'][3, :3]
#label_6dof = np.concatenate([label_euler, label_translation])
#label_6dof = (label_6dof - self.label_6dof_mean) / self.label_6dof_std
#label_6dof_tensor = torch.tensor(label_6dof, dtype=torch.float32)
#label_verts = M['verts'] * 10.0 # roughly [-1, 1]
#label_verts_tensor = torch.tensor(label_verts, dtype=torch.float32)
#return img_local, label_verts_tensor, label_6dof_tensor
label_verts_tensor = torch.tensor(M['verts'], dtype=torch.float32)
label_Rt_tensor = torch.tensor(M['R_t'], dtype=torch.float32)
d['verts'] = label_verts_tensor
d['rt'] = label_Rt_tensor
#return img_local, img_global, label_verts_tensor, label_Rt_tensor, tform_tensor
#return img_local, label_verts_tensor, label_Rt_tensor, tform_tensor
else:
#return img_local, img_global, tform_tensor
index_tensor = torch.tensor(index, dtype=torch.long)
d['index'] = index_tensor
#return img_local, tform_tensor, index_tensor
return d
def __len__(self):
return len(self.df)
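
# --- Editor's sketch (not part of the original file): the test-aug branch in
# FaceDataset.__getitem__ lifts both 2x3 affines to 3x3 homogeneous form and
# left-multiplies the replayed augmentation onto the crop transform. A minimal
# stand-alone version of that composition, with a hypothetical helper name:
def compose_affines_2x3(aug_2x3, crop_2x3):
    aug = np.identity(3)
    aug[:2, :3] = aug_2x3
    crop = np.identity(3)
    crop[:2, :3] = crop_2x3
    # the augmentation acts on already-cropped pixels, hence aug @ crop
    return np.dot(aug, crop)[:2, :3]
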
class MXFaceDataset(Dataset):
def __init__(self, cfg, is_train=True, norm_6dof=True, degrees_6dof=False, local_rank=0):
super(MXFaceDataset, self).__init__()
self.is_train = is_train
self.data_root = Path(cfg.root_dir)
self.input_size = cfg.input_size
self.transform = get_aug_transform(cfg)
self.local_rank = local_rank
self.use_trainval = cfg.use_trainval
self.use_eye = cfg.eyes is not None
if is_train:
#self.df = pd.read_csv(osp.join(cfg.cache_dir, 'train_list.csv'), dtype={'subject_id': str, 'facial_action': str, 'img_id': str})
path_imgrec = os.path.join(cfg.cache_dir, 'train.rec')
path_imgidx = os.path.join(cfg.cache_dir, 'train.idx')
self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
self.imgidx = list(self.imgrec.keys)
self.imggroup = [0] * len(self.imgidx)
self.size_train = len(self.imgidx)
if self.use_trainval:
assert not cfg.sampling_hard
path_imgrec = os.path.join(cfg.cache_dir, 'val.rec')
path_imgidx = os.path.join(cfg.cache_dir, 'val.idx')
self.imgrec2 = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
imgidx2 = list(self.imgrec2.keys)
self.imggroup += [1] * len(imgidx2)
self.imgidx += imgidx2
else:
#self.df = pd.read_csv(osp.join(cfg.cache_dir, 'val_list.csv'), dtype={'subject_id': str, 'facial_action': str, 'img_id': str})
path_imgrec = os.path.join(cfg.cache_dir, 'val.rec')
path_imgidx = os.path.join(cfg.cache_dir, 'val.idx')
self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
self.imgidx = list(self.imgrec.keys)
self.imggroup = [0] * len(self.imgidx)
self.imgidx = np.array(self.imgidx)
self.imggroup = np.array(self.imggroup)
if cfg.sampling_hard and is_train:
meta = np.load(os.path.join(cfg.cache_dir, 'train.meta.npy'))
assert meta.shape[0]==len(self.imgidx)
new_imgidx = []
for i in range(len(self.imgidx)):
idx = self.imgidx[i]
assert i==idx
pose = np.abs(meta[i,:2])
#repeat = np.sum(pose>=35)*3+1
if np.max(pose)<15:
repeat = 2
else:
repeat = 1
new_imgidx += [idx]*repeat
if local_rank==0:
print('new-imgidx:', len(self.imgidx), len(new_imgidx))
self.imgidx = np.array(new_imgidx)
self.label_6dof_mean = [-0.018197, -0.017891, 0.025348, -0.005368, 0.001176, -0.532206] # mean of pitch, yaw, roll, tx, ty, tz
self.label_6dof_std = [0.314015, 0.271809, 0.081881, 0.022173, 0.048839, 0.065444] # std of pitch, yaw, roll, tx, ty, tz
txt_path = self.data_root / 'resources/projection_matrix.txt'
self.M_proj = np.loadtxt(txt_path, dtype=np.float32)
self.M1 = np.array([
[400.0, 0, 0, 0],
[ 0, 400.0, 0, 0],
[ 0, 0, 1, 0],
[400.0, 400.0, 0, 1]
])
self.dst_pts = np.float32([
[0, 0],
[0, cfg.input_size- 1],
[cfg.input_size- 1, 0]
])
self.norm_6dof = norm_6dof
self.degrees_6dof = degrees_6dof
self.task = cfg.task
self.num_verts = cfg.num_verts
self.loss_pip = cfg.loss_pip
self.net_stride = 32
if local_rank==0:
logging.info('data_transform_list:%s'%self.transform)
logging.info('len:%d'%len(self.imgidx))
logging.info('glen:%d'%len(self.imggroup))
self.is_test_aug = False
self.enable_flip = cfg.enable_flip
self.flipindex = cfg.flipindex.copy()
self.verts3d_central_index = cfg.verts3d_central_index
self.eye_dataset = None
self.use_eye = False
if cfg.eyes is not None:
#from eye_dataset import EyeDataset
#self.eye_dataset = EyeDataset(cfg.eyes['root'], load_data=False)
self.use_eye = True
def set_test_aug(self):
if not self.is_test_aug:
from easydict import EasyDict as edict
cfg = edict()
cfg.aug_modes = ['test-aug']
cfg.input_size = self.input_size
cfg.task = 0
self.transform = get_aug_transform(cfg)
self.is_test_aug = True
def __getitem__(self, index):
idx = self.imgidx[index]
group = self.imggroup[index]
if group==0:
imgrec = self.imgrec
elif group==1:
imgrec = self.imgrec2
elif group==2:
imgrec = self.imgrec3
s = imgrec.read_idx(idx)
header, img = mx.recordio.unpack(s)
hlabel = header.label
img = mx.image.imdecode(img).asnumpy() #rgb numpy
label_verts = np.array(hlabel[:1220*3], dtype=np.float32).reshape(-1,3)
label_Rt = np.array(hlabel[1220*3:1220*3+16], dtype=np.float32).reshape(4,4)
label_tform = np.array(hlabel[1220*3+16:1220*3+16+6], dtype=np.float32).reshape(2,3)
label_6dof = Rt26dof(label_Rt, self.degrees_6dof)
if self.norm_6dof:
label_6dof = (label_6dof - self.label_6dof_mean) / self.label_6dof_std
label_6dof_tensor = torch.tensor(label_6dof, dtype=torch.float32)
el_inv = None
er_inv = None
if self.use_eye:
a = 1220*3+16+6
el_inv = np.array(hlabel[a:a+481*3], dtype=np.float32).reshape(-1,3)
a+=481*3
er_inv = np.array(hlabel[a:a+481*3], dtype=np.float32).reshape(-1,3)
#el_inv = torch.tensor(el_inv, dtype=torch.float32)
#er_inv = torch.tensor(er_inv, dtype=torch.float32)
#eye_verts = [el_inv, er_inv]
eye_verts = np.concatenate( (el_inv, er_inv), axis=0 )
#img_local = None
img_raw = None
#if self.task==0 or self.task==2:
# img_raw = img[:,self.input_size:,:]
#if self.task==0 or self.task==1 or self.task==3:
# img_local = img[:,:self.input_size,:]
assert img.shape[0]==img.shape[1] and img.shape[0]>=self.input_size
if img.shape[0]>self.input_size:
scale = float(self.input_size) / img.shape[0]
#print('scale:', scale)
#src_pts = np.float32([
# [0, 0],
# [0, 799],
# [799, 0]
#])
#tform = cv2.getAffineTransform(src_pts, self.dst_pts)
#new_tform = np.identity(3)
#new_tform[:2,:3] = tform
#label_tform = np.dot(new_tform, label_tform.T).T
            # the raw render canvas is 800x800: map its corners through the
            # stored tform, then rescale to the network input size
            src_pts = np.float32([
[0, 0, 1],
[0, 799, 1],
[799, 0, 1]
])
dst_pts = np.dot(label_tform, src_pts.T).T
dst_pts *= scale
dst_pts = dst_pts.copy()
src_pts = src_pts[:,:2].copy()
#print('index:', index)
#print(src_pts.shape, dst_pts.shape)
#print(label_tform.shape)
#print(src_pts.dtype)
#print(dst_pts.dtype)
tform = cv2.getAffineTransform(src_pts, dst_pts)
label_tform = tform
img = cv2.resize(img, (self.input_size, self.input_size))
img_local = img
need_points2d = (self.task==0 or self.task==3)
if need_points2d:
ones = np.ones([label_verts.shape[0], 1])
verts_homo = np.concatenate([label_verts, ones], axis=1)
verts = verts_homo @ label_Rt @ self.M_proj @ self.M1
w_ = verts[:, [3]]
verts = verts / w_
points2d = verts[:, :3]
points2d[:, 1] = 800.0 - points2d[:, 1]
verts2d = points2d[:,:2].copy()
points2d[:,2] = 1.0
points2d = np.dot(label_tform, points2d.T).T
else:
points2d = np.ones( (1,2), dtype=np.float32) * (self.input_size//2)
if self.use_eye:
verts_homo = eye_verts
if verts_homo.shape[1] == 3:
ones = np.ones([verts_homo.shape[0], 1])
verts_homo = np.concatenate([verts_homo, ones], axis=1)
verts_out = verts_homo @ label_Rt @ self.M_proj @ self.M1
w_ = verts_out[:, [3]]
verts_out = verts_out / w_
_points2d = verts_out[:, :3]
_points2d[:, 1] = 800.0 - _points2d[:, 1]
_points2d[:,2] = 1.0
_points2d = np.dot(label_tform, _points2d.T).T
eye_points = _points2d
#if img.shape[0]!=self.input_size:
# assert img.shape[0]>self.input_size
#img = cv2.resize(img, (self.input_size, self.input_size))
#scale = float(self.input_size) / img.shape[0]
#points2d *= scale
if self.transform is not None:
if img_raw is not None:
img_raw = self.transform(image=img_raw, keypoints=points2d)['image']
if img_local is not None:
height, width = img_local.shape[:2]
x = self.transform(image=img_local, keypoints=points2d)
img_local = x['image']
points2d = x['keypoints']
points2d = np.array(points2d, dtype=np.float32)
if self.is_test_aug:
for trans in x["replay"]["transforms"]:
if trans['__class_fullname__']=='ShiftScaleRotate' and trans['applied']:
param = trans['params']
dx, dy, angle, scale = param['dx'], param['dy'], param['angle'], param['scale']
center = (width / 2, height / 2)
matrix = cv2.getRotationMatrix2D(center, angle, scale)
matrix[0, 2] += dx * width
matrix[1, 2] += dy * height
new_matrix = np.identity(3)
new_matrix[:2,:3] = matrix
old_tform = np.identity(3)
old_tform[:2,:3] = label_tform
                            # note: as in FaceDataset, the replayed augmentation
                            # composes on the LEFT of the original crop transform:
new_tform = np.dot(new_matrix, old_tform)
#print('label_tform:')
#print(label_tform.flatten())
#print(new_matrix.flatten())
#print(new_tform.flatten())
label_tform = new_tform[:2,:3]
break
#print('trans param:', param)
        # initialize the output dict here so the PIP targets below can be stored
        # (previously d was created only after this block, which would raise a
        # NameError whenever loss_pip was enabled)
        d = {}
        if self.loss_pip:
target_map = np.zeros((self.num_verts, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
target_local_x = np.zeros((self.num_verts, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
target_local_y = np.zeros((self.num_verts, int(self.input_size/self.net_stride), int(self.input_size/self.net_stride)))
target = points2d / self.input_size
target_map, target_local_x, target_local_y = gen_target_pip(target, target_map, target_local_x, target_local_y)
target_map_tensor = torch.tensor(target_map, dtype=torch.float32)
target_x_tensor = torch.tensor(target_local_x, dtype=torch.float32)
target_y_tensor = torch.tensor(target_local_y, dtype=torch.float32)
d['pip_map'] = target_map_tensor
d['pip_x'] = target_x_tensor
d['pip_y'] = target_y_tensor
if self.is_train and self.enable_flip and np.random.random()<0.5:
#if self.local_rank==0:
# print('XXX:', label_verts[:5,:2])
img_local = img_local.flip([2])
x_of_central = 0.0
#x_of_central = label_verts[self.verts3d_central_index,0]
#x_of_central = np.mean(x_of_central)
label_verts = label_verts[self.flipindex,:]
label_verts[:,0] -= x_of_central
label_verts[:,0] *= -1.0
label_verts[:,0] += x_of_central
if need_points2d:
flipped_p2d = points2d[self.flipindex,:].copy()
flipped_p2d[:,0] = self.input_size - 1 - flipped_p2d[:,0]
points2d = flipped_p2d
if self.use_eye:
flipped_p2d = eye_points[self.flipindex,:].copy()
flipped_p2d[:,0] = self.input_size - 1 - flipped_p2d[:,0]
eye_points = flipped_p2d
label_verts_tensor = torch.tensor(label_verts*10.0, dtype=torch.float32)
d['img_local'] = img_local
d['verts'] = label_verts_tensor
d['6dof'] = label_6dof_tensor
d['rt'] = torch.tensor(label_Rt, dtype=torch.float32)
if need_points2d:
points2d = points2d / (self.input_size//2) - 1.0
points2d_tensor = torch.tensor(points2d, dtype=torch.float32)
d['points2d'] = points2d_tensor
if self.use_eye:
d['eye_verts'] = torch.tensor(eye_verts, dtype=torch.float32)
eye_points = eye_points / (self.input_size//2) - 1.0
eye_points_tensor = torch.tensor(eye_points, dtype=torch.float32)
d['eye_points'] = eye_points_tensor
loss_weight = 1.0
if group!=0:
loss_weight = 0.0
loss_weight_tensor = torch.tensor(loss_weight, dtype=torch.float32)
d['loss_weight'] = loss_weight_tensor
label_tform_tensor = torch.tensor(label_tform, dtype=torch.float32)
d['tform'] = label_tform_tensor
#if img_local is None:
# image = (img_raw,)
#elif img_raw is None:
# image = (img_local,)
#else:
# image = (img_local,img_raw)
#ret = image + (label_verts_tensor, label_6dof_tensor, points2d_tensor)
if not self.is_train:
idx_tensor = torch.tensor([idx], dtype=torch.long)
d['idx'] = idx_tensor
d['verts2d'] = torch.tensor(verts2d, dtype=torch.float32)
return d
def __len__(self):
return len(self.imgidx)
def test_dataset1(cfg):
cfg.task = 0
is_train = False
center_axis = []
dataset = MXFaceDataset(cfg, is_train=is_train, norm_6dof=False, local_rank=0)
for i in range(len(dataset.flipindex)):
if i==dataset.flipindex[i]:
center_axis.append(i)
print(center_axis)
#dataset.transform = None
print('total:', len(dataset))
total = len(dataset)
#total = 50
list_6dof = []
all_mean_xs = []
for idx in range(total):
#img_local, img_raw, label_verts, label_6dof, = dataset[idx]
#img_local, img_raw, label_verts, label_6dof, points2d, tform, data_idx = dataset[idx]
#img_local, label_verts, label_6dof, points2d, tform, data_idx = dataset[idx]
d = dataset[idx]
img_local = d['img_local']
label_verts = d['verts']
label_6dof = d['6dof']
points2d = d['points2d']
label_verts = label_verts.numpy()
label_6dof = label_6dof.numpy()
points2d = points2d.numpy()
#print(img_local.shape, label_verts.shape, label_6dof.shape, points2d.shape)
verts3d = label_verts / 10.0
xs = []
for c in center_axis:
_x = verts3d[c,0]
xs.append(_x)
_std = np.std(xs)
print(xs)
print(_std)
#print(np.mean(xs))
all_mean_xs.append(np.mean(xs))
if idx%100==0:
print('processing:', idx, np.mean(all_mean_xs))
#print(label_verts[:3,:], label_6dof)
#list_6dof.append(label_6dof)
#print(image.__class__, label_verts.__class__)
#label = list(label_verts.numpy().flatten()) + list(label_6dof.numpy().flatten())
#points2d = label_verts2[:,:2]
#points2d = (points2d+1) * 128.0
#img_local = img_local.numpy()
#img_local = (img_local+1.0) * 128.0
#draw = img_local.astype(np.uint8).transpose( (1,2,0) )[:,:,::-1].copy()
#for i in range(points2d.shape[0]):
# pt = points2d[i].astype(np.int)
# cv2.circle(draw, pt, 2, (255,0,0), 2)
##output_path = "outputs/%d_%.3f_%.3f_%.3f.jpg"%(idx, label_6dof[0], label_6dof[1], label_6dof[2])
#output_path = "outputs/%06d.jpg"%(idx)
#cv2.imwrite(output_path, draw)
#list_6dof = np.array(list_6dof)
#print('MEAN:')
#print(np.mean(list_6dof, axis=0))
def test_loader1(cfg):
cfg.task = 0
is_train = True
dataset = MXFaceDataset(cfg, is_train=is_train, norm_6dof=False, local_rank=0)
loader = DataLoader(dataset, batch_size=64, shuffle=True)
for index, d in enumerate(loader):
#img_local = d['img_local']
label_verts = d['verts']
points2d = d['points2d']
tform = d['tform']
label_verts /= 10.0
points2d = (points2d + 1.0) * (cfg.input_size//2)
tform = tform.numpy()
verts = label_verts.numpy()
points2d = points2d.numpy()
print(verts.shape, points2d.shape, tform.shape)
np.save("temp/verts3d.npy", verts)
np.save("temp/points2d.npy", points2d)
np.save("temp/tform.npy", tform)
break
def test_facedataset1(cfg):
cfg.task = 0
cfg.input_size = 512
dataset = FaceDataset(cfg, is_train=True, local_rank=0)
for idx in range(100000):
        d = dataset[idx]  # FaceDataset.__getitem__ returns a dict, not a tuple
        label_Rt = d['rt'].numpy()
if label_Rt[0,0]>1.0:
print(idx, label_Rt.shape)
print(label_Rt)
break
def test_arcface(cfg):
cfg.task = 0
is_train = True
dataset = MXFaceDataset(cfg, is_train=is_train, norm_6dof=False, local_rank=0)
loader = DataLoader(dataset, batch_size=1, shuffle=True)
for index, d in enumerate(loader):
img = d['img_local'].numpy()
img /= 2.0
img += 0.5
img *= 255.0
img = img[0]
img = img.transpose( (1,2,0) )
img = img.astype(np.uint8)
img = cv2.resize(img, (144,144))
img = img[:,:,::-1]
img = img[8:120,16:128,:]
print(img.shape)
cv2.imwrite("temp/arc_%d.jpg"%index, img)
#np.save("temp/verts3d.npy", verts)
#np.save("temp/points2d.npy", points2d)
#np.save("temp/tform.npy", tform)
if index>100:
break
def test_dataset2(cfg):
cfg.task = 0
is_train = False
center_axis = []
dataset = MXFaceDataset(cfg, is_train=is_train, norm_6dof=False, local_rank=0)
for i in range(len(dataset.flipindex)):
if i==dataset.flipindex[i]:
center_axis.append(i)
print(center_axis)
#dataset.transform = None
print('total:', len(dataset))
total = len(dataset)
total = 50
list_6dof = []
all_mean_xs = []
for idx in range(total):
d = dataset[idx]
img_local = d['img_local']
label_verts = d['verts']
label_6dof = d['6dof']
points2d = d['points2d']
label_verts = label_verts.numpy()
label_6dof = label_6dof.numpy()
points2d = points2d.numpy()
eye_points = d['eye_points'].numpy()
eye_verts = d['eye_verts'].numpy()
print(eye_verts[:5,:])
#print(img_local.shape, label_verts.shape, label_6dof.shape, points2d.shape)
verts3d = label_verts / 10.0
#print(label_verts[:3,:], label_6dof)
#list_6dof.append(label_6dof)
#print(image.__class__, label_verts.__class__)
#label = list(label_verts.numpy().flatten()) + list(label_6dof.numpy().flatten())
#points2d = label_verts2[:,:2]
points2d = (points2d+1) * 128.0
eye_points = (eye_points+1) * 128.0
img_local = img_local.numpy()
img_local = (img_local+1.0) * 128.0
draw = img_local.astype(np.uint8).transpose( (1,2,0) )[:,:,::-1].copy()
        for i in range(points2d.shape[0]):
            pt = tuple(points2d[i].astype(int))  # cv2.circle expects an int tuple
            cv2.circle(draw, pt, 2, (255,0,0), 2)
        for i in range(eye_points.shape[0]):
            pt = tuple(eye_points[i].astype(int))
            cv2.circle(draw, pt, 2, (0,255,0), 2)
##output_path = "outputs/%d_%.3f_%.3f_%.3f.jpg"%(idx, label_6dof[0], label_6dof[1], label_6dof[2])
output_path = "outputs/%06d.jpg"%(idx)
cv2.imwrite(output_path, draw)
#list_6dof = np.array(list_6dof)
#print('MEAN:')
#print(np.mean(list_6dof, axis=0))
if __name__ == "__main__":
from utils.utils_config import get_config
#cfg = get_config('configs/r0_a1.py')
cfg = get_config('configs/s2')
#test_loader1(cfg)
#test_facedataset1(cfg)
#test_arcface(cfg)
test_dataset2(cfg)
| insightface/reconstruction/jmlr/dataset.py/0 | {
"file_path": "insightface/reconstruction/jmlr/dataset.py",
"repo_id": "insightface",
"token_count": 18131
} | 130 |
from dataset import FaceDataset, DataLoaderX, MXFaceDataset
import argparse
import logging
import os
import time
import timm
import glob
import numpy as np
import os.path as osp
from utils.utils_config import get_config
from scipy.spatial.transform import Rotation
import torch
import torch.distributed as dist
from torch import nn
import torch.nn.functional as F
import torch.utils.data.distributed
from backbones import get_network
from inference_simple import JMLRInference, Rt_from_6dof
from dataset import Rt26dof
def l2_distance(a, b):
    # a, b: (N, V, 3) vertex arrays; returns the per-sample mean vertex
    # distance, shape (N,). (A stale, unused row-wise variant was removed.)
    return np.sqrt(((a - b) ** 2).sum(axis=2)).mean(axis=1)
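
# Editor's sketch (hypothetical self-check, not in the original file): inputs
# are (N, V, 3) vertex arrays; the output is one mean distance per sample.
def _l2_distance_selfcheck():
    a = np.zeros((2, 1220, 3), dtype=np.float32)
    b = np.ones_like(a)
    d = l2_distance(a, b)
    assert d.shape == (2,) and np.allclose(d, np.sqrt(3.0))
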
def main(args):
cfg = get_config(args.config)
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
world_size = int(os.environ['WORLD_SIZE'])
rank = int(os.environ['RANK'])
#dist_url = "tcp://{}:{}".format(os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"])
dist.init_process_group('nccl')
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
task1 = cfg.task
cfg.aug_modes = []
cfg.task = 0
batch_size = cfg.batch_size
dataset = MXFaceDataset(cfg=cfg, is_train=False, local_rank=local_rank)
if local_rank==0:
print('total:', len(dataset))
print('total batch:', len(dataset)//(batch_size*world_size))
cfg.task = task1
net = JMLRInference(cfg, local_rank)
net = net.to(local_rank)
net.eval()
#net = torch.nn.parallel.DistributedDataParallel(
# module=net, broadcast_buffers=False, device_ids=[local_rank])
sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
shuffle=False,
pin_memory=True,
num_workers=3,
drop_last=False,
)
num_epochs = 1
all_pred_verts = torch.zeros((len(dataset),1220,3), requires_grad=False).to(local_rank)
all_pred_R = torch.zeros((len(dataset),3,3), requires_grad=False).to(local_rank)
all_pred_t = torch.zeros((len(dataset),1,3), requires_grad=False).to(local_rank)
all_pred_verts2d = torch.zeros((len(dataset),1220,2), requires_grad=False).to(local_rank)
all_label_verts = torch.zeros((len(dataset),1220,3), requires_grad=False).to(local_rank)
all_label_R = torch.zeros((len(dataset),3,3), requires_grad=False).to(local_rank)
all_label_t = torch.zeros((len(dataset),1,3), requires_grad=False).to(local_rank)
all_label_verts2d = torch.zeros((len(dataset),1220,2), requires_grad=False).to(local_rank)
all_weight = 0.0
FLIPS = [False, True] if cfg.enable_flip else [False]
#FLIPS = [False]
if local_rank==0:
print('FLIPS:', FLIPS)
for epoch in range(num_epochs):
weight = 1.0
if epoch>0:
dataset.set_test_aug()
weight = 0.6
all_weight += weight
#all_distance = torch.zeros((len(dataset),), requires_grad=False).to(local_rank)
diff_R = []
diff_t = []
sampler.set_epoch(epoch)
for idx, sample in enumerate(loader):
img_local = sample['img_local']
label_verts = sample['verts']
tform = sample['tform']
label_6dof = sample['6dof']
data_idx = sample['idx']
label_verts2d = sample['verts2d']
img_local = img_local.to(local_rank)
pred_verts, pred_verts2d, pred_points2d = [], [], []
for is_flip in FLIPS:
with torch.no_grad():
#pred_verts, R_pred, t_pred = infer.forward(img_local, img_raw, tform)
#pred1, pred2 = net(img_local.to(local_rank), img_raw.to(local_rank))
pred1, pred2, meta = net(img_local, is_flip=is_flip)
_pred_verts = net.convert_verts(pred1, meta)
pred_verts.append(_pred_verts)
_pred_verts2d, _pred_points2d = net.convert_2d(pred2, tform, meta)
pred_verts2d.append(_pred_verts2d)
pred_points2d.append(_pred_points2d)
pred_verts = sum(pred_verts) / len(pred_verts)
pred_verts2d = sum(pred_verts2d) / len(pred_verts2d)
pred_points2d = sum(pred_points2d) / len(pred_points2d)
R_pred, t_pred = net.solve(pred_verts, pred_verts2d)
label_6dof = label_6dof.cpu().numpy()
label_6dof = label_6dof * cfg.label_6dof_std.reshape(1, 6) + cfg.label_6dof_mean.reshape(1,6)
R_label, t_label = Rt_from_6dof(label_6dof)
diff_R.append(np.mean(np.abs(R_pred - R_label)))
diff_t.append(np.mean(np.abs(t_pred - t_label)))
#distance = torch.tensor(distance, dtype=torch.float32, requires_grad=False).to(local_rank)
data_idx = data_idx.view(-1)
#all_distance[data_idx] = distance
label_verts = label_verts.view(-1,1220,3) / 10.0
if epoch==0:
all_label_verts[data_idx,:,:] = label_verts.to(local_rank)
all_label_R[data_idx,:,:] = torch.tensor(R_label).to(local_rank)
all_label_t[data_idx,:,:] = torch.tensor(t_label).to(local_rank)
all_label_verts2d[data_idx,:,:] = label_verts2d.to(local_rank)
all_pred_verts[data_idx,:,:] += torch.tensor(pred_verts).to(local_rank) * weight
#all_pred_R[data_idx,:,:] += torch.tensor(R_pred).to(local_rank) * weight
#all_pred_t[data_idx,:,:] += torch.tensor(t_pred).to(local_rank) * weight
all_pred_verts2d[data_idx,:,:] += torch.tensor(pred_verts2d).to(local_rank) * weight
if idx%20==0 and local_rank==0:
print('processing-epoch-idx:', epoch, idx)
#print('distance:', distance.shape, distance.cpu().numpy().mean())
print('diff_R:', np.mean(diff_R))
print('diff_t:', np.mean(diff_t))
dist.all_reduce(all_label_verts, op=dist.ReduceOp.SUM)
dist.all_reduce(all_label_verts2d, op=dist.ReduceOp.SUM)
dist.all_reduce(all_label_R, op=dist.ReduceOp.SUM)
dist.all_reduce(all_label_t, op=dist.ReduceOp.SUM)
dist.all_reduce(all_pred_verts, op=dist.ReduceOp.SUM)
dist.all_reduce(all_pred_verts2d, op=dist.ReduceOp.SUM)
#dist.all_reduce(all_pred_R, op=dist.ReduceOp.SUM)
#dist.all_reduce(all_pred_t, op=dist.ReduceOp.SUM)
#dist.all_reduce(all_distance, op=dist.ReduceOp.SUM)
if local_rank==0:
label_verts = all_label_verts.cpu().numpy()
label_verts2d = all_label_verts2d.cpu().numpy()
R_label = all_label_R.cpu().numpy()
t_label = all_label_t.cpu().numpy()
pred_verts = all_pred_verts.cpu().numpy() / all_weight
#R_pred = all_pred_R.cpu().numpy() / all_weight
#t_pred = all_pred_t.cpu().numpy() / all_weight
pred_verts2d = all_pred_verts2d.cpu().numpy() / all_weight
R_pred, t_pred = net.solve(pred_verts, pred_verts2d)
#R_pred, t_pred = net.solve(pred_verts, label_verts2d)
#R_pred, t_pred = net.solve(label_verts, pred_verts2d)
X1 = label_verts @ R_label + t_label
X2 = pred_verts @ R_pred + t_pred
X3 = label_verts @ R_pred + t_pred
X4 = pred_verts @ R_label + t_label
distance = l2_distance(X1, X2) + l2_distance(X1, X3) + 10.0*l2_distance(X1,X4)
distance *= 1000.0
print('top20 distance:', np.mean(distance[:20]))
score = np.mean(distance)
print('epoch distance:', epoch, score)
with open(os.path.join(cfg.output, 'val.txt'), 'w') as f:
f.write("%f\n"%score)
if __name__ == "__main__":
#torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='JMLR validation')
parser.add_argument('config', type=str, help='config file')
parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
args_ = parser.parse_args()
main(args_)
| insightface/reconstruction/jmlr/validate_dist.py/0 | {
"file_path": "insightface/reconstruction/jmlr/validate_dist.py",
"repo_id": "insightface",
"token_count": 3899
} | 131 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt  # needed by generate_heatmap (plt.cm)
import tensorflow as tf
import menpo.io as mio
from menpo.image import Image
from menpo.shape import PointCloud
import cv2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables as tf_variables
from menpo.transform import Translation, Scale, AlignmentSimilarity
slim = tf.contrib.slim
def generate_heatmap(logits, num_classes):
"""Generates a coloured heatmap from the keypoint logits.
Args:
features: A `Tensor` of dimensions [num_batch, height, width, FLAGS.n_landmarks + 1].
"""
keypoint_colours = np.array([plt.cm.spectral(x) for x in np.linspace(0, 1, num_classes + 1)])[
..., :3].astype(np.float32)
prediction = tf.nn.softmax(logits)
heatmap = tf.matmul(tf.reshape(prediction, (-1, num_classes + 1)), keypoint_colours)
heatmap = tf.reshape(heatmap, (tf.shape(prediction)[0],
tf.shape(prediction)[1],
tf.shape(prediction)[2], 3))
return heatmap
def generate_landmarks(keypoints):
is_background = tf.equal(keypoints, 0)
ones = tf.to_float(tf.ones_like(is_background))
zeros = tf.to_float(tf.zeros_like(is_background))
return tf.where(is_background, zeros, ones) * 255
def project_landmarks_to_shape_model(landmarks):
    # NOTE: relies on a module-level `pca` shape model being defined elsewhere
    final = []
for lms in landmarks:
lms = PointCloud(lms)
similarity = AlignmentSimilarity(pca.global_transform.source, lms)
projected_target = similarity.pseudoinverse().apply(lms)
target = pca.model.reconstruct(projected_target)
target = similarity.apply(target)
final.append(target.points)
return np.array(final).astype(np.float32)
def rescale_image(image, stride_width=64):
    # make sure the smallest side is roughly 600 pixels & dimensions are (k * stride_width) + 1
height, width = image.shape
# Taken from 'szross'
scale_up = 625. / min(height, width)
scale_cap = 961. / max(height, width)
scale_up = min(scale_up, scale_cap)
new_height = stride_width * round((height * scale_up) / stride_width) + 1
new_width = stride_width * round((width * scale_up) / stride_width) + 1
image, tr = image.resize([new_height, new_width], return_transform=True)
image.inverse_tr = tr
return image
def frankotchellappa(dzdx, dzdy):
from numpy.fft import ifftshift, fft2, ifft2
rows, cols = dzdx.shape
# The following sets up matrices specifying frequencies in the x and y
# directions corresponding to the Fourier transforms of the gradient
# data. They range from -0.5 cycles/pixel to + 0.5 cycles/pixel.
# The scaling of this is irrelevant as long as it represents a full
# circle domain. This is functionally equivalent to any constant * pi
pi_over_2 = np.pi / 2.0
row_grid = np.linspace(-pi_over_2, pi_over_2, rows)
col_grid = np.linspace(-pi_over_2, pi_over_2, cols)
wy, wx = np.meshgrid(row_grid, col_grid, indexing='ij')
# Quadrant shift to put zero frequency at the appropriate edge
wx = ifftshift(wx)
wy = ifftshift(wy)
# Fourier transforms of gradients
DZDX = fft2(dzdx)
DZDY = fft2(dzdy)
# Integrate in the frequency domain by phase shifting by pi/2 and
# weighting the Fourier coefficients by their frequencies in x and y and
# then dividing by the squared frequency
    denom = (wx ** 2 + wy ** 2)
    Z = (-1j * wx * DZDX - 1j * wy * DZDY) / denom
    # the DC term (denom == 0) is undefined up to an additive constant;
    # nan_to_num zeroes it out
    Z = np.nan_to_num(Z)
return np.real(ifft2(Z))
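
# Editor's sketch (hypothetical demo, not in the original file): reconstruct a
# smooth synthetic surface from its finite-difference gradients. The output is
# recovered up to an additive constant and the scale convention of the
# frequency grid above.
def _frankotchellappa_demo():
    z = np.sin(np.linspace(0, 2 * np.pi, 64))[None, :] * \
        np.cos(np.linspace(0, 2 * np.pi, 64))[:, None]
    dzdy, dzdx = np.gradient(z)  # np.gradient returns (d/drow, d/dcol)
    z_rec = frankotchellappa(dzdx, dzdy)
    return z_rec  # same shape as z
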
def line(image, x0, y0, x1, y1, color):
    steep = False
    # NOTE: bounds are hardcoded for the 400x400 canvases used by draw_landmarks
    if x0 < 0 or x0 >= 400 or x1 < 0 or x1 >= 400 or y0 < 0 or y0 >= 400 or y1 < 0 or y1 >= 400:
        return
if abs(x0 - x1) < abs(y0 - y1):
x0, y0 = y0, x0
x1, y1 = y1, x1
steep = True
if x0 > x1:
x0, x1 = x1, x0
y0, y1 = y1, y0
    for x in range(int(x0), int(x1) + 1):
        t = (x - x0) / float(x1 - x0) if x1 != x0 else 0.0  # guard degenerate segments
y = y0 * (1 - t) + y1 * t
if steep:
image[x, int(y)] = color
else:
image[int(y), x] = color
def draw_landmarks(img, lms):
try:
img = img.copy()
for i, part in enumerate(parts_68[1:]):
circular = []
if i in (4, 5, 6, 7):
circular = [part[0]]
for p1, p2 in zip(part, list(part[1:]) + circular):
p1, p2 = lms[p1], lms[p2]
line(img, p2[1], p2[0], p1[1], p1[0], 1)
    except Exception:
        # parts_68 is expected to be defined at module level elsewhere
        pass
return img
def batch_draw_landmarks(imgs, lms):
return np.array([draw_landmarks(img, l) for img, l in zip(imgs, lms)])
def build_graph(inputs, tree, transpose=(2,3,1,0), layers=[]):
net = inputs
if tree['name'] == 'nn.Sequential':
with tf.name_scope('nn.Sequential'):
for tr in tree['children']:
net = build_graph(net, tr, transpose, layers)
elif tree['name'] == 'nn.ConcatTable':
net_table = []
with tf.name_scope('nn.ConcatTable'):
for tr in tree['children']:
net_table.append(build_graph(net, tr, transpose, layers))
net = net_table
elif tree['name'] == 'nn.JoinTable':
        net = tf.concat(net, axis=3)  # TF 1.x argument order
elif tree['name'] == 'nn.CAddTable':
net = tf.add_n(net)
elif tree['name'] == 'nn.SpatialConvolution':
out_channel = int(tree['nOutputPlane'])
kernal_shape = (int(tree['kH']),int(tree['kW']))
stride_shape = (int(tree['dH']),int(tree['dW']))
net = tf.pad(
net, [
[0,0],
[int(tree['padH']),int(tree['padH'])],
[int(tree['padW']),int(tree['padW'])],
[0,0]
])
if 'weight' in tree.keys() and 'bias' in tree.keys():
net = slim.conv2d(net,
out_channel,
kernal_shape,
stride_shape,
activation_fn=None,
padding='VALID',
weights_initializer=tf.constant_initializer(tree['weight'].transpose(*transpose)),
biases_initializer=tf.constant_initializer(tree['bias'])
)
else:
net = slim.conv2d(net,
out_channel,
kernal_shape,
stride_shape,
activation_fn=None,
padding='VALID'
)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.SpatialFullConvolution':
out_channel = int(tree['nOutputPlane'])
kernal_shape = (int(tree['kH']),int(tree['kW']))
stride_shape = (int(tree['dH']),int(tree['dW']))
net = tf.pad(
net, [
[0,0],
[int(tree['padH']),int(tree['padH'])],
[int(tree['padW']),int(tree['padW'])],
[0,0]
])
if 'weight' in tree.keys() and 'bias' in tree.keys():
net = slim.conv2d_transpose(net,
out_channel,
kernal_shape,
stride_shape,
activation_fn=None,
padding='VALID',
weights_initializer=tf.constant_initializer(tree['weight'].transpose(*transpose)),
biases_initializer=tf.constant_initializer(tree['bias'])
)
else:
net = slim.conv2d_transpose(net,
out_channel,
kernal_shape,
stride_shape,
activation_fn=None,
padding='VALID'
)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.SpatialBatchNormalization':
net = slim.nn.batch_normalization(net,
tree['running_mean'],
tree['running_var'],
tree['bias'],
tree['weight'],
tree['eps'])
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.ReLU':
net = slim.nn.relu(net)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.Sigmoid':
net = slim.nn.sigmoid(net)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.SpatialMaxPooling':
net = slim.max_pool2d(
tf.pad(
net, [
[0,0],
[int(tree['padH']),int(tree['padH'])],
[int(tree['padW']),int(tree['padW'])],
[0,0]
]),
(int(tree['kH']),int(tree['kW'])),
(int(tree['dH']),int(tree['dW']))
)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.Identity':
pass
else:
raise Exception(tree['name'])
return net
def build_graph_old(inputs, tree, transpose=(2,3,1,0), layers=[]):
net = inputs
if tree['name'] == 'nn.Sequential':
with tf.name_scope('nn.Sequential'):
for tr in tree['children']:
net = build_graph(net, tr, transpose, layers)
elif tree['name'] == 'nn.ConcatTable':
net_table = []
with tf.name_scope('nn.ConcatTable'):
for tr in tree['children']:
net_table.append(build_graph(net, tr, transpose, layers))
net = net_table
elif tree['name'] == 'nn.JoinTable':
        net = tf.concat(net, axis=3)  # TF 1.x argument order
elif tree['name'] == 'nn.CAddTable':
net = tf.add_n(net)
elif tree['name'] == 'nn.SpatialConvolution':
out_channel = int(tree['nOutputPlane'])
kernal_shape = (int(tree['kH']),int(tree['kW']))
stride_shape = (int(tree['dH']),int(tree['dW']))
net = tf.pad(
net, [
[0,0],
[int(tree['padH']),int(tree['padH'])],
[int(tree['padW']),int(tree['padW'])],
[0,0]
])
if 'weight' in tree.keys() and 'bias' in tree.keys():
net = slim.conv2d(net,
out_channel,
kernal_shape,
stride_shape,
activation_fn=None,
padding='VALID',
weights_initializer=tf.constant_initializer(tree['weight'].transpose(*transpose)),
biases_initializer=tf.constant_initializer(tree['bias'])
)
else:
net = slim.conv2d(net,
out_channel,
kernal_shape,
stride_shape,
activation_fn=None,
padding='VALID'
)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.SpatialFullConvolution':
out_channel = int(tree['nOutputPlane'])
kernal_shape = (int(tree['kH']),int(tree['kW']))
        rate = min(int(tree['dH']), int(tree['dW']))  # np.min(a, b) treats b as an axis
        shape = tf.shape(net)
        net = tf.image.resize_bilinear(net, (shape[1] * rate, shape[2] * rate))
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.SpatialBatchNormalization':
net = slim.batch_norm(net)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.ReLU':
net = slim.nn.relu(net)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.Sigmoid':
net = slim.nn.sigmoid(net)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.SpatialMaxPooling':
net = slim.max_pool2d(
tf.pad(
net, [
[0,0],
[int(tree['padH']),int(tree['padH'])],
[int(tree['padW']),int(tree['padW'])],
[0,0]
]),
(int(tree['kH']),int(tree['kW'])),
(int(tree['dH']),int(tree['dW']))
)
tree['tfname'] = net.name
tree['tfvar'] = net
elif tree['name'] == 'nn.Identity':
pass
else:
raise Exception(tree['name'])
return net
def keypts_encoding(keypoints, num_classes):
keypoints = tf.to_int32(keypoints)
keypoints = tf.reshape(keypoints, (-1,))
keypoints = slim.layers.one_hot_encoding(keypoints, num_classes=num_classes+1)
return keypoints
def get_weight(keypoints, mask=None, ng_w=0.01, ps_w=1.0):
is_background = tf.equal(keypoints, 0)
ones = tf.to_float(tf.ones_like(is_background))
weights = tf.where(is_background, ones * ng_w, ones*ps_w)
# if mask is not None:
# weights *= tf.to_float(mask)
return weights
def ced_accuracy(t, dists):
# Head Shoulder Elbow Wrist Hip Knee Ankle
pts_r = tf.transpose(tf.gather(tf.transpose(dists), [8,12,11,10,2,1,0]))
pts_l = tf.transpose(tf.gather(tf.transpose(dists), [9,13,14,15,3,4,5]))
part_pckh = (tf.to_int32(pts_r <= t) + tf.to_int32(pts_l <= t)) / 2
    return tf.concat([part_pckh, tf.reduce_sum(tf.to_int32(dists <= t), 1)[...,None] / tf.shape(dists)[1]], axis=1)
def pckh(preds, gts, scales):
t_range = np.arange(0,0.51,0.01)
dists = tf.sqrt(tf.reduce_sum(tf.pow(preds - gts, 2), reduction_indices=-1)) / scales
# pckh = [ced_accuracy(t, dists) for t in t_range]
# return pckh[-1]
return ced_accuracy(0.5, dists)
def import_image(img_path):
img = cv2.imread(str(img_path))
original_image = Image.init_from_channels_at_back(img[:,:,-1::-1])
try:
original_image_lms = mio.import_landmark_file('{}/{}.ljson'.format(img_path.parent, img_path.stem)).lms.points.astype(np.float32)
original_image.landmarks['LJSON'] = PointCloud(original_image_lms)
except:
pass
return original_image
def crop_image(img, center, scale, res, base=384):
h = base * scale
t = Translation(
[
res[0] * (-center[0] / h + .5),
res[1] * (-center[1] / h + .5)
]).compose_after(Scale((res[0] / h, res[1] / h))).pseudoinverse()
    # Upper left point
    ul = np.floor(t.apply([0, 0])).astype(np.int)
    # Bottom right point (ceil before casting; the original cast first,
    # which made the ceil a no-op)
    br = np.ceil(t.apply(res)).astype(np.int)
# crop and rescale
cimg, trans = img.warp_to_shape(br-ul, Translation(-(br-ul)/2+(br+ul)/2) ,return_transform=True)
c_scale = np.min(cimg.shape) / np.mean(res)
new_img = cimg.rescale(1 / c_scale).resize(res)
return new_img, trans, c_scale
| insightface/reconstruction/ostec/external/landmark_detector/utils.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/landmark_detector/utils.py",
"repo_id": "insightface",
"token_count": 8095
} | 132 |
// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#define EIGEN_USE_GPU
#define __CUDA_INCLUDE_COMPILER_INTERNAL_HEADERS__
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include <stdio.h>
using namespace tensorflow;
using namespace tensorflow::shape_inference;
#define OP_CHECK_CUDA_ERROR(CTX, CUDA_CALL) do { cudaError_t err = CUDA_CALL; OP_REQUIRES(CTX, err == cudaSuccess, errors::Internal(cudaGetErrorName(err))); } while (false)
//------------------------------------------------------------------------
// CUDA kernel.
template <class T>
struct FusedBiasActKernelParams
{
const T* x; // [sizeX]
const T* b; // [sizeB] or NULL
const T* ref; // [sizeX] or NULL
T* y; // [sizeX]
int grad;
int axis;
int act;
float alpha;
float gain;
int sizeX;
int sizeB;
int stepB;
int loopX;
};
template <class T>
static __global__ void FusedBiasActKernel(const FusedBiasActKernelParams<T> p)
{
const float expRange = 80.0f;
const float halfExpRange = 40.0f;
const float seluScale = 1.0507009873554804934193349852946f;
const float seluAlpha = 1.6732632423543772848170429916717f;
// Loop over elements.
int xi = blockIdx.x * p.loopX * blockDim.x + threadIdx.x;
for (int loopIdx = 0; loopIdx < p.loopX && xi < p.sizeX; loopIdx++, xi += blockDim.x)
{
// Load and apply bias.
float x = (float)p.x[xi];
if (p.b)
x += (float)p.b[(xi / p.stepB) % p.sizeB];
float ref = (p.ref) ? (float)p.ref[xi] : 0.0f;
        if (p.gain != 0.0f && p.act != 9)
            ref /= p.gain;
// Evaluate activation func.
float y;
switch (p.act * 10 + p.grad)
{
// linear
default:
case 10: y = x; break;
case 11: y = x; break;
case 12: y = 0.0f; break;
// relu
case 20: y = (x > 0.0f) ? x : 0.0f; break;
case 21: y = (ref > 0.0f) ? x : 0.0f; break;
case 22: y = 0.0f; break;
// lrelu
case 30: y = (x > 0.0f) ? x : x * p.alpha; break;
case 31: y = (ref > 0.0f) ? x : x * p.alpha; break;
case 32: y = 0.0f; break;
// tanh
case 40: { float c = expf(x); float d = 1.0f / c; y = (x < -expRange) ? -1.0f : (x > expRange) ? 1.0f : (c - d) / (c + d); } break;
case 41: y = x * (1.0f - ref * ref); break;
case 42: y = x * (1.0f - ref * ref) * (-2.0f * ref); break;
// sigmoid
case 50: y = (x < -expRange) ? 0.0f : 1.0f / (expf(-x) + 1.0f); break;
case 51: y = x * ref * (1.0f - ref); break;
case 52: y = x * ref * (1.0f - ref) * (1.0f - 2.0f * ref); break;
// elu
case 60: y = (x >= 0.0f) ? x : expf(x) - 1.0f; break;
case 61: y = (ref >= 0.0f) ? x : x * (ref + 1.0f); break;
case 62: y = (ref >= 0.0f) ? 0.0f : x * (ref + 1.0f); break;
// selu
case 70: y = (x >= 0.0f) ? seluScale * x : (seluScale * seluAlpha) * (expf(x) - 1.0f); break;
case 71: y = (ref >= 0.0f) ? x * seluScale : x * (ref + seluScale * seluAlpha); break;
case 72: y = (ref >= 0.0f) ? 0.0f : x * (ref + seluScale * seluAlpha); break;
// softplus
case 80: y = (x > expRange) ? x : logf(expf(x) + 1.0f); break;
case 81: y = x * (1.0f - expf(-ref)); break;
case 82: { float c = expf(-ref); y = x * c * (1.0f - c); } break;
// swish
case 90: y = (x < -expRange) ? 0.0f : x / (expf(-x) + 1.0f); break;
case 91: { float c = expf(ref); float d = c + 1.0f; y = (ref > halfExpRange) ? x : x * c * (ref + d) / (d * d); } break;
case 92: { float c = expf(ref); float d = c + 1.0f; y = (ref > halfExpRange) ? 0.0f : x * c * (ref * (2.0f - d) + 2.0f * d) / (d * d * d); } break;
}
// Apply gain and store.
p.y[xi] = (T)(y * p.gain);
}
}
//------------------------------------------------------------------------
// TensorFlow op.
template <class T>
struct FusedBiasActOp : public OpKernel
{
FusedBiasActKernelParams<T> m_attribs;
FusedBiasActOp(OpKernelConstruction* ctx) : OpKernel(ctx)
{
memset(&m_attribs, 0, sizeof(m_attribs));
OP_REQUIRES_OK(ctx, ctx->GetAttr("grad", &m_attribs.grad));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &m_attribs.axis));
OP_REQUIRES_OK(ctx, ctx->GetAttr("act", &m_attribs.act));
OP_REQUIRES_OK(ctx, ctx->GetAttr("alpha", &m_attribs.alpha));
OP_REQUIRES_OK(ctx, ctx->GetAttr("gain", &m_attribs.gain));
OP_REQUIRES(ctx, m_attribs.grad >= 0, errors::InvalidArgument("grad must be non-negative"));
OP_REQUIRES(ctx, m_attribs.axis >= 0, errors::InvalidArgument("axis must be non-negative"));
OP_REQUIRES(ctx, m_attribs.act >= 0, errors::InvalidArgument("act must be non-negative"));
}
void Compute(OpKernelContext* ctx)
{
FusedBiasActKernelParams<T> p = m_attribs;
cudaStream_t stream = ctx->eigen_device<Eigen::GpuDevice>().stream();
const Tensor& x = ctx->input(0); // [...]
const Tensor& b = ctx->input(1); // [sizeB] or [0]
const Tensor& ref = ctx->input(2); // x.shape or [0]
p.x = x.flat<T>().data();
p.b = (b.NumElements()) ? b.flat<T>().data() : NULL;
p.ref = (ref.NumElements()) ? ref.flat<T>().data() : NULL;
OP_REQUIRES(ctx, b.NumElements() == 0 || m_attribs.axis < x.dims(), errors::InvalidArgument("axis out of bounds"));
OP_REQUIRES(ctx, b.dims() == 1, errors::InvalidArgument("b must have rank 1"));
OP_REQUIRES(ctx, b.NumElements() == 0 || b.NumElements() == x.dim_size(m_attribs.axis), errors::InvalidArgument("b has wrong number of elements"));
OP_REQUIRES(ctx, ref.NumElements() == ((p.grad == 0) ? 0 : x.NumElements()), errors::InvalidArgument("ref has wrong number of elements"));
OP_REQUIRES(ctx, x.NumElements() <= kint32max, errors::InvalidArgument("x is too large"));
p.sizeX = (int)x.NumElements();
p.sizeB = (int)b.NumElements();
p.stepB = 1;
for (int i = m_attribs.axis + 1; i < x.dims(); i++)
p.stepB *= (int)x.dim_size(i);
Tensor* y = NULL; // x.shape
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, x.shape(), &y));
p.y = y->flat<T>().data();
p.loopX = 4;
int blockSize = 4 * 32;
int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
void* args[] = {&p};
OP_CHECK_CUDA_ERROR(ctx, cudaLaunchKernel((void*)FusedBiasActKernel<T>, gridSize, blockSize, args, 0, stream));
}
};
REGISTER_OP("FusedBiasAct")
.Input ("x: T")
.Input ("b: T")
.Input ("ref: T")
.Output ("y: T")
.Attr ("T: {float, half}")
.Attr ("grad: int = 0")
.Attr ("axis: int = 1")
.Attr ("act: int = 0")
.Attr ("alpha: float = 0.0")
.Attr ("gain: float = 1.0");
REGISTER_KERNEL_BUILDER(Name("FusedBiasAct").Device(DEVICE_GPU).TypeConstraint<float>("T"), FusedBiasActOp<float>);
REGISTER_KERNEL_BUILDER(Name("FusedBiasAct").Device(DEVICE_GPU).TypeConstraint<Eigen::half>("T"), FusedBiasActOp<Eigen::half>);
//------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/fused_bias_act.cu/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/fused_bias_act.cu",
"repo_id": "insightface",
"token_count": 3843
} | 133 |
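The kernel above encodes each case as `act * 10 + grad`. Below is a minimal
NumPy reference for the forward pass (grad = 0) of a few activations, written
as an editor's sketch rather than part of the original sources; the default
`gain` is an assumption matching the usual lrelu setting.

import numpy as np

def fused_bias_act_ref(x, b=None, axis=1, act=3, alpha=0.2, gain=np.sqrt(2)):
    # act follows the CUDA switch: 1=linear, 2=relu, 3=lrelu, 4=tanh, 5=sigmoid
    y = x.astype(np.float32).copy()
    if b is not None:
        shape = [1] * x.ndim
        shape[axis] = -1
        y = y + b.reshape(shape)
    if act == 2:
        y = np.maximum(y, 0.0)
    elif act == 3:
        y = np.where(y > 0.0, y, y * alpha)
    elif act == 4:
        y = np.tanh(y)
    elif act == 5:
        y = 1.0 / (1.0 + np.exp(-y))
    return y * gain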
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Common definitions for GAN metrics."""
import os
import time
import hashlib
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from training import misc
from training import dataset
#----------------------------------------------------------------------------
# Base class for metrics.
class MetricBase:
def __init__(self, name):
self.name = name
self._dataset_obj = None
self._progress_lo = None
self._progress_hi = None
self._progress_max = None
self._progress_sec = None
self._progress_time = None
self._reset()
def close(self):
self._reset()
def _reset(self, network_pkl=None, run_dir=None, data_dir=None, dataset_args=None, mirror_augment=None):
if self._dataset_obj is not None:
self._dataset_obj.close()
self._network_pkl = network_pkl
self._data_dir = data_dir
self._dataset_args = dataset_args
self._dataset_obj = None
self._mirror_augment = mirror_augment
self._eval_time = 0
self._results = []
if (dataset_args is None or mirror_augment is None) and run_dir is not None:
run_config = misc.parse_config_for_previous_run(run_dir)
self._dataset_args = dict(run_config['dataset'])
self._dataset_args['shuffle_mb'] = 0
self._mirror_augment = run_config['train'].get('mirror_augment', False)
def configure_progress_reports(self, plo, phi, pmax, psec=15):
self._progress_lo = plo
self._progress_hi = phi
self._progress_max = pmax
self._progress_sec = psec
def run(self, network_pkl, run_dir=None, data_dir=None, dataset_args=None, mirror_augment=None, num_gpus=1, tf_config=None, log_results=True, Gs_kwargs=dict(is_validation=True)):
self._reset(network_pkl=network_pkl, run_dir=run_dir, data_dir=data_dir, dataset_args=dataset_args, mirror_augment=mirror_augment)
time_begin = time.time()
with tf.Graph().as_default(), tflib.create_session(tf_config).as_default(): # pylint: disable=not-context-manager
self._report_progress(0, 1)
_G, _D, Gs = misc.load_pkl(self._network_pkl)
self._evaluate(Gs, Gs_kwargs=Gs_kwargs, num_gpus=num_gpus)
self._report_progress(1, 1)
self._eval_time = time.time() - time_begin # pylint: disable=attribute-defined-outside-init
if log_results:
if run_dir is not None:
log_file = os.path.join(run_dir, 'metric-%s.txt' % self.name)
with dnnlib.util.Logger(log_file, 'a'):
print(self.get_result_str().strip())
else:
print(self.get_result_str().strip())
def get_result_str(self):
network_name = os.path.splitext(os.path.basename(self._network_pkl))[0]
if len(network_name) > 29:
network_name = '...' + network_name[-26:]
result_str = '%-30s' % network_name
result_str += ' time %-12s' % dnnlib.util.format_time(self._eval_time)
for res in self._results:
result_str += ' ' + self.name + res.suffix + ' '
result_str += res.fmt % res.value
return result_str
def update_autosummaries(self):
for res in self._results:
tflib.autosummary.autosummary('Metrics/' + self.name + res.suffix, res.value)
def _evaluate(self, Gs, Gs_kwargs, num_gpus):
raise NotImplementedError # to be overridden by subclasses
def _report_result(self, value, suffix='', fmt='%-10.4f'):
self._results += [dnnlib.EasyDict(value=value, suffix=suffix, fmt=fmt)]
def _report_progress(self, pcur, pmax, status_str=''):
if self._progress_lo is None or self._progress_hi is None or self._progress_max is None:
return
t = time.time()
if self._progress_sec is not None and self._progress_time is not None and t < self._progress_time + self._progress_sec:
return
self._progress_time = t
val = self._progress_lo + (pcur / pmax) * (self._progress_hi - self._progress_lo)
dnnlib.RunContext.get().update(status_str, int(val), self._progress_max)
def _get_cache_file_for_reals(self, extension='pkl', **kwargs):
all_args = dnnlib.EasyDict(metric_name=self.name, mirror_augment=self._mirror_augment)
all_args.update(self._dataset_args)
all_args.update(kwargs)
md5 = hashlib.md5(repr(sorted(all_args.items())).encode('utf-8'))
dataset_name = self._dataset_args.get('tfrecord_dir', None) or self._dataset_args.get('h5_file', None)
dataset_name = os.path.splitext(os.path.basename(dataset_name))[0]
return os.path.join('.stylegan2-cache', '%s-%s-%s.%s' % (md5.hexdigest(), self.name, dataset_name, extension))
def _get_dataset_obj(self):
if self._dataset_obj is None:
self._dataset_obj = dataset.load_dataset(data_dir=self._data_dir, **self._dataset_args)
return self._dataset_obj
def _iterate_reals(self, minibatch_size):
dataset_obj = self._get_dataset_obj()
while True:
images, _labels = dataset_obj.get_minibatch_np(minibatch_size)
if self._mirror_augment:
images = misc.apply_mirror_augment(images)
yield images
def _iterate_fakes(self, Gs, minibatch_size, num_gpus):
while True:
latents = np.random.randn(minibatch_size, *Gs.input_shape[1:])
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
images = Gs.run(latents, None, output_transform=fmt, is_validation=True, num_gpus=num_gpus, assume_frozen=True)
yield images
def _get_random_labels_tf(self, minibatch_size):
return self._get_dataset_obj().get_random_labels_tf(minibatch_size)
#----------------------------------------------------------------------------
# Group of multiple metrics.
class MetricGroup:
def __init__(self, metric_kwarg_list):
self.metrics = [dnnlib.util.call_func_by_name(**kwargs) for kwargs in metric_kwarg_list]
def run(self, *args, **kwargs):
for metric in self.metrics:
metric.run(*args, **kwargs)
def get_result_str(self):
return ' '.join(metric.get_result_str() for metric in self.metrics)
def update_autosummaries(self):
for metric in self.metrics:
metric.update_autosummaries()
#----------------------------------------------------------------------------
# Dummy metric for debugging purposes.
class DummyMetric(MetricBase):
def _evaluate(self, Gs, Gs_kwargs, num_gpus):
_ = Gs, Gs_kwargs, num_gpus
self._report_result(0.0)
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/metrics/metric_base.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/metrics/metric_base.py",
"repo_id": "insightface",
"token_count": 3030
} | 134 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Network architectures used in the StyleGAN2 paper."""
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.ops.upfirdn_2d import upsample_2d, downsample_2d, upsample_conv_2d, conv_downsample_2d
from dnnlib.tflib.ops.fused_bias_act import fused_bias_act
# NOTE: Do not import any application-specific modules here!
# Specify all network parameters as kwargs.
#----------------------------------------------------------------------------
# Get/create weight tensor for a convolution or fully-connected layer.
def get_weight(shape, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out]
he_std = gain / np.sqrt(fan_in) # He init
# Equalized learning rate and custom learning rate multiplier.
if use_wscale:
init_std = 1.0 / lrmul
runtime_coef = he_std * lrmul
else:
init_std = he_std / lrmul
runtime_coef = lrmul
# Create variable.
init = tf.initializers.random_normal(0, init_std)
return tf.get_variable(weight_var, shape=shape, initializer=init) * runtime_coef
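
# Editor's sketch (not in the original file): with use_wscale the stored
# variable is drawn with std 1/lrmul and multiplied by he_std*lrmul at run
# time, so the effective weight std is always he_std while gradient updates
# scale with lrmul.
def _wscale_selfcheck(shape=(3, 3, 16, 32), gain=1, lrmul=1):
    fan_in = np.prod(shape[:-1])
    he_std = gain / np.sqrt(fan_in)
    w_var = np.random.randn(*shape) * (1.0 / lrmul)  # init_std
    w_eff = w_var * (he_std * lrmul)                 # runtime_coef
    return w_eff.std(), he_std                       # approximately equal
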
#----------------------------------------------------------------------------
# Fully-connected layer.
def dense_layer(x, fmaps, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
if len(x.shape) > 2:
x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
w = tf.cast(w, x.dtype)
return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolution layer with optional upsampling or downsampling.
def conv2d_layer(x, fmaps, kernel, up=False, down=False, resample_kernel=None, gain=1, use_wscale=True, lrmul=1, weight_var='weight'):
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
if up:
x = upsample_conv_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
elif down:
x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
else:
x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
return x
#----------------------------------------------------------------------------
# Apply bias and activation func.
def apply_bias_act(x, act='linear', alpha=None, gain=None, lrmul=1, bias_var='bias'):
b = tf.get_variable(bias_var, shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul
return fused_bias_act(x, b=tf.cast(b, x.dtype), act=act, alpha=alpha, gain=gain)
#----------------------------------------------------------------------------
# Naive upsampling (nearest neighbor) and downsampling (average pooling).
def naive_upsample_2d(x, factor=2):
with tf.variable_scope('NaiveUpsample'):
_N, C, H, W = x.shape.as_list()
x = tf.reshape(x, [-1, C, H, 1, W, 1])
x = tf.tile(x, [1, 1, 1, factor, 1, factor])
return tf.reshape(x, [-1, C, H * factor, W * factor])
def naive_downsample_2d(x, factor=2):
with tf.variable_scope('NaiveDownsample'):
_N, C, H, W = x.shape.as_list()
x = tf.reshape(x, [-1, C, H // factor, factor, W // factor, factor])
return tf.reduce_mean(x, axis=[3,5])
#----------------------------------------------------------------------------
# Modulated convolution layer.
def modulated_conv2d_layer(x, y, fmaps, kernel, up=False, down=False, demodulate=True, resample_kernel=None, gain=1, use_wscale=True, lrmul=1, fused_modconv=True, weight_var='weight', mod_weight_var='mod_weight', mod_bias_var='mod_bias'):
assert not (up and down)
assert kernel >= 1 and kernel % 2 == 1
# Get weight.
w = get_weight([kernel, kernel, x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale, lrmul=lrmul, weight_var=weight_var)
ww = w[np.newaxis] # [BkkIO] Introduce minibatch dimension.
# Modulate.
s = dense_layer(y, fmaps=x.shape[1].value, weight_var=mod_weight_var) # [BI] Transform incoming W to style.
s = apply_bias_act(s, bias_var=mod_bias_var) + 1 # [BI] Add bias (initially 1).
ww *= tf.cast(s[:, np.newaxis, np.newaxis, :, np.newaxis], w.dtype) # [BkkIO] Scale input feature maps.
# Demodulate.
if demodulate:
d = tf.rsqrt(tf.reduce_sum(tf.square(ww), axis=[1,2,3]) + 1e-8) # [BO] Scaling factor.
ww *= d[:, np.newaxis, np.newaxis, np.newaxis, :] # [BkkIO] Scale output feature maps.
# Reshape/scale input.
if fused_modconv:
x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]]) # Fused => reshape minibatch to convolution groups.
w = tf.reshape(tf.transpose(ww, [1, 2, 3, 0, 4]), [ww.shape[1], ww.shape[2], ww.shape[3], -1])
else:
x *= tf.cast(s[:, :, np.newaxis, np.newaxis], x.dtype) # [BIhw] Not fused => scale input activations.
# Convolution with optional up/downsampling.
if up:
x = upsample_conv_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
elif down:
x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
else:
x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
# Reshape/scale output.
if fused_modconv:
x = tf.reshape(x, [-1, fmaps, x.shape[2], x.shape[3]]) # Fused => reshape convolution groups back to minibatch.
elif demodulate:
x *= tf.cast(d[:, :, np.newaxis, np.newaxis], x.dtype) # [BOhw] Not fused => scale output activations.
return x
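
# Editor's sketch (not in the original file): the modulate/demodulate math in
# NumPy. After scaling input channels by the style s and renormalizing by d,
# every output filter has (approximately) unit L2 norm, which is the
# statistical stand-in for instance normalization.
def _demodulation_selfcheck(kernel=3, fmaps_in=8, fmaps_out=4):
    w = np.random.randn(kernel, kernel, fmaps_in, fmaps_out)
    s = np.random.randn(fmaps_in) + 1.0
    ww = w * s[np.newaxis, np.newaxis, :, np.newaxis]
    d = 1.0 / np.sqrt(np.sum(np.square(ww), axis=(0, 1, 2)) + 1e-8)
    ww = ww * d[np.newaxis, np.newaxis, np.newaxis, :]
    return np.sum(np.square(ww), axis=(0, 1, 2))  # ~1 per output map
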
#----------------------------------------------------------------------------
# Minibatch standard deviation layer.
def minibatch_stddev_layer(x, group_size=4, num_new_features=1):
group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size.
s = x.shape # [NCHW] Input shape.
y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c.
y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32.
y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group.
y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group.
y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group.
y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels.
y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups
y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type.
y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels.
return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap.
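
# Editor's sketch (NumPy reference, not in the original file) of the layer
# above, for NCHW input whose batch size is divisible by group_size.
def _minibatch_stddev_ref(x, group_size=4, num_new_features=1):
    N, C, H, W = x.shape
    G, n = group_size, num_new_features
    y = x.reshape(G, -1, n, C // n, H, W).astype(np.float32)
    y = y - y.mean(axis=0, keepdims=True)
    y = np.sqrt(np.square(y).mean(axis=0) + 1e-8)   # stddev over each group
    y = y.mean(axis=(2, 3, 4), keepdims=True)       # -> [M, n, 1, 1, 1]
    y = y.mean(axis=2)                              # -> [M, n, 1, 1]
    y = np.tile(y, (G, 1, H, W)).astype(x.dtype)    # -> [N, n, H, W]
    return np.concatenate([x, y], axis=1)           # -> [N, C + n, H, W]
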
#----------------------------------------------------------------------------
# Main generator network.
# Composed of two sub-networks (mapping and synthesis) that are defined below.
# Used in configs B-F (Table 1).
def G_main(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
truncation_psi = 0.5, # Style strength multiplier for the truncation trick. None = disable.
truncation_cutoff = None, # Number of layers for which to apply the truncation trick. None = disable.
truncation_psi_val = None, # Value for truncation_psi to use during validation.
truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation.
dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable.
style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable.
is_training = False, # Network is under training? Enables and disables specific features.
is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi.
return_dlatents = False, # Return dlatents in addition to the images?
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls.
mapping_func = 'G_mapping', # Build func name for the mapping network.
synthesis_func = 'G_synthesis_stylegan2', # Build func name for the synthesis network.
**kwargs): # Arguments for sub-networks (mapping and synthesis).
# Validate arguments.
assert not is_training or not is_validation
assert isinstance(components, dnnlib.EasyDict)
if is_validation:
truncation_psi = truncation_psi_val
truncation_cutoff = truncation_cutoff_val
if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1):
truncation_psi = None
if is_training:
truncation_cutoff = None
if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1):
dlatent_avg_beta = None
if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0):
style_mixing_prob = None
# Setup components.
if 'synthesis' not in components:
components.synthesis = tflib.Network('G_synthesis', func_name=globals()[synthesis_func], **kwargs)
num_layers = components.synthesis.input_shape[1]
dlatent_size = components.synthesis.input_shape[2]
if 'mapping' not in components:
components.mapping = tflib.Network('G_mapping', func_name=globals()[mapping_func], dlatent_broadcast=num_layers, **kwargs)
# Setup variables.
lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False)
dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False)
# Evaluate mapping network.
dlatents = components.mapping.get_output_for(latents_in, labels_in, is_training=is_training, **kwargs)
dlatents = tf.cast(dlatents, tf.float32)
# Update moving average of W.
if dlatent_avg_beta is not None:
with tf.variable_scope('DlatentAvg'):
batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0)
update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta))
with tf.control_dependencies([update_op]):
dlatents = tf.identity(dlatents)
# Perform style mixing regularization.
if style_mixing_prob is not None:
with tf.variable_scope('StyleMix'):
latents2 = tf.random_normal(tf.shape(latents_in))
dlatents2 = components.mapping.get_output_for(latents2, labels_in, is_training=is_training, **kwargs)
dlatents2 = tf.cast(dlatents2, tf.float32)
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2
mixing_cutoff = tf.cond(
tf.random_uniform([], 0.0, 1.0) < style_mixing_prob,
lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32),
lambda: cur_layers)
dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2)
# Apply truncation trick.
if truncation_psi is not None:
with tf.variable_scope('Truncation'):
layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
layer_psi = np.ones(layer_idx.shape, dtype=np.float32)
if truncation_cutoff is None:
layer_psi *= truncation_psi
else:
layer_psi = tf.where(layer_idx < truncation_cutoff, layer_psi * truncation_psi, layer_psi)
dlatents = tflib.lerp(dlatent_avg, dlatents, layer_psi)
# Evaluate synthesis network.
deps = []
if 'lod' in components.synthesis.vars:
deps.append(tf.assign(components.synthesis.vars['lod'], lod_in))
with tf.control_dependencies(deps):
images_out = components.synthesis.get_output_for(dlatents, is_training=is_training, force_clean_graph=is_template_graph, **kwargs)
# Return requested outputs.
images_out = tf.identity(images_out, name='images_out')
if return_dlatents:
return images_out, dlatents
return images_out
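#----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original API).
# The truncation trick above is a per-layer lerp toward the average dlatent,
# w' = w_avg + psi * (w - w_avg). NumPy sketch, assuming dlatents of shape
# [batch, num_layers, dlatent_size]; all names are hypothetical.
def _truncate_reference(dlatents, dlatent_avg, psi=0.5, cutoff=None):
    import numpy as np
    num_layers = dlatents.shape[1]
    layer_psi = np.ones([1, num_layers, 1], dtype=np.float32)
    if cutoff is None:
        layer_psi *= psi
    else:
        layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis]
        layer_psi = np.where(layer_idx < cutoff, layer_psi * psi, layer_psi)
    return dlatent_avg + (dlatents - dlatent_avg) * layer_psi  # lerp(avg, w, psi)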
#----------------------------------------------------------------------------
# Mapping network.
# Transforms the input latent code (z) to the disentangled latent code (w).
# Used in configs B-F (Table 1).
def G_mapping(
latents_in, # First input: Latent vectors (Z) [minibatch, latent_size].
labels_in, # Second input: Conditioning labels [minibatch, label_size].
latent_size = 512, # Latent vector (Z) dimensionality.
label_size = 0, # Label dimensionality, 0 if no labels.
dlatent_size = 512, # Disentangled latent (W) dimensionality.
dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size].
mapping_layers = 8, # Number of mapping layers.
mapping_fmaps = 512, # Number of activations in the mapping layers.
mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers.
mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers?
dtype = 'float32', # Data type to use for activations and outputs.
**_kwargs): # Ignore unrecognized keyword args.
act = mapping_nonlinearity
# Inputs.
latents_in.set_shape([None, latent_size])
labels_in.set_shape([None, label_size])
latents_in = tf.cast(latents_in, dtype)
labels_in = tf.cast(labels_in, dtype)
x = latents_in
# Embed labels and concatenate them with latents.
if label_size:
with tf.variable_scope('LabelConcat'):
w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal())
y = tf.matmul(labels_in, tf.cast(w, dtype))
x = tf.concat([x, y], axis=1)
# Normalize latents.
if normalize_latents:
with tf.variable_scope('Normalize'):
x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + 1e-8)
# Mapping layers.
for layer_idx in range(mapping_layers):
with tf.variable_scope('Dense%d' % layer_idx):
fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps
x = apply_bias_act(dense_layer(x, fmaps=fmaps, lrmul=mapping_lrmul), act=act, lrmul=mapping_lrmul)
# Broadcast.
if dlatent_broadcast is not None:
with tf.variable_scope('Broadcast'):
x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1])
# Output.
assert x.dtype == tf.as_dtype(dtype)
return tf.identity(x, name='dlatents_out')
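#----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original API).
# The 'Normalize' step above is pixel norm over the feature axis:
# x / sqrt(mean(x^2) + eps). NumPy sketch for x of shape [batch, latent_size]:
def _normalize_latents_reference(x, eps=1e-8):
    import numpy as np
    return x / np.sqrt(np.mean(np.square(x), axis=1, keepdims=True) + eps)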
#----------------------------------------------------------------------------
# StyleGAN synthesis network with revised architecture (Figure 2d).
# Implements progressive growing, but no skip connections or residual nets (Figure 7).
# Used in configs B-D (Table 1).
def G_synthesis_stylegan_revised(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
fused_modconv = True, # Implement modulated_conv2d_layer() as a single fused op?
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
if is_template_graph: force_clean_graph = True
if force_clean_graph: randomize_noise = False
if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive'
act = nonlinearity
num_layers = resolution_log2 * 2 - 2
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_layers, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype)
# Noise inputs.
noise_inputs = []
for layer_idx in range(num_layers - 1):
res = (layer_idx + 5) // 2
shape = [1, 1, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Single convolution layer with all the bells and whistles.
def layer(x, layer_idx, fmaps, kernel, up=False):
x = modulated_conv2d_layer(x, dlatents_in[:, layer_idx], fmaps=fmaps, kernel=kernel, up=up, resample_kernel=resample_kernel, fused_modconv=fused_modconv)
if randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_inputs[layer_idx], x.dtype)
noise_strength = tf.get_variable('noise_strength', shape=[], initializer=tf.initializers.zeros())
x += noise * tf.cast(noise_strength, x.dtype)
return apply_bias_act(x, act=act)
# Early layers.
with tf.variable_scope('4x4'):
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.random_normal())
x = tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1])
with tf.variable_scope('Conv'):
x = layer(x, layer_idx=0, fmaps=nf(1), kernel=3)
# Building blocks for remaining layers.
def block(res, x): # res = 3..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0_up'):
x = layer(x, layer_idx=res*2-5, fmaps=nf(res-1), kernel=3, up=True)
with tf.variable_scope('Conv1'):
x = layer(x, layer_idx=res*2-4, fmaps=nf(res-1), kernel=3)
return x
def torgb(res, x): # res = 2..resolution_log2
with tf.variable_scope('ToRGB_lod%d' % (resolution_log2 - res)):
return apply_bias_act(modulated_conv2d_layer(x, dlatents_in[:, res*2-3], fmaps=num_channels, kernel=1, demodulate=False, fused_modconv=fused_modconv))
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
for res in range(3, resolution_log2 + 1):
x = block(res, x)
images_out = torgb(resolution_log2, x)
# Linear structure: simple but inefficient.
if structure == 'linear':
images_out = torgb(2, x)
for res in range(3, resolution_log2 + 1):
lod = resolution_log2 - res
x = block(res, x)
img = torgb(res, x)
with tf.variable_scope('Upsample_lod%d' % lod):
images_out = upsample_2d(images_out)
with tf.variable_scope('Grow_lod%d' % lod):
images_out = tflib.lerp_clip(img, images_out, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(x, res, lod):
y = block(res, x)
img = lambda: naive_upsample_2d(torgb(res, y), factor=2**lod)
img = cset(img, (lod_in > lod), lambda: naive_upsample_2d(tflib.lerp(torgb(res, y), upsample_2d(torgb(res - 1, x)), lod_in - lod), factor=2**lod))
if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1))
return img()
images_out = grow(x, 3, resolution_log2 - 3)
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
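#----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original API).
# Helper to inspect the nf() feature-map schedule used above; with the default
# hyperparameters it stays at fmap_max for the low resolutions and then halves
# each time the resolution doubles. Arguments mirror the defaults above.
def _print_fmap_schedule(resolution_log2=10, fmap_base=16 << 10, fmap_decay=1.0,
                         fmap_min=1, fmap_max=512):
    import numpy as np
    for res in range(2, resolution_log2 + 1):
        n = int(np.clip(int(fmap_base / (2.0 ** ((res - 1) * fmap_decay))), fmap_min, fmap_max))
        print('%4dx%-4d -> %3d fmaps' % (2 ** res, 2 ** res, n))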
#----------------------------------------------------------------------------
# StyleGAN2 synthesis network (Figure 7).
# Implements skip connections and residual nets (Figure 7), but no progressive growing.
# Used in configs E-F (Table 1).
def G_synthesis_stylegan2(
dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size].
dlatent_size = 512, # Disentangled latent (W) dimensionality.
num_channels = 3, # Number of output color channels.
resolution = 1024, # Output resolution.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables.
architecture = 'skip', # Architecture: 'orig', 'skip', 'resnet'.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
fused_modconv = True, # Implement modulated_conv2d_layer() as a single fused op?
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
assert architecture in ['orig', 'skip', 'resnet']
act = nonlinearity
num_layers = resolution_log2 * 2 - 2
images_out = None
# Primary inputs.
dlatents_in.set_shape([None, num_layers, dlatent_size])
dlatents_in = tf.cast(dlatents_in, dtype)
# Noise inputs.
noise_inputs = []
for layer_idx in range(num_layers - 1):
res = (layer_idx + 5) // 2
shape = [1, 1, 2**res, 2**res]
noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False))
# Single convolution layer with all the bells and whistles.
def layer(x, layer_idx, fmaps, kernel, up=False):
x = modulated_conv2d_layer(x, dlatents_in[:, layer_idx], fmaps=fmaps, kernel=kernel, up=up, resample_kernel=resample_kernel, fused_modconv=fused_modconv)
if randomize_noise:
noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype)
else:
noise = tf.cast(noise_inputs[layer_idx], x.dtype)
noise_strength = tf.get_variable('noise_strength', shape=[], initializer=tf.initializers.zeros())
x += noise * tf.cast(noise_strength, x.dtype)
return apply_bias_act(x, act=act)
# Building blocks for main layers.
def block(x, res): # res = 3..resolution_log2
t = x
with tf.variable_scope('Conv0_up'):
x = layer(x, layer_idx=res*2-5, fmaps=nf(res-1), kernel=3, up=True)
with tf.variable_scope('Conv1'):
x = layer(x, layer_idx=res*2-4, fmaps=nf(res-1), kernel=3)
if architecture == 'resnet':
with tf.variable_scope('Skip'):
t = conv2d_layer(t, fmaps=nf(res-1), kernel=1, up=True, resample_kernel=resample_kernel)
x = (x + t) * (1 / np.sqrt(2))
return x
def upsample(y):
with tf.variable_scope('Upsample'):
return upsample_2d(y, k=resample_kernel)
def torgb(x, y, res): # res = 2..resolution_log2
with tf.variable_scope('ToRGB'):
t = apply_bias_act(modulated_conv2d_layer(x, dlatents_in[:, res*2-3], fmaps=num_channels, kernel=1, demodulate=False, fused_modconv=fused_modconv))
return t if y is None else y + t
# Early layers.
y = None
with tf.variable_scope('4x4'):
with tf.variable_scope('Const'):
x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.random_normal())
x = tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1])
with tf.variable_scope('Conv'):
x = layer(x, layer_idx=0, fmaps=nf(1), kernel=3)
if architecture == 'skip':
y = torgb(x, y, 2)
# Main layers.
for res in range(3, resolution_log2 + 1):
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
x = block(x, res)
if architecture == 'skip':
y = upsample(y)
if architecture == 'skip' or res == resolution_log2:
y = torgb(x, y, res)
images_out = y
assert images_out.dtype == tf.as_dtype(dtype)
return tf.identity(images_out, name='images_out')
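#----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original API).
# In the 'skip' architecture above, the output image is a running sum of the
# per-resolution ToRGB outputs, the accumulated total being upsampled before
# each new contribution is added. Names below are hypothetical.
def _skip_rgb_accumulation_reference(rgb_per_res, upsample2x):
    # rgb_per_res: ToRGB outputs, lowest resolution first; upsample2x: callable.
    y = None
    for t in rgb_per_res:
        y = t if y is None else upsample2x(y) + t
    return y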
#----------------------------------------------------------------------------
# Original StyleGAN discriminator.
# Used in configs B-D (Table 1).
def D_stylegan(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 3, # Number of input color channels. Overridden based on dataset.
resolution = 1024, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically.
is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive'
act = nonlinearity
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype)
# Building blocks for spatial layers.
def fromrgb(x, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)):
return apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=1), act=act)
def block(x, res): # res = 2..resolution_log2
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
with tf.variable_scope('Conv0'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=3), act=act)
with tf.variable_scope('Conv1_down'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-2), kernel=3, down=True, resample_kernel=resample_kernel), act=act)
return x
# Fixed structure: simple and efficient, but does not support progressive growing.
if structure == 'fixed':
x = fromrgb(images_in, resolution_log2)
for res in range(resolution_log2, 2, -1):
x = block(x, res)
# Linear structure: simple but inefficient.
if structure == 'linear':
img = images_in
x = fromrgb(img, resolution_log2)
for res in range(resolution_log2, 2, -1):
lod = resolution_log2 - res
x = block(x, res)
with tf.variable_scope('Downsample_lod%d' % lod):
img = downsample_2d(img)
y = fromrgb(img, res - 1)
with tf.variable_scope('Grow_lod%d' % lod):
x = tflib.lerp_clip(x, y, lod_in - lod)
# Recursive structure: complex but efficient.
if structure == 'recursive':
def cset(cur_lambda, new_cond, new_lambda):
return lambda: tf.cond(new_cond, new_lambda, cur_lambda)
def grow(res, lod):
x = lambda: fromrgb(naive_downsample_2d(images_in, factor=2**lod), res)
if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1))
x = block(x(), res); y = lambda: x
y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(naive_downsample_2d(images_in, factor=2**(lod+1)), res - 1), lod_in - lod))
return y()
x = grow(3, resolution_log2 - 3)
# Final layers at 4x4 resolution.
with tf.variable_scope('4x4'):
if mbstd_group_size > 1:
with tf.variable_scope('MinibatchStddev'):
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(1), kernel=3), act=act)
with tf.variable_scope('Dense0'):
x = apply_bias_act(dense_layer(x, fmaps=nf(0)), act=act)
# Output layer with label conditioning from "Which Training Methods for GANs do actually Converge?"
with tf.variable_scope('Output'):
x = apply_bias_act(dense_layer(x, fmaps=max(labels_in.shape[1], 1)))
if labels_in.shape[1] > 0:
x = tf.reduce_sum(x * labels_in, axis=1, keepdims=True)
scores_out = x
# Output.
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
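#----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original API).
# The progressive-growing blend used in 'Grow_lod' above is
# tflib.lerp_clip(a, b, t) = a + (b - a) * clip(t, 0, 1), restated in NumPy:
def _lerp_clip_reference(a, b, t):
    import numpy as np
    return a + (b - a) * np.clip(t, 0.0, 1.0)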
#----------------------------------------------------------------------------
# StyleGAN2 discriminator (Figure 7).
# Implements skip connections and residual nets (Figure 7), but no progressive growing.
# Used in configs E-F (Table 1).
def D_stylegan2(
images_in, # First input: Images [minibatch, channel, height, width].
labels_in, # Second input: Labels [minibatch, label_size].
num_channels = 3, # Number of input color channels. Overridden based on dataset.
resolution = 1024, # Input resolution. Overridden based on dataset.
label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset.
fmap_base = 16 << 10, # Overall multiplier for the number of feature maps.
fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution.
fmap_min = 1, # Minimum number of feature maps in any layer.
fmap_max = 512, # Maximum number of feature maps in any layer.
architecture = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', etc.
mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable.
mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer.
dtype = 'float32', # Data type to use for activations and outputs.
resample_kernel = [1,3,3,1], # Low-pass filter to apply when resampling activations. None = no filtering.
**_kwargs): # Ignore unrecognized keyword args.
resolution_log2 = int(np.log2(resolution))
assert resolution == 2**resolution_log2 and resolution >= 4
def nf(stage): return np.clip(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_min, fmap_max)
assert architecture in ['orig', 'skip', 'resnet']
act = nonlinearity
images_in.set_shape([None, num_channels, resolution, resolution])
labels_in.set_shape([None, label_size])
images_in = tf.cast(images_in, dtype)
labels_in = tf.cast(labels_in, dtype)
# Building blocks for main layers.
def fromrgb(x, y, res): # res = 2..resolution_log2
with tf.variable_scope('FromRGB'):
t = apply_bias_act(conv2d_layer(y, fmaps=nf(res-1), kernel=1), act=act)
return t if x is None else x + t
def block(x, res): # res = 2..resolution_log2
t = x
with tf.variable_scope('Conv0'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-1), kernel=3), act=act)
with tf.variable_scope('Conv1_down'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(res-2), kernel=3, down=True, resample_kernel=resample_kernel), act=act)
if architecture == 'resnet':
with tf.variable_scope('Skip'):
t = conv2d_layer(t, fmaps=nf(res-2), kernel=1, down=True, resample_kernel=resample_kernel)
x = (x + t) * (1 / np.sqrt(2))
return x
def downsample(y):
with tf.variable_scope('Downsample'):
return downsample_2d(y, k=resample_kernel)
# Main layers.
x = None
y = images_in
for res in range(resolution_log2, 2, -1):
with tf.variable_scope('%dx%d' % (2**res, 2**res)):
if architecture == 'skip' or res == resolution_log2:
x = fromrgb(x, y, res)
x = block(x, res)
if architecture == 'skip':
y = downsample(y)
# Final layers.
with tf.variable_scope('4x4'):
if architecture == 'skip':
x = fromrgb(x, y, 2)
if mbstd_group_size > 1:
with tf.variable_scope('MinibatchStddev'):
x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features)
with tf.variable_scope('Conv'):
x = apply_bias_act(conv2d_layer(x, fmaps=nf(1), kernel=3), act=act)
with tf.variable_scope('Dense0'):
x = apply_bias_act(dense_layer(x, fmaps=nf(0)), act=act)
# Output layer with label conditioning from "Which Training Methods for GANs do actually Converge?"
with tf.variable_scope('Output'):
x = apply_bias_act(dense_layer(x, fmaps=max(labels_in.shape[1], 1)))
if labels_in.shape[1] > 0:
x = tf.reduce_sum(x * labels_in, axis=1, keepdims=True)
scores_out = x
# Output.
assert scores_out.dtype == tf.as_dtype(dtype)
scores_out = tf.identity(scores_out, name='scores_out')
return scores_out
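#----------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original API).
# The 'resnet' blocks above merge the main path x and the 1x1-conv skip path t
# with a 1/sqrt(2) factor, keeping the variance of the sum comparable to that
# of either input:
def _residual_merge_reference(x, t):
    import numpy as np
    return (x + t) * (1.0 / np.sqrt(2.0))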
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/training/networks_stylegan2.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/training/networks_stylegan2.py",
"repo_id": "insightface",
"token_count": 16652
} | 135 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import numpy_helper, ValueInfoProto, AttributeProto, GraphProto, NodeProto, TensorProto, TensorShapeProto
from typing import Any, Text, Iterable, List, Dict, Sequence, Optional, Tuple, Union
from typing_extensions import Protocol
import numpy as np
class Transformer(Protocol):
def __call__(self, graph): # type: (Graph) -> Graph
pass
EdgeInfo = Tuple[Text, Any, TensorShapeProto]
AttributeValue = Any # TODO Union[Sequence[float], Sequence[int], Sequence[Text], Sequence[TensorProto], Sequence[GraphProto]]
def _input_from_onnx_input(input): # type: (ValueInfoProto) -> EdgeInfo
name = input.name
type = input.type.tensor_type.elem_type
shape = tuple([d.dim_value for d in input.type.tensor_type.shape.dim])
return (name, type, shape)
def _convertAttributeProto(onnx_arg): # type: (AttributeProto) -> AttributeValue
"""
Convert an ONNX AttributeProto into an appropriate Python object
for the type.
NB: Tensor attribute gets returned as numpy array
"""
if onnx_arg.HasField('f'):
return onnx_arg.f
elif onnx_arg.HasField('i'):
return onnx_arg.i
elif onnx_arg.HasField('s'):
return onnx_arg.s
elif onnx_arg.HasField('t'):
return numpy_helper.to_array(onnx_arg.t)
elif len(onnx_arg.floats):
return list(onnx_arg.floats)
elif len(onnx_arg.ints):
return list(onnx_arg.ints)
elif len(onnx_arg.strings):
return list(onnx_arg.strings)
else:
raise ValueError("Unsupported ONNX attribute: {}".format(onnx_arg))
class Attributes(Dict[Text, Any]):
@staticmethod
def from_onnx(args): # type: (Iterable[AttributeProto]) -> Attributes
d = Attributes()
for arg in args:
d[arg.name] = _convertAttributeProto(arg)
return d
class Node(object):
def __init__(self,
name, # type: Optional[Text]
op_type, # type: Text
attrs, # type: Dict[Text, AttributeValue]
inputs, # type: List[Text]
outputs, # type: List[Text]
):
# type: (...) -> None
self.name = name
self.op_type = op_type
self.attrs = attrs
self.inputs = inputs
self.outputs = outputs
self.input_tensors = {} # type: Dict[Text, np._ArrayLike[Any]]
self.parents = [] # type: List[Node]
self.children = [] # type: List[Node]
self.metadata = {} # type: Dict[Any, Any]
def add_parent(self, parent_node): # type: (Node) -> None
assert parent_node not in self.parents
self.parents.append(parent_node)
if self not in parent_node.children:
parent_node.children.append(self)
def add_child(self, child_node): # type: (Node) -> None
assert child_node not in self.children
self.children.append(child_node)
if self not in child_node.parents:
child_node.parents.append(self)
def get_only_parent(self): # type: () -> Node
if len(self.parents) != 1:
raise ValueError('Node ({}) expected to have 1 parent. Found {}.'
.format(self, len(self.parents)))
return self.parents[0]
@staticmethod
def from_onnx(node): # type: (NodeProto) -> Node
attrs = Attributes.from_onnx(node.attribute)
name = Text(node.name)
if len(name) == 0:
name = "_".join(node.output)
return Node(
name, node.op_type, attrs, list(node.input), list(node.output)
)
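# Illustrative sketch (added for exposition; not part of the original module).
# Node.from_onnx consumes standard NodeProto messages, e.g. those built with
# onnx.helper.make_node (standard ONNX helper API); an unnamed node falls back
# to the joined output names:
def _node_from_onnx_demo():
    from onnx import helper
    proto = helper.make_node('Relu', inputs=['x'], outputs=['y'])
    node = Node.from_onnx(proto)
    assert node.op_type == 'Relu' and node.name == 'y'
    return node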
class Graph(object):
def __init__(self,
nodes, # type: List[Node]
inputs, # type: List[EdgeInfo]
outputs, # type: List[EdgeInfo]
shape_dict, # type: Dict[Text,Tuple[int,...]]
):
# type: (...) -> None
self.nodes = nodes
self.inputs = inputs
self.outputs = outputs
self.shape_dict = shape_dict # data blob name to its shape
# data blob name to the list of op types it feeds into
self.blob_to_op_type = {} # type: Dict[Text, List[Text]]
# data blob name to the op_type that generates it
self.blob_from_op_type = {} # type: Dict[Text, Text]
for node_ in nodes:
for input_ in node_.inputs:
if input_ in self.blob_to_op_type:
self.blob_to_op_type[input_].append(node_.op_type)
else:
self.blob_to_op_type[input_] = [node_.op_type]
for output_ in node_.outputs:
if output_ in self.blob_from_op_type:
raise ValueError("Data blob: %s, is generated by more than 1 op" %(output_))
self.blob_from_op_type[output_] = node_.op_type
def transformed(self, transformers): # type: (Iterable[Transformer]) -> Graph
graph = self
for transformer in transformers:
graph = transformer(graph)
return graph
def has_edge_name(self, name): # type: (Text) -> bool
'''
Check if name is already used for graph inputs/outputs or for nodes
inputs/outputs
'''
names = set()
for input in self.inputs:
names.add(input[0])
for output in self.outputs:
names.add(output[0])
for node in self.nodes:
names.update(node.inputs)
names.update(node.outputs)
return name in names
def get_unique_edge_name(self, name): # type: (Text) -> Text
n_ = name
i = 0
while self.has_edge_name(n_):
n_ = "{}_{}".format(name, i)
i += 1
return n_
@staticmethod
def from_onnx(graph): # type: (GraphProto) -> Graph
input_tensors = {
t.name: numpy_helper.to_array(t) for t in graph.initializer
}
nodes_ = []
nodes_by_input = {} # type: Dict[Text, List[Node]]
nodes_by_output = {}
for node in graph.node:
node_ = Node.from_onnx(node)
for input_ in node_.inputs:
if input_ in input_tensors:
node_.input_tensors[input_] = input_tensors[input_]
else:
if input_ in nodes_by_input:
input_nodes = nodes_by_input[input_]
else:
input_nodes = []
nodes_by_input[input_] = input_nodes
input_nodes.append(node_)
for output_ in node_.outputs:
nodes_by_output[output_] = node_
nodes_.append(node_)
inputs = []
for i in graph.input:
if i.name not in input_tensors:
inputs.append(_input_from_onnx_input(i))
outputs = []
for o in graph.output:
outputs.append(_input_from_onnx_input(o))
for node_ in nodes_:
for input_ in node_.inputs:
if input_ in nodes_by_output:
node_.parents.append(nodes_by_output[input_])
for output_ in node_.outputs:
if output_ in nodes_by_input:
node_.children.extend(nodes_by_input[output_])
# Dictionary to hold the "value_info" field from ONNX graph
shape_dict = {} # type: Dict[Text,Tuple[int,...]]
def extract_value_info(shape_dict, # type: Dict[Text,Tuple[int,...]]
                               value_info, # type: ValueInfoProto
):
# type: (...) -> None
shape_dict[value_info.name] = tuple([int(dim.dim_value) for dim in value_info.type.tensor_type.shape.dim])
for value_info in graph.value_info:
extract_value_info(shape_dict, value_info)
for value_info in graph.input:
extract_value_info(shape_dict, value_info)
for value_info in graph.output:
extract_value_info(shape_dict, value_info)
return Graph(nodes_, inputs, outputs, shape_dict)
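# Illustrative sketch (added for exposition; not part of the original module).
# Typical entry point, assuming a valid ONNX file on disk (the path below is
# hypothetical). onnx.load is the standard API for reading a ModelProto.
def _load_graph_demo(path='model.onnx'):
    import onnx
    model = onnx.load(path)
    graph = Graph.from_onnx(model.graph)
    print(len(graph.nodes), 'nodes;',
          [i[0] for i in graph.inputs], '->', [o[0] for o in graph.outputs])
    return graph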
| insightface/tools/onnx2caffe/onnx2caffe/_graph.py/0 | {
"file_path": "insightface/tools/onnx2caffe/onnx2caffe/_graph.py",
"repo_id": "insightface",
"token_count": 3952
} | 136 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="RemoteRepositoriesConfiguration">
<remote-repository>
<option name="id" value="aliyunmaven" />
<option name="name" value="阿里云公共仓库" />
<option name="url" value="https://maven.aliyun.com/repository/public" />
</remote-repository>
<remote-repository>
<option name="id" value="central" />
<option name="name" value="Central Repository" />
<option name="url" value="https://repo.maven.apache.org/maven2" />
</remote-repository>
<remote-repository>
<option name="id" value="sonatype-oss-snapshots" />
<option name="name" value="Sonatype OSS Snapshots Repository" />
<option name="url" value="https://oss.sonatype.org/content/repositories/snapshots" />
</remote-repository>
<remote-repository>
<option name="id" value="central" />
<option name="name" value="Maven Central repository" />
<option name="url" value="https://repo1.maven.org/maven2" />
</remote-repository>
<remote-repository>
<option name="id" value="jboss.community" />
<option name="name" value="JBoss Community repository" />
<option name="url" value="https://repository.jboss.org/nexus/content/repositories/public/" />
</remote-repository>
</component>
</project> | mybatis-native-demo/.idea/jarRepositories.xml/0 | {
"file_path": "mybatis-native-demo/.idea/jarRepositories.xml",
"repo_id": "mybatis-native-demo",
"token_count": 531
} | 137 |
package com.example.nativedemo;
import com.baomidou.mybatisplus.annotation.DbType;
import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
 * @author nieqiurong 2023-08-03 21:12:08
*/
@Configuration
//TODO If you use this approach, be sure to specify sqlSessionTemplateRef or sqlSessionFactoryRef: https://github.com/mybatis/spring-boot-starter/wiki/Quick-Start-for-building-native-image#how-to-use-mapperscan
//@MapperScan(basePackages = "com.example.nativedemo", sqlSessionTemplateRef = "sqlSessionTemplate")
public class MybatisPlusConfig {
@Bean
public MybatisPlusInterceptor mybatisPlusInterceptor() {
MybatisPlusInterceptor plusInterceptor = new MybatisPlusInterceptor();
plusInterceptor.addInnerInterceptor(new PaginationInnerInterceptor(DbType.H2));
        //TODO In 3.5.3.1 and earlier, registering many inner interceptors may throw an exception; fixed in the latest version. https://github.com/baomidou/mybatis-plus/issues/5532
// plusInterceptor.addInnerInterceptor(new BlockAttackInnerInterceptor());
// plusInterceptor.addInnerInterceptor(new IllegalSQLInnerInterceptor());
// plusInterceptor.addInnerInterceptor(new OptimisticLockerInnerInterceptor());
return plusInterceptor;
}
}
| mybatis-native-demo/src/main/java/com/example/nativedemo/MybatisPlusConfig.java/0 | {
"file_path": "mybatis-native-demo/src/main/java/com/example/nativedemo/MybatisPlusConfig.java",
"repo_id": "mybatis-native-demo",
"token_count": 553
} | 138 |
[
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.CallerDataConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.ClassOfCallerConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.ContextNameConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.DateConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.ExtendedThrowableProxyConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.FileOfCallerConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.LevelConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.LineOfCallerConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.LineSeparatorConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.LocalSequenceNumberConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.LoggerConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.MDCConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.MarkerConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.MessageConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.MethodOfCallerConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.NopThrowableInformationConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.PrefixCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.PropertyConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.RelativeTimeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.RootCauseFirstThrowableProxyConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.ThreadConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.ThrowableProxyConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.classic.pattern.color.HighlightingCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.IdentityCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.ReplacingCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BlackCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BlueCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BoldBlueCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BoldCyanCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BoldGreenCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BoldMagentaCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BoldRedCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BoldWhiteCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.BoldYellowCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.CyanCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.GrayCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.GreenCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.MagentaCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.RedCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.WhiteCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "ch.qos.logback.core.pattern.color.YellowCompositeConverter",
"allPublicConstructors": true
},
{
"condition": {
"typeReachable": "ch.qos.logback.classic.LoggerContext"
},
"name": "org.slf4j.impl.StaticLoggerBinder"
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/ch.qos.logback/logback-classic/1.2.11/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/ch.qos.logback/logback-classic/1.2.11/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 3550
} | 139 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.h2database/h2/2.1.210/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.h2database/h2/2.1.210/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 140 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.sun.mail/jakarta.mail/2.0.1/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.sun.mail/jakarta.mail/2.0.1/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 141 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.jsonwebtoken/jjwt-gson/0.11.5/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.jsonwebtoken/jjwt-gson/0.11.5/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 142 |
[
{
"latest": true,
"override": true,
"metadata-version": "4.1.80.Final",
"module": "io.netty:netty-codec-http",
"tested-versions": [
"4.1.80.Final"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.netty/netty-codec-http/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.netty/netty-codec-http/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 98
} | 143 |
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-logging/1.19.0/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-logging/1.19.0/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 144 |
[
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[B"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[C"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[D"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[F"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[I"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[J"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[Ljava.lang.String;"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[S"
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "[Z"
},
{
"condition": {
"typeReachable": "io.undertow.UndertowLogger"
},
"name": "io.undertow.UndertowLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "io.undertow.UndertowMessages"
},
"name": "io.undertow.UndertowMessages_$bundle",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.client.UndertowClientMessages"
},
"name": "io.undertow.client.UndertowClientMessages_$bundle",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.server.protocol.http.HttpRequestParser"
},
"name": "io.undertow.server.protocol.http.HttpRequestParser$$generated",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.xnio.OptionMap"
]
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.UndertowServletLogger"
},
"name": "io.undertow.servlet.UndertowServletLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.UndertowServletMessages"
},
"name": "io.undertow.servlet.UndertowServletMessages_$bundle",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.api.ServletInfo"
},
"name": "io.undertow.servlet.handlers.DefaultServlet",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.core.ManagedServlet$DefaultInstanceStrategy"
},
"name": "io.undertow.servlet.handlers.DefaultServlet",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.spec.ServletPrintWriterDelegate"
},
"name": "io.undertow.servlet.spec.ServletPrintWriterDelegate",
"unsafeAllocated": true
},
{
"condition": {
"typeReachable": "io.undertow.util.ConcurrentDirectDeque"
},
"name": "io.undertow.util.FastConcurrentDirectDeque",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.util.FastConcurrentDirectDeque"
},
"name": "io.undertow.util.FastConcurrentDirectDeque",
"fields": [
{
"name": "head"
},
{
"name": "tail"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.util.FastConcurrentDirectDeque$Node"
},
"name": "io.undertow.util.FastConcurrentDirectDeque$Node",
"fields": [
{
"name": "item"
},
{
"name": "next"
},
{
"name": "prev"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.server.protocol.http.HttpRequestParser"
},
"name": "io.undertow.util.Headers",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "io.undertow.util.Headers$1"
},
"name": "io.undertow.util.Headers",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "io.undertow.util.HttpString"
},
"name": "io.undertow.util.HttpString",
"fields": [
{
"name": "hashCode"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.server.protocol.http.HttpRequestParser"
},
"name": "io.undertow.util.Methods",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "io.undertow.server.protocol.http.HttpRequestParser"
},
"name": "io.undertow.util.Protocols",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "io.undertow.websockets.core.WebSocketLogger"
},
"name": "io.undertow.websockets.core.WebSocketLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "io.undertow.websockets.core.WebSocketMessages"
},
"name": "io.undertow.websockets.core.WebSocketMessages_$bundle",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.api.ListenerInfo"
},
"name": "io.undertow.websockets.jsr.Bootstrap$WebSocketListener",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.core.ApplicationListeners"
},
"name": "io.undertow.websockets.jsr.Bootstrap$WebSocketListener",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.core.ManagedFilter"
},
"name": "io.undertow.websockets.jsr.JsrWebSocketFilter",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.spec.ServletContextImpl"
},
"name": "io.undertow.websockets.jsr.JsrWebSocketFilter",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.core.ApplicationListeners"
},
"name": "io.undertow.websockets.jsr.JsrWebSocketFilter$LogoutListener",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.spec.ServletContextImpl"
},
"name": "io.undertow.websockets.jsr.JsrWebSocketFilter$LogoutListener",
"queriedMethods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.websockets.jsr.JsrWebSocketLogger"
},
"name": "io.undertow.websockets.jsr.JsrWebSocketLogger_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "io.undertow.websockets.jsr.JsrWebSocketMessages"
},
"name": "io.undertow.websockets.jsr.JsrWebSocketMessages_$bundle",
"fields": [
{
"name": "INSTANCE"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "jdk.management.jfr.ConfigurationInfo",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "jdk.management.jfr.EventTypeInfo",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "jdk.management.jfr.FlightRecorderMXBean",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "jdk.management.jfr.FlightRecorderMXBeanImpl",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "jdk.management.jfr.RecordingInfo",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "jdk.management.jfr.SettingDescriptorInfo",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.jboss.threads.EnhancedQueueExecutor",
"fields": [
{
"name": "activeCount"
},
{
"name": "peakQueueSize"
},
{
"name": "peakThreadCount"
},
{
"name": "queueSize"
},
{
"name": "sequence"
},
{
"name": "terminationWaiters"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.jboss.threads.EnhancedQueueExecutor$QNode",
"fields": [
{
"name": "next"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.jboss.threads.EnhancedQueueExecutorBase1",
"fields": [
{
"name": "tail"
},
{
"name": "tailLock"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.jboss.threads.EnhancedQueueExecutorBase3",
"fields": [
{
"name": "head"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.jboss.threads.EnhancedQueueExecutorBase5",
"fields": [
{
"name": "threadStatus"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.jboss.threads.Messages_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.xnio._private.Messages_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.xnio.management.XnioProviderMXBean",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.xnio.management.XnioServerMXBean",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.xnio.management.XnioWorkerMXBean",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.xnio.nio.Log_$logger",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.jboss.logging.Logger"
]
}
]
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.xnio.nio.NioTcpServer$1",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.xnio.nio.NioXnio$3",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "org.xnio.nio.NioXnioWorker$NioWorkerMetrics",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "io.undertow.Undertow"
},
"name": "sun.misc.Unsafe",
"fields": [
{
"name": "theUnsafe"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.server.DirectByteBufferDeallocator"
},
"name": "sun.misc.Unsafe",
"fields": [
{
"name": "theUnsafe"
}
],
"methods": [
{
"name": "invokeCleaner",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.servlet.spec.ServletPrintWriterDelegate"
},
"name": "sun.misc.Unsafe",
"fields": [
{
"name": "theUnsafe"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.util.FastConcurrentDirectDeque"
},
"name": "sun.misc.Unsafe",
"fields": [
{
"name": "theUnsafe"
}
]
},
{
"condition": {
"typeReachable": "io.undertow.server.session.SecureRandomSessionIdGenerator"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.websockets.client.WebSocket13ClientHandshake"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.websockets.client.WebSocket13ClientHandshake"
},
"name": "sun.security.provider.SHA",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "io.undertow.websockets.core.protocol.version07.Hybi07Handshake"
},
"name": "sun.security.provider.SHA",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.undertow/undertow-core/2.2.19.Final/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.undertow/undertow-core/2.2.19.Final/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 6905
} | 145 |
[
{
"latest": true,
"metadata-version": "11.0.12",
"module": "org.eclipse.jetty:jetty-server",
"tested-versions": [
"11.0.12"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jetty/jetty-server/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jetty/jetty-server/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 84
} | 146 |
[
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[B"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "[B",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[B"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[C"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[C"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[D"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[D"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[F"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[F"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[I"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[I"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[J"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[J"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[Ljava.lang.String;"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[Ljava.lang.String;"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[S"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[S"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "[Z"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "[Z"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "com.sun.org.apache.xalan.internal.xsltc.trax.TransformerFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "com.sun.org.apache.xalan.internal.xsltc.trax.TransformerFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "com.sun.org.apache.xerces.internal.impl.dv.xs.ExtendedSchemaDVFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "com.sun.org.apache.xerces.internal.impl.dv.xs.SchemaDVFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "com.sun.org.apache.xerces.internal.impl.dv.xs.SchemaDVFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlUtil"
},
"name": "com.sun.org.apache.xerces.internal.impl.dv.xs.SchemaDVFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "com.sun.org.apache.xerces.internal.impl.dv.xs.SchemaDVFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.AgentLoader"
},
"name": "com.sun.tools.attach.VirtualMachine",
"methods": [
{
"name": "attach",
"parameterTypes": [
"java.lang.String"
]
},
{
"name": "detach",
"parameterTypes": []
},
{
"name": "loadAgent",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.SizedResourcePoolImpl"
},
"name": "java.io.FilePermission"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Boolean",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Boolean",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Byte",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Byte",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Character",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Character",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Deprecated",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Deprecated",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Double",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Double",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Float",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Float",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Integer",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Integer",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Long",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Long",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "java.lang.Long"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "java.lang.Long"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "java.lang.Number"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.ContextManager"
},
"name": "java.lang.Object",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.extractor.ObjectContextExtractor"
},
"name": "java.lang.Object",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.ObjectGraphWalker"
},
"name": "java.lang.Object",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "java.lang.Object",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1$1"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.SizedResourcePoolImpl"
},
"name": "java.lang.RuntimePermission"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Short",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Short",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.StackTraceElement",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.StackTraceElement",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.String"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "java.lang.String",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.String"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "java.lang.String"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.concurrent.ThreadLocalRandomUtil"
},
"name": "java.lang.Thread",
"fields": [
{
"name": "threadLocalRandomProbe"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.lang.Void",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Void",
"fields": [
{
"name": "TYPE"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "java.lang.annotation.Annotation",
"methods": [
{
"name": "annotationType",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "java.lang.annotation.Annotation",
"methods": [
{
"name": "annotationType",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "java.lang.annotation.Annotation",
"methods": [
{
"name": "annotationType",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.math.BigDecimal"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.math.BigDecimal"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.math.BigInteger"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.math.BigInteger"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.SizedResourcePoolImpl"
},
"name": "java.net.NetPermission"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.SizedResourcePoolImpl"
},
"name": "java.net.SocketPermission"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.SizedResourcePoolImpl"
},
"name": "java.net.URLPermission",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.String",
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.AgentLoader"
},
"name": "java.nio.ByteBuffer"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.SizedResourcePoolImpl"
},
"name": "java.security.AllPermission"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.utilities.io.Files"
},
"name": "java.security.SecureRandomParameters"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.utilities.io.FilesSupport"
},
"name": "java.security.SecureRandomParameters"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.SizedResourcePoolImpl"
},
"name": "java.security.SecurityPermission"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "java.util.ArrayList",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "java.util.ArrayList",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.util.Date"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.util.Date"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.FlyweightType"
},
"name": "java.util.Locale",
"allPublicFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.SizedResourcePoolImpl"
},
"name": "java.util.PropertyPermission"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.util.PropertyPermission",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.String",
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.util.PropertyPermission",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.String",
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.util.logging.LogManager",
"methods": [
{
"name": "getLoggingMXBean",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.util.logging.LogManager",
"methods": [
{
"name": "getLoggingMXBean",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.util.logging.LoggingMXBean",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.util.logging.LoggingMXBean",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.Ehcache"
},
"name": "org.ehcache.core.Ehcache",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheBase"
},
"name": "org.ehcache.core.Ehcache",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.core.Ehcache",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.PersistentUserManagedEhcache"
},
"name": "org.ehcache.core.Ehcache",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.core.Ehcache",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheBase"
},
"name": "org.ehcache.core.EhcacheBase",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$1",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.StatusTransitioner"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultCacheStatistics"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultTierStatistics"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils$1"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$1"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$1",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$2",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.StatusTransitioner"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultCacheStatistics"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultTierStatistics"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils$2"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$2"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$2",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$3"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$3"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils$3"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$3",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$3"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$3"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.core.internal.statistics.StatsUtils$3"
},
{
"condition": {
"typeReachable": "org.ehcache.core.util.ClassLoading$ChainedClassLoader"
},
"name": "org.ehcache.core.spi.service.StatisticsService"
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.core.spi.store.AbstractValueHolder",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.spi.copy.DefaultCopyProvider"
},
"name": "org.ehcache.impl.copy.IdentityCopier",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.copy.IdentityCopier",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.copy.ReadWriteCopier",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap"
},
"name": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap",
"fields": [
{
"name": "baseCount"
},
{
"name": "cellsBusy"
},
{
"name": "sizeCtl"
},
{
"name": "transferIndex"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap"
},
"name": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap$CounterCell",
"fields": [
{
"name": "value"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap$Node",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap$TreeBin"
},
"name": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap$TreeBin",
"fields": [
{
"name": "lockState"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap$TreeBin",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.internal.concurrent.ConcurrentHashMap$TreeNode",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.impl.internal.resilience.RobustResilienceStrategy",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.spi.resilience.DefaultResilienceStrategyProvider"
},
"name": "org.ehcache.impl.internal.resilience.RobustResilienceStrategy"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.spi.resilience.DefaultResilienceStrategyProvider$ComponentProvider"
},
"name": "org.ehcache.impl.internal.resilience.RobustResilienceStrategy",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.ehcache.spi.resilience.RecoveryStore"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.impl.internal.resilience.RobustResilienceStrategy",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.disk.OffHeapDiskStore$Provider"
},
"name": "org.ehcache.impl.internal.store.disk.OffHeapDiskStore",
"allDeclaredFields": true,
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.Ehcache"
},
"name": "org.ehcache.impl.internal.store.heap.OnHeapStore",
"allDeclaredFields": true,
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.impl.internal.store.heap.OnHeapStore",
"allDeclaredFields": true,
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.heap.OnHeapStore"
},
"name": "org.ehcache.impl.internal.store.heap.OnHeapStore",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.heap.OnHeapStore$Provider"
},
"name": "org.ehcache.impl.internal.store.heap.OnHeapStore",
"allDeclaredFields": true,
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.impl.internal.store.heap.OnHeapStore",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.internal.store.heap.holders.BaseOnHeapKey",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.internal.store.heap.holders.CopiedOnHeapKey",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.internal.store.heap.holders.CopiedOnHeapValueHolder",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.SizeOf"
},
"name": "org.ehcache.impl.internal.store.heap.holders.OnHeapValueHolder",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.disk.OffHeapDiskStore$Provider"
},
"name": "org.ehcache.impl.internal.store.offheap.AbstractOffHeapStore",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.offheap.OffHeapStore"
},
"name": "org.ehcache.impl.internal.store.offheap.AbstractOffHeapStore",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.offheap.OffHeapStore"
},
"name": "org.ehcache.impl.internal.store.offheap.OffHeapStore",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.offheap.OffHeapStore$Provider"
},
"name": "org.ehcache.impl.internal.store.offheap.OffHeapStore",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.tiering.CompoundCachingTier$Provider"
},
"name": "org.ehcache.impl.internal.store.tiering.CompoundCachingTier",
"allDeclaredFields": true,
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.PersistentUserManagedEhcache"
},
"name": "org.ehcache.impl.internal.store.tiering.TieredStore",
"allDeclaredFields": true,
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.impl.serialization.CompactJavaSerializer",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.ClassLoader"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.config.builders.UserManagedCacheBuilder"
},
"name": "org.ehcache.impl.serialization.LongSerializer",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.ClassLoader"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.impl.serialization.LongSerializer",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.ClassLoader"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.spi.serialization.DefaultSerializationProvider"
},
"name": "org.ehcache.impl.serialization.LongSerializer",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.ClassLoader"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.impl.config.serializer.DefaultSerializationProviderConfiguration"
},
"name": "org.ehcache.impl.serialization.PlainJavaSerializer",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.ClassLoader"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.spi.serialization.DefaultSerializationProvider"
},
"name": "org.ehcache.impl.serialization.StringSerializer",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.ClassLoader"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.impl.serialization.StringSerializer",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.lang.ClassLoader"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.core.Ehcache"
},
"name": "org.ehcache.impl.store.BaseStore",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.impl.store.BaseStore",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.jsr107.Eh107CacheMXBean",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.jsr107.Eh107CacheStatisticsMXBean",
"queryAllPublicConstructors": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.StatusTransitioner"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultCacheStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultTierStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$1"
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.StatusTransitioner"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultCacheStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultTierStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$2"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$3"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$4"
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.StatusTransitioner"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultCacheStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultTierStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$5"
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.StatusTransitioner"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultCacheStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultTierStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$6"
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.StatusTransitioner"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultCacheStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultTierStatistics"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.StatsUtils"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
"name": "org.ehcache.shadow.org.terracotta.context.query.Matchers$8"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.offheapstore.util.Validation"
},
"name": "org.ehcache.shadow.org.terracotta.offheapstore.util.ValidationTest"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.StatisticsManager"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.AbstractOperationStatistic",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.StatisticsManager"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.AbstractSourceStatistic",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.Ehcache"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheBase"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.PersistentUserManagedEhcache"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.disk.OffHeapDiskStore$Provider"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.heap.OnHeapStore"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.heap.OnHeapStore$Provider"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.offheap.OffHeapStore"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.offheap.OffHeapStore$Provider"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.StatisticBuilder$OperationStatisticBuilder"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.StatisticsManager"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.GeneralOperationStatistic",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic$1"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic$1"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$433/0x0000000800d91a90"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic$1"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore$BaseStoreProvider$$Lambda$436/0x0000000800d96e18"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic$1"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic$1"
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic$1"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.MappedOperationStatistic$1",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.EhcacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.core.internal.statistics.DefaultStatisticsService"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.disk.OffHeapDiskStore$Provider"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.heap.OnHeapStore"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.heap.OnHeapStore$Provider"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.offheap.OffHeapStore"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.store.offheap.OffHeapStore$Provider"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.impl.store.BaseStore"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic"
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic",
"queryAllPublicMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.statistics.StatisticsManager"
},
"name": "org.ehcache.shadow.org.terracotta.statistics.PassThroughStatistic",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.Adapter2",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.Adapter2",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.BaseCacheType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.BaseCacheType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.BaseCacheType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.BaseCacheType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.BaseCacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.BaseCacheType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheEntryType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheEntryType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CacheEntryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CacheEntryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CacheEntryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CacheEntryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$Batching",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CacheLoaderWriterType$WriteBehind$NonBatching",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheTemplateType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheTemplateType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CacheTemplateType",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CacheTemplateType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CacheTemplateType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CacheTemplateType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CacheType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CacheType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CacheType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CacheType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CacheType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ConfigType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ConfigType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ConfigType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ConfigType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ConfigType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ConfigType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CopierType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CopierType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CopierType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CopierType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CopierType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CopierType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.CopierType$Copier",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.CopierType$Copier",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.CopierType$Copier",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.CopierType$Copier",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.CopierType$Copier"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.CopierType$Copier",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.Disk",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.Disk",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.DiskStoreSettingsType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.EventFiringType",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.EventFiringType",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.EventOrderingType",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.EventOrderingType",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.EventType",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.EventType",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ExpiryType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ExpiryType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ExpiryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ExpiryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ExpiryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ExpiryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ExpiryType$None",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ExpiryType$None",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ExpiryType$None",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ExpiryType$None",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ExpiryType$None"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ExpiryType$None",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.Heap",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.ehcache.xml.model.ResourceTypeWithPropSubst"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.Heap",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.ehcache.xml.model.ResourceTypeWithPropSubst"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.Heap",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.ehcache.xml.model.ResourceTypeWithPropSubst"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ListenersType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ListenersType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ListenersType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ListenersType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ListenersType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ListenersType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ListenersType$Listener",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ListenersType$Listener",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ListenersType$Listener",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ListenersType$Listener",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ListenersType$Listener"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ListenersType$Listener",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.MemoryType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.MemoryType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.MemoryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.MemoryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.MemoryType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.MemoryType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.MemoryTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.MemoryUnit",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.MemoryUnit",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ObjectFactory",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ObjectFactory",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ObjectFactory"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ObjectFactory",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.Offheap",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.ehcache.xml.model.MemoryTypeWithPropSubst"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.Offheap",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.ehcache.xml.model.MemoryTypeWithPropSubst"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.PersistableMemoryTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.PersistenceType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.PersistenceType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.PersistenceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.PersistenceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.PersistenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.PersistenceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst",
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ResourceTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ResourceUnit",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ResourceUnit",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ResourcesType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ResourcesType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ResourcesType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ResourcesType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ResourcesType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ResourcesType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.SerializerType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.SerializerType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.SerializerType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.SerializerType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.SerializerType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.SerializerType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.SerializerType$Serializer",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ServiceType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ServiceType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ServiceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ServiceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ServiceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ServiceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.SizeofType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.SizeofType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.SizeofType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.SizeofType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.SizeofType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.SizeofType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.SizeofType$MaxObjectGraphSize",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ThreadPoolReferenceType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ThreadPoolsType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ThreadPoolsType",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ThreadPoolsType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ThreadPoolsType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ThreadPoolsType"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ThreadPoolsType",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.ThreadPoolsType$ThreadPool",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$615/0x0000000800e52c78"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser$$Lambda$618/0x0000000800e56040"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst",
"allDeclaredFields": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.XmlConfiguration"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8e260"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f148"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$836/0x0000000800f8f820"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$840/0x0000000800f3e880"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fadad0"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800faf688"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$849/0x0000000800fafc88"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$$Lambda$853/0x0000000800f96f50"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$SingleConfig"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst"
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$VariantConfig"
},
"name": "org.ehcache.xml.model.TimeTypeWithPropSubst",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ConfigurationParser"
},
"name": "org.ehcache.xml.model.TimeUnit",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.ResourceConfigurationParser"
},
"name": "org.ehcache.xml.model.TimeUnit",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.multi.model.Configurations",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1"
},
"name": "org.ehcache.xml.multi.model.Configurations",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1$1"
},
"name": "org.ehcache.xml.multi.model.Configurations",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.multi.model.Configurations$Configuration",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1"
},
"name": "org.ehcache.xml.multi.model.Configurations$Configuration",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1$1"
},
"name": "org.ehcache.xml.multi.model.Configurations$Configuration",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.multi.model.Configurations$Configuration$Variant",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1"
},
"name": "org.ehcache.xml.multi.model.Configurations$Configuration$Variant",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1$1"
},
"name": "org.ehcache.xml.multi.model.Configurations$Configuration$Variant",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration"
},
"name": "org.ehcache.xml.multi.model.ObjectFactory",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1"
},
"name": "org.ehcache.xml.multi.model.ObjectFactory",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.xml.multi.XmlMultiConfiguration$1$1"
},
"name": "org.ehcache.xml.multi.model.ObjectFactory",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.AgentLoader"
},
"name": "sun.jvmstat.perfdata.monitor.v2_0.PerfDataBuffer",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"java.nio.ByteBuffer",
"int"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.impl.internal.concurrent.ThreadLocalRandomUtil"
},
"name": "sun.misc.Unsafe",
"allDeclaredFields": true
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.UnsafeSizeOf"
},
"name": "sun.misc.Unsafe",
"fields": [
{
"name": "theUnsafe"
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.utilities.io.Files"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.utilities.io.FilesSupport"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.impl.persistence.FileUtils"
},
"name": "sun.security.provider.SHA",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.shadow.org.terracotta.utilities.io.FilesSupport"
},
"name": "sun.security.provider.SHA",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 74613
} | 147 |
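The reflect-config.json entries above register JAXB model classes (for example org.ehcache.xml.model.ServiceType) for reflective access in a GraalVM native image: whenever the guarding typeReachable class is reachable, the listed type's declared fields and its no-argument constructor stay available to reflection. A minimal sketch of the reflective pattern such an entry keeps working at run time; the class name comes from the entries above, everything else is illustrative demo code:

import java.lang.reflect.Field;

// Illustrative sketch only: shows the calls that an entry with
// "allDeclaredFields": true and a registered no-argument <init> enables in a
// native image; without the entry, this would fail at run time.
public class ReflectConfigSketch {
    public static void main(String[] args) throws Exception {
        Class<?> type = Class.forName("org.ehcache.xml.model.ServiceType");
        Object instance = type.getDeclaredConstructor().newInstance(); // needs the registered <init>
        for (Field field : type.getDeclaredFields()) {                 // needs "allDeclaredFields": true
            System.out.println(field.getName() + " on " + instance.getClass().getSimpleName());
        }
    }
}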
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.glassfish.jaxb/jaxb-runtime/3.0.2/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.glassfish.jaxb/jaxb-runtime/3.0.2/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 148 |
[
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": []
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.TypeContributorImpl"
},
"interfaces": []
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlAccessorType"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlAccessorType",
"org.glassfish.jaxb.core.v2.model.annotation.Locatable"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlAttribute"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlElement"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlElementDecl"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlElementRef"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlElementRefs"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlElements"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlElements",
"org.glassfish.jaxb.core.v2.model.annotation.Locatable"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlEnum"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlEnumValue"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlEnumValue",
"org.glassfish.jaxb.core.v2.model.annotation.Locatable"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlMixed"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlMixed",
"org.glassfish.jaxb.core.v2.model.annotation.Locatable"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlRootElement"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlSeeAlso"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlSeeAlso",
"org.glassfish.jaxb.core.v2.model.annotation.Locatable"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlType"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.XmlValue"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.adapters.XmlJavaTypeAdapter"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jakarta.xml.bind.annotation.adapters.XmlJavaTypeAdapter",
"org.glassfish.jaxb.core.v2.model.annotation.Locatable"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.boot.internal.AdditionalJaxbMappingProducerImpl$1"
},
"interfaces": [
"jdk.internal.ValueBased"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.RevisionInfoConfiguration$RevisionEntityResolver"
},
"interfaces": [
"org.hibernate.envers.Audited"
]
},
{
"condition": {
"typeReachable": "org.hibernate.envers.configuration.internal.metadata.reader.AuditedPropertiesReader"
},
"interfaces": [
"org.hibernate.envers.NotAudited"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate.orm/hibernate-envers/6.1.1.Final/proxy-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate.orm/hibernate-envers/6.1.1.Final/proxy-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 2554
} | 149 |
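Each proxy-config.json entry above lists an exact interface combination for which GraalVM must generate a dynamic-proxy class ahead of time, because java.lang.reflect.Proxy cannot define new classes at native-image run time. A hedged sketch of the call that one of the interface lists above corresponds to; the no-op handler is a placeholder for whatever handler JAXB actually installs:

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

// Illustrative sketch only: the interface name mirrors one entry above; in a
// native image this call succeeds only because the combination is registered.
public class ProxyConfigSketch {
    public static void main(String[] args) throws Exception {
        InvocationHandler handler = (proxy, method, methodArgs) -> null; // placeholder behavior
        Object annotationProxy = Proxy.newProxyInstance(
                ProxyConfigSketch.class.getClassLoader(),
                new Class<?>[] { Class.forName("jakarta.xml.bind.annotation.XmlElement") },
                handler);
        System.out.println(annotationProxy.getClass().getName());
    }
}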
[
{
"latest": true,
"metadata-version": "5.6.14.Final",
"module": "org.hibernate:hibernate-core",
"tested-versions": [
"5.6.14.Final"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate/hibernate-core/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate/hibernate-core/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 89
} | 150 |
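The module-level index.json above records which versions of org.hibernate:hibernate-core this reachability-metadata directory was verified against; consumers of the repository are expected to pick the metadata whose tested-versions entry matches the dependency version on the classpath, with the latest flag marking the entry to fall back to for newer versions.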
[
{
"condition": {
"typeReachable": "org.jline.utils.Signals"
},
"interfaces": [
"sun.misc.SignalHandler"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jline/jline/3.21.0/proxy-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jline/jline/3.21.0/proxy-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 74
} | 151 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.mariadb.jdbc/mariadb-java-client/3.0.6/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.mariadb.jdbc/mariadb-java-client/3.0.6/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 152 |
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.postgresql/postgresql/42.3.4/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.postgresql/postgresql/42.3.4/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 153 |
[
"reflect-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf/thymeleaf-spring6/3.1.0.M2/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf/thymeleaf-spring6/3.1.0.M2/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 13
} | 154 |
com/example/nativedemo/NativeDemoApplicationTests.class
| mybatis-native-demo/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/createdFiles.lst/0 | {
"file_path": "mybatis-native-demo/target/maven-status/maven-compiler-plugin/testCompile/default-testCompile/createdFiles.lst",
"repo_id": "mybatis-native-demo",
"token_count": 17
} | 155 |
package com.example.nativedemo;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link MyMetaObjectHandler}.
*/
public class MyMetaObjectHandler__BeanDefinitions {
/**
* Get the bean definition for 'myMetaObjectHandler'.
*/
public static BeanDefinition getMyMetaObjectHandlerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(MyMetaObjectHandler.class);
beanDefinition.setInstanceSupplier(MyMetaObjectHandler::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/com/example/nativedemo/MyMetaObjectHandler__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/com/example/nativedemo/MyMetaObjectHandler__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 173
} | 156 |
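Generated *__BeanDefinitions classes like the one above replace reflection-driven bean creation: each static factory returns a RootBeanDefinition whose instance supplier is a plain constructor or method reference. A sketch of how such a definition could be registered by hand; in a real AOT-processed application the generated ApplicationContextInitializer performs this registration, so the setup below is illustrative only:

import com.example.nativedemo.MyMetaObjectHandler__BeanDefinitions;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;

// Illustrative sketch only: makes the contract of the generated static
// factory method concrete by registering its definition in a bare factory.
public class BeanDefinitionRegistrationSketch {
    public static void main(String[] args) {
        DefaultListableBeanFactory beanFactory = new DefaultListableBeanFactory();
        beanFactory.registerBeanDefinition("myMetaObjectHandler",
                MyMetaObjectHandler__BeanDefinitions.getMyMetaObjectHandlerBeanDefinition());
        System.out.println(beanFactory.getBean("myMetaObjectHandler")); // built via the instance supplier, no reflection
    }
}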
package org.springframework.boot.autoconfigure.aop;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.BeanFactoryPostProcessor;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link AopAutoConfiguration}.
*/
public class AopAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'aopAutoConfiguration'.
*/
public static BeanDefinition getAopAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(AopAutoConfiguration.class);
beanDefinition.setInstanceSupplier(AopAutoConfiguration::new);
return beanDefinition;
}
/**
* Bean definitions for {@link AopAutoConfiguration.ClassProxyingConfiguration}.
*/
public static class ClassProxyingConfiguration {
/**
* Get the bean definition for 'classProxyingConfiguration'.
*/
public static BeanDefinition getClassProxyingConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(AopAutoConfiguration.ClassProxyingConfiguration.class);
beanDefinition.setInstanceSupplier(AopAutoConfiguration.ClassProxyingConfiguration::new);
return beanDefinition;
}
/**
* Get the bean definition for 'forceAutoProxyCreatorToUseClassProxying'.
*/
public static BeanDefinition getForceAutoProxyCreatorToUseClassProxyingBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(AopAutoConfiguration.ClassProxyingConfiguration.class);
beanDefinition.setTargetType(BeanFactoryPostProcessor.class);
beanDefinition.setInstanceSupplier(BeanInstanceSupplier.<BeanFactoryPostProcessor>forFactoryMethod(AopAutoConfiguration.ClassProxyingConfiguration.class, "forceAutoProxyCreatorToUseClassProxying").withGenerator((registeredBean) -> AopAutoConfiguration.ClassProxyingConfiguration.forceAutoProxyCreatorToUseClassProxying()));
return beanDefinition;
}
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/aop/AopAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/aop/AopAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 604
} | 157 |
package org.springframework.boot.autoconfigure.jdbc;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link DataSourceProperties}.
*/
public class DataSourceProperties__BeanDefinitions {
/**
* Get the bean definition for 'dataSourceProperties'.
*/
public static BeanDefinition getDataSourcePropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceProperties.class);
beanDefinition.setInstanceSupplier(DataSourceProperties::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceProperties__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceProperties__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 180
} | 158 |
package org.springframework.boot.autoconfigure.task;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link TaskSchedulingAutoConfiguration}.
*/
public class TaskSchedulingAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'taskSchedulingAutoConfiguration'.
*/
public static BeanDefinition getTaskSchedulingAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulingAutoConfiguration.class);
beanDefinition.setInstanceSupplier(TaskSchedulingAutoConfiguration::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/task/TaskSchedulingAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/task/TaskSchedulingAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 190
} | 159 |
package org.springframework.cloud.autoconfigure;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.cloud.context.refresh.ConfigDataContextRefresher;
import org.springframework.cloud.context.refresh.ContextRefresher;
import org.springframework.cloud.context.refresh.RefreshScopeLifecycle;
import org.springframework.cloud.context.scope.refresh.RefreshScope;
import org.springframework.cloud.endpoint.event.RefreshEventListener;
import org.springframework.cloud.logging.LoggingRebinder;
import org.springframework.context.ConfigurableApplicationContext;
/**
* Bean definitions for {@link RefreshAutoConfiguration}.
*/
public class RefreshAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'refreshAutoConfiguration'.
*/
public static BeanDefinition getRefreshAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(RefreshAutoConfiguration.class);
beanDefinition.setInstanceSupplier(RefreshAutoConfiguration::new);
return beanDefinition;
}
/**
* Get the bean definition for 'refreshScope'.
*/
public static BeanDefinition getRefreshScopeBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(RefreshAutoConfiguration.class);
beanDefinition.setTargetType(RefreshScope.class);
beanDefinition.setInstanceSupplier(BeanInstanceSupplier.<RefreshScope>forFactoryMethod(RefreshAutoConfiguration.class, "refreshScope").withGenerator((registeredBean) -> RefreshAutoConfiguration.refreshScope()));
return beanDefinition;
}
/**
* Get the bean definition for 'loggingRebinder'.
*/
public static BeanDefinition getLoggingRebinderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(RefreshAutoConfiguration.class);
beanDefinition.setTargetType(LoggingRebinder.class);
beanDefinition.setInstanceSupplier(BeanInstanceSupplier.<LoggingRebinder>forFactoryMethod(RefreshAutoConfiguration.class, "loggingRebinder").withGenerator((registeredBean) -> RefreshAutoConfiguration.loggingRebinder()));
return beanDefinition;
}
/**
* Get the bean instance supplier for 'configDataContextRefresher'.
*/
private static BeanInstanceSupplier<ConfigDataContextRefresher> getConfigDataContextRefresherInstanceSupplier(
) {
return BeanInstanceSupplier.<ConfigDataContextRefresher>forFactoryMethod(RefreshAutoConfiguration.class, "configDataContextRefresher", ConfigurableApplicationContext.class, RefreshScope.class, RefreshAutoConfiguration.RefreshProperties.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(RefreshAutoConfiguration.class).configDataContextRefresher(args.get(0), args.get(1), args.get(2)));
}
/**
* Get the bean definition for 'configDataContextRefresher'.
*/
public static BeanDefinition getConfigDataContextRefresherBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ConfigDataContextRefresher.class);
beanDefinition.setInstanceSupplier(getConfigDataContextRefresherInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'refreshScopeLifecycle'.
*/
private static BeanInstanceSupplier<RefreshScopeLifecycle> getRefreshScopeLifecycleInstanceSupplier(
) {
return BeanInstanceSupplier.<RefreshScopeLifecycle>forFactoryMethod(RefreshAutoConfiguration.class, "refreshScopeLifecycle", ContextRefresher.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(RefreshAutoConfiguration.class).refreshScopeLifecycle(args.get(0)));
}
/**
* Get the bean definition for 'refreshScopeLifecycle'.
*/
public static BeanDefinition getRefreshScopeLifecycleBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(RefreshScopeLifecycle.class);
beanDefinition.setInstanceSupplier(getRefreshScopeLifecycleInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'refreshEventListener'.
*/
private static BeanInstanceSupplier<RefreshEventListener> getRefreshEventListenerInstanceSupplier(
) {
return BeanInstanceSupplier.<RefreshEventListener>forFactoryMethod(RefreshAutoConfiguration.class, "refreshEventListener", ContextRefresher.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(RefreshAutoConfiguration.class).refreshEventListener(args.get(0)));
}
/**
* Get the bean definition for 'refreshEventListener'.
*/
public static BeanDefinition getRefreshEventListenerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(RefreshEventListener.class);
beanDefinition.setInstanceSupplier(getRefreshEventListenerInstanceSupplier());
return beanDefinition;
}
/**
* Bean definitions for {@link RefreshAutoConfiguration.RefreshProperties}.
*/
public static class RefreshProperties {
/**
* Get the bean definition for 'refreshProperties'.
*/
public static BeanDefinition getRefreshPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(RefreshAutoConfiguration.RefreshProperties.class);
beanDefinition.setInstanceSupplier(RefreshAutoConfiguration.RefreshProperties::new);
return beanDefinition;
}
}
/**
* Bean definitions for {@link RefreshAutoConfiguration.RefreshScopeBeanDefinitionEnhancer}.
*/
public static class RefreshScopeBeanDefinitionEnhancer {
/**
* Get the bean definition for 'refreshScopeBeanDefinitionEnhancer'.
*/
public static BeanDefinition getRefreshScopeBeanDefinitionEnhancerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(RefreshAutoConfiguration.RefreshScopeBeanDefinitionEnhancer.class);
beanDefinition.setInstanceSupplier(RefreshAutoConfiguration.RefreshScopeBeanDefinitionEnhancer::new);
return beanDefinition;
}
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/autoconfigure/RefreshAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/autoconfigure/RefreshAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 1762
} | 160 |
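In the definitions above, BeanInstanceSupplier.forFactoryMethod(...) records the @Bean method's signature so the container can resolve its arguments, while withGenerator(...) supplies the reflection-free invocation. For a static, no-argument factory method the net effect is close to the following hand-written analog, a simplification that drops Spring's argument resolution and autowiring metadata; it assumes RefreshAutoConfiguration.refreshScope() is the public static factory seen in the generated code:

import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.cloud.autoconfigure.RefreshAutoConfiguration;
import org.springframework.cloud.context.scope.refresh.RefreshScope;

// Simplified analog of getRefreshScopeBeanDefinition() above; the real
// BeanInstanceSupplier form additionally resolves factory-method arguments
// (compare configDataContextRefresher) and keeps autowiring metadata.
public class InstanceSupplierSketch {
    public static BeanDefinition refreshScopeDefinition() {
        RootBeanDefinition definition = new RootBeanDefinition(RefreshScope.class);
        definition.setInstanceSupplier(RefreshAutoConfiguration::refreshScope);
        return definition;
    }
}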
package org.springframework.cloud.configuration;
import java.util.List;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link CompatibilityVerifierAutoConfiguration}.
*/
public class CompatibilityVerifierAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'compatibilityVerifierAutoConfiguration'.
*/
public static BeanDefinition getCompatibilityVerifierAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(CompatibilityVerifierAutoConfiguration.class);
beanDefinition.setInstanceSupplier(CompatibilityVerifierAutoConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'compositeCompatibilityVerifier'.
*/
private static BeanInstanceSupplier<CompositeCompatibilityVerifier> getCompositeCompatibilityVerifierInstanceSupplier(
) {
return BeanInstanceSupplier.<CompositeCompatibilityVerifier>forFactoryMethod(CompatibilityVerifierAutoConfiguration.class, "compositeCompatibilityVerifier", List.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(CompatibilityVerifierAutoConfiguration.class).compositeCompatibilityVerifier(args.get(0)));
}
/**
* Get the bean definition for 'compositeCompatibilityVerifier'.
*/
public static BeanDefinition getCompositeCompatibilityVerifierBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(CompositeCompatibilityVerifier.class);
beanDefinition.setInstanceSupplier(getCompositeCompatibilityVerifierInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'springBootVersionVerifier'.
*/
private static BeanInstanceSupplier<SpringBootVersionVerifier> getSpringBootVersionVerifierInstanceSupplier(
) {
return BeanInstanceSupplier.<SpringBootVersionVerifier>forFactoryMethod(CompatibilityVerifierAutoConfiguration.class, "springBootVersionVerifier", CompatibilityVerifierProperties.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(CompatibilityVerifierAutoConfiguration.class).springBootVersionVerifier(args.get(0)));
}
/**
* Get the bean definition for 'springBootVersionVerifier'.
*/
public static BeanDefinition getSpringBootVersionVerifierBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(SpringBootVersionVerifier.class);
beanDefinition.setInstanceSupplier(getSpringBootVersionVerifierInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'sleuthPresentVerifier'.
*/
private static BeanInstanceSupplier<SleuthPresentVerifier> getSleuthPresentVerifierInstanceSupplier(
) {
return BeanInstanceSupplier.<SleuthPresentVerifier>forFactoryMethod(CompatibilityVerifierAutoConfiguration.class, "sleuthPresentVerifier")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(CompatibilityVerifierAutoConfiguration.class).sleuthPresentVerifier());
}
/**
* Get the bean definition for 'sleuthPresentVerifier'.
*/
public static BeanDefinition getSleuthPresentVerifierBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(SleuthPresentVerifier.class);
beanDefinition.setInstanceSupplier(getSleuthPresentVerifierInstanceSupplier());
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/configuration/CompatibilityVerifierAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/configuration/CompatibilityVerifierAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 1002
} | 161 |
PREFER_HOST_MODE=hostname
MODE=standalone
SPRING_DATASOURCE_PLATFORM=mysql
NACOS_AUTH_IDENTITY_KEY=2222
NACOS_AUTH_IDENTITY_VALUE=2xxx
NACOS_AUTH_TOKEN=SecretKey012345678901234567890123456789012345678901234567890123456789
| nacos-docker/env/custom-application-config.env/0 | {
"file_path": "nacos-docker/env/custom-application-config.env",
"repo_id": "nacos-docker",
"token_count": 104
} | 162 |
version: "2"
services:
nacos:
image: nacos/nacos-server:${NACOS_VERSION}
container_name: nacos-standalone
environment:
- PREFER_HOST_MODE=hostname
- MODE=standalone
- NACOS_AUTH_IDENTITY_KEY=serverIdentity
- NACOS_AUTH_IDENTITY_VALUE=security
- NACOS_AUTH_TOKEN=SecretKey012345678901234567890123456789012345678901234567890123456789
volumes:
- ./standalone-logs/:/home/nacos/logs
ports:
- "8848:8848"
- "9848:9848"
prometheus:
container_name: prometheus
image: prom/prometheus:latest
volumes:
- ./prometheus/prometheus-standalone.yaml:/etc/prometheus/prometheus.yml
ports:
- "9090:9090"
depends_on:
- nacos
restart: on-failure
grafana:
container_name: grafana
image: grafana/grafana:latest
ports:
- "3000:3000"
restart: on-failure
| nacos-docker/example/standalone-derby.yaml/0 | {
"file_path": "nacos-docker/example/standalone-derby.yaml",
"repo_id": "nacos-docker",
"token_count": 410
} | 163 |
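This compose file wires a standalone Nacos server (Derby storage) to a Prometheus scraper and a Grafana instance. Assuming NACOS_VERSION is set in the surrounding environment (for example via a .env file next to the compose file), the stack can typically be started with docker compose -f standalone-derby.yaml up -d, after which Nacos listens on 8848, Prometheus on 9090, and Grafana on 3000.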
2024-06-06 14:18:26,075 INFO [dump] md5 changed, save to disk cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=83267b8356f558ebb72b0da2ebdde5f0,oldMd5=
2024-06-06 14:18:26,111 INFO [dump] md5 changed, update md5 and timestamp in jvm cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=83267b8356f558ebb72b0da2ebdde5f0,oldMd5=,lastModifiedTs=1716778181146
2024-06-06 14:18:26,113 INFO [dump-all-ok] remote.yml+DEFAULT_GROUP, 1716778181146, length=271,md5UTF8=83267b8356f558ebb72b0da2ebdde5f0
| nacos-docker/example/standalone-logs/config-dump.log.2024-06-06.0/0 | {
"file_path": "nacos-docker/example/standalone-logs/config-dump.log.2024-06-06.0",
"repo_id": "nacos-docker",
"token_count": 235
} | 164 |
package org.springframework.boot.autoconfigure.orm.jpa;
import java.lang.String;
import java.util.List;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.boot.orm.jpa.EntityManagerFactoryBuilder;
import org.springframework.orm.jpa.JpaVendorAdapter;
import org.springframework.orm.jpa.LocalContainerEntityManagerFactoryBean;
import org.springframework.orm.jpa.persistenceunit.PersistenceManagedTypes;
import org.springframework.orm.jpa.support.OpenEntityManagerInViewInterceptor;
import org.springframework.transaction.PlatformTransactionManager;
import org.springframework.web.servlet.config.annotation.WebMvcConfigurer;
/**
* Bean definitions for {@link JpaBaseConfiguration}.
*/
@Generated
public class JpaBaseConfiguration__BeanDefinitions {
/**
* Get the bean instance supplier for 'transactionManager'.
*/
private static BeanInstanceSupplier<PlatformTransactionManager> getTransactionManagerInstanceSupplier(
) {
return BeanInstanceSupplier.<PlatformTransactionManager>forFactoryMethod(JpaBaseConfiguration.class, "transactionManager", ObjectProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(JpaBaseConfiguration.class).transactionManager(args.get(0)));
}
/**
* Get the bean definition for 'transactionManager'.
*/
public static BeanDefinition getTransactionManagerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(PlatformTransactionManager.class);
beanDefinition.setInstanceSupplier(getTransactionManagerInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'jpaVendorAdapter'.
*/
private static BeanInstanceSupplier<JpaVendorAdapter> getJpaVendorAdapterInstanceSupplier() {
return BeanInstanceSupplier.<JpaVendorAdapter>forFactoryMethod(JpaBaseConfiguration.class, "jpaVendorAdapter")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(JpaBaseConfiguration.class).jpaVendorAdapter());
}
/**
* Get the bean definition for 'jpaVendorAdapter'.
*/
public static BeanDefinition getJpaVendorAdapterBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(JpaVendorAdapter.class);
beanDefinition.setInstanceSupplier(getJpaVendorAdapterInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'entityManagerFactoryBuilder'.
*/
private static BeanInstanceSupplier<EntityManagerFactoryBuilder> getEntityManagerFactoryBuilderInstanceSupplier(
) {
return BeanInstanceSupplier.<EntityManagerFactoryBuilder>forFactoryMethod(JpaBaseConfiguration.class, "entityManagerFactoryBuilder", JpaVendorAdapter.class, ObjectProvider.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(JpaBaseConfiguration.class).entityManagerFactoryBuilder(args.get(0), args.get(1), args.get(2)));
}
/**
* Get the bean definition for 'entityManagerFactoryBuilder'.
*/
public static BeanDefinition getEntityManagerFactoryBuilderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(EntityManagerFactoryBuilder.class);
beanDefinition.setInstanceSupplier(getEntityManagerFactoryBuilderInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'entityManagerFactory'.
*/
private static BeanInstanceSupplier<LocalContainerEntityManagerFactoryBean> getEntityManagerFactoryInstanceSupplier(
) {
return BeanInstanceSupplier.<LocalContainerEntityManagerFactoryBean>forFactoryMethod(JpaBaseConfiguration.class, "entityManagerFactory", EntityManagerFactoryBuilder.class, PersistenceManagedTypes.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(JpaBaseConfiguration.class).entityManagerFactory(args.get(0), args.get(1)));
}
/**
* Get the bean definition for 'entityManagerFactory'.
*/
public static BeanDefinition getEntityManagerFactoryBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(LocalContainerEntityManagerFactoryBean.class);
beanDefinition.setPrimary(true);
beanDefinition.setDependsOn("dataSourceScriptDatabaseInitializer");
beanDefinition.setInstanceSupplier(getEntityManagerFactoryInstanceSupplier());
return beanDefinition;
}
/**
* Bean definitions for {@link JpaBaseConfiguration.PersistenceManagedTypesConfiguration}.
*/
@Generated
public static class PersistenceManagedTypesConfiguration {
/**
* Get the bean definition for 'persistenceManagedTypesConfiguration'.
*/
public static BeanDefinition getPersistenceManagedTypesConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(JpaBaseConfiguration.PersistenceManagedTypesConfiguration.class);
beanDefinition.setInstanceSupplier(JpaBaseConfiguration.PersistenceManagedTypesConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance for 'persistenceManagedTypes'.
*/
private static PersistenceManagedTypes getPersistenceManagedTypesInstance() {
List<String> managedClassNames = List.of("org.example.pgvector.entity.DatabaseRecord");
List<String> managedPackages = List.of();
return PersistenceManagedTypes.of(managedClassNames, managedPackages);
}
/**
* Get the bean definition for 'persistenceManagedTypes'.
*/
public static BeanDefinition getPersistenceManagedTypesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(JpaBaseConfiguration.PersistenceManagedTypesConfiguration.class);
beanDefinition.setTargetType(PersistenceManagedTypes.class);
beanDefinition.setPrimary(true);
beanDefinition.setInstanceSupplier(PersistenceManagedTypesConfiguration::getPersistenceManagedTypesInstance);
return beanDefinition;
}
}
/**
* Bean definitions for {@link JpaBaseConfiguration.JpaWebConfiguration}.
*/
@Generated
public static class JpaWebConfiguration {
/**
* Get the bean instance supplier for 'org.springframework.boot.autoconfigure.orm.jpa.JpaBaseConfiguration$JpaWebConfiguration'.
*/
private static BeanInstanceSupplier<JpaBaseConfiguration.JpaWebConfiguration> getJpaWebConfigurationInstanceSupplier(
) {
return BeanInstanceSupplier.<JpaBaseConfiguration.JpaWebConfiguration>forConstructor(JpaProperties.class)
.withGenerator((registeredBean, args) -> new JpaBaseConfiguration.JpaWebConfiguration(args.get(0)));
}
/**
* Get the bean definition for 'jpaWebConfiguration'.
*/
public static BeanDefinition getJpaWebConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(JpaBaseConfiguration.JpaWebConfiguration.class);
beanDefinition.setInstanceSupplier(getJpaWebConfigurationInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'openEntityManagerInViewInterceptor'.
*/
private static BeanInstanceSupplier<OpenEntityManagerInViewInterceptor> getOpenEntityManagerInViewInterceptorInstanceSupplier(
) {
return BeanInstanceSupplier.<OpenEntityManagerInViewInterceptor>forFactoryMethod(JpaBaseConfiguration.JpaWebConfiguration.class, "openEntityManagerInViewInterceptor")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(JpaBaseConfiguration.JpaWebConfiguration.class).openEntityManagerInViewInterceptor());
}
/**
* Get the bean definition for 'openEntityManagerInViewInterceptor'.
*/
public static BeanDefinition getOpenEntityManagerInViewInterceptorBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(OpenEntityManagerInViewInterceptor.class);
beanDefinition.setInstanceSupplier(getOpenEntityManagerInViewInterceptorInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'openEntityManagerInViewInterceptorConfigurer'.
*/
private static BeanInstanceSupplier<WebMvcConfigurer> getOpenEntityManagerInViewInterceptorConfigurerInstanceSupplier(
) {
return BeanInstanceSupplier.<WebMvcConfigurer>forFactoryMethod(JpaBaseConfiguration.JpaWebConfiguration.class, "openEntityManagerInViewInterceptorConfigurer", OpenEntityManagerInViewInterceptor.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(JpaBaseConfiguration.JpaWebConfiguration.class).openEntityManagerInViewInterceptorConfigurer(args.get(0)));
}
/**
* Get the bean definition for 'openEntityManagerInViewInterceptorConfigurer'.
*/
public static BeanDefinition getOpenEntityManagerInViewInterceptorConfigurerBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(WebMvcConfigurer.class);
beanDefinition.setInstanceSupplier(getOpenEntityManagerInViewInterceptorConfigurerInstanceSupplier());
return beanDefinition;
}
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/JpaBaseConfiguration__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/JpaBaseConfiguration__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 2748
} | 165 |
package org.springframework.boot.autoconfigure.web.servlet;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link WebMvcProperties}.
*/
@Generated
public class WebMvcProperties__BeanDefinitions {
/**
* Get the bean definition for 'webMvcProperties'.
*/
public static BeanDefinition getWebMvcPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(WebMvcProperties.class);
beanDefinition.setInstanceSupplier(WebMvcProperties::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/web/servlet/WebMvcProperties__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/web/servlet/WebMvcProperties__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 207
} | 166 |
package org.springframework.data.jpa.repository.support;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link JpaEvaluationContextExtension}.
*/
@Generated
public class JpaEvaluationContextExtension__BeanDefinitions {
/**
* Get the bean instance supplier for 'org.springframework.data.jpa.repository.support.JpaEvaluationContextExtension'.
*/
private static BeanInstanceSupplier<JpaEvaluationContextExtension> getJpaEvaluationContextExtensionInstanceSupplier(
) {
return BeanInstanceSupplier.<JpaEvaluationContextExtension>forConstructor(char.class)
.withGenerator((registeredBean, args) -> new JpaEvaluationContextExtension(args.get(0)));
}
/**
* Get the bean definition for 'jpaEvaluationContextExtension'.
*/
public static BeanDefinition getJpaEvaluationContextExtensionBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(JpaEvaluationContextExtension.class);
beanDefinition.getConstructorArgumentValues().addIndexedArgumentValue(0, '\\');
beanDefinition.setInstanceSupplier(getJpaEvaluationContextExtensionInstanceSupplier());
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/data/jpa/repository/support/JpaEvaluationContextExtension__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/data/jpa/repository/support/JpaEvaluationContextExtension__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 427
} | 167 |
override=true
| pgvector/build/native-reachability-metadata/META-INF/native-image/io.netty/netty-buffer/4.1.107.Final/reachability-metadata.properties/0 | {
"file_path": "pgvector/build/native-reachability-metadata/META-INF/native-image/io.netty/netty-buffer/4.1.107.Final/reachability-metadata.properties",
"repo_id": "pgvector",
"token_count": 5
} | 168 |
org.example.pgvector.PgVectorApplicationKt | pgvector/build/resolvedMainClassName/0 | {
"file_path": "pgvector/build/resolvedMainClassName",
"repo_id": "pgvector",
"token_count": 12
} | 169 |
rootProject.name = "pgvector"
| pgvector/settings.gradle.kts/0 | {
"file_path": "pgvector/settings.gradle.kts",
"repo_id": "pgvector",
"token_count": 10
} | 170 |
WHL_BUILD_DIR := package
DOC_BUILD_DIR := docs/build/
# default rule
default: whl docs
.PHONY: docs
docs:
bash .dev_scripts/build_docs.sh
.PHONY: linter
linter:
bash .dev_scripts/linter.sh
.PHONY: test
test:
bash .dev_scripts/citest.sh
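# build the source and wheel distributions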
.PHONY: whl
whl:
python setup.py sdist bdist_wheel
.PHONY: clean
clean:
rm -rf $(WHL_BUILD_DIR) $(DOC_BUILD_DIR)
| swift/Makefile/0 | {
"file_path": "swift/Makefile",
"repo_id": "swift",
"token_count": 159
} | 171 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/college_physics/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/college_physics/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 172 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/high_school_microeconomics/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/high_school_microeconomics/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 173 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/moral_disputes/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/moral_disputes/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 174 |
# Compatibility with Peft
To support users who are already familiar with Peft, Swift provides a compatibility layer for it. The following peft components can be imported from swift:
>PeftModel
>
>PeftConfig
>
>PeftModelForSeq2SeqLM
>
>PeftModelForSequenceClassification
>
>PeftModelForTokenClassification
>
>PeftModelForCausalLM
>
>PromptEncoderConfig
>
>PromptTuningConfig
>
>PrefixTuningConfig
>
>PromptLearningConfig
>
>LoraConfig
>
>get_peft_config
>
>get_peft_model_state_dict
>
>get_peft_model
All of the components above can be imported from swift:
```python
from swift import PeftModel, PeftConfig
```
The Swift class can also initialize Peft tuners:
```python
from modelscope.models.nlp import SbertForSequenceClassification
from modelscope.models.nlp.structbert import SbertConfig
from swift import LoraConfig, Swift
model = SbertForSequenceClassification(SbertConfig())
lora_config = LoraConfig(target_modules=['query', 'key', 'value'])
model = Swift.prepare_model(model, lora_config)
```
Swift applies a thin wrapper around Peft so that `from_pretrained` can load models from the ModelScope hub.
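As a minimal sketch of this capability (the adapter id below is a hypothetical hub id used purely for illustration):
```python
from modelscope.models.nlp import SbertForSequenceClassification
from modelscope.models.nlp.structbert import SbertConfig
from swift import PeftModel

base_model = SbertForSequenceClassification(SbertConfig())
# 'my-org/sbert-lora-adapter' is a hypothetical ModelScope hub id.
model = PeftModel.from_pretrained(base_model, 'my-org/sbert-lora-adapter')
```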
| swift/docs/source/GetStarted/在SWIFT内使用PEFT.md/0 | {
"file_path": "swift/docs/source/GetStarted/在SWIFT内使用PEFT.md",
"repo_id": "swift",
"token_count": 458
} | 175 |
# Hands-on Training and Inference with Grok 300B
This document walks through fine-tuning and running inference on the Grok MoE 300B model in an 8-GPU environment.
## Table of Contents
- [Environment Setup](#environment-setup)
- [Finetuning](#finetuning)
- [Inference](#inference)
## Environment Setup
```shell
git clone https://github.com/modelscope/swift.git
cd swift
pip install -e '.[llm]'
```
## Finetuning
### Experiment Environment
- GPU: 8 x A100 80G
- Docker Image: ModelScope official image version 1.13.1
- peft: 0.10.0
### Dataset Preparation
Grok is a base model, so we used the [DuReader Question Generation dataset](https://www.modelscope.cn/datasets/modelscope/DuReader_robust-QG/summary) as the training set. The dataset contains around 15,000 examples; with a max length of 512 tokens, roughly 10,000 of them remain usable for training (average length 305±92 tokens).
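As a rough illustration of that length cut-off, the sketch below counts an example as usable when its tokenized form fits in the 512-token budget (a sketch only: the field names are assumptions, and SWIFT applies its own template and filtering internally):
```python
from modelscope import snapshot_download
from transformers import AutoTokenizer

# Tokenizer repo id taken from the Model Preparation section below.
tokenizer = AutoTokenizer.from_pretrained(
    snapshot_download('AI-ModelScope/grok-1-tokenizer'))

def fits_budget(context: str, answer: str, max_length: int = 512) -> bool:
    # Rough check only: the real training template adds task/prompt tokens too.
    text = context + answer
    return len(tokenizer(text)['input_ids']) <= max_length
```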
### Model Preparation
For the Grok model, we used the version provided by [ColossalAI](https://www.modelscope.cn/models/colossalai/grok-1-pytorch/summary), and additionally prepared a [tokenizer conforming to the transformers standard](https://www.modelscope.cn/models/AI-ModelScope/grok-1-tokenizer/summary).
### Training
Because the Grok model is too large, neither plain `device_map` placement nor DeepSpeed ZeRO-3 without offload can fit the training run. This experiment therefore uses LoRA + DeepSpeed ZeRO-3 offload. The complete training script is as follows:
```shell
# cd examples/pytorch/llm first
nproc_per_node=8
PYTHONPATH=../../.. \
torchrun \
--nproc_per_node=$nproc_per_node \
--master_port 29500 \
llm_sft.py \
--model_type grok-1 \
--sft_type lora \
--tuner_backend peft \
--dtype bf16 \
--output_dir output \
--ddp_backend nccl \
--dataset dureader-robust-zh \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 512 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_dtype AUTO \
--lora_target_modules DEFAULT \
--gradient_checkpointing true \
--batch_size 2 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps $(expr 16 / $nproc_per_node) \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--deepspeed zero3-offload \
```
The complete training files can be found [here](https://github.com/modelscope/swift/tree/main/examples/pytorch/llm/scripts/grok-1/lora_ddp_ds).
Here are some benchmarks from the training process:
| Metric | Value |
|---------------|------------------------------------------------------------|
| GPU Memory Usage | 8 * 21G |
| Training Speed | 45s/it |
| Total Iterations | 340 (10000 (dataset length) / 16 (gradient accumulation) / 2 (batch size)) |
<img src="../../resources/image-20240329122854204.png" alt="image-20240329122854204" style="zoom: 33%;" />
Since GPU memory usage stays below 24 GB, training should in principle also be possible on RTX 3090/A10 machines.
<img src="../../resources/grok_train_loss.png" alt="train_loss (1)" style="zoom:33%;" />
<img src="../../resources/grok_train_acc.png" alt="train_acc" style="zoom:33%;" />
The training took about 4 hours.
### Inference
The SWIFT framework does not currently support DeepSpeed inference, so we fall back on transformers' `device_map` support. Because the model is too large, some layers are offloaded to the CPU, which makes LoRA loading fail at inference time. We therefore patched the peft implementation: LoRA modules whose base Linear layer sits on the meta device are left untouched, and the LoRA weights are moved to the right device dynamically at runtime.
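The idea behind the patch can be sketched as follows (an illustration only, not the actual SWIFT patch; `model` is assumed to be the peft-wrapped Grok model):
```python
def move_lora_to_input_device(module, args):
    # Forward pre-hook: if LoRA weights live on a different device than the
    # incoming activations (e.g. because the base layer was CPU-offloaded),
    # move them over before the forward pass runs.
    hidden_states = args[0]
    for name, param in module.named_parameters():
        if 'lora_' in name and param.device != hidden_states.device:
            param.data = param.data.to(hidden_states.device)
    return args

# Register the hook on every LoRA-wrapped layer of the model.
for module in model.modules():
    if hasattr(module, 'lora_A'):
        module.register_forward_pre_hook(move_lora_to_input_device)
```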
The inference script is as follows:
```shell
# cd examples/pytorch/llm first
PYTHONPATH=../../.. \
python llm_infer.py \
--ckpt_dir output/grok-1/vx-xxx-xxx/checkpoint-xxx \
--dtype bf16 \
--load_dataset_config true \
--max_new_tokens 64 \
--do_sample true \
--eval_human false \
--merge_lora false \
```
Inference result:
```text
[PROMPT]Task: Question Generation
Context: 我个人感觉是吕颂贤版,剧情和原著差别不大,虽然TVB演员颜值和风光没有大陆的好。但是香港特区人口和地域的限制,只能注重在演员的演技方面发挥很出色,楼主看过大陆排《笑傲江湖》吧!在台词上表现的很生硬没有香港的注重神色配台词,比如杜燕歌把吕颂贤表情和性格几乎和原著差别不大。武打几乎沿用徐克和程小东动作的风格很注重实际技巧,没有大陆版的在武打场面依靠电脑特效表现的太夸张了。李亚鹏版的武打动作和导演还是香港的元彬,大陆毕竟还是在武侠剧起步的比较晚,主要是还是靠明星大腕压阵而香港却是恰恰相反。
Answer: 吕颂贤版
Question:[OUTPUT]笑傲江湖哪个版本好看</s>
[LABELS]笑傲江湖哪个版本好看
--------------------------------------------------
[PROMPT]Task: Question Generation
Context: 这位朋友你好,女性出现妊娠反应一般是从6-12周左右,也就是女性怀孕1个多月就会开始出现反应,第3个月的时候,妊辰反应基本结束。 而大部分女性怀孕初期都会出现恶心、呕吐的感觉,这些症状都是因人而异的,除非恶心、呕吐的非常厉害,才需要就医,否则这些都是刚怀孕的的正常症状。1-3个月的时候可以观察一下自己的皮肤,一般女性怀孕初期可能会产生皮肤色素沉淀或是腹壁产生妊娠纹,特别是在怀孕的后期更加明显。 还有很多女性怀孕初期会出现疲倦、嗜睡的情况。怀孕三个月的时候,膀胱会受到日益胀大的子宫的压迫,容量会变小,所以怀孕期间也会有尿频的现象出现。月经停止也是刚怀孕最容易出现的症状,只要是平时月经正常的女性,在性行为后超过正常经期两周,就有可能是怀孕了。 如果你想判断自己是否怀孕,可以看看自己有没有这些反应。当然这也只是多数人的怀孕表现,也有部分女性怀孕表现并不完全是这样,如果你无法确定自己是否怀孕,最好去医院检查一下。
Answer: 6-12周
Question:[OUTPUT]怀孕几个月开始反应</s>
[LABELS]怀孕多久会有反应
--------------------------------------------------
```
| swift/docs/source_en/LLM/Grok-1-best-practice.md/0 | {
"file_path": "swift/docs/source_en/LLM/Grok-1-best-practice.md",
"repo_id": "swift",
"token_count": 3051
} | 176 |