text | id | metadata | __index_level_0__
---|---|---|---
stringlengths 3–11.2M | stringlengths 15–188 | dict | int64 0–275
import json
import torch
import triton_python_backend_utils as pb_utils
# Using dlpack causes segfaults on some machines, so not using it for now
# But it supports zero copy transfer from triton tensors to torch tensors,
# so worth investigating further
# from torch.utils.dlpack import to_dlpack, from_dlpack
from transformers import AutoModelForCausalLM
from transformers import AutoTokenizer
def pb2torch(request, name):
tensor = pb_utils.get_input_tensor_by_name(request, name)
return torch.from_numpy(tensor.as_numpy())
# return from_dlpack(tensor.to_dlpack())
def torch2pb(name, tensor):
return pb_utils.Tensor(name, tensor.numpy())
# return pb_utils.Tensor.from_dlpack(name, to_dlpack(tensor))
class TritonPythonModel:
def initialize(self, args):
self.model_config = model_config = json.loads(args["model_config"])
org_name = model_config["parameters"].get("org_name", {"string_value": "Salesforce"})["string_value"]
model_name = org_name + "/" + model_config["parameters"]["model_name"]["string_value"]
def get_bool(x):
return model_config["parameters"][x]["string_value"].lower() in ["1", "true"]
is_half = get_bool("use_half") and torch.cuda.is_available()
# This will make inference marginally slower, but will allow bigger models to fit in GPU
int8 = get_bool("use_int8") and torch.cuda.is_available()
auto_device_map = get_bool("use_auto_device_map") and torch.cuda.is_available()
print("Cuda available?", torch.cuda.is_available())
print(f"is_half: {is_half}, int8: {int8}, auto_device_map: {auto_device_map}")
self.model = AutoModelForCausalLM.from_pretrained(
model_name,
torch_dtype=torch.float16 if is_half else ("auto" if torch.cuda.is_available() else torch.float32),
load_in_8bit=int8,
device_map="auto" if auto_device_map else None,
low_cpu_mem_usage=True,
)
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
print(f"Model {model_name} Loaded. Footprint: {self.model.get_memory_footprint()}")
# set max_batch_size
self.max_batch_size = 0 # model_config["max_batch_size"]
def execute(self, requests):
# TODO: don't just loop over requests. batch them up
responses = []
for request in requests:
input_ids_torch = pb2torch(request, "input_ids")
input_lengths_torch = pb2torch(request, "input_lengths")
request_output_len_torch = pb2torch(request, "request_output_len")
# Attention mask
attention_mask = None
if input_lengths_torch.min() != input_lengths_torch.max():
attention_mask = torch.zeros(input_ids_torch.shape, dtype=torch.long)
for i, l in enumerate(input_lengths_torch):
attention_mask[i, :l] = 1
# Output length
max_new_tokens = request_output_len_torch[0][0]
top_k = pb_utils.get_input_tensor_by_name(request, "runtime_top_k").as_numpy().tolist()[0]
top_p = pb_utils.get_input_tensor_by_name(request, "runtime_top_p").as_numpy().tolist()[0]
temperature = pb_utils.get_input_tensor_by_name(request, "temperature").as_numpy().tolist()[0]
# n_samples = pb_utils.get_input_tensor_by_name(request, "n")
n_samples = 1 # TODO: client doesn't send this yet. instead it duplicates the request n times
# Generate
output_ids = self.model.generate(
input_ids=input_ids_torch, attention_mask=attention_mask,
max_new_tokens=max_new_tokens, do_sample=True, top_k=top_k, top_p=top_p, num_return_sequences=n_samples,
temperature=temperature,
)
# client wants batch x beam_width x seq_len and we don't support beam_width yet
output_ids = output_ids.unsqueeze(1)
# create output tensors
out_tensor_pb = torch2pb("output_ids", output_ids)
# calculate sequence_length
sequence_length = torch.zeros(output_ids.shape[:2], dtype=torch.int32)
for i in range(output_ids.shape[0]):
sequence_length[i, 0] = torch.sum(output_ids[i, 0] != self.model.config.eos_token_id).item()
sequence_length_pb = torch2pb("sequence_length", sequence_length)
# create response
response = pb_utils.InferenceResponse([out_tensor_pb, sequence_length_pb])
responses.append(response)
return responses
| fauxpilot/python_backend/model.py/0 | {
"file_path": "fauxpilot/python_backend/model.py",
"repo_id": "fauxpilot",
"token_count": 2022
} | 88 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Black">
<option name="sdkName" value="get-data" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="get-data" project-jdk-type="Python SDK" />
</project> | get-data/.idea/misc.xml/0 | {
"file_path": "get-data/.idea/misc.xml",
"repo_id": "get-data",
"token_count": 94
} | 89 |
{
"pipeline": [
{
"limit": -1,
"progress": false,
"text_key": "text",
"id_key": "id",
"adapter": "<bound method BaseReader._default_adapter of \ud83d\udcd6 - READER: \ud83d\udc7e PersonalCopilot>",
"_empty_warning": false,
"default_metadata": null,
"data_folder": "DataFolder(path='/home/ubuntu/wensimin-work', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"recursive": true,
"glob_pattern": null,
"shuffle_files": false,
"empty_warning": false
},
{
"exclusion_writer": null,
"max_line_length_threshold": 1000,
"mean_line_length_threshold": 100,
"alphanum_threshold": 0.25
},
{
"compression": "gzip",
"output_folder": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/filtered_data', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"max_file_size": -1,
"file_id_counter": {},
"output_filename": "<string.Template object at 0x7efe2fe258d0>",
"output_mg": "<datatrove.io.OutputFileManager object at 0x7efddeebe790>",
"adapter": "<bound method DiskWriter._default_adapter of \ud83d\udcbd - WRITER: \ud83d\udc3f Jsonl>",
"expand_metadata": false
}
],
"logging_dir": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/logs/2024-07-05_01-48-57_ziwdg', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"skip_completed": true,
"tasks": 16,
"workers": 16,
"start_method": "forkserver",
"local_tasks": 16,
"local_rank_offset": 0,
"depends": null,
"_launched": true,
"world_size": 16
} | get-data/logs/2024-07-05_01-48-57_ziwdg/executor.json/0 | {
"file_path": "get-data/logs/2024-07-05_01-48-57_ziwdg/executor.json",
"repo_id": "get-data",
"token_count": 936
} | 90 |
{
"pipeline": [
{
"input_folder": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/signatures', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"output_folder": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/buckets', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"index_folder": null,
"config": {
"n_grams": 5,
"num_buckets": 14,
"hashes_per_bucket": 8,
"use_64bit_hashes": true,
"seed": 1,
"norm_config": {
"lowercase": true,
"norm_whitespace": true,
"remove_punctuation": true,
"norm_unicode_diacritics": true,
"norm_numbers": true,
"norm_weekdays": false,
"norm_monthnames": false
}
},
"only_dedup_in_index": true,
"create_index_name": null,
"lines_to_buffer": 5
}
],
"logging_dir": "DataFolder(path='/home/ubuntu/wensimin-work/get-data/logs/2024-07-05_01-48-57_zxkng', fs=<fsspec.implementations.local.LocalFileSystem object at 0x7efddeeb6d10>)",
"skip_completed": true,
"tasks": 14,
"workers": 14,
"start_method": "forkserver",
"local_tasks": 14,
"local_rank_offset": 0,
"depends": null,
"_launched": true,
"world_size": 14
} | get-data/logs/2024-07-05_01-48-57_zxkng/executor.json/0 | {
"file_path": "get-data/logs/2024-07-05_01-48-57_zxkng/executor.json",
"repo_id": "get-data",
"token_count": 837
} | 91 |
# coding=utf-8
# Copyright 2024 Sourab Mangrulkar. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datatrove.executor.base import PipelineExecutor
from datatrove.executor.local import LocalPipelineExecutor
from datatrove.pipeline.dedup import MinhashDedupSignature
from datatrove.pipeline.dedup.minhash import (
MinhashConfig,
MinhashDedupBuckets,
MinhashDedupCluster,
MinhashDedupFilter,
)
from datatrove.pipeline.tokens import TokensCounter
from datatrove.pipeline.readers import JsonlReader
from datatrove.pipeline.writers.jsonl import JsonlWriter
from reader import PersonalCopilotDatasetReader
from filter import BasicCodeFilter
MIRROR_DIRECTORY = "/home/ubuntu/wensimin-work"
TOTAL_TASKS = 16
# you can also change ngrams or the number of buckets and their size here
minhash_config = MinhashConfig(
use_64bit_hashes=True
) # better precision -> fewer false positives (collisions)
def run_code_dataset_generation():
# stage 0 reads the code data and does basic filtering
pipeline_0 = [
PersonalCopilotDatasetReader(data_folder=MIRROR_DIRECTORY),
BasicCodeFilter(),
JsonlWriter(output_folder="filtered_data"),
]
# stage 1 computes minhash signatures for each task (each task gets a set of files)
pipeline_1 = [
JsonlReader("filtered_data"),
MinhashDedupSignature(
output_folder="signatures",
config=minhash_config,
),
]
# stage 2 finds matches between signatures in each bucket
pipeline_2 = [
MinhashDedupBuckets(
input_folder="signatures",
output_folder="buckets",
config=minhash_config,
),
]
# stage 3 creates clusters of duplicates using the results from all buckets
pipeline_3 = [
MinhashDedupCluster(
input_folder="buckets",
output_folder="remove_ids",
config=minhash_config,
),
]
# stage 4 reads the original input data and removes all but 1 sample per duplicate cluster
# the data must match exactly stage 1, so number of tasks and the input source must be the same
pipeline_4 = [
JsonlReader("filtered_data"),
TokensCounter(), # nice way to see how many tokens we had before and after deduplication
MinhashDedupFilter(
input_folder="remove_ids",
exclusion_writer=JsonlWriter("removed"),
),
JsonlWriter(output_folder="hf_stack"),
]
executor_0: PipelineExecutor = LocalPipelineExecutor(
pipeline=pipeline_0, tasks=TOTAL_TASKS
)
executor_1: PipelineExecutor = LocalPipelineExecutor(
pipeline=pipeline_1, tasks=TOTAL_TASKS
)
executor_2: PipelineExecutor = LocalPipelineExecutor(
pipeline=pipeline_2,
tasks=minhash_config.num_buckets,
)
executor_3: PipelineExecutor = LocalPipelineExecutor(pipeline=pipeline_3, tasks=1)
executor_4: PipelineExecutor = LocalPipelineExecutor(
pipeline=pipeline_4, tasks=TOTAL_TASKS
)
print(executor_0.run())
print(executor_1.run())
print(executor_2.run())
print(executor_3.run())
print(executor_4.run())
if __name__ == "__main__":
run_code_dataset_generation()
| get-data/pipeline.py/0 | {
"file_path": "get-data/pipeline.py",
"repo_id": "get-data",
"token_count": 1415
} | 92 |
import numpy as np
import math
import cv2
from skimage import transform as stf
def transform(data, center, output_size, scale, rotation):
scale_ratio = float(output_size) / scale
rot = float(rotation) * np.pi / 180.0
#translation = (output_size/2-center[0]*scale_ratio, output_size/2-center[1]*scale_ratio)
t1 = stf.SimilarityTransform(scale=scale_ratio)
cx = center[0] * scale_ratio
cy = center[1] * scale_ratio
t2 = stf.SimilarityTransform(translation=(-1 * cx, -1 * cy))
t3 = stf.SimilarityTransform(rotation=rot)
t4 = stf.SimilarityTransform(translation=(output_size / 2,
output_size / 2))
t = t1 + t2 + t3 + t4
trans = t.params[0:2]
#print('M', scale, rotation, trans)
cropped = cv2.warpAffine(data,
trans, (output_size, output_size),
borderValue=0.0)
return cropped, trans
def transform_pt(pt, trans):
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(trans, new_pt)
#print('new_pt', new_pt.shape, new_pt)
return new_pt[:2]
def gaussian(img, pt, sigma):
# Draw a 2D gaussian
assert (sigma >= 0)
if sigma == 0:
img[pt[1], pt[0]] = 1.0
return True
#assert pt[0]<=img.shape[1]
#assert pt[1]<=img.shape[0]
# Check that any part of the gaussian is in-bounds
ul = [int(pt[0] - 3 * sigma), int(pt[1] - 3 * sigma)]
br = [int(pt[0] + 3 * sigma + 1), int(pt[1] + 3 * sigma + 1)]
if (ul[0] > img.shape[1] or ul[1] >= img.shape[0] or br[0] < 0
or br[1] < 0):
# If not, just return the image as is
#print('gaussian error')
return False
#return img
# Generate gaussian
size = 6 * sigma + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return True
#return img
def estimate_trans_bbox(face, input_size, s=2.0):
w = face[2] - face[0]
h = face[3] - face[1]
wc = int((face[2] + face[0]) / 2)
hc = int((face[3] + face[1]) / 2)
im_size = max(w, h)
#size = int(im_size*1.2)
scale = input_size / (max(w, h) * s)
M = [
[scale, 0, input_size / 2 - wc * scale],
[0, scale, input_size / 2 - hc * scale],
]
M = np.array(M)
return M
| insightface/alignment/heatmap/img_helper.py/0 | {
"file_path": "insightface/alignment/heatmap/img_helper.py",
"repo_id": "insightface",
"token_count": 1384
} | 93 |
import argparse
import cv2
import sys
import numpy as np
import insightface
from insightface.app import FaceAnalysis
from insightface.data import get_image as ins_get_image
parser = argparse.ArgumentParser(description='insightface gender-age test')
# general
parser.add_argument('--ctx', default=0, type=int, help='ctx id, <0 means using cpu')
args = parser.parse_args()
app = FaceAnalysis(allowed_modules=['detection', 'genderage'])
app.prepare(ctx_id=args.ctx, det_size=(640,640))
img = ins_get_image('t1')
faces = app.get(img)
assert len(faces)==6
for face in faces:
print(face.bbox)
print(face.sex, face.age)
| insightface/attribute/gender_age/test.py/0 | {
"file_path": "insightface/attribute/gender_age/test.py",
"repo_id": "insightface",
"token_count": 212
} | 94 |
import sys
from torch.utils.data import Dataset, DataLoader
import os
import os.path as osp
import glob
import numpy as np
import random
import cv2
import pickle as pkl
import json
import h5py
import torch
import matplotlib.pyplot as plt
from lib.utils.misc import process_dataset_for_video, save_pickle, load_pickle
class Human36MDataset(Dataset):
def __init__(self, config, is_train=True):
self.is_train = is_train
self.data_path = config.DATA.TRAIN_PATH if is_train else config.DATA.VALID_PATH
self.frame_interval = config.DATA.FRAME_INTERVAL
self.num_frames = config.DATA.NUM_FRAMES
assert self.num_frames % 2, f"Please use odd number of frames, current: {self.num_frames}"
self.scale_path = osp.join("../data", "h36m_{}_scales{}".format("train" if is_train else "valid", ".pkl" if config.USE_GT else "_pre.pkl"))
self.use_gt_scale = config.TRAIN.USE_GT_SCALE
self.use_same_norm_2d, self.use_same_norm_3d = config.DATA.USE_SAME_NORM_2D, config.DATA.USE_SAME_NORM_3D
self.seed_set = False
self.head_root_distance = 1 / config.TRAIN.CAMERA_SKELETON_DISTANCE
self.v3d_2d_to_ours = [3, 2, 1, 4, 5, 6, 16, 15, 14, 11, 12, 13, 8, 0, 7, 9, 10]
# whether to use dataset adapted from kinetics
self.use_gt = config.USE_GT
self.exp_tmc = config.DATA.EXP_TMC
self.exp_tmc_start = config.DATA.EXP_TMC_START
self.exp_tmc_deterministic = config.DATA.EXP_TMC_DETERMINISTIC
self.exp_tmc_interval = config.DATA.EXP_TMC_INTERVAL
self.min_diff_dist = config.DATA.MIN_DIFF_DIST
self.bound_azim = config.TRAIN.BOUND_AZIM # y axis rotation
self.bound_elev = config.TRAIN.BOUND_ELEV
self.online_rot = config.DATA.ONLINE_ROT
self.is_generic_baseline = config.TRAIN.GENERIC_BASELINE
self.is_15joints = config.DATA.NUM_JOINTS == 15
self.map_to_15joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 16] # exclude thorax and spine
self._load_data_set()
def _load_data_set(self):
if self.is_train:
print('start loading human3.6m {} data.'.format("train" if self.is_train else "test"))
key = "joint_2d_gt" if self.use_gt else "joint_2d_pre"
fp = h5py.File(self.data_path, "r")
self.kp2ds = np.array(fp[key])[:, self.v3d_2d_to_ours, :2]
self.kp2ds[:, :, 0] = (self.kp2ds[:, :, 0] - 514.0435) / 500.0
self.kp2ds[:, :, 1] = (self.kp2ds[:, :, 1] - 506.7003) / 500.0
# locate root at the origin
self.kp2ds = self.kp2ds - self.kp2ds[:, 13:14]
self.kp2ds[:, 13] = 1e-5
# imagenames will be used to sample frames
self.imagenames = [name.decode() for name in fp['imagename'][:]]
if 'seqname' not in fp.keys():
# first we close the already opened (read-only) h5
fp.close()
print("Process corresponding dataset...")
process_dataset_for_video(self.data_path)
fp = h5py.File(self.data_path, "r")
self.sequence_lens = np.array(fp['seqlen'])
self.sequence_names = [name.decode() for name in fp['seqname'][:]]
self.seqname2seqindex = {name: i for i, name in enumerate(np.unique(self.sequence_names))}
self.seqindex2seqname = {i: name for name, i in self.seqname2seqindex.items()}
with open("../data/seqindex2seqname.pkl", "wb") as f:
pkl.dump(self.seqindex2seqname, f)
self.indices_in_seq = np.array(fp['index_in_seq'])
# normalize again so that the mean distance of head and root is 1/c
if not self.is_generic_baseline:
if not self.use_same_norm_2d:
factor_gt = self.head_root_distance / (np.tile(np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 2)) + 1e-8)
else:
factor_gt = self.head_root_distance / np.linalg.norm(self.kp2ds[:, -1] - self.kp2ds[:, 13], axis=1).mean()
self.kp2ds = self.kp2ds * factor_gt
self.kp3ds = np.array(fp['joint_3d_gt'])[:, self.v3d_2d_to_ours, :3]
factor_3d = np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).mean()
factor_filename = "../data/h36m_{}_factor_3d.pkl".format("train" if self.is_train else "test")
if not self.use_same_norm_3d and not osp.exists(factor_filename):
factor_3d = (np.tile(np.linalg.norm(self.kp3ds[:, -1] - self.kp3ds[:, 13], axis=1).reshape(-1, 1, 1), (1, 17, 3)) + 1e-8)
save_pickle(factor_3d, factor_filename)
self.scales = load_pickle(self.scale_path)['scale'] if osp.exists(self.scale_path) else None
if self.use_gt_scale:
assert self.scales is not None, "To use the ground-truth scale, you must calculate it beforehand, check {}".format(self.scale_path)
self.kp2ds = self.kp2ds * self.scales['scale'].reshape(-1, 1, 1)
fp.close()
print('finished load human36m {} data, total {} samples'.format("train" if self.is_train else "test", \
self.kp2ds.shape[0]))
# generate the rotation factors
num_examples = self.kp2ds.shape[0]
np.random.seed(2019)
self.bound_y = self.bound_azim; self.bound_x = self.bound_elev; self.bound_z = self.bound_elev / 2
rotation_y = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_y
rotation_x = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_x
rotation_z = (2 * np.random.random_sample((num_examples, 1)) - 1) * self.bound_z
rotation_1 = np.concatenate((rotation_y, rotation_x, rotation_z), axis=1)
rotation_2 = rotation_1.copy()
rotation_2[:, 0] = rotation_2[:, 0] + np.pi
self.rotation = np.concatenate((rotation_1, rotation_2), axis=0)
np.random.shuffle(self.rotation)
self.rotation = torch.from_numpy(self.rotation).float()
self.kp2ds = torch.from_numpy(self.kp2ds).float()
self.kp3ds = torch.from_numpy(self.kp3ds).float()
if self.scales is not None:
self.scales = torch.from_numpy(self.scales).float()
def get_seqnames(self):
return self.sequence_names
def __len__(self):
return self.kp2ds.shape[0]
def __getitem__(self, index):
if not self.seed_set:
self.seed_set = True
random.seed(index)
np.random.seed(index)
kps_3d = self.kp3ds[index]
seq_len = self.sequence_lens[index]
index_in_seq = self.indices_in_seq[index]
interval = int((self.num_frames - 1) / 2) * self.frame_interval
start, end = index_in_seq - interval, index_in_seq + interval
kps_3d = self.kp3ds[index]
kps_2d = self.kp2ds[index]
if index_in_seq + 5 < seq_len:
diff1_index = index + 5
else:
diff1_index = index - 5
if self.frame_interval > 1:
if index_in_seq + self.frame_interval < seq_len:
diff1_index = index + self.frame_interval
else:
diff1_index = index - self.frame_interval
if self.exp_tmc:
if index_in_seq + (self.exp_tmc_start + self.exp_tmc_interval) < seq_len:
diff1_index = index + (np.random.randint(self.exp_tmc_start, self.exp_tmc_start + self.exp_tmc_interval) if not self.exp_tmc_deterministic else self.exp_tmc_interval)
else:
diff1_index = index - (np.random.randint(self.exp_tmc_start, self.exp_tmc_start + self.exp_tmc_interval) if not self.exp_tmc_deterministic else self.exp_tmc_interval)
diff1 = self.kp2ds[diff1_index]
diff_dist = np.random.randint(-index_in_seq, seq_len - index_in_seq)
while abs(diff_dist) < self.min_diff_dist:
diff_dist = np.random.randint(-index_in_seq, seq_len - index_in_seq)
diff_index = index + diff_dist
diff2 = self.kp2ds[diff_index]
if not self.online_rot:
rot = self.rotation[index]
else:
rot = np.random.rand(3, ) * np.array([self.bound_y, self.bound_x, self.bound_z])
rot = torch.from_numpy(rot).float()
# the flag will always be 1 when no extra data is used
# for validation, simply ignore scale
if self.scales is not None and self.is_train:
scale = self.scales[index]
else:
scale = 0
seqname = self.sequence_names[index]
seqindex = self.seqname2seqindex[seqname]
if self.is_15joints:
kps_2d = kps_2d[self.map_to_15joints]
kps_3d = kps_3d[self.map_to_15joints]
diff1 = diff1[self.map_to_15joints]
diff2 = diff2[self.map_to_15joints]
return kps_2d, kps_3d, rot, diff1, diff2, scale
| insightface/body/human_pose/ambiguity_aware/lib/dataloader/h36m.py/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/lib/dataloader/h36m.py",
"repo_id": "insightface",
"token_count": 4289
} | 95 |
#!/usr/bin/env python3
# coding=utf-8
import h5py
import numpy as np
import pickle as pkl
v3d_to_ours = [3, 2, 1, 4, 5, 6, 16, 15, 14, 11, 12, 13, 8, 0, 7, 9, 10]
filepath = "../data/h36m_valid_pred3.h5"
f = h5py.File(filepath, "r")
joints_2d_pre = np.array(f['joint_2d_pre'])[:, v3d_to_ours]
joints_3d_gt = np.array(f['joint_3d_gt'])[:, v3d_to_ours]
f.close()
factor_path = "../data/h36m_test_factor_3d.pkl"
f = open(factor_path, "rb")
factors = pkl.load(f)
f.close()
joints_2d_pre[:, :, 0] = (joints_2d_pre[:, :, 0] - 514.0435) / 500.0
joints_2d_pre[:, :, 1] = (joints_2d_pre[:, :, 1] - 506.7003) / 500.0
root2d = joints_2d_pre[:, 13:14].copy()
joints_2d_pre = joints_2d_pre - root2d
joints_2d_pre[:, 13:14] = 1e-5
factor_2d = 1 / 10 / np.linalg.norm(joints_2d_pre[:, -1] - joints_2d_pre[:, 13], axis=1).mean()
# scale the 2d joints
# joints_2d_pre = joints_2d_pre * factor_2d * factors[:, 0:1, 0:1]
joints_2d_pre = joints_2d_pre * factor_2d
# then we project the 3d joints
# minus the root and shift to (0, 0, 10)
joints_3d_gt = joints_3d_gt - joints_3d_gt[:, 13:14].copy()
joints_3d_gt = joints_3d_gt / factors
shift = np.array([0, 0, 10]).reshape(1, 1, 3)
root3d_gt = joints_3d_gt[:, 13:14].copy()
joints_3d_gt = joints_3d_gt - root3d_gt + shift
# project the 3d joints
# N * J * 2
project_gt_2d = joints_3d_gt[..., :2] / joints_3d_gt[..., 2:]
x1_min, x1_max = joints_2d_pre[..., 0:1].min(axis=1, keepdims=True), joints_2d_pre[..., 0:1].max(axis=1, keepdims=True)
y1_min, y1_max = joints_2d_pre[..., 1:].min(axis=1, keepdims=True), joints_2d_pre[..., 1:].max(axis=1, keepdims=True)
x2_min, x2_max = project_gt_2d[..., 0:1].min(axis=1, keepdims=True), project_gt_2d[..., 0:1].max(axis=1, keepdims=True)
y2_min, y2_max = project_gt_2d[..., 1:].min(axis=1, keepdims=True), project_gt_2d[..., 1:].max(axis=1, keepdims=True)
ws = x1_max - x1_min
hs = y1_max - y1_min
hws = (hs + ws) / 2
scales = ((x2_max - x2_min) / (x1_max - x1_min) + (y2_max - y2_min) / (y1_max - y1_min)) / 2
scale_mids = (scales + hws) / 2
print("Mean/Std of scale mid: {:.3f}/{:.3f}".format(scale_mids.mean(), scale_mids.std()))
with open("../data/h36m_valid_scales_pre.pkl", "wb") as f:
pkl.dump({"scale": scales.reshape(-1), "scale_mid": scale_mids.reshape(-1)}, f)
err_gt = np.linalg.norm(project_gt_2d - joints_2d_pre, axis=-1).mean()
print("Projection GT error is: {:.4f}".format(err_gt))
# first descale, minus the root, and shift
# joints_3d_pre = joints_3d_pre / factors
# root3d_pre = joints_3d_pre[:, 13:14].copy()
# joints_3d_pre = joints_3d_pre - root3d_pre + shift
# project_pre_2d = joints_3d_pre[..., :2] / joints_3d_pre[..., 2:]
# err_pre = np.linalg.norm(project_pre_2d - joints_2d_pre, axis=-1).mean()
# print("Projection PRE error is: {:.4f}".format(err_pre))
| insightface/body/human_pose/ambiguity_aware/scripts/validate_project_pre.py/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/scripts/validate_project_pre.py",
"repo_id": "insightface",
"token_count": 1363
} | 96 |
[简体中文](README_cn.md) | English
# FaceDetection
* [1. Introduction](#Introduction)
* [2. Model Zoo](#Model_Zoo)
* [3. Installation](#Installation)
* [4. Data Pipline](#Data_Pipline)
* [5. Configuration File](#Configuration_File)
* [6. Training and Inference](#Training_and_Inference)
* [6.1 Training](#Training)
* [6.2 Evaluate on the WIDER FACE](#Evaluation)
* [6.3 Inference deployment](#Inference_deployment)
* [6.4 Improvement of inference speed](#Increase_in_inference_speed)
* [6.5 Face detection demo](#Face_detection_demo)
* [7. Citations](#Citations)
<a name="Introduction"></a>
## 1. Introduction
`Arcface-Paddle` is an open source deep face detection and recognition toolkit, powered by PaddlePaddle. `Arcface-Paddle` currently provides three related pretrained models, including `BlazeFace` for face detection and `ArcFace` and `MobileFace` for face recognition.
- This tutorial is mainly about face detection based on `PaddleDetection`.
- For face recognition task, please refer to: [Face recognition tuturial](../../recognition/arcface_paddle/README_en.md).
- For Whl package inference using PaddleInference, please refer to [whl package inference](https://github.com/littletomatodonkey/insight-face-paddle).
<a name="Model_Zoo"></a>
## 2. Model Zoo
### mAP in WIDER FACE
| Model | input size | images/GPU | epochs | Easy/Medium/Hard Set | CPU time cost | GPU time cost| Model Size(MB) | Pretrained model | Inference model | Config |
|:------------:|:--------:|:----:|:-------:|:-------:|:---------:|:---------:|:----------:|:---------:|:--------:|:--------:|
| BlazeFace-FPN-SSH | 640×640 | 8 | 1000 | 0.9187 / 0.8979 / 0.8168 | 31.7ms | 5.6ms | 0.646 |[download link](https://paddledet.bj.bcebos.com/models/blazeface_fpn_ssh_1000e.pdparams) | [download link](https://paddle-model-ecology.bj.bcebos.com/model/insight-face/blazeface_fpn_ssh_1000e_v1.0_infer.tar) | [config](https://github.com/PaddlePaddle/PaddleDetection/tree/release/2.1/configs/face_detection/blazeface_fpn_ssh_1000e.yml) |
| RetinaFace | 480x640 | - | - | - / - / 0.8250 | 182.0ms | 17.4ms | 1.680 | - | - | - |
**NOTE:**
- mAP on the `Easy/Medium/Hard` sets is obtained by multi-scale evaluation. For details, refer to [Evaluation](#Evaluation).
- Speed is measured at a resolution of `640×640` on an Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz with 5 CPU threads. For more details, refer to [Improvement of inference speed](#Increase_in_inference_speed).
- Benchmark code for `RetinaFace` is from: [../retinaface/README.md](../retinaface/README.md).
- The benchmark environment is
- CPU: Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz
- GPU: a single NVIDIA Tesla V100
<a name="Installation"></a>
## 3. Installation
Please refer to [installation tutorial](../../recognition/arcface_paddle/install_en.md) to install PaddlePaddle and PaddleDetection.
<a name="Data_Pipline"></a>
## 4. Data Pipeline
We use the [WIDER FACE dataset](http://shuoyang1213.me/WIDERFACE/) for training and testing; the official website gives a detailed introduction to the data.
- WIDER Face data source:
Loads `wider_face` type dataset with directory structures like this:
```
dataset/wider_face/
├── wider_face_split
│ ├── wider_face_train_bbx_gt.txt
│ ├── wider_face_val_bbx_gt.txt
├── WIDER_train
│ ├── images
│ │ ├── 0--Parade
│ │ │ ├── 0_Parade_marchingband_1_100.jpg
│ │ │ ├── 0_Parade_marchingband_1_381.jpg
│ │ │ │ ...
│ │ ├── 10--People_Marching
│ │ │ ...
├── WIDER_val
│ ├── images
│ │ ├── 0--Parade
│ │ │ ├── 0_Parade_marchingband_1_1004.jpg
│ │ │ ├── 0_Parade_marchingband_1_1045.jpg
│ │ │ │ ...
│ │ ├── 10--People_Marching
│ │ │ ...
```
- Download dataset manually:
To download the WIDER FACE dataset, run the following commands:
```
cd dataset/wider_face && ./download_wider_face.sh
```
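The `wider_face_*_bbx_gt.txt` annotation files listed above follow the standard WIDER FACE ground-truth layout: an image path line, a line with the number of faces, then one line per face whose first four numbers are `x y w h`. Below is a minimal parsing sketch (not part of this repo); the exact fields after `x y w h` and the handling of zero-face entries are assumptions based on the public dataset description, so verify against your download.

```python
# Hedged sketch: parse a WIDER FACE ground-truth file into {image_path: [boxes]}.
# Assumes the common layout "path / count / one 'x y w h ...' line per face";
# images with zero faces are usually followed by a single all-zero placeholder line.
def load_wider_annotations(txt_path):
    annos = {}
    with open(txt_path) as f:
        lines = [ln.strip() for ln in f if ln.strip()]
    i = 0
    while i < len(lines):
        img_path = lines[i]
        num_boxes = int(lines[i + 1])
        boxes = []
        # Even when num_boxes is 0 there is one placeholder line to skip.
        for j in range(max(num_boxes, 1)):
            vals = lines[i + 2 + j].split()
            x, y, w, h = map(float, vals[:4])
            if w > 0 and h > 0:
                boxes.append([x, y, x + w, y + h])  # convert to x1, y1, x2, y2
        annos[img_path] = boxes
        i += 2 + max(num_boxes, 1)
    return annos

# Example:
# annos = load_wider_annotations(
#     "dataset/wider_face/wider_face_split/wider_face_train_bbx_gt.txt")
# print(len(annos), "images")
```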
<a name="Configuration_file"></a>
## 5. Configuration file
We use the `configs/face_detection/blazeface_fpn_ssh_1000e.yml` configuration for training. The summary of the configuration file is as follows:
```yaml
_BASE_: [
'../datasets/wider_face.yml',
'../runtime.yml',
'_base_/optimizer_1000e.yml',
'_base_/blazeface_fpn.yml',
'_base_/face_reader.yml',
]
weights: output/blazeface_fpn_ssh_1000e/model_final
multi_scale_eval: True
```
The `blazeface_fpn_ssh_1000e.yml` configuration depends on several other configuration files; in this example it relies on:
```
wider_face.yml: Mainly describes the paths of the training and validation data
runtime.yml: Mainly describes common runtime parameters, such as whether to use the GPU and how often to save checkpoints
optimizer_1000e.yml: Mainly describes the learning rate and optimizer configuration
blazeface_fpn.yml: Mainly describes the model architecture and backbone network
face_reader.yml: Mainly describes the data reader configuration, such as batch size and the number of concurrent loading subprocesses, as well as post-reading preprocessing such as resize and data augmentation
```
Modify the above files as needed for your setup, e.g. the dataset path or batch size.
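Conceptually, the `_BASE_` list works like a layered include: the base files are loaded first and the keys of `blazeface_fpn_ssh_1000e.yml` override them. The sketch below is a simplified illustration of that merge, not PaddleDetection's actual loader (which, among other things, resolves `_BASE_` paths relative to the including file):

```python
# Simplified illustration of how a config with a _BASE_ list is assembled:
# base files are merged first, then the child file's keys override them,
# recursively for nested dicts. Requires PyYAML.
import yaml

def deep_merge(base: dict, override: dict) -> dict:
    out = dict(base)
    for k, v in override.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = deep_merge(out[k], v)
        else:
            out[k] = v
    return out

def load_config(path: str) -> dict:
    with open(path) as f:
        cfg = yaml.safe_load(f) or {}
    merged = {}
    for base_path in cfg.pop("_BASE_", []):
        merged = deep_merge(merged, load_config(base_path))
    return deep_merge(merged, cfg)

# cfg = load_config("configs/face_detection/blazeface_fpn_ssh_1000e.yml")
```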
For the configuration of the base model, please refer to `configs/face_detection/_base_/blazeface.yml`.
The improved model adds FPN and SSH neck structures. For the configuration file, please refer to `configs/face_detection/_base_/blazeface_fpn.yml`. You can configure FPN and SSH as needed, as follows:
```yaml
BlazeNet:
blaze_filters: [[24, 24], [24, 24], [24, 48, 2], [48, 48], [48, 48]]
double_blaze_filters: [[48, 24, 96, 2], [96, 24, 96], [96, 24, 96],
[96, 24, 96, 2], [96, 24, 96], [96, 24, 96]]
act: hard_swish # Configure the activation function of BlazeBlock in backbone, the basic model is relu, hard_swish is required when adding FPN and SSH
BlazeNeck:
neck_type : fpn_ssh # Optional only_fpn, only_ssh and fpn_ssh
in_channel: [96,96]
```
<a name="Training_and_Inference"></a>
## 6. Training and Inference
<a name="Training"></a>
### 6.1 Training
Firstly, download the pretrained model.
```bash
wget https://paddledet.bj.bcebos.com/models/pretrained/blazenet_pretrain.pdparams
```
PaddleDetection provides a single-GPU/multi-GPU training mode to meet the various training needs of users.
* single-GPU training
```bash
export CUDA_VISIBLE_DEVICES=0 # Do not need to execute this command under windows and Mac
python tools/train.py -c configs/face_detection/blazeface_fpn_ssh_1000e.yml -o pretrain_weight=blazenet_pretrain
```
* multi-GPU training
```bash
export CUDA_VISIBLE_DEVICES=0,1,2,3 # Do not need to execute this command under windows and Mac
python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/face_detection/blazeface_fpn_ssh_1000e.yml -o pretrain_weight=blazenet_pretrain
```
* Resume training from Checkpoint
If training is interrupted, use the `-r` option to resume it from a checkpoint:
```bash
export CUDA_VISIBLE_DEVICES=0 # Do not need to execute this command under windows and Mac
python tools/train.py -c configs/face_detection/blazeface_fpn_ssh_1000e.yml -r output/blazeface_fpn_ssh_1000e/100
```
* Training hyperparameters
`BlazeFace` is trained with `batch_size=32` per GPU on 4 GPUs (total `batch_size` of 128), a learning rate of 0.002, and 1000 training epochs in total.
**NOTE:** Evaluation during training is not supported.
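As a quick illustration of the numbers above (and of the common linear learning-rate scaling heuristic, which is a general rule of thumb rather than something this repository prescribes):

```python
# Effective batch size and a linear LR scaling sketch for a different GPU count.
per_gpu_batch = 32
gpus = 4
base_lr = 0.002                              # tuned for 4 GPUs x 32 = 128 images per step

total_batch = per_gpu_batch * gpus           # 128
print("effective batch size:", total_batch)

my_gpus = 8                                  # hypothetical setup
scaled_lr = base_lr * (per_gpu_batch * my_gpus) / total_batch
print("suggested lr for", my_gpus, "GPUs:", scaled_lr)   # 0.004
```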
<a name="Evaluation"></a>
### 6.2 Evaluate on the WIDER FACE
- Evaluate and generate results files:
```shell
python -u tools/eval.py -c configs/face_detection/blazeface_fpn_ssh_1000e.yml \
-o weights=output/blazeface_fpn_ssh_1000e/model_final \
multi_scale_eval=True BBoxPostProcess.nms.score_threshold=0.1
```
Set `multi_scale_eval=True` for multi-scale evaluation. After the evaluation is completed, test results in txt format will be generated in `output/pred`.
- Download the official evaluation script to evaluate the AP metrics:
```bash
wget http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/eval_script/eval_tools.zip
unzip eval_tools.zip && rm -f eval_tools.zip
```
- Start evaluation:
Method One: Python evaluation:
```bash
git clone https://github.com/wondervictor/WiderFace-Evaluation.git
cd WiderFace-Evaluation
# Compile
python3 setup.py build_ext --inplace
# Start evaluation
python3 evaluation.py -p /path/to/PaddleDetection/output/pred -g /path/to/eval_tools/ground_truth
```
Method Two: MatLab evaluation:
```bash
# Modify the result path and the name of the curve to be drawn in `eval_tools/wider_eval.m`:
pred_dir = './pred';
legend_name = 'Paddle-BlazeFace';
# `wider_eval.m` is the main execution program of the evaluation module. Run it as follows:
matlab -nodesktop -nosplash -nojvm -r "run wider_eval.m;quit;"
```
<a name="Inference_deployment"></a>
### 6.3 Inference deployment
The model files saved during training include parameters for both forward prediction and back propagation. In actual industrial deployment, back propagation is not required, so the model needs to be exported into the format required for deployment.
The `tools/export_model.py` script is provided in PaddleDetection to export the model:
```bash
python tools/export_model.py -c configs/face_detection/blazeface_fpn_ssh_1000e.yml --output_dir=./inference_model \
-o weights=output/blazeface_fpn_ssh_1000e/best_model BBoxPostProcess.nms.score_threshold=0.1
```
The inference model will be exported to the `inference_model/blazeface_fpn_ssh_1000e` directory as `infer_cfg.yml`, `model.pdiparams`, `model.pdiparams.info` and `model.pdmodel`. If no folder is specified, the model will be exported to `output_inference`.
* The nms `score_threshold` is set to 0.1 for inference because it gives a large speed improvement while having little effect on mAP. For more documentation about model export, please refer to: [export doc](https://github.com/PaddlePaddle/PaddleDetection/deploy/EXPORT_MODEL.md)
PaddleDetection provides multiple deployment options (PaddleInference, PaddleServing, and PaddleLite), supports server, mobile, and embedded platforms, and offers complete deployment solutions for Python and C++.
* Here, we take Python as an example to illustrate how to use PaddleInference for model deployment:
```bash
python deploy/python/infer.py --model_dir=./inference_model/blazeface_fpn_ssh_1000e --image_file=demo/road554.png --use_gpu=True
```
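If you prefer to call PaddleInference directly instead of going through `deploy/python/infer.py`, the sketch below shows the general pattern with the public `paddle.inference` Python API. The preprocessing values (mean/std, 640×640 resize) are taken from the `infer_cfg.yml` shown in section 6.4, but the exact input tensor names and channel order depend on the exported model, so treat this as a hedged starting point rather than a drop-in replacement:

```python
import cv2
import numpy as np
from paddle.inference import Config, create_predictor

model_dir = "./inference_model/blazeface_fpn_ssh_1000e"
config = Config(model_dir + "/model.pdmodel", model_dir + "/model.pdiparams")
config.disable_gpu()  # or config.enable_use_gpu(200, 0) to run on GPU 0
predictor = create_predictor(config)
print("model inputs:", predictor.get_input_names())

# Preprocess one image following infer_cfg.yml: resize to 640x640,
# subtract mean / divide by std, HWC -> CHW, add a batch dimension.
img = cv2.imread("demo/road554.png")
h, w = img.shape[:2]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # assumption: the exported model expects RGB
resized = cv2.resize(img, (640, 640)).astype("float32")
mean = np.array([123, 117, 104], dtype="float32")
std = np.array([127.502231, 127.502231, 127.502231], dtype="float32")
blob = ((resized - mean) / std).transpose(2, 0, 1)[np.newaxis]

# PaddleDetection exports commonly declare "image" plus "im_shape"/"scale_factor";
# check predictor.get_input_names() / infer_cfg.yml for the exact set.
feeds = {
    "image": blob.astype("float32"),
    "im_shape": np.array([[640.0, 640.0]], dtype="float32"),
    "scale_factor": np.array([[640.0 / h, 640.0 / w]], dtype="float32"),
}
for name in predictor.get_input_names():
    handle = predictor.get_input_handle(name)
    handle.reshape(feeds[name].shape)
    handle.copy_from_cpu(feeds[name])

predictor.run()
out_name = predictor.get_output_names()[0]
dets = predictor.get_output_handle(out_name).copy_to_cpu()
print("raw output shape:", dets.shape)  # typically (N, 6): label, score, x1, y1, x2, y2
```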
* `infer.py` provides a rich interface for users to access video files and cameras for prediction. For more information, please refer to: [Python deployment](https://github.com/PaddlePaddle/PaddleDetection/deploy/python.md).
* For more documentation on deployment, please refer to: [deploy doc](https://github.com/PaddlePaddle/PaddleDetection/deploy/README.md).
<a name="Increase_in_inference_speed"></a>
### 6.4 Improvement of inference speed
To reproduce our speed benchmarks, you need to modify the input size of the inference model in the `./inference_model/blazeface_fpn_ssh_1000e/infer_cfg.yml` configuration file, as follows:
```yaml
mode: fluid
draw_threshold: 0.5
metric: WiderFace
arch: Face
min_subgraph_size: 3
Preprocess:
- is_scale: false
mean:
- 123
- 117
- 104
std:
- 127.502231
- 127.502231
- 127.502231
type: NormalizeImage
- interp: 1
keep_ratio: false
target_size:
- 640
- 640
type: Resize
- type: Permute
label_list:
- face
```
For faster inference in a CPU environment, install [paddlepaddle_gpu-0.0.0](https://paddle-wheel.bj.bcebos.com/develop-cpu-mkl/paddlepaddle-0.0.0-cp37-cp37m-linux_x86_64.whl) (built with MKL-DNN support) and set `enable_mkldnn=True` when running prediction.
```bash
# use GPU:
python deploy/python/infer.py --model_dir=./inference_model/blazeface_fpn_ssh_1000e --image_dir=./path/images --run_benchmark=True --use_gpu=True
# CPU inference with MKL-DNN
# download whl package
wget https://paddle-wheel.bj.bcebos.com/develop-cpu-mkl/paddlepaddle-0.0.0-cp37-cp37m-linux_x86_64.whl
# install paddlepaddle_gpu-0.0.0
pip install paddlepaddle-0.0.0-cp37-cp37m-linux_x86_64.whl
python deploy/python/infer.py --model_dir=./inference_model/blazeface_fpn_ssh_1000e --image_dir=./path/images --enable_mkldnn=True --run_benchmark=True --cpu_threads=5
```
<a name="Face_detection_demo"></a>
### 6.5 Face detection demo
This section shows how to detect faces using the BlazeFace model.
First, use the following commands to download the demo image and the font file used for visualization.
```bash
# Demo image
wget https://raw.githubusercontent.com/littletomatodonkey/insight-face-paddle/main/demo/friends/query/friends1.jpg
# Font file for visualization
wget https://raw.githubusercontent.com/littletomatodonkey/insight-face-paddle/main/SourceHanSansCN-Medium.otf
```
The demo image is shown as follows.
<div align="center">
<img src="https://raw.githubusercontent.com/littletomatodonkey/insight-face-paddle/main/demo/friends/query/friends1.jpg" width = "800" />
</div>
Use the following command to run the face detection process.
```shell
python3.7 test_blazeface.py --input=friends1.jpg --output="./output"
```
The final result is saved in the folder `output/` and is shown below.
<div align="center">
<img src="https://raw.githubusercontent.com/littletomatodonkey/insight-face-paddle/main/demo/friends/output/friends1.jpg" width = "800" />
</div>
For more details about parameter explanations, face recognition, index gallery construction and whl package inference, please refer to [Whl package inference tutorial](https://github.com/littletomatodonkey/insight-face-paddle).
## 7. Citations
```
@misc{long2020ppyolo,
title={PP-YOLO: An Effective and Efficient Implementation of Object Detector},
author={Xiang Long and Kaipeng Deng and Guanzhong Wang and Yang Zhang and Qingqing Dang and Yuan Gao and Hui Shen and Jianguo Ren and Shumin Han and Errui Ding and Shilei Wen},
year={2020},
eprint={2007.12099},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
@misc{ppdet2019,
title={PaddleDetection, Object detection and instance segmentation toolkit based on PaddlePaddle.},
author={PaddlePaddle Authors},
howpublished = {\url{https://github.com/PaddlePaddle/PaddleDetection}},
year={2019}
}
@article{bazarevsky2019blazeface,
title={BlazeFace: Sub-millisecond Neural Face Detection on Mobile GPUs},
author={Valentin Bazarevsky and Yury Kartynnik and Andrey Vakunov and Karthik Raveendran and Matthias Grundmann},
year={2019},
eprint={1907.05047},
archivePrefix={arXiv}
}
```
| insightface/detection/blazeface_paddle/README.md/0 | {
"file_path": "insightface/detection/blazeface_paddle/README.md",
"repo_id": "insightface",
"token_count": 5051
} | 97 |
from __future__ import print_function
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import sys
import time
import mxnet as mx
import numpy as np
from builtins import range
from mxnet.module import Module
from .module import MutableModule
from rcnn.logger import logger
from rcnn.config import config
from rcnn.io import image
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
from rcnn.processing.bbox_transform import bbox_overlaps
def IOU(Reframe, GTframe):
x1 = Reframe[0]
y1 = Reframe[1]
width1 = Reframe[2] - Reframe[0]
height1 = Reframe[3] - Reframe[1]
x2 = GTframe[0]
y2 = GTframe[1]
width2 = GTframe[2] - GTframe[0]
height2 = GTframe[3] - GTframe[1]
endx = max(x1 + width1, x2 + width2)
startx = min(x1, x2)
width = width1 + width2 - (endx - startx)
endy = max(y1 + height1, y2 + height2)
starty = min(y1, y2)
height = height1 + height2 - (endy - starty)
if width <= 0 or height <= 0:
ratio = 0
else:
Area = width * height
Area1 = width1 * height1
Area2 = width2 * height2
ratio = Area * 1. / (Area1 + Area2 - Area)
return ratio
class Predictor(object):
def __init__(self,
symbol,
data_names,
label_names,
context=mx.cpu(),
max_data_shapes=None,
provide_data=None,
provide_label=None,
arg_params=None,
aux_params=None):
#self._mod = MutableModule(symbol, data_names, label_names,
# context=context, max_data_shapes=max_data_shapes)
self._mod = Module(symbol, data_names, label_names, context=context)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return dict(zip(self._mod.output_names,
self._mod.get_outputs())) #TODO
#return self._mod.get_outputs()
def im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
# drop the batch index
boxes = output['rois_output'].asnumpy()[:, 1:]
scores = output['rois_score'].asnumpy()
# transform to original scale
boxes = boxes / scale
return scores, boxes, data_dict
def _im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
print('output', output)
# drop the batch index
boxes = output['rois_output'].asnumpy()[:, 1:]
scores = output['rois_score'].asnumpy()
# transform to original scale
boxes = boxes / scale
return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
"""
Generate detections results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: thresh for valid detections
:return: list of detected boxes
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
i = 0
t = time.time()
imdb_boxes = list()
original_boxes = list()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_proposal(predictor, data_batch,
data_names, scale)
print(scores.shape, boxes.shape, file=sys.stderr)
t2 = time.time() - t
t = time.time()
# assemble proposals
dets = np.hstack((boxes, scores))
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4:] > thresh)[0]
dets = dets[keep, :]
imdb_boxes.append(dets)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'],
scale)
logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
'proposal %d ' % (dets.shape[0]) + 'data %.4fs net %.4fs' %
(t1, t2))
i += 1
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
# save results
rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
if not os.path.exists(rpn_folder):
os.mkdir(rpn_folder)
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
pickle.dump(imdb_boxes, f, pickle.HIGHEST_PROTOCOL)
if thresh > 0:
full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
with open(full_rpn_file, 'wb') as f:
pickle.dump(original_boxes, f, pickle.HIGHEST_PROTOCOL)
logger.info('wrote rpn proposals to %s' % rpn_file)
return imdb_boxes
def test_proposals(predictor, test_data, imdb, roidb, vis=False):
"""
Test detections results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param roidb: roidb
:param vis: controls visualization
:return: recall, mAP
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
#bbox_file = os.path.join(rpn_folder, imdb.name + '_bbox.txt')
#bbox_f = open(bbox_file, 'w')
i = 0
t = time.time()
output_folder = os.path.join(imdb.root_path, 'output')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
imdb_boxes = list()
original_boxes = list()
gt_overlaps = np.zeros(0)
overall = [0.0, 0.0]
gt_max = np.array((0.0, 0.0))
num_pos = 0
#apply scale, for SSH
#_, roidb = image.get_image(roidb)
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
oscale = im_info[0, 2]
#print('scale', scale, file=sys.stderr)
scale = 1.0 #fix scale=1.0 for SSH face detector
scores, boxes, data_dict = im_proposal(predictor, data_batch,
data_names, scale)
#print(scores.shape, boxes.shape, file=sys.stderr)
t2 = time.time() - t
t = time.time()
# assemble proposals
dets = np.hstack((boxes, scores))
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4:] > config.TEST.SCORE_THRESH)[0]
dets = dets[keep, :]
imdb_boxes.append(dets)
logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
'proposal %d ' % (dets.shape[0]) + 'data %.4fs net %.4fs' %
(t1, t2))
#if dets.shape[0]==0:
# continue
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'],
scale)
boxes = dets
#max_gt_overlaps = roidb[i]['gt_overlaps'].max(axis=1)
#gt_inds = np.where((roidb[i]['gt_classes'] > 0) & (max_gt_overlaps == 1))[0]
#gt_boxes = roidb[i]['boxes'][gt_inds, :]
gt_boxes = roidb[i]['boxes'].copy(
) * oscale # as roidb is the original one, need to scale GT for SSH
gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0] + 1) * (gt_boxes[:, 3] -
gt_boxes[:, 1] + 1)
num_pos += gt_boxes.shape[0]
overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
#print(im_info, gt_boxes.shape, boxes.shape, overlaps.shape, file=sys.stderr)
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
# choose whatever is smaller to iterate
#for j in range(gt_boxes.shape[0]):
# print('gt %d,%d,%d,%d'% (gt_boxes[j][0], gt_boxes[j][1], gt_boxes[j][2]-gt_boxes[j][0], gt_boxes[j][3]-gt_boxes[j][1]), file=sys.stderr)
# gt_max = np.maximum( gt_max, np.array( (gt_boxes[j][2], gt_boxes[j][3]) ) )
#print('gt max', gt_max, file=sys.stderr)
#for j in range(boxes.shape[0]):
# print('anchor_box %.2f,%.2f,%.2f,%.2f'% (boxes[j][0], boxes[j][1], boxes[j][2]-boxes[j][0], boxes[j][3]-boxes[j][1]), file=sys.stderr)
#rounds = min(boxes.shape[0], gt_boxes.shape[0])
#for j in range(rounds):
# # find which proposal maximally covers each gt box
# argmax_overlaps = overlaps.argmax(axis=0)
# print(j, 'argmax_overlaps', argmax_overlaps, file=sys.stderr)
# # get the IoU amount of coverage for each gt box
# max_overlaps = overlaps.max(axis=0)
# print(j, 'max_overlaps', max_overlaps, file=sys.stderr)
# # find which gt box is covered by most IoU
# gt_ind = max_overlaps.argmax()
# gt_ovr = max_overlaps.max()
# assert (gt_ovr >= 0), '%s\n%s\n%s' % (boxes, gt_boxes, overlaps)
# # find the proposal box that covers the best covered gt box
# box_ind = argmax_overlaps[gt_ind]
# print('max box', gt_ind, box_ind, (boxes[box_ind][0], boxes[box_ind][1], boxes[box_ind][2]-boxes[box_ind][0], boxes[box_ind][3]-boxes[box_ind][1], boxes[box_ind][4]), file=sys.stderr)
# # record the IoU coverage of this gt box
# _gt_overlaps[j] = overlaps[box_ind, gt_ind]
# assert (_gt_overlaps[j] == gt_ovr)
# # mark the proposal box and the gt box as used
# overlaps[box_ind, :] = -1
# overlaps[:, gt_ind] = -1
if boxes.shape[0] > 0:
_gt_overlaps = overlaps.max(axis=0)
#print('max_overlaps', _gt_overlaps, file=sys.stderr)
for j in range(len(_gt_overlaps)):
if _gt_overlaps[j] > config.TEST.IOU_THRESH:
continue
print(j,
'failed',
gt_boxes[j],
'max_overlap:',
_gt_overlaps[j],
file=sys.stderr)
#_idx = np.where(overlaps[:,j]>0.4)[0]
#print(j, _idx, file=sys.stderr)
#print(overlaps[_idx,j], file=sys.stderr)
#for __idx in _idx:
# print(gt_boxes[j], boxes[__idx], overlaps[__idx,j], IOU(gt_boxes[j], boxes[__idx,0:4]), file=sys.stderr)
# append recorded IoU coverage level
found = (_gt_overlaps > config.TEST.IOU_THRESH).sum()
_recall = found / float(gt_boxes.shape[0])
print('recall',
_recall,
gt_boxes.shape[0],
boxes.shape[0],
gt_areas,
file=sys.stderr)
overall[0] += found
overall[1] += gt_boxes.shape[0]
#gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
#_recall = (gt_overlaps >= threshold).sum() / float(num_pos)
_recall = float(overall[0]) / overall[1]
print('recall_all', _recall, file=sys.stderr)
boxes[:, 0:4] /= oscale
_vec = roidb[i]['image'].split('/')
out_dir = os.path.join(output_folder, _vec[-2])
if not os.path.exists(out_dir):
os.mkdir(out_dir)
out_file = os.path.join(out_dir, _vec[-1].replace('jpg', 'txt'))
with open(out_file, 'w') as f:
name = '/'.join(roidb[i]['image'].split('/')[-2:])
f.write("%s\n" % (name))
f.write("%d\n" % (boxes.shape[0]))
for b in range(boxes.shape[0]):
box = boxes[b]
f.write(
"%d %d %d %d %g \n" %
(box[0], box[1], box[2] - box[0], box[3] - box[1], box[4]))
i += 1
#bbox_f.close()
return
gt_overlaps = np.sort(gt_overlaps)
recalls = np.zeros_like(thresholds)
# compute recall for each IoU threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
ar = recalls.mean()
# print results
print('average recall for {}: {:.3f}'.format(area_name, ar))
for threshold, recall in zip(thresholds, recalls):
print('recall @{:.2f}: {:.3f}'.format(threshold, recall))
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
# save results
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
pickle.dump(imdb_boxes, f, pickle.HIGHEST_PROTOCOL)
logger.info('wrote rpn proposals to %s' % rpn_file)
return imdb_boxes
def im_detect(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
data_dict = dict(zip(data_names, data_batch.data))
if config.TEST.HAS_RPN:
rois = output['rois_output'].asnumpy()[:, 1:]
else:
rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
im_shape = data_dict['data'].shape
# save output
scores = output['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
# post processing
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, im_shape[-2:])
# we used scaled image & roi to train, so it is necessary to transform them back
pred_boxes = pred_boxes / scale
return scores, pred_boxes, data_dict
def pred_eval(predictor, test_data, imdb, vis=False, thresh=1e-3):
"""
wrapper for calculating offline validation for faster data analysis
in this example, all thresholds are set by hand
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffle
:param imdb: image database
:param vis: controls visualization
:param thresh: valid detection threshold
:return:
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
nms = py_nms_wrapper(config.TEST.NMS)
# limit detections to max_per_image over all classes
max_per_image = -1
num_images = imdb.num_images
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
i = 0
t = time.time()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names,
scale)
t2 = time.time() - t
t = time.time()
for j in range(1, imdb.num_classes):
indexes = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets)
all_boxes[j][i] = cls_dets[keep, :]
if max_per_image > 0:
image_scores = np.hstack(
[all_boxes[j][i][:, -1] for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
if vis:
boxes_this_image = [[]] + [
all_boxes[j][i] for j in range(1, imdb.num_classes)
]
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image,
imdb.classes, scale)
t3 = time.time() - t
t = time.time()
logger.info('testing %d/%d data %.4fs net %.4fs post %.4fs' %
(i, imdb.num_images, t1, t2, t3))
i += 1
det_file = os.path.join(imdb.cache_path, imdb.name + '_detections.pkl')
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, protocol=pickle.HIGHEST_PROTOCOL)
imdb.evaluate_detections(all_boxes)
def vis_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import matplotlib.pyplot as plt
import random
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.random(), random.random(), random.random()
) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1],
fill=False,
edgecolor=color,
linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0],
bbox[1] - 2,
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5),
fontsize=12,
color='white')
plt.show()
def draw_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
# change to bgr
im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)  # cv2.cv constants were removed in OpenCV 3+
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256),
random.randint(0, 256)) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
bbox = list(map(int, bbox))  # map() returns an iterator in Python 3; indexing below needs a list
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]),
color=color,
thickness=2)
cv2.putText(im,
'%s %.3f' % (class_names[j], score),
(bbox[0], bbox[1] + 10),
color=color_white,
fontFace=cv2.FONT_HERSHEY_COMPLEX,
fontScale=0.5)
return im
| insightface/detection/retinaface/rcnn/core/tester.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/core/tester.py",
"repo_id": "insightface",
"token_count": 9774
} | 98 |
"""
Fast R-CNN:
data =
{'data': [num_images, c, h, w],
'rois': [num_rois, 5]}
label =
{'label': [num_rois],
'bbox_target': [num_rois, 4 * num_classes],
'bbox_weight': [num_rois, 4 * num_classes]}
roidb extended format [image_index]
['image', 'height', 'width', 'flipped',
'boxes', 'gt_classes', 'gt_overlaps', 'max_classes', 'max_overlaps', 'bbox_targets']
"""
import numpy as np
import numpy.random as npr
from ..config import config
from ..io.image import get_image, tensor_vstack
from ..processing.bbox_transform import bbox_overlaps, bbox_transform
from ..processing.bbox_regression import expand_bbox_regression_targets
def get_rcnn_testbatch(roidb):
"""
return a dict of testbatch
:param roidb: ['image', 'flipped'] + ['boxes']
:return: data, label, im_info
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
im_rois = roidb[0]['boxes']
rois = im_rois
batch_index = 0 * np.ones((rois.shape[0], 1))
rois_array = np.hstack((batch_index, rois))[np.newaxis, :]
data = {'data': im_array, 'rois': rois_array, 'im_info': im_info}
label = {}
return data, label
def get_rcnn_batch(roidb):
"""
return a dict of multiple images
:param roidb: a list of dict, whose length controls batch size
['images', 'flipped'] + ['gt_boxes', 'boxes', 'gt_overlap'] => ['bbox_targets']
:return: data, label
"""
num_images = len(roidb)
imgs, roidb = get_image(roidb)
im_array = tensor_vstack(imgs)
assert config.TRAIN.BATCH_ROIS % config.TRAIN.BATCH_IMAGES == 0, \
'BATCHIMAGES {} must divide BATCH_ROIS {}'.format(config.TRAIN.BATCH_IMAGES, config.TRAIN.BATCH_ROIS)
rois_per_image = int(config.TRAIN.BATCH_ROIS / config.TRAIN.BATCH_IMAGES)
fg_rois_per_image = int(round(config.TRAIN.FG_FRACTION * rois_per_image))
rois_array = list()
labels_array = list()
bbox_targets_array = list()
bbox_weights_array = list()
for im_i in range(num_images):
roi_rec = roidb[im_i]
# infer num_classes from gt_overlaps
num_classes = roi_rec['gt_overlaps'].shape[1]
# label = class RoI has max overlap with
rois = roi_rec['boxes']
labels = roi_rec['max_classes']
overlaps = roi_rec['max_overlaps']
bbox_targets = roi_rec['bbox_targets']
im_rois, labels, bbox_targets, bbox_weights = \
sample_rois(rois, fg_rois_per_image, rois_per_image, num_classes,
labels, overlaps, bbox_targets)
# project im_rois
# do not round roi
rois = im_rois
batch_index = im_i * np.ones((rois.shape[0], 1))
rois_array_this_image = np.hstack((batch_index, rois))
rois_array.append(rois_array_this_image)
# add labels
labels_array.append(labels)
bbox_targets_array.append(bbox_targets)
bbox_weights_array.append(bbox_weights)
rois_array = np.array(rois_array)
labels_array = np.array(labels_array)
bbox_targets_array = np.array(bbox_targets_array)
bbox_weights_array = np.array(bbox_weights_array)
data = {'data': im_array, 'rois': rois_array}
label = {
'label': labels_array,
'bbox_target': bbox_targets_array,
'bbox_weight': bbox_weights_array
}
return data, label
def sample_rois(rois,
fg_rois_per_image,
rois_per_image,
num_classes,
labels=None,
overlaps=None,
bbox_targets=None,
gt_boxes=None):
"""
generate random sample of ROIs comprising foreground and background examples
:param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
:param fg_rois_per_image: foreground roi number
:param rois_per_image: total roi number
:param num_classes: number of classes
:param labels: maybe precomputed
:param overlaps: maybe precomputed (max_overlaps)
:param bbox_targets: maybe precomputed
:param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
:return: (rois, labels, bbox_targets, bbox_weights)
"""
if labels is None:
# np.float was removed in NumPy 1.24; use the equivalent float64 dtype
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float64),
                         gt_boxes[:, :4].astype(np.float64))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# foreground RoI with FG_THRESH overlap
fg_indexes = np.where(overlaps >= config.TRAIN.FG_THRESH)[0]
# guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
# Sample foreground regions without replacement
if len(fg_indexes) > fg_rois_per_this_image:
fg_indexes = npr.choice(fg_indexes,
size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_indexes = np.where((overlaps < config.TRAIN.BG_THRESH_HI)
& (overlaps >= config.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_indexes.size)
# Sample foreground regions without replacement
if len(bg_indexes) > bg_rois_per_this_image:
bg_indexes = npr.choice(bg_indexes,
size=bg_rois_per_this_image,
replace=False)
# indexes selected
keep_indexes = np.append(fg_indexes, bg_indexes)
neg_idx = np.where(overlaps < config.TRAIN.FG_THRESH)[0]
neg_rois = rois[neg_idx]
# pad more to ensure a fixed minibatch size
while keep_indexes.shape[0] < rois_per_image:
gap = np.minimum(len(neg_rois), rois_per_image - keep_indexes.shape[0])
gap_indexes = npr.choice(range(len(neg_rois)), size=gap, replace=False)
keep_indexes = np.append(keep_indexes, neg_idx[gap_indexes])
# select labels
labels = labels[keep_indexes]
# set labels of bg_rois to be 0
labels[fg_rois_per_this_image:] = 0
rois = rois[keep_indexes]
# load or compute bbox_target
if bbox_targets is not None:
bbox_target_data = bbox_targets[keep_indexes, :]
else:
targets = bbox_transform(rois[:, 1:],
gt_boxes[gt_assignment[keep_indexes], :4])
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = ((targets - np.array(config.TRAIN.BBOX_MEANS)) /
np.array(config.TRAIN.BBOX_STDS))
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
bbox_targets, bbox_weights = \
expand_bbox_regression_targets(bbox_target_data, num_classes)
return rois, labels, bbox_targets, bbox_weights
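# Worked example of the sampling budget (values are illustrative, not read
# from this repo's config): with TRAIN.BATCH_ROIS=128 and TRAIN.BATCH_IMAGES=2,
# rois_per_image = 64; with TRAIN.FG_FRACTION=0.25, fg_rois_per_image = 16.
# If an image only has 9 RoIs overlapping a gt box by >= FG_THRESH, all 9 are
# kept as foreground and up to 55 background RoIs are drawn from the
# [BG_THRESH_LO, BG_THRESH_HI) range; any remaining slots are filled by
# re-sampling negatives, so every image contributes exactly 64 RoIs.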
def get_fpn_rcnn_testbatch(roidb):
"""
return a dict of testbatch
:param roidb: ['image', 'flipped'] + ['boxes']
:return: data, label, im_info
"""
assert len(roidb) == 1, 'Single batch only'
imgs, roidb = get_image(roidb)
im_array = imgs[0]
im_info = np.array([roidb[0]['im_info']], dtype=np.float32)
im_rois = roidb[0]['boxes']
rois = im_rois
# assign rois
rois_area = np.sqrt((rois[:, 2] - rois[:, 0]) * (rois[:, 3] - rois[:, 1]))
area_threshold = {'P5': 448, 'P4': 224, 'P3': 112}
rois_p5 = rois[area_threshold['P5'] <= rois_area]
rois_p4 = rois[np.logical_and(area_threshold['P4'] <= rois_area,
rois_area < area_threshold['P5'])]
rois_p3 = rois[np.logical_and(area_threshold['P3'] <= rois_area,
rois_area < area_threshold['P4'])]
rois_p2 = rois[np.logical_and(0 < rois_area,
rois_area < area_threshold['P3'])]
# pad a virtual roi if no rois are assigned to this level
if rois_p5.size == 0:
rois_p5 = np.array([[12, 34, 56, 78]])
if rois_p4.size == 0:
rois_p4 = np.array([[12, 34, 56, 78]])
if rois_p3.size == 0:
rois_p3 = np.array([[12, 34, 56, 78]])
if rois_p2.size == 0:
rois_p2 = np.array([[12, 34, 56, 78]])
p5_batch_index = 0 * np.ones((rois_p5.shape[0], 1))
rois_p5_array = np.hstack((p5_batch_index, rois_p5))[np.newaxis, :]
p4_batch_index = 0 * np.ones((rois_p4.shape[0], 1))
rois_p4_array = np.hstack((p4_batch_index, rois_p4))[np.newaxis, :]
p3_batch_index = 0 * np.ones((rois_p3.shape[0], 1))
rois_p3_array = np.hstack((p3_batch_index, rois_p3))[np.newaxis, :]
p2_batch_index = 0 * np.ones((rois_p2.shape[0], 1))
rois_p2_array = np.hstack((p2_batch_index, rois_p2))[np.newaxis, :]
data = {
'data': im_array,
'rois_stride32': rois_p5_array,
'rois_stride16': rois_p4_array,
'rois_stride8': rois_p3_array,
'rois_stride4': rois_p2_array
}
label = {}
return data, label, im_info
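# Level-assignment sketch (illustrative numbers): a proposal of 300x170 pixels
# has sqrt(300 * 170) ~= 226, which lies in [224, 448), so it is routed to P4
# and fed through 'rois_stride16'. Proposals with sqrt(area) >= 448 go to
# P5/'rois_stride32', those in [112, 224) to P3/'rois_stride8', and the rest
# with positive area to P2/'rois_stride4'.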
def get_fpn_maskrcnn_batch(roidb):
"""
return a dictionary that contains raw data.
"""
num_images = len(roidb)
imgs, roidb = get_image(roidb, scale=config.TRAIN.SCALE) #TODO
#imgs, roidb = get_image(roidb)
im_array = tensor_vstack(imgs)
assert config.TRAIN.BATCH_ROIS % config.TRAIN.BATCH_IMAGES == 0, \
'BATCH_IMAGES {} must divide BATCH_ROIS {}'.format(config.TRAIN.BATCH_IMAGES, config.TRAIN.BATCH_ROIS)
rois_per_image = int(config.TRAIN.BATCH_ROIS / config.TRAIN.BATCH_IMAGES)
fg_rois_per_image = np.round(config.TRAIN.FG_FRACTION *
rois_per_image).astype(int)
rois_on_imgs = dict()
labels_on_imgs = dict()
bbox_targets_on_imgs = dict()
bbox_weights_on_imgs = dict()
mask_targets_on_imgs = dict()
mask_weights_on_imgs = dict()
for s in config.RCNN_FEAT_STRIDE:
rois_on_imgs.update({'stride%s' % s: list()})
labels_on_imgs.update({'stride%s' % s: list()})
bbox_targets_on_imgs.update({'stride%s' % s: list()})
bbox_weights_on_imgs.update({'stride%s' % s: list()})
mask_targets_on_imgs.update({'stride%s' % s: list()})
mask_weights_on_imgs.update({'stride%s' % s: list()})
# Sample rois
level_related_data_on_imgs = {}
for im_i in range(num_images):
roi_rec = roidb[im_i]
# infer num_classes from gt_overlaps
num_classes = roi_rec['gt_overlaps'].shape[1]
# label = class RoI has max overlap with
rois = roi_rec['boxes']
labels = roi_rec['max_classes']
overlaps = roi_rec['max_overlaps']
bbox_targets = roi_rec['bbox_targets']
im_info = roi_rec['im_info']
mask_targets = roi_rec['mask_targets']
mask_labels = roi_rec['mask_labels']
mask_inds = roi_rec['mask_inds']
assign_levels = roi_rec['assign_levels']
im_rois_on_levels, labels_on_levels, bbox_targets_on_levels, bbox_weights_on_levels, mask_targets_on_levels, mask_weights_on_levels = \
sample_rois_fpn(rois, assign_levels, fg_rois_per_image, rois_per_image, num_classes,
labels, overlaps, bbox_targets, mask_targets=mask_targets, mask_labels=mask_labels, mask_inds=mask_inds, im_info=im_info)
level_related_data_on_imgs.update({
'img_%s' % im_i: {
'rois_on_levels': im_rois_on_levels,
'labels_on_levels': labels_on_levels,
'bbox_targets_on_levels': bbox_targets_on_levels,
'bbox_weights_on_levels': bbox_weights_on_levels,
'mask_targets_on_levels': mask_targets_on_levels,
'mask_weights_on_levels': mask_weights_on_levels,
}
})
return im_array, level_related_data_on_imgs
def sample_rois(rois,
fg_rois_per_image,
rois_per_image,
num_classes,
labels=None,
overlaps=None,
bbox_targets=None,
gt_boxes=None,
mask_targets=None,
mask_labels=None,
mask_inds=None):
"""
generate random sample of ROIs comprising foreground and background examples
:param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
:param fg_rois_per_image: foreground roi number
:param rois_per_image: total roi number
:param num_classes: number of classes
:param labels: maybe precomputed
:param overlaps: maybe precomputed (max_overlaps)
:param bbox_targets: maybe precomputed
:param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
:return: (rois, labels, bbox_targets, bbox_weights)
"""
if labels is None:
if len(gt_boxes) == 0:
gt_boxes = np.zeros((1, 5))
gt_assignment = np.zeros((len(rois), ), dtype=np.int32)
overlaps = np.zeros((len(rois), ))
labels = np.zeros((len(rois), ))
else:
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float64),
                         gt_boxes[:, :4].astype(np.float64))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
num_rois = rois.shape[0]
# foreground RoI with FG_THRESH overlap
fg_indexes = np.where(overlaps >= config.TRAIN.FG_THRESH)[0]
# guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
# Sample foreground regions without replacement
if len(fg_indexes) > fg_rois_per_this_image:
fg_indexes = npr.choice(fg_indexes,
size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_indexes = np.where((overlaps < config.TRAIN.BG_THRESH_HI)
& (overlaps >= config.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_indexes.size)
# Sample foreground regions without replacement
if len(bg_indexes) > bg_rois_per_this_image:
bg_indexes = npr.choice(bg_indexes,
size=bg_rois_per_this_image,
replace=False)
# indexes selected
keep_indexes = np.append(fg_indexes, bg_indexes)
neg_idx = np.where(overlaps < config.TRAIN.FG_THRESH)[0]
neg_rois = rois[neg_idx]
# pad more to ensure a fixed minibatch size
while keep_indexes.shape[0] < rois_per_image:
gap = np.minimum(len(neg_rois), rois_per_image - keep_indexes.shape[0])
gap_indexes = npr.choice(range(len(neg_rois)), size=gap, replace=False)
keep_indexes = np.append(keep_indexes, neg_idx[gap_indexes])
# select labels
labels = labels[keep_indexes]
# set labels of bg_rois to be 0
labels[fg_rois_per_this_image:] = 0
rois = rois[keep_indexes]
if mask_targets is not None:
assert mask_labels is not None
assert mask_inds is not None
def _mask_umap(mask_targets, mask_labels, mask_inds):
_mask_targets = np.zeros((num_rois, num_classes, 28, 28),
dtype=np.int8)
_mask_weights = np.zeros((num_rois, num_classes, 28, 28),
dtype=np.int8)
_mask_targets[mask_inds, mask_labels] = mask_targets
_mask_weights[mask_inds, mask_labels] = 1
_mask_weights[:, 0] = 0 # set background mask weight to zeros
return _mask_targets, _mask_weights # [num_rois, num_classes, 28, 28]
mask_targets, mask_weights = _mask_umap(mask_targets, mask_labels,
mask_inds)
mask_targets = mask_targets[keep_indexes]
mask_weights = mask_weights[keep_indexes]
# load or compute bbox_target
if bbox_targets is not None:
bbox_target_data = bbox_targets[keep_indexes, :]
else:
targets = bbox_transform(rois[:, 1:],
gt_boxes[gt_assignment[keep_indexes], :4])
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = ((targets - np.array(config.TRAIN.BBOX_MEANS)) /
np.array(config.TRAIN.BBOX_STDS))
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
bbox_targets, bbox_weights = \
expand_bbox_regression_targets(bbox_target_data, num_classes)
if mask_targets is not None:
return rois, labels, bbox_targets, bbox_weights, mask_targets, mask_weights
else:
return rois, labels, bbox_targets, bbox_weights
def sample_rois_fpn(rois,
assign_levels,
fg_rois_per_image,
rois_per_image,
num_classes,
labels=None,
overlaps=None,
bbox_targets=None,
mask_targets=None,
mask_labels=None,
mask_inds=None,
gt_boxes=None,
im_info=None):
"""
generate random sample of ROIs comprising foreground and background examples
:param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
:param assign_levels: [n]
:param fg_rois_per_image: foreground roi number
:param rois_per_image: total roi number
:param num_classes: number of classes
:param labels: maybe precomputed
:param overlaps: maybe precomputed (max_overlaps)
:param bbox_targets: maybe precomputed
:param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
:return: (rois, labels, bbox_targets, bbox_weights)
"""
DEBUG = False
if labels is None:
if len(gt_boxes) == 0:
gt_boxes = np.zeros((1, 5))
gt_assignment = np.zeros((len(rois), ), dtype=np.int32)
overlaps = np.zeros((len(rois), ))
labels = np.zeros((len(rois), ))
else:
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float64),
                         gt_boxes[:, :4].astype(np.float64))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
num_rois = rois.shape[0]
# foreground RoI with FG_THRESH overlap
fg_indexes = np.where(overlaps >= config.TRAIN.FG_THRESH)[0]
# guard against the case when an image has fewer than fg_rois_per_image foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_indexes.size)
if DEBUG:
print('fg total num:', len(fg_indexes))
# Sample foreground regions without replacement
if len(fg_indexes) > fg_rois_per_this_image:
fg_indexes = npr.choice(fg_indexes,
size=fg_rois_per_this_image,
replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_indexes = np.where((overlaps < config.TRAIN.BG_THRESH_HI)
& (overlaps >= config.TRAIN.BG_THRESH_LO))[0]
if DEBUG:
print('bg total num:', len(bg_indexes))
# Compute number of background RoIs to take from this image (guarding against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image,
bg_indexes.size)
# Sample foreground regions without replacement
if len(bg_indexes) > bg_rois_per_this_image:
bg_indexes = npr.choice(bg_indexes,
size=bg_rois_per_this_image,
replace=False)
if DEBUG:
print('fg num:', len(fg_indexes))
print('bg num:', len(bg_indexes))
# bg rois statistics
if DEBUG:
bg_assign = assign_levels[bg_indexes]
bg_rois_on_levels = dict()
for i, s in enumerate(config.RCNN_FEAT_STRIDE):
bg_rois_on_levels.update(
{'stride%s' % s: len(np.where(bg_assign == s)[0])})
print(bg_rois_on_levels)
# indexes selected
keep_indexes = np.append(fg_indexes, bg_indexes)
neg_idx = np.where(overlaps < config.TRAIN.FG_THRESH)[0]
neg_rois = rois[neg_idx]
# pad more to ensure a fixed minibatch size
while keep_indexes.shape[0] < rois_per_image:
gap = np.minimum(len(neg_rois), rois_per_image - keep_indexes.shape[0])
gap_indexes = npr.choice(range(len(neg_rois)), size=gap, replace=False)
keep_indexes = np.append(keep_indexes, neg_idx[gap_indexes])
# select labels
labels = labels[keep_indexes]
# set labels of bg_rois to be 0
labels[fg_rois_per_this_image:] = 0
rois = rois[keep_indexes]
assign_levels = assign_levels[keep_indexes]
if mask_targets is not None:
assert mask_labels is not None
assert mask_inds is not None
def _mask_umap(mask_targets, mask_labels, mask_inds):
_mask_targets = np.zeros((num_rois, num_classes, 28, 28),
dtype=np.int8)
_mask_weights = np.zeros((num_rois, num_classes, 1, 1),
dtype=np.int8)
_mask_targets[mask_inds, mask_labels] = mask_targets
_mask_weights[mask_inds, mask_labels] = 1
return _mask_targets, _mask_weights # [num_rois, num_classes, 28, 28]
mask_targets, mask_weights = _mask_umap(mask_targets, mask_labels,
mask_inds)
mask_targets = mask_targets[keep_indexes]
mask_weights = mask_weights[keep_indexes]
# load or compute bbox_target
if bbox_targets is not None:
bbox_target_data = bbox_targets[keep_indexes, :]
else:
targets = bbox_transform(rois[:, 1:],
gt_boxes[gt_assignment[keep_indexes], :4])
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = ((targets - np.array(config.TRAIN.BBOX_MEANS)) /
np.array(config.TRAIN.BBOX_STDS))
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
bbox_targets, bbox_weights = \
expand_bbox_regression_targets(bbox_target_data, num_classes)
# Assign to levels
rois_on_levels = dict()
labels_on_levels = dict()
bbox_targets_on_levels = dict()
bbox_weights_on_levels = dict()
if mask_targets is not None:
mask_targets_on_levels = dict()
mask_weights_on_levels = dict()
for i, s in enumerate(config.RCNN_FEAT_STRIDE):
index = np.where(assign_levels == s)
_rois = rois[index]
_labels = labels[index]
_bbox_targets = bbox_targets[index]
_bbox_weights = bbox_weights[index]
if mask_targets is not None:
_mask_targets = mask_targets[index]
_mask_weights = mask_weights[index]
rois_on_levels.update({'stride%s' % s: _rois})
labels_on_levels.update({'stride%s' % s: _labels})
bbox_targets_on_levels.update({'stride%s' % s: _bbox_targets})
bbox_weights_on_levels.update({'stride%s' % s: _bbox_weights})
if mask_targets is not None:
mask_targets_on_levels.update({'stride%s' % s: _mask_targets})
mask_weights_on_levels.update({'stride%s' % s: _mask_weights})
if mask_targets is not None:
return rois_on_levels, labels_on_levels, bbox_targets_on_levels, bbox_weights_on_levels, mask_targets_on_levels, mask_weights_on_levels
else:
return rois_on_levels, labels_on_levels, bbox_targets_on_levels, bbox_weights_on_levels
def get_rois(rois,
rois_per_image,
num_classes,
labels=None,
overlaps=None,
bbox_targets=None,
gt_boxes=None):
"""
get top N ROIs, used in online hard example mining
:param rois: all_rois [n, 4]; e2e: [n, 5] with batch_index
:param rois_per_image: total roi number
:param num_classes: number of classes
:param labels: maybe precomputed
:param overlaps: maybe precomputed (max_overlaps)
:param bbox_targets: maybe precomputed
:param gt_boxes: optional for e2e [n, 5] (x1, y1, x2, y2, cls)
:return: (rois, labels, bbox_targets, bbox_weights)
"""
if labels is None:
if len(gt_boxes) == 0:
gt_boxes = np.array([[1, 1, 1, 1, 0]])
overlaps = bbox_overlaps(rois[:, 1:].astype(np.float64),
                         gt_boxes[:, :4].astype(np.float64))
gt_assignment = overlaps.argmax(axis=1)
overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# select indices
keep_indexes = np.arange(rois.shape[0])
if keep_indexes.shape[0] > rois_per_image:
keep_indexes = npr.choice(keep_indexes,
size=rois_per_image,
replace=False)
# if not enough, pad until rois_per_image is satisfied
while keep_indexes.shape[0] < rois_per_image:
gap = np.minimum(rois_per_image - keep_indexes.shape[0], len(rois))
gap_indexes = npr.choice(range(len(rois)), size=gap, replace=False)
keep_indexes = np.append(keep_indexes, gap_indexes)
# suppress any bg defined by overlap
bg_indexes = np.where((overlaps < config.TRAIN.BG_THRESH_HI)
& (overlaps >= config.TRAIN.BG_THRESH_LO))[0]
labels[bg_indexes] = 0
labels = labels[keep_indexes]
rois = rois[keep_indexes]
# load or compute bbox_target
if bbox_targets is not None:
bbox_target_data = bbox_targets[keep_indexes, :]
else:
targets = bbox_transform(rois[:, 1:],
gt_boxes[gt_assignment[keep_indexes], :4])
if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
targets = ((targets - np.array(config.TRAIN.BBOX_MEANS)) /
np.array(config.TRAIN.BBOX_STDS))
bbox_target_data = np.hstack((labels[:, np.newaxis], targets))
bbox_targets, bbox_weights = \
expand_bbox_regression_targets(bbox_target_data, num_classes)
return rois, labels, bbox_targets, bbox_weights
| insightface/detection/retinaface/rcnn/io/rcnn.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/io/rcnn.py",
"repo_id": "insightface",
"token_count": 13414
} | 99 |
/**************************************************************************
* Microsoft COCO Toolbox. version 2.0
* Data, paper, and tutorials available at: http://mscoco.org/
* Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
* Licensed under the Simplified BSD License [see coco/license.txt]
**************************************************************************/
#include "maskApi.h"
#include <math.h>
#include <stdlib.h>
uint umin( uint a, uint b ) { return (a<b) ? a : b; }
uint umax( uint a, uint b ) { return (a>b) ? a : b; }
void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ) {
R->h=h; R->w=w; R->m=m; R->cnts=(m==0)?0:malloc(sizeof(uint)*m);
siz j; if(cnts) for(j=0; j<m; j++) R->cnts[j]=cnts[j];
}
void rleFree( RLE *R ) {
free(R->cnts); R->cnts=0;
}
void rlesInit( RLE **R, siz n ) {
siz i; *R = (RLE*) malloc(sizeof(RLE)*n);
for(i=0; i<n; i++) rleInit((*R)+i,0,0,0,0);
}
void rlesFree( RLE **R, siz n ) {
siz i; for(i=0; i<n; i++) rleFree((*R)+i); free(*R); *R=0;
}
void rleEncode( RLE *R, const byte *M, siz h, siz w, siz n ) {
siz i, j, k, a=w*h; uint c, *cnts; byte p;
cnts = malloc(sizeof(uint)*(a+1));
for(i=0; i<n; i++) {
const byte *T=M+a*i; k=0; p=0; c=0;
for(j=0; j<a; j++) { if(T[j]!=p) { cnts[k++]=c; c=0; p=T[j]; } c++; }
cnts[k++]=c; rleInit(R+i,h,w,k,cnts);
}
free(cnts);
}
void rleDecode( const RLE *R, byte *M, siz n ) {
siz i, j, k; for( i=0; i<n; i++ ) {
byte v=0; for( j=0; j<R[i].m; j++ ) {
for( k=0; k<R[i].cnts[j]; k++ ) *(M++)=v; v=!v; }}
}
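/* Worked example (illustrative, not part of the original toolbox): for h=3,
 * w=2 and the column-major mask M = {0,0,1, 1,0,0}, rleEncode produces m=3
 * counts cnts = {2,2,2}: two 0s, then two 1s (the last pixel of column 0 plus
 * the first pixel of column 1), then two 0s. Runs always start with the 0
 * value, so a mask whose first pixel is 1 begins with a zero-length 0-run.
 * rleDecode replays the counts to recover the original h*w bytes. */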
void rleMerge( const RLE *R, RLE *M, siz n, int intersect ) {
uint *cnts, c, ca, cb, cc, ct; int v, va, vb, vp;
siz i, a, b, h=R[0].h, w=R[0].w, m=R[0].m; RLE A, B;
if(n==0) { rleInit(M,0,0,0,0); return; }
if(n==1) { rleInit(M,h,w,m,R[0].cnts); return; }
cnts = malloc(sizeof(uint)*(h*w+1));
for( a=0; a<m; a++ ) cnts[a]=R[0].cnts[a];
for( i=1; i<n; i++ ) {
B=R[i]; if(B.h!=h||B.w!=w) { h=w=m=0; break; }
rleInit(&A,h,w,m,cnts); ca=A.cnts[0]; cb=B.cnts[0];
v=va=vb=0; m=0; a=b=1; cc=0; ct=1;
while( ct>0 ) {
c=umin(ca,cb); cc+=c; ct=0;
ca-=c; if(!ca && a<A.m) { ca=A.cnts[a++]; va=!va; } ct+=ca;
cb-=c; if(!cb && b<B.m) { cb=B.cnts[b++]; vb=!vb; } ct+=cb;
vp=v; if(intersect) v=va&&vb; else v=va||vb;
if( v!=vp||ct==0 ) { cnts[m++]=cc; cc=0; }
}
rleFree(&A);
}
rleInit(M,h,w,m,cnts); free(cnts);
}
void rleArea( const RLE *R, siz n, uint *a ) {
siz i, j; for( i=0; i<n; i++ ) {
a[i]=0; for( j=1; j<R[i].m; j+=2 ) a[i]+=R[i].cnts[j]; }
}
void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o ) {
siz g, d; BB db, gb; int crowd;
db=malloc(sizeof(double)*m*4); rleToBbox(dt,db,m);
gb=malloc(sizeof(double)*n*4); rleToBbox(gt,gb,n);
bbIou(db,gb,m,n,iscrowd,o); free(db); free(gb);
for( g=0; g<n; g++ ) for( d=0; d<m; d++ ) if(o[g*m+d]>0) {
crowd=iscrowd!=NULL && iscrowd[g];
if(dt[d].h!=gt[g].h || dt[d].w!=gt[g].w) { o[g*m+d]=-1; continue; }
siz ka, kb, a, b; uint c, ca, cb, ct, i, u; int va, vb;
ca=dt[d].cnts[0]; ka=dt[d].m; va=vb=0;
cb=gt[g].cnts[0]; kb=gt[g].m; a=b=1; i=u=0; ct=1;
while( ct>0 ) {
c=umin(ca,cb); if(va||vb) { u+=c; if(va&&vb) i+=c; } ct=0;
ca-=c; if(!ca && a<ka) { ca=dt[d].cnts[a++]; va=!va; } ct+=ca;
cb-=c; if(!cb && b<kb) { cb=gt[g].cnts[b++]; vb=!vb; } ct+=cb;
}
if(i==0) u=1; else if(crowd) rleArea(dt+d,1,&u);
o[g*m+d] = (double)i/(double)u;
}
}
void rleNms( RLE *dt, siz n, uint *keep, double thr ) {
siz i, j; double u;
for( i=0; i<n; i++ ) keep[i]=1;
for( i=0; i<n; i++ ) if(keep[i]) {
for( j=i+1; j<n; j++ ) if(keep[j]) {
rleIou(dt+i,dt+j,1,1,0,&u);
if(u>thr) keep[j]=0;
}
}
}
void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ) {
double h, w, i, u, ga, da; siz g, d; int crowd;
for( g=0; g<n; g++ ) {
BB G=gt+g*4; ga=G[2]*G[3]; crowd=iscrowd!=NULL && iscrowd[g];
for( d=0; d<m; d++ ) {
BB D=dt+d*4; da=D[2]*D[3]; o[g*m+d]=0;
w=fmin(D[2]+D[0],G[2]+G[0])-fmax(D[0],G[0]); if(w<=0) continue;
h=fmin(D[3]+D[1],G[3]+G[1])-fmax(D[1],G[1]); if(h<=0) continue;
i=w*h; u = crowd ? da : da+ga-i; o[g*m+d]=i/u;
}
}
}
void bbNms( BB dt, siz n, uint *keep, double thr ) {
siz i, j; double u;
for( i=0; i<n; i++ ) keep[i]=1;
for( i=0; i<n; i++ ) if(keep[i]) {
for( j=i+1; j<n; j++ ) if(keep[j]) {
bbIou(dt+i*4,dt+j*4,1,1,0,&u);
if(u>thr) keep[j]=0;
}
}
}
void rleToBbox( const RLE *R, BB bb, siz n ) {
siz i; for( i=0; i<n; i++ ) {
uint h, w, x, y, xs, ys, xe, ye, cc, t; siz j, m;
h=(uint)R[i].h; w=(uint)R[i].w; m=R[i].m;
m=((siz)(m/2))*2; xs=w; ys=h; xe=ye=0; cc=0;
if(m==0) { bb[4*i+0]=bb[4*i+1]=bb[4*i+2]=bb[4*i+3]=0; continue; }
for( j=0; j<m; j++ ) {
cc+=R[i].cnts[j]; t=cc-j%2; y=t%h; x=(t-y)/h;
xs=umin(xs,x); xe=umax(xe,x); ys=umin(ys,y); ye=umax(ye,y);
}
bb[4*i+0]=xs; bb[4*i+2]=xe-xs+1;
bb[4*i+1]=ys; bb[4*i+3]=ye-ys+1;
}
}
void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n ) {
siz i; for( i=0; i<n; i++ ) {
double xs=bb[4*i+0], xe=xs+bb[4*i+2];
double ys=bb[4*i+1], ye=ys+bb[4*i+3];
double xy[8] = {xs,ys,xs,ye,xe,ye,xe,ys};
rleFrPoly( R+i, xy, 4, h, w );
}
}
int uintCompare(const void *a, const void *b) {
uint c=*((uint*)a), d=*((uint*)b); return c>d?1:c<d?-1:0;
}
void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w ) {
/* upsample and get discrete points densely along entire boundary */
siz j, m=0; double scale=5; int *x, *y, *u, *v; uint *a, *b;
x=malloc(sizeof(int)*(k+1)); y=malloc(sizeof(int)*(k+1));
for(j=0; j<k; j++) x[j]=(int)(scale*xy[j*2+0]+.5); x[k]=x[0];
for(j=0; j<k; j++) y[j]=(int)(scale*xy[j*2+1]+.5); y[k]=y[0];
for(j=0; j<k; j++) m+=umax(abs(x[j]-x[j+1]),abs(y[j]-y[j+1]))+1;
u=malloc(sizeof(int)*m); v=malloc(sizeof(int)*m); m=0;
for( j=0; j<k; j++ ) {
int xs=x[j], xe=x[j+1], ys=y[j], ye=y[j+1], dx, dy, t, d;
int flip; double s; dx=abs(xe-xs); dy=abs(ys-ye);
flip = (dx>=dy && xs>xe) || (dx<dy && ys>ye);
if(flip) { t=xs; xs=xe; xe=t; t=ys; ys=ye; ye=t; }
s = dx>=dy ? (double)(ye-ys)/dx : (double)(xe-xs)/dy;
if(dx>=dy) for( d=0; d<=dx; d++ ) {
t=flip?dx-d:d; u[m]=t+xs; v[m]=(int)(ys+s*t+.5); m++;
} else for( d=0; d<=dy; d++ ) {
t=flip?dy-d:d; v[m]=t+ys; u[m]=(int)(xs+s*t+.5); m++;
}
}
/* get points along y-boundary and downsample */
free(x); free(y); k=m; m=0; double xd, yd;
x=malloc(sizeof(int)*k); y=malloc(sizeof(int)*k);
for( j=1; j<k; j++ ) if(u[j]!=u[j-1]) {
xd=(double)(u[j]<u[j-1]?u[j]:u[j]-1); xd=(xd+.5)/scale-.5;
if( floor(xd)!=xd || xd<0 || xd>w-1 ) continue;
yd=(double)(v[j]<v[j-1]?v[j]:v[j-1]); yd=(yd+.5)/scale-.5;
if(yd<0) yd=0; else if(yd>h) yd=h; yd=ceil(yd);
x[m]=(int) xd; y[m]=(int) yd; m++;
}
/* compute rle encoding given y-boundary points */
k=m; a=malloc(sizeof(uint)*(k+1));
for( j=0; j<k; j++ ) a[j]=(uint)(x[j]*(int)(h)+y[j]);
a[k++]=(uint)(h*w); free(u); free(v); free(x); free(y);
qsort(a,k,sizeof(uint),uintCompare); uint p=0;
for( j=0; j<k; j++ ) { uint t=a[j]; a[j]-=p; p=t; }
b=malloc(sizeof(uint)*k); j=m=0; b[m++]=a[j++];
while(j<k) if(a[j]>0) b[m++]=a[j++]; else {
j++; if(j<k) b[m-1]+=a[j++]; }
rleInit(R,h,w,m,b); free(a); free(b);
}
char* rleToString( const RLE *R ) {
/* Similar to LEB128 but using 6 bits/char and ascii chars 48-111. */
siz i, m=R->m, p=0; long x; int more;
char *s=malloc(sizeof(char)*m*6);
for( i=0; i<m; i++ ) {
x=(long) R->cnts[i]; if(i>2) x-=(long) R->cnts[i-2]; more=1;
while( more ) {
char c=x & 0x1f; x >>= 5; more=(c & 0x10) ? x!=-1 : x!=0;
if(more) c |= 0x20; c+=48; s[p++]=c;
}
}
s[p]=0; return s;
}
void rleFrString( RLE *R, char *s, siz h, siz w ) {
siz m=0, p=0, k; long x; int more; uint *cnts;
while( s[m] ) m++; cnts=malloc(sizeof(uint)*m); m=0;
while( s[p] ) {
x=0; k=0; more=1;
while( more ) {
char c=s[p]-48; x |= (c & 0x1f) << 5*k;
more = c & 0x20; p++; k++;
if(!more && (c & 0x10)) x |= -1 << 5*k;
}
if(m>2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x;
}
rleInit(R,h,w,m,cnts); free(cnts);
}
| insightface/detection/retinaface/rcnn/pycocotools/maskApi.c/0 | {
"file_path": "insightface/detection/retinaface/rcnn/pycocotools/maskApi.c",
"repo_id": "insightface",
"token_count": 4847
} | 100 |
import argparse
import pprint
import mxnet as mx
from ..logger import logger
from ..config import config, default, generate_config
from ..symbol import *
from ..core import callback, metric
from ..core.loader import ROIIter
from ..core.module import MutableModule
from ..processing.bbox_regression import add_bbox_regression_targets
from ..utils.load_data import load_proposal_roidb, merge_roidb, filter_roidb
from ..utils.load_model import load_param
def train_rcnn(network, dataset, image_set, root_path, dataset_path, frequent,
kvstore, work_load_list, no_flip, no_shuffle, resume, ctx,
pretrained, epoch, prefix, begin_epoch, end_epoch, train_shared,
lr, lr_step, proposal):
# set up config
config.TRAIN.BATCH_IMAGES = 2
config.TRAIN.BATCH_ROIS = 128
if proposal == 'ss':
config.TRAIN.BG_THRESH_LO = 0.1 # reproduce Fast R-CNN
# load symbol
sym = eval('get_' + network + '_rcnn')(num_classes=config.NUM_CLASSES)
# setup multi-gpu
batch_size = len(ctx)
input_batch_size = config.TRAIN.BATCH_IMAGES * batch_size
# print config
logger.info(pprint.pformat(config))
# load dataset and prepare imdb for training
image_sets = [iset for iset in image_set.split('+')]
roidbs = [
load_proposal_roidb(dataset,
image_set,
root_path,
dataset_path,
proposal=proposal,
append_gt=True,
flip=not no_flip) for image_set in image_sets
]
roidb = merge_roidb(roidbs)
roidb = filter_roidb(roidb)
means, stds = add_bbox_regression_targets(roidb)
# load training data
train_data = ROIIter(roidb,
batch_size=input_batch_size,
shuffle=not no_shuffle,
ctx=ctx,
work_load_list=work_load_list,
aspect_grouping=config.TRAIN.ASPECT_GROUPING)
# infer max shape
max_data_shape = [('data', (input_batch_size, 3,
max([v[0] for v in config.SCALES]),
max([v[1] for v in config.SCALES])))]
logger.info('providing maximum shape %s' % max_data_shape)
# infer shape
data_shape_dict = dict(train_data.provide_data + train_data.provide_label)
arg_shape, out_shape, aux_shape = sym.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
out_shape_dict = dict(zip(sym.list_outputs(), out_shape))
aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
logger.info('output shape %s' % pprint.pformat(out_shape_dict))
# load and initialize params
if resume:
arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)
else:
arg_params, aux_params = load_param(pretrained, epoch, convert=True)
arg_params['cls_score_weight'] = mx.random.normal(
0, 0.01, shape=arg_shape_dict['cls_score_weight'])
arg_params['cls_score_bias'] = mx.nd.zeros(
shape=arg_shape_dict['cls_score_bias'])
arg_params['bbox_pred_weight'] = mx.random.normal(
0, 0.001, shape=arg_shape_dict['bbox_pred_weight'])
arg_params['bbox_pred_bias'] = mx.nd.zeros(
shape=arg_shape_dict['bbox_pred_bias'])
# check parameter shapes
for k in sym.list_arguments():
if k in data_shape_dict:
continue
assert k in arg_params, k + ' not initialized'
assert arg_params[k].shape == arg_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)
for k in sym.list_auxiliary_states():
assert k in aux_params, k + ' not initialized'
assert aux_params[k].shape == aux_shape_dict[k], \
'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)
# prepare training
# create solver
data_names = [k[0] for k in train_data.provide_data]
label_names = [k[0] for k in train_data.provide_label]
if train_shared:
fixed_param_prefix = config.FIXED_PARAMS_SHARED
else:
fixed_param_prefix = config.FIXED_PARAMS
mod = MutableModule(sym,
data_names=data_names,
label_names=label_names,
logger=logger,
context=ctx,
work_load_list=work_load_list,
max_data_shapes=max_data_shape,
fixed_param_prefix=fixed_param_prefix)
# decide training params
# metric
eval_metric = metric.RCNNAccMetric()
cls_metric = metric.RCNNLogLossMetric()
bbox_metric = metric.RCNNL1LossMetric()
eval_metrics = mx.metric.CompositeEvalMetric()
for child_metric in [eval_metric, cls_metric, bbox_metric]:
eval_metrics.add(child_metric)
# callback
batch_end_callback = mx.callback.Speedometer(train_data.batch_size,
frequent=frequent,
auto_reset=False)
epoch_end_callback = callback.do_checkpoint(prefix, means, stds)
# decide learning rate
base_lr = lr
lr_factor = 0.1
lr_epoch = [int(epoch) for epoch in lr_step.split(',')]
lr_epoch_diff = [
epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch
]
lr = base_lr * (lr_factor**(len(lr_epoch) - len(lr_epoch_diff)))
lr_iters = [
int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff
]
logger.info('lr %f lr_epoch_diff %s lr_iters %s' %
(lr, lr_epoch_diff, lr_iters))
lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(lr_iters, lr_factor)
# optimizer
optimizer_params = {
'momentum': 0.9,
'wd': 0.0005,
'learning_rate': lr,
'lr_scheduler': lr_scheduler,
'rescale_grad': (1.0 / batch_size),
'clip_gradient': 5
}
# train
mod.fit(train_data,
eval_metric=eval_metrics,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
optimizer='sgd',
optimizer_params=optimizer_params,
arg_params=arg_params,
aux_params=aux_params,
begin_epoch=begin_epoch,
num_epoch=end_epoch)
def parse_args():
parser = argparse.ArgumentParser(description='Train a Fast R-CNN Network')
# general
parser.add_argument('--network',
help='network name',
default=default.network,
type=str)
parser.add_argument('--dataset',
help='dataset name',
default=default.dataset,
type=str)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
parser.add_argument('--image_set',
help='image_set name',
default=default.image_set,
type=str)
parser.add_argument('--root_path',
help='output data folder',
default=default.root_path,
type=str)
parser.add_argument('--dataset_path',
help='dataset path',
default=default.dataset_path,
type=str)
# training
parser.add_argument('--frequent',
help='frequency of logging',
default=default.frequent,
type=int)
parser.add_argument('--kvstore',
help='the kv-store type',
default=default.kvstore,
type=str)
parser.add_argument('--work_load_list',
help='work load for different devices',
default=None,
type=list)
parser.add_argument('--no_flip',
help='disable flip images',
action='store_true')
parser.add_argument('--no_shuffle',
help='disable random shuffle',
action='store_true')
parser.add_argument('--resume',
help='continue training',
action='store_true')
# rcnn
parser.add_argument('--gpus',
help='GPU device to train with',
default='0',
type=str)
parser.add_argument('--pretrained',
help='pretrained model prefix',
default=default.pretrained,
type=str)
parser.add_argument('--pretrained_epoch',
help='pretrained model epoch',
default=default.pretrained_epoch,
type=int)
parser.add_argument('--prefix',
help='new model prefix',
default=default.rcnn_prefix,
type=str)
parser.add_argument('--begin_epoch',
help='begin epoch of training',
default=0,
type=int)
parser.add_argument('--end_epoch',
help='end epoch of training',
default=default.rcnn_epoch,
type=int)
parser.add_argument('--lr',
help='base learning rate',
default=default.rcnn_lr,
type=float)
parser.add_argument('--lr_step',
help='learning rate steps (in epoch)',
default=default.rcnn_lr_step,
type=str)
parser.add_argument('--train_shared',
help='second round train shared params',
action='store_true')
parser.add_argument('--proposal',
help='can be ss for selective search or rpn',
default='rpn',
type=str)
args = parser.parse_args()
return args
def main():
args = parse_args()
logger.info('Called with argument: %s' % args)
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]
train_rcnn(args.network,
args.dataset,
args.image_set,
args.root_path,
args.dataset_path,
args.frequent,
args.kvstore,
args.work_load_list,
args.no_flip,
args.no_shuffle,
args.resume,
ctx,
args.pretrained,
args.pretrained_epoch,
args.prefix,
args.begin_epoch,
args.end_epoch,
train_shared=args.train_shared,
lr=args.lr,
lr_step=args.lr_step,
proposal=args.proposal)
if __name__ == '__main__':
main()
| insightface/detection/retinaface/rcnn/tools/train_rcnn.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/tools/train_rcnn.py",
"repo_id": "insightface",
"token_count": 5938
} | 101 |
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1000, 600),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=[
data_root + 'VOC2007/ImageSets/Main/trainval.txt',
data_root + 'VOC2012/ImageSets/Main/trainval.txt'
],
img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
img_prefix=data_root + 'VOC2007/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
img_prefix=data_root + 'VOC2007/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='mAP')
| insightface/detection/scrfd/configs/_base_/datasets/voc0712.py/0 | {
"file_path": "insightface/detection/scrfd/configs/_base_/datasets/voc0712.py",
"repo_id": "insightface",
"token_count": 943
} | 102 |
import torch
from .builder import ANCHOR_GENERATORS
@ANCHOR_GENERATORS.register_module()
class PointGenerator(object):
def _meshgrid(self, x, y, row_major=True):
xx = x.repeat(len(y))
yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
if row_major:
return xx, yy
else:
return yy, xx
def grid_points(self, featmap_size, stride=16, device='cuda'):
feat_h, feat_w = featmap_size
shift_x = torch.arange(0., feat_w, device=device) * stride
shift_y = torch.arange(0., feat_h, device=device) * stride
shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
stride = shift_x.new_full((shift_xx.shape[0], ), stride)
shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
all_points = shifts.to(device)
return all_points
def valid_flags(self, featmap_size, valid_size, device='cuda'):
feat_h, feat_w = featmap_size
valid_h, valid_w = valid_size
assert valid_h <= feat_h and valid_w <= feat_w
valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
valid_x[:valid_w] = 1
valid_y[:valid_h] = 1
valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
valid = valid_xx & valid_yy
return valid
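# Usage sketch (illustrative; not part of the original file): for a 2x2
# feature map with stride 8, grid_points((2, 2), stride=8) returns a (4, 3)
# tensor of (x, y, stride) rows,
#   [[0., 0., 8.],
#    [8., 0., 8.],
#    [0., 8., 8.],
#    [8., 8., 8.]]
# i.e. one point per feature-map location, scanning x fastest.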
| insightface/detection/scrfd/mmdet/core/anchor/point_generator.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/anchor/point_generator.py",
"repo_id": "insightface",
"token_count": 640
} | 103 |
import numpy as np
import torch
import torch.nn.functional as F
from ..builder import BBOX_CODERS
from ..transforms import bbox_rescale
from .base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class BucketingBBoxCoder(BaseBBoxCoder):
"""Bucketing BBox Coder for Side-Aware Bounday Localization (SABL).
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented here.
Please refer to https://arxiv.org/abs/1912.04260 for more details.
Args:
num_buckets (int): Number of buckets.
scale_factor (int): Scale factor of proposals to generate buckets.
offset_topk (int): Topk buckets are used to generate
bucket fine regression targets. Defaults to 2.
offset_upperbound (float): Offset upperbound to generate
bucket fine regression targets.
To avoid too large offset displacements. Defaults to 1.0.
cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
Defaults to True.
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
"""
def __init__(self,
num_buckets,
scale_factor,
offset_topk=2,
offset_upperbound=1.0,
cls_ignore_neighbor=True,
clip_border=True):
super(BucketingBBoxCoder, self).__init__()
self.num_buckets = num_buckets
self.scale_factor = scale_factor
self.offset_topk = offset_topk
self.offset_upperbound = offset_upperbound
self.cls_ignore_neighbor = cls_ignore_neighbor
self.clip_border = clip_border
def encode(self, bboxes, gt_bboxes):
"""Get bucketing estimation and fine regression targets during
training.
Args:
bboxes (torch.Tensor): source boxes, e.g., object proposals.
gt_bboxes (torch.Tensor): target of the transformation, e.g.,
ground truth boxes.
Returns:
encoded_bboxes(tuple[Tensor]): bucketing estimation
and fine regression targets and weights
"""
assert bboxes.size(0) == gt_bboxes.size(0)
assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
self.scale_factor, self.offset_topk,
self.offset_upperbound,
self.cls_ignore_neighbor)
return encoded_bboxes
def decode(self, bboxes, pred_bboxes, max_shape=None):
"""Apply transformation `pred_bboxes` to `boxes`.
Args:
boxes (torch.Tensor): Basic boxes.
pred_bboxes (torch.Tensor): Predictions for bucketing estimation
and fine regression
max_shape (tuple[int], optional): Maximum shape of boxes.
Defaults to None.
Returns:
torch.Tensor: Decoded boxes.
"""
assert len(pred_bboxes) == 2
cls_preds, offset_preds = pred_bboxes
assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
0) == bboxes.size(0)
decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
self.num_buckets, self.scale_factor,
max_shape, self.clip_border)
return decoded_bboxes
def generat_buckets(proposals, num_buckets, scale_factor=1.0):
"""Generate buckets w.r.t bucket number and scale factor of proposals.
Args:
proposals (Tensor): Shape (n, 4)
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
Returns:
tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
t_buckets, d_buckets)
- bucket_w: Width of buckets on x-axis. Shape (n, ).
- bucket_h: Height of buckets on y-axis. Shape (n, ).
- l_buckets: Left buckets. Shape (n, ceil(side_num/2)).
- r_buckets: Right buckets. Shape (n, ceil(side_num/2)).
- t_buckets: Top buckets. Shape (n, ceil(side_num/2)).
- d_buckets: Down buckets. Shape (n, ceil(side_num/2)).
"""
proposals = bbox_rescale(proposals, scale_factor)
# number of buckets in each side
side_num = int(np.ceil(num_buckets / 2.0))
pw = proposals[..., 2] - proposals[..., 0]
ph = proposals[..., 3] - proposals[..., 1]
px1 = proposals[..., 0]
py1 = proposals[..., 1]
px2 = proposals[..., 2]
py2 = proposals[..., 3]
bucket_w = pw / num_buckets
bucket_h = ph / num_buckets
# left buckets
l_buckets = px1[:, None] + (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
# right buckets
r_buckets = px2[:, None] - (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
# top buckets
t_buckets = py1[:, None] + (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
# down buckets
d_buckets = py2[:, None] - (0.5 + torch.arange(
0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets
def bbox2bucket(proposals,
gt,
num_buckets,
scale_factor,
offset_topk=2,
offset_upperbound=1.0,
cls_ignore_neighbor=True):
"""Generate buckets estimation and fine regression targets.
Args:
proposals (Tensor): Shape (n, 4)
gt (Tensor): Shape (n, 4)
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
offset_topk (int): Topk buckets are used to generate
bucket fine regression targets. Defaults to 2.
offset_upperbound (float): Offset allowance to generate
bucket fine regression targets.
To avoid too large offset displacements. Defaults to 1.0.
cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
Defaults to True.
Returns:
tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).
- offsets: Fine regression targets. \
Shape (n, num_buckets*2).
- offsets_weights: Fine regression weights. \
Shape (n, num_buckets*2).
- bucket_labels: Bucketing estimation labels. \
Shape (n, num_buckets*2).
- cls_weights: Bucketing estimation weights. \
Shape (n, num_buckets*2).
"""
assert proposals.size() == gt.size()
# generate buckets
proposals = proposals.float()
gt = gt.float()
(bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
d_buckets) = generat_buckets(proposals, num_buckets, scale_factor)
gx1 = gt[..., 0]
gy1 = gt[..., 1]
gx2 = gt[..., 2]
gy2 = gt[..., 3]
# generate offset targets and weights
# offsets from buckets to gts
l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]
r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]
t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]
d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]
# select the top-k nearest buckets
l_topk, l_label = l_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
r_topk, r_label = r_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
t_topk, t_label = t_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
d_topk, d_label = d_offsets.abs().topk(
offset_topk, dim=1, largest=False, sorted=True)
offset_l_weights = l_offsets.new_zeros(l_offsets.size())
offset_r_weights = r_offsets.new_zeros(r_offsets.size())
offset_t_weights = t_offsets.new_zeros(t_offsets.size())
offset_d_weights = d_offsets.new_zeros(d_offsets.size())
inds = torch.arange(0, proposals.size(0)).to(proposals).long()
# generate offset weights of the top-k nearest buckets
for k in range(offset_topk):
if k >= 1:
offset_l_weights[inds, l_label[:,
k]] = (l_topk[:, k] <
offset_upperbound).float()
offset_r_weights[inds, r_label[:,
k]] = (r_topk[:, k] <
offset_upperbound).float()
offset_t_weights[inds, t_label[:,
k]] = (t_topk[:, k] <
offset_upperbound).float()
offset_d_weights[inds, d_label[:,
k]] = (d_topk[:, k] <
offset_upperbound).float()
else:
offset_l_weights[inds, l_label[:, k]] = 1.0
offset_r_weights[inds, r_label[:, k]] = 1.0
offset_t_weights[inds, t_label[:, k]] = 1.0
offset_d_weights[inds, d_label[:, k]] = 1.0
offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)
offsets_weights = torch.cat([
offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights
],
dim=-1)
# generate bucket labels and weight
side_num = int(np.ceil(num_buckets / 2.0))
labels = torch.stack(
[l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)
batch_size = labels.size(0)
bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,
-1).float()
bucket_cls_l_weights = (l_offsets.abs() < 1).float()
bucket_cls_r_weights = (r_offsets.abs() < 1).float()
bucket_cls_t_weights = (t_offsets.abs() < 1).float()
bucket_cls_d_weights = (d_offsets.abs() < 1).float()
bucket_cls_weights = torch.cat([
bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,
bucket_cls_d_weights
],
dim=-1)
# ignore second nearest buckets for cls if necessary
if cls_ignore_neighbor:
bucket_cls_weights = (~((bucket_cls_weights == 1) &
(bucket_labels == 0))).float()
else:
bucket_cls_weights[:] = 1.0
return offsets, offsets_weights, bucket_labels, bucket_cls_weights
def bucket2bbox(proposals,
cls_preds,
offset_preds,
num_buckets,
scale_factor=1.0,
max_shape=None,
clip_border=True):
"""Apply bucketing estimation (cls preds) and fine regression (offset
preds) to generate det bboxes.
Args:
proposals (Tensor): Boxes to be transformed. Shape (n, 4)
cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).
offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).
num_buckets (int): Number of buckets.
scale_factor (float): Scale factor to rescale proposals.
max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)
clip_border (bool, optional): Whether clip the objects outside the
border of the image. Defaults to True.
Returns:
tuple[Tensor]: (bboxes, loc_confidence).
- bboxes: predicted bboxes. Shape (n, 4)
- loc_confidence: localization confidence of predicted bboxes.
Shape (n,).
"""
side_num = int(np.ceil(num_buckets / 2.0))
cls_preds = cls_preds.view(-1, side_num)
offset_preds = offset_preds.view(-1, side_num)
scores = F.softmax(cls_preds, dim=1)
score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)
rescaled_proposals = bbox_rescale(proposals, scale_factor)
pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
px1 = rescaled_proposals[..., 0]
py1 = rescaled_proposals[..., 1]
px2 = rescaled_proposals[..., 2]
py2 = rescaled_proposals[..., 3]
bucket_w = pw / num_buckets
bucket_h = ph / num_buckets
score_inds_l = score_label[0::4, 0]
score_inds_r = score_label[1::4, 0]
score_inds_t = score_label[2::4, 0]
score_inds_d = score_label[3::4, 0]
l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h
offsets = offset_preds.view(-1, 4, side_num)
inds = torch.arange(proposals.size(0)).to(proposals).long()
l_offsets = offsets[:, 0, :][inds, score_inds_l]
r_offsets = offsets[:, 1, :][inds, score_inds_r]
t_offsets = offsets[:, 2, :][inds, score_inds_t]
d_offsets = offsets[:, 3, :][inds, score_inds_d]
x1 = l_buckets - l_offsets * bucket_w
x2 = r_buckets - r_offsets * bucket_w
y1 = t_buckets - t_offsets * bucket_h
y2 = d_buckets - d_offsets * bucket_h
if clip_border and max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
dim=-1)
# bucketing guided rescoring
loc_confidence = score_topk[:, 0]
top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)
return bboxes, loc_confidence
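# Shape sketch for the coder (illustrative; num_buckets=14 is an assumed
# value, not necessarily the config default): side_num = ceil(14 / 2) = 7, so
# for n proposals encode() returns offsets and offset weights of shape
# (n, 28) plus bucket labels and cls weights of shape (n, 28), while decode()
# consumes cls_preds and offset_preds of shape (n, 28) and returns (n, 4)
# boxes together with an (n,) localization confidence used for rescoring.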
| insightface/detection/scrfd/mmdet/core/bbox/coder/bucketing_bbox_coder.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/coder/bucketing_bbox_coder.py",
"repo_id": "insightface",
"token_count": 6744
} | 104 |
import torch
from ..builder import BBOX_SAMPLERS
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
@BBOX_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
"""A pseudo sampler that does not do sampling actually."""
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
"""Sample positive samples."""
raise NotImplementedError
def _sample_neg(self, **kwargs):
"""Sample negative samples."""
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
"""Directly returns the positive and negative indices of samples.
Args:
assign_result (:obj:`AssignResult`): Assigned results
bboxes (torch.Tensor): Bounding boxes
gt_bboxes (torch.Tensor): Ground truth boxes
Returns:
:obj:`SamplingResult`: sampler results
"""
pos_inds = torch.nonzero(
assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
| insightface/detection/scrfd/mmdet/core/bbox/samplers/pseudo_sampler.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/samplers/pseudo_sampler.py",
"repo_id": "insightface",
"token_count": 617
} | 105 |
from .mask_target import mask_target
from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks
from .utils import encode_mask_results, split_combined_polys
__all__ = [
'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',
'PolygonMasks', 'encode_mask_results'
]
| insightface/detection/scrfd/mmdet/core/mask/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/mask/__init__.py",
"repo_id": "insightface",
"token_count": 105
} | 106 |
from .builder import DATASETS
from .coco import CocoDataset
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
'skin', 'face')
| insightface/detection/scrfd/mmdet/datasets/deepfashion.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/datasets/deepfashion.py",
"repo_id": "insightface",
"token_count": 136
} | 107 |
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
from .builder import DATASETS
from .xml_style import XMLDataset
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
"""Reader for the WIDER Face dataset in PASCAL VOC format.
Conversion scripts can be found in
https://github.com/sovrasov/wider-face-pascal-voc-annotations
"""
CLASSES = ('face', )
def __init__(self, **kwargs):
super(WIDERFaceDataset, self).__init__(**kwargs)
def load_annotations(self, ann_file):
"""Load annotation from WIDERFace XML style annotation file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations',
f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
width = int(size.find('width').text)
height = int(size.find('height').text)
folder = root.find('folder').text
data_infos.append(
dict(
id=img_id,
filename=osp.join(folder, filename),
width=width,
height=height))
return data_infos
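# Expected on-disk layout (a sketch based on the conversion scripts referenced
# above; exact folder names are an assumption):
#   <img_prefix>/Annotations/<img_id>.xml   VOC-style XML with size and folder
#   <img_prefix>/<folder>/<img_id>.jpg      image referenced via the XML folder tag
# and ann_file is a plain text file listing one <img_id> per line.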
| insightface/detection/scrfd/mmdet/datasets/wider_face.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/datasets/wider_face.py",
"repo_id": "insightface",
"token_count": 733
} | 108 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from mmcv.cnn import build_conv_layer, build_norm_layer, kaiming_init
from torch.nn.modules.utils import _pair
from mmdet.models.backbones.resnet import Bottleneck, ResNet
from mmdet.models.builder import BACKBONES
class TridentConv(nn.Module):
"""Trident Convolution Module.
Args:
in_channels (int): Number of channels in input.
out_channels (int): Number of channels in output.
kernel_size (int): Size of convolution kernel.
stride (int, optional): Convolution stride. Default: 1.
trident_dilations (tuple[int, int, int], optional): Dilations of
different trident branch. Default: (1, 2, 3).
test_branch_idx (int, optional): In inference, all 3 branches will
be used if `test_branch_idx==-1`, otherwise only branch with
index `test_branch_idx` will be used. Default: 1.
bias (bool, optional): Whether to use bias in convolution or not.
Default: False.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
trident_dilations=(1, 2, 3),
test_branch_idx=1,
bias=False):
super(TridentConv, self).__init__()
self.num_branch = len(trident_dilations)
self.with_bias = bias
self.test_branch_idx = test_branch_idx
self.stride = _pair(stride)
self.kernel_size = _pair(kernel_size)
self.paddings = _pair(trident_dilations)
self.dilations = trident_dilations
self.in_channels = in_channels
self.out_channels = out_channels
self.bias = bias
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels, *self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.bias = None
self.init_weights()
def init_weights(self):
kaiming_init(self, distribution='uniform', mode='fan_in')
def extra_repr(self):
tmpstr = f'in_channels={self.in_channels}'
tmpstr += f', out_channels={self.out_channels}'
tmpstr += f', kernel_size={self.kernel_size}'
tmpstr += f', num_branch={self.num_branch}'
tmpstr += f', test_branch_idx={self.test_branch_idx}'
tmpstr += f', stride={self.stride}'
tmpstr += f', paddings={self.paddings}'
tmpstr += f', dilations={self.dilations}'
tmpstr += f', bias={self.bias}'
return tmpstr
def forward(self, inputs):
if self.training or self.test_branch_idx == -1:
outputs = [
F.conv2d(input, self.weight, self.bias, self.stride, padding,
dilation) for input, dilation, padding in zip(
inputs, self.dilations, self.paddings)
]
else:
assert len(inputs) == 1
outputs = [
F.conv2d(inputs[0], self.weight, self.bias, self.stride,
self.paddings[self.test_branch_idx],
self.dilations[self.test_branch_idx])
]
return outputs
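# Behaviour sketch (illustrative): in training (or with test_branch_idx=-1)
# the module takes a list of num_branch inputs and applies the *same* weight
# tensor to each with its branch-specific dilation and padding, e.g. with
# trident_dilations=(1, 2, 3) it yields three outputs of identical spatial
# size. At test time with test_branch_idx=1, a single input is expected and
# only the dilation-2 branch is evaluated.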
# Since TridentNet is defined over ResNet50 and ResNet101, here we
# only support TridentBottleneckBlock.
class TridentBottleneck(Bottleneck):
"""BottleBlock for TridentResNet.
Args:
trident_dilations (tuple[int, int, int]): Dilations of different
trident branch.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
concat_output (bool): Whether to concat the output list to a Tensor.
`True` only in the last Block.
"""
def __init__(self, trident_dilations, test_branch_idx, concat_output,
**kwargs):
super(TridentBottleneck, self).__init__(**kwargs)
self.trident_dilations = trident_dilations
self.num_branch = len(trident_dilations)
self.concat_output = concat_output
self.test_branch_idx = test_branch_idx
self.conv2 = TridentConv(
self.planes,
self.planes,
kernel_size=3,
stride=self.conv2_stride,
bias=False,
trident_dilations=self.trident_dilations,
test_branch_idx=test_branch_idx)
def forward(self, x):
def _inner_forward(x):
num_branch = (
self.num_branch
if self.training or self.test_branch_idx == -1 else 1)
identity = x
if not isinstance(x, list):
x = (x, ) * num_branch
identity = x
if self.downsample is not None:
identity = [self.downsample(b) for b in x]
out = [self.conv1(b) for b in x]
out = [self.norm1(b) for b in out]
out = [self.relu(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv1_plugin_names)
out = self.conv2(out)
out = [self.norm2(b) for b in out]
out = [self.relu(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv2_plugin_names)
out = [self.conv3(b) for b in out]
out = [self.norm3(b) for b in out]
if self.with_plugins:
for k in range(len(out)):
out[k] = self.forward_plugin(out[k],
self.after_conv3_plugin_names)
out = [
out_b + identity_b for out_b, identity_b in zip(out, identity)
]
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = [self.relu(b) for b in out]
if self.concat_output:
out = torch.cat(out, dim=0)
return out
def make_trident_res_layer(block,
inplanes,
planes,
num_blocks,
stride=1,
trident_dilations=(1, 2, 3),
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
test_branch_idx=-1):
"""Build Trident Res Layers."""
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
for i in range(num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride if i == 0 else 1,
trident_dilations=trident_dilations,
downsample=downsample if i == 0 else None,
style=style,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
plugins=plugins,
test_branch_idx=test_branch_idx,
concat_output=True if i == num_blocks - 1 else False))
inplanes = planes * block.expansion
return nn.Sequential(*layers)
@BACKBONES.register_module()
class TridentResNet(ResNet):
"""The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
normal BottleBlock to yield trident output. Different branch shares the
convolution weight but uses different dilations to achieve multi-scale
output.
/ stage3(b0) \
x - stem - stage1 - stage2 - stage3(b1) - output
\ stage3(b2) /
Args:
depth (int): Depth of resnet, from {50, 101, 152}.
num_branch (int): Number of branches in TridentNet.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
trident_dilations (tuple[int]): Dilations of different trident branch.
len(trident_dilations) should be equal to num_branch.
""" # noqa
def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
**kwargs):
assert num_branch == len(trident_dilations)
assert depth in (50, 101, 152)
super(TridentResNet, self).__init__(depth, **kwargs)
assert self.num_stages == 3
self.test_branch_idx = test_branch_idx
self.num_branch = num_branch
last_stage_idx = self.num_stages - 1
stride = self.strides[last_stage_idx]
dilation = trident_dilations
dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
if self.plugins is not None:
stage_plugins = self.make_stage_plugins(self.plugins,
last_stage_idx)
else:
stage_plugins = None
planes = self.base_channels * 2**last_stage_idx
res_layer = make_trident_res_layer(
TridentBottleneck,
inplanes=(self.block.expansion * self.base_channels *
2**(last_stage_idx - 1)),
planes=planes,
num_blocks=self.stage_blocks[last_stage_idx],
stride=stride,
trident_dilations=dilation,
style=self.style,
with_cp=self.with_cp,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
dcn=dcn,
plugins=stage_plugins,
test_branch_idx=self.test_branch_idx)
layer_name = f'layer{last_stage_idx + 1}'
self.__setattr__(layer_name, res_layer)
self.res_layers.pop(last_stage_idx)
self.res_layers.insert(last_stage_idx, layer_name)
self._freeze_stages()
| insightface/detection/scrfd/mmdet/models/backbones/trident_resnet.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/backbones/trident_resnet.py",
"repo_id": "insightface",
"token_count": 5652
} | 109 |
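A minimal sketch of the weight sharing behind `TridentConv` above: one 3x3 kernel applied with several dilations, with padding equal to the dilation so every branch keeps the input resolution. The shapes and the dilation tuple are illustrative, not taken from any config.
import torch
import torch.nn.functional as F
weight = torch.randn(8, 4, 3, 3)  # one kernel shared by all branches
x = torch.randn(2, 4, 32, 32)
dilations = (1, 2, 3)  # matches the default trident_dilations
branches = [
    F.conv2d(x, weight, bias=None, stride=1, padding=d, dilation=d)
    for d in dilations
]
for d, out in zip(dilations, branches):
    # Same output size for every branch, but a larger receptive field as d grows.
    print(d, tuple(out.shape))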
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32
from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps,
build_assigner, build_sampler, distance2bbox,
images_to_levels, multi_apply, multiclass_nms,
reduce_mean, unmap)
from ..builder import HEADS, build_loss
from .anchor_head import AnchorHead
class Integral(nn.Module):
"""A fixed layer for calculating integral result from distribution.
    This layer calculates the target location by :math:`\sum_i P(y_i) * y_i`,
    where P(y_i) denotes the softmax vector that represents the discrete
    distribution and y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}.
Args:
reg_max (int): The maximal value of the discrete set. Default: 16. You
may want to reset it according to your new dataset or related
settings.
"""
def __init__(self, reg_max=16):
super(Integral, self).__init__()
self.reg_max = reg_max
self.register_buffer('project',
torch.linspace(0, self.reg_max, self.reg_max + 1))
def forward(self, x):
"""Forward feature from the regression head to get integral result of
bounding box location.
Args:
x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
n is self.reg_max.
Returns:
x (Tensor): Integral result of box locations, i.e., distance
offsets from the box center in four directions, shape (N, 4).
"""
x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
return x
@HEADS.register_module()
class GFLHead(AnchorHead):
"""Generalized Focal Loss: Learning Qualified and Distributed Bounding
Boxes for Dense Object Detection.
    GFL head structure is similar to that of ATSS; however, GFL uses
1) joint representation for classification and localization quality, and
2) flexible General distribution for bounding box locations,
which are supervised by
Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively
https://arxiv.org/abs/2006.04388
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
stacked_convs (int): Number of conv layers in cls and reg tower.
Default: 4.
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None.
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='GN', num_groups=32, requires_grad=True).
loss_qfl (dict): Config of Quality Focal Loss (QFL).
reg_max (int): Max value of integral set :math: `{0, ..., reg_max}`
in QFL setting. Default: 16.
Example:
>>> self = GFLHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_quality_score, bbox_pred = self.forward(feats)
>>> assert len(cls_quality_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
reg_max=16,
**kwargs):
self.stacked_convs = stacked_convs
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reg_max = reg_max
super(GFLHead, self).__init__(num_classes, in_channels, **kwargs)
self.sampling = False
if self.train_cfg:
self.assigner = build_assigner(self.train_cfg.assigner)
# SSD sampling=False so use PseudoSampler
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.integral = Integral(self.reg_max)
self.loss_dfl = build_loss(loss_dfl)
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
assert self.num_anchors == 1, 'anchor free version'
self.gfl_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.gfl_reg = nn.Conv2d(
self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
self.scales = nn.ModuleList(
[Scale(1.0) for _ in self.anchor_generator.strides])
def init_weights(self):
"""Initialize weights of the head."""
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.gfl_cls, std=0.01, bias=bias_cls)
normal_init(self.gfl_reg, std=0.01)
def forward(self, feats):
"""Forward features from the upstream network.
Args:
feats (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: Usually a tuple of classification scores and bbox prediction
cls_scores (list[Tensor]): Classification and quality (IoU)
joint scores for all scale levels, each is a 4D-tensor,
the channel number is num_classes.
bbox_preds (list[Tensor]): Box distribution logits for all
scale levels, each is a 4D-tensor, the channel number is
4*(n+1), n is max value of integral set.
"""
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
Returns:
tuple:
cls_score (Tensor): Cls and quality joint scores for a single
scale level the channel number is num_classes.
bbox_pred (Tensor): Box distribution logits for a single scale
level, the channel number is 4*(n+1), n is max value of
integral set.
"""
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.gfl_cls(cls_feat)
bbox_pred = scale(self.gfl_reg(reg_feat)).float()
return cls_score, bbox_pred
def anchor_center(self, anchors):
"""Get anchor centers from anchors.
Args:
anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.
Returns:
Tensor: Anchor centers with shape (N, 2), "xy" format.
"""
anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
return torch.stack([anchors_cx, anchors_cy], dim=-1)
def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
bbox_targets, stride, num_total_samples):
"""Compute loss of a single scale level.
Args:
anchors (Tensor): Box reference for each scale level with shape
(N, num_total_anchors, 4).
cls_score (Tensor): Cls and quality joint scores for each scale
level has shape (N, num_classes, H, W).
bbox_pred (Tensor): Box distribution logits for each scale
level with shape (N, 4*(n+1), H, W), n is max value of integral
set.
labels (Tensor): Labels of each anchors with shape
(N, num_total_anchors).
label_weights (Tensor): Label weights of each anchor with shape
(N, num_total_anchors)
            bbox_targets (Tensor): BBox regression targets of each anchor with
shape (N, num_total_anchors, 4).
stride (tuple): Stride in this scale level.
num_total_samples (int): Number of positive samples that is
reduced over all GPUs.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
assert stride[0] == stride[1], 'h stride is not equal to w stride!'
anchors = anchors.reshape(-1, 4)
cls_score = cls_score.permute(0, 2, 3,
1).reshape(-1, self.cls_out_channels)
bbox_pred = bbox_pred.permute(0, 2, 3,
1).reshape(-1, 4 * (self.reg_max + 1))
bbox_targets = bbox_targets.reshape(-1, 4)
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = self.num_classes
pos_inds = ((labels >= 0)
& (labels < bg_class_ind)).nonzero().squeeze(1)
score = label_weights.new_zeros(labels.shape)
if len(pos_inds) > 0:
pos_bbox_targets = bbox_targets[pos_inds]
pos_bbox_pred = bbox_pred[pos_inds]
pos_anchors = anchors[pos_inds]
pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
weight_targets = cls_score.detach().sigmoid()
weight_targets = weight_targets.max(dim=1)[0][pos_inds]
pos_bbox_pred_corners = self.integral(pos_bbox_pred)
pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
pos_bbox_pred_corners)
pos_decode_bbox_targets = pos_bbox_targets / stride[0]
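            # The IoU between the decoded prediction and its target is used as
            # the soft classification target, i.e. the "quality" part of QFL.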
score[pos_inds] = bbox_overlaps(
pos_decode_bbox_pred.detach(),
pos_decode_bbox_targets,
is_aligned=True)
pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
target_corners = bbox2distance(pos_anchor_centers,
pos_decode_bbox_targets,
self.reg_max).reshape(-1)
# regression loss
loss_bbox = self.loss_bbox(
pos_decode_bbox_pred,
pos_decode_bbox_targets,
weight=weight_targets,
avg_factor=1.0)
# dfl loss
loss_dfl = self.loss_dfl(
pred_corners,
target_corners,
weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
avg_factor=4.0)
else:
loss_bbox = bbox_pred.sum() * 0
loss_dfl = bbox_pred.sum() * 0
            weight_targets = bbox_pred.new_tensor(0.)  # stay on the prediction's device instead of hard-coding .cuda()
# cls (qfl) loss
loss_cls = self.loss_cls(
cls_score, (labels, score),
weight=label_weights,
avg_factor=num_total_samples)
return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Cls and quality scores for each scale
level has shape (N, num_classes, H, W).
bbox_preds (list[Tensor]): Box distribution logits for each scale
level with shape (N, 4*(n+1), H, W), n is max value of integral
set.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (list[Tensor] | None): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels)
if cls_reg_targets is None:
return None
(anchor_list, labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = reduce_mean(
torch.tensor(num_total_pos, dtype=torch.float,
device=device)).item()
num_total_samples = max(num_total_samples, 1.0)
losses_cls, losses_bbox, losses_dfl,\
avg_factor = multi_apply(
self.loss_single,
anchor_list,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_targets_list,
self.anchor_generator.strides,
num_total_samples=num_total_samples)
avg_factor = sum(avg_factor)
avg_factor = reduce_mean(avg_factor).item()
losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False,
with_nms=True):
"""Transform outputs for a single batch item into labeled boxes.
Args:
cls_scores (list[Tensor]): Box scores for a single scale level
has shape (num_classes, H, W).
bbox_preds (list[Tensor]): Box distribution logits for a single
scale level with shape (4*(n+1), H, W), n is max value of
integral set.
mlvl_anchors (list[Tensor]): Box reference for a single scale level
with shape (num_total_anchors, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
            scale_factor (ndarray): Scale factor of the image arranged as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Default: False.
with_nms (bool): If True, do nms before return boxes.
Default: True.
Returns:
tuple(Tensor):
det_bboxes (Tensor): Bbox predictions in shape (N, 5), where
the first 4 columns are bounding box positions
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
between 0 and 1.
det_labels (Tensor): A (N,) tensor where each item is the
predicted class label of the corresponding box.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, stride, anchors in zip(
cls_scores, bbox_preds, self.anchor_generator.strides,
mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
assert stride[0] == stride[1]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0)
bbox_pred = self.integral(bbox_pred) * stride[0]
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = distance2bbox(
self.anchor_center(anchors), bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
if with_nms:
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
else:
return mlvl_bboxes, mlvl_scores
def get_targets(self,
anchor_list,
valid_flag_list,
gt_bboxes_list,
img_metas,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
unmap_outputs=True):
"""Get targets for GFL head.
This method is almost the same as `AnchorHead.get_targets()`. Besides
returning the targets as the parent method does, it also returns the
anchors as the first element of the returned tuple.
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
num_level_anchors_list = [num_level_anchors] * num_imgs
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_anchors, all_labels, all_label_weights, all_bbox_targets,
all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
self._get_target_single,
anchor_list,
valid_flag_list,
num_level_anchors_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
label_channels=label_channels,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
anchors_list = images_to_levels(all_anchors, num_level_anchors)
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights,
num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets,
num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights,
num_level_anchors)
return (anchors_list, labels_list, label_weights_list,
bbox_targets_list, bbox_weights_list, num_total_pos,
num_total_neg)
def _get_target_single(self,
flat_anchors,
valid_flags,
num_level_anchors,
gt_bboxes,
gt_bboxes_ignore,
gt_labels,
img_meta,
label_channels=1,
unmap_outputs=True):
"""Compute regression, classification targets for anchors in a single
image.
Args:
flat_anchors (Tensor): Multi-level anchors of the image, which are
concatenated into a single tensor of shape (num_anchors, 4)
valid_flags (Tensor): Multi level valid flags of the image,
which are concatenated into a single tensor of
shape (num_anchors,).
            num_level_anchors (Tensor): Number of anchors of each scale level.
gt_bboxes (Tensor): Ground truth bboxes of the image,
shape (num_gts, 4).
gt_bboxes_ignore (Tensor): Ground truth bboxes to be
ignored, shape (num_ignored_gts, 4).
gt_labels (Tensor): Ground truth labels of each box,
shape (num_gts,).
img_meta (dict): Meta info of the image.
label_channels (int): Channel of label.
unmap_outputs (bool): Whether to map outputs back to the original
set of anchors.
Returns:
tuple: N is the number of total anchors in the image.
anchors (Tensor): All anchors in the image with shape (N, 4).
labels (Tensor): Labels of all anchors in the image with shape
(N,).
label_weights (Tensor): Label weights of all anchor in the
image with shape (N,).
bbox_targets (Tensor): BBox targets of all anchors in the
image with shape (N, 4).
bbox_weights (Tensor): BBox weights of all anchors in the
image with shape (N, 4).
                pos_inds (Tensor): Indices of positive anchors with shape
(num_pos,).
neg_inds (Tensor): Indices of negative anchor with shape
(num_neg,).
"""
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
self.train_cfg.allowed_border)
if not inside_flags.any():
return (None, ) * 7
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
num_level_anchors_inside = self.get_num_level_anchors_inside(
num_level_anchors, inside_flags)
if self.assigner.__class__.__name__=='ATSSAssigner':
assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
gt_bboxes, gt_bboxes_ignore,
gt_labels)
else:
assign_result = self.assigner.assign(anchors,
gt_bboxes, gt_bboxes_ignore,
gt_labels)
sampling_result = self.sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_full((num_valid_anchors, ),
self.num_classes,
dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_bbox_targets = sampling_result.pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
# Only rpn gives gt_labels as None
# Foreground is the first class
labels[pos_inds] = 0
else:
labels[pos_inds] = gt_labels[
sampling_result.pos_assigned_gt_inds]
if self.train_cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = self.train_cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
anchors = unmap(anchors, num_total_anchors, inside_flags)
labels = unmap(
labels, num_total_anchors, inside_flags, fill=self.num_classes)
label_weights = unmap(label_weights, num_total_anchors,
inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (anchors, labels, label_weights, bbox_targets, bbox_weights,
pos_inds, neg_inds)
def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
split_inside_flags = torch.split(inside_flags, num_level_anchors)
num_level_anchors_inside = [
int(flags.sum()) for flags in split_inside_flags
]
return num_level_anchors_inside
| insightface/detection/scrfd/mmdet/models/dense_heads/gfl_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/gfl_head.py",
"repo_id": "insightface",
"token_count": 14385
} | 110 |
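A minimal sketch of how the distribution-style regression output above is decoded into a box: softmax over the bins, expectation against the fixed projection (as in `Integral`), then offsets applied around an anchor center. The value of reg_max, the logits and the center are made up.
import torch
import torch.nn.functional as F
reg_max = 16
project = torch.linspace(0, reg_max, reg_max + 1)  # bin values 0..reg_max
logits = torch.randn(1, 4 * (reg_max + 1))  # one location, four sides (l, t, r, b)
probs = F.softmax(logits.reshape(-1, reg_max + 1), dim=1)
dists = F.linear(probs, project).reshape(-1, 4)  # expected distances, in stride units
center = torch.tensor([[10.0, 12.0]])  # anchor center on the feature grid
box = torch.cat([center - dists[:, :2], center + dists[:, 2:]], dim=1)
print(box)  # (x1, y1, x2, y2), still in stride units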
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, xavier_init
from mmcv.runner import force_fp32
from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply
from ..builder import HEADS, build_loss
from .anchor_head import AnchorHead
@HEADS.register_module()
class YOLACTHead(AnchorHead):
"""YOLACT box head used in https://arxiv.org/abs/1904.02689.
Note that YOLACT head is a light version of RetinaNet head.
Four differences are described as follows:
1. YOLACT box head has three-times fewer anchors.
2. YOLACT box head shares the convs for box and cls branches.
3. YOLACT box head uses OHEM instead of Focal loss.
4. YOLACT box head predicts a set of mask coefficients for each box.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
anchor_generator (dict): Config dict for anchor generator
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of localization loss.
num_head_convs (int): Number of the conv layers shared by
box and cls branches.
num_protos (int): Number of the mask coefficients.
use_ohem (bool): If true, ``loss_single_OHEM`` will be used for
cls loss calculation. If false, ``loss_single`` will be used.
conv_cfg (dict): Dictionary to construct and config conv layer.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
num_classes,
in_channels,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=3,
scales_per_octave=1,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
reduction='none',
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
num_head_convs=1,
num_protos=32,
use_ohem=True,
conv_cfg=None,
norm_cfg=None,
**kwargs):
self.num_head_convs = num_head_convs
self.num_protos = num_protos
self.use_ohem = use_ohem
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
super(YOLACTHead, self).__init__(
num_classes,
in_channels,
loss_cls=loss_cls,
loss_bbox=loss_bbox,
anchor_generator=anchor_generator,
**kwargs)
if self.use_ohem:
sampler_cfg = dict(type='PseudoSampler')
self.sampler = build_sampler(sampler_cfg, context=self)
self.sampling = False
def _init_layers(self):
"""Initialize layers of the head."""
self.relu = nn.ReLU(inplace=True)
self.head_convs = nn.ModuleList()
for i in range(self.num_head_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.head_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.conv_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
self.conv_coeff = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.num_protos,
3,
padding=1)
def init_weights(self):
"""Initialize weights of the head."""
for m in self.head_convs:
xavier_init(m.conv, distribution='uniform', bias=0)
xavier_init(self.conv_cls, distribution='uniform', bias=0)
xavier_init(self.conv_reg, distribution='uniform', bias=0)
xavier_init(self.conv_coeff, distribution='uniform', bias=0)
def forward_single(self, x):
"""Forward feature of a single scale level.
Args:
x (Tensor): Features of a single scale level.
Returns:
tuple:
cls_score (Tensor): Cls scores for a single scale level \
the channels number is num_anchors * num_classes.
bbox_pred (Tensor): Box energies / deltas for a single scale \
level, the channels number is num_anchors * 4.
coeff_pred (Tensor): Mask coefficients for a single scale \
level, the channels number is num_anchors * num_protos.
"""
for head_conv in self.head_convs:
x = head_conv(x)
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
coeff_pred = self.conv_coeff(x).tanh()
return cls_score, bbox_pred, coeff_pred
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=None):
"""A combination of the func:``AnchorHead.loss`` and
func:``SSDHead.loss``.
When ``self.use_ohem == True``, it functions like ``SSDHead.loss``,
otherwise, it follows ``AnchorHead.loss``. Besides, it additionally
returns ``sampling_results``.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss. Default: None
Returns:
tuple:
dict[str, Tensor]: A dictionary of loss components.
List[:obj:``SamplingResult``]: Sampler results for each image.
"""
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == self.anchor_generator.num_levels
device = cls_scores[0].device
anchor_list, valid_flag_list = self.get_anchors(
featmap_sizes, img_metas, device=device)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = self.get_targets(
anchor_list,
valid_flag_list,
gt_bboxes,
img_metas,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
unmap_outputs=not self.use_ohem,
return_sampling_results=True)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg, sampling_results) = cls_reg_targets
if self.use_ohem:
num_images = len(img_metas)
all_cls_scores = torch.cat([
s.permute(0, 2, 3, 1).reshape(
num_images, -1, self.cls_out_channels) for s in cls_scores
], 1)
all_labels = torch.cat(labels_list, -1).view(num_images, -1)
all_label_weights = torch.cat(label_weights_list,
-1).view(num_images, -1)
all_bbox_preds = torch.cat([
b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
for b in bbox_preds
], -2)
all_bbox_targets = torch.cat(bbox_targets_list,
-2).view(num_images, -1, 4)
all_bbox_weights = torch.cat(bbox_weights_list,
-2).view(num_images, -1, 4)
# concat all level anchors to a single tensor
all_anchors = []
for i in range(num_images):
all_anchors.append(torch.cat(anchor_list[i]))
# check NaN and Inf
assert torch.isfinite(all_cls_scores).all().item(), \
'classification scores become infinite or NaN!'
assert torch.isfinite(all_bbox_preds).all().item(), \
                'bbox predictions become infinite or NaN!'
losses_cls, losses_bbox = multi_apply(
self.loss_single_OHEM,
all_cls_scores,
all_bbox_preds,
all_anchors,
all_labels,
all_label_weights,
all_bbox_targets,
all_bbox_weights,
num_total_samples=num_total_pos)
else:
num_total_samples = (
num_total_pos +
num_total_neg if self.sampling else num_total_pos)
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
concat_anchor_list = []
for i in range(len(anchor_list)):
concat_anchor_list.append(torch.cat(anchor_list[i]))
all_anchor_list = images_to_levels(concat_anchor_list,
num_level_anchors)
losses_cls, losses_bbox = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
all_anchor_list,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples)
return dict(
loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results
def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
label_weights, bbox_targets, bbox_weights,
num_total_samples):
""""See func:``SSDHead.loss``."""
loss_cls_all = self.loss_cls(cls_score, labels, label_weights)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
pos_inds = ((labels >= 0) &
(labels < self.num_classes)).nonzero().reshape(-1)
neg_inds = (labels == self.num_classes).nonzero().view(-1)
num_pos_samples = pos_inds.size(0)
if num_pos_samples == 0:
num_neg_samples = neg_inds.size(0)
else:
num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
if num_neg_samples > neg_inds.size(0):
num_neg_samples = neg_inds.size(0)
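        # Online hard example mining: keep every positive sample, but only the
        # num_neg_samples negatives with the largest classification loss.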
topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
loss_cls_pos = loss_cls_all[pos_inds].sum()
loss_cls_neg = topk_loss_cls_neg.sum()
loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
if self.reg_decoded_bbox:
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
loss_bbox = self.loss_bbox(
bbox_pred,
bbox_targets,
bbox_weights,
avg_factor=num_total_samples)
return loss_cls[None], loss_bbox
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
coeff_preds,
img_metas,
cfg=None,
rescale=False):
""""Similiar to func:``AnchorHead.get_bboxes``, but additionally
processes coeff_preds.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
with shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
coeff_preds (list[Tensor]): Mask coefficients for each scale
level with shape (N, num_anchors * num_protos, H, W)
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
cfg (mmcv.Config | None): Test / postprocessing configuration,
if None, test_cfg would be used
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is
a 3-tuple. The first item is an (n, 5) tensor, where the
first 4 columns are bounding box positions
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
between 0 and 1. The second item is an (n,) tensor where each
item is the predicted class label of the corresponding box.
The third item is an (n, num_protos) tensor where each item
is the predicted mask coefficients of instance inside the
corresponding box.
"""
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
mlvl_anchors = self.anchor_generator.grid_anchors(
featmap_sizes, device=device)
det_bboxes = []
det_labels = []
det_coeffs = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
coeff_pred_list = [
coeff_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list,
coeff_pred_list, mlvl_anchors,
img_shape, scale_factor, cfg,
rescale)
det_bboxes.append(bbox_res[0])
det_labels.append(bbox_res[1])
det_coeffs.append(bbox_res[2])
return det_bboxes, det_labels, det_coeffs
def _get_bboxes_single(self,
cls_score_list,
bbox_pred_list,
coeff_preds_list,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
""""Similiar to func:``AnchorHead._get_bboxes_single``, but
additionally processes coeff_preds_list and uses fast NMS instead of
traditional NMS.
Args:
cls_score_list (list[Tensor]): Box scores for a single scale level
Has shape (num_anchors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas for a single
scale level with shape (num_anchors * 4, H, W).
coeff_preds_list (list[Tensor]): Mask coefficients for a single
scale level with shape (num_anchors * num_protos, H, W).
mlvl_anchors (list[Tensor]): Box reference for a single scale level
with shape (num_total_anchors, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
            scale_factor (ndarray): Scale factor of the image arranged as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Returns:
tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor,
where the first 4 columns are bounding box positions
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score between
0 and 1. The second item is an (n,) tensor where each item is
the predicted class label of the corresponding box. The third
item is an (n, num_protos) tensor where each item is the
predicted mask coefficients of instance inside the
corresponding box.
"""
cfg = self.test_cfg if cfg is None else cfg
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
mlvl_coeffs = []
for cls_score, bbox_pred, coeff_pred, anchors in \
zip(cls_score_list, bbox_pred_list,
coeff_preds_list, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
coeff_pred = coeff_pred.permute(1, 2,
0).reshape(-1, self.num_protos)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
# remind that we set FG labels to [0, num_class-1]
# since mmdet v2.0
# BG cat_id: num_class
max_scores, _ = scores[:, :-1].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
coeff_pred = coeff_pred[topk_inds, :]
bboxes = self.bbox_coder.decode(
anchors, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_coeffs.append(coeff_pred)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
mlvl_coeffs = torch.cat(mlvl_coeffs)
if self.use_sigmoid_cls:
# Add a dummy background class to the backend when using sigmoid
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
# BG cat_id: num_class
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores,
mlvl_coeffs,
cfg.score_thr,
cfg.iou_thr, cfg.top_k,
cfg.max_per_img)
return det_bboxes, det_labels, det_coeffs
@HEADS.register_module()
class YOLACTSegmHead(nn.Module):
"""YOLACT segmentation head used in https://arxiv.org/abs/1904.02689.
Apply a semantic segmentation loss on feature space using layers that are
only evaluated during training to increase performance with no speed
penalty.
Args:
in_channels (int): Number of channels in the input feature map.
num_classes (int): Number of categories excluding the background
category.
loss_segm (dict): Config of semantic segmentation loss.
"""
def __init__(self,
num_classes,
in_channels=256,
loss_segm=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0)):
super(YOLACTSegmHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.loss_segm = build_loss(loss_segm)
self._init_layers()
self.fp16_enabled = False
def _init_layers(self):
"""Initialize layers of the head."""
self.segm_conv = nn.Conv2d(
self.in_channels, self.num_classes, kernel_size=1)
def init_weights(self):
"""Initialize weights of the head."""
xavier_init(self.segm_conv, distribution='uniform')
def forward(self, x):
"""Forward feature from the upstream network.
Args:
x (Tensor): Feature from the upstream network, which is
a 4D-tensor.
Returns:
Tensor: Predicted semantic segmentation map with shape
(N, num_classes, H, W).
"""
return self.segm_conv(x)
@force_fp32(apply_to=('segm_pred', ))
def loss(self, segm_pred, gt_masks, gt_labels):
"""Compute loss of the head.
Args:
            segm_pred (Tensor): Predicted semantic segmentation map
with shape (N, num_classes, H, W).
gt_masks (list[Tensor]): Ground truth masks for each image with
the same shape of the input image.
gt_labels (list[Tensor]): Class indices corresponding to each box.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
loss_segm = []
num_imgs, num_classes, mask_h, mask_w = segm_pred.size()
for idx in range(num_imgs):
cur_segm_pred = segm_pred[idx]
cur_gt_masks = gt_masks[idx].float()
cur_gt_labels = gt_labels[idx]
segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks,
cur_gt_labels)
if segm_targets is None:
loss = self.loss_segm(cur_segm_pred,
torch.zeros_like(cur_segm_pred),
torch.zeros_like(cur_segm_pred))
else:
loss = self.loss_segm(
cur_segm_pred,
segm_targets,
avg_factor=num_imgs * mask_h * mask_w)
loss_segm.append(loss)
return dict(loss_segm=loss_segm)
def get_targets(self, segm_pred, gt_masks, gt_labels):
"""Compute semantic segmentation targets for each image.
Args:
segm_pred (Tensor): Predicted semantic segmentation map
with shape (num_classes, H, W).
gt_masks (Tensor): Ground truth masks for each image with
the same shape of the input image.
gt_labels (Tensor): Class indices corresponding to each box.
Returns:
Tensor: Semantic segmentation targets with shape
(num_classes, H, W).
"""
if gt_masks.size(0) == 0:
return None
num_classes, mask_h, mask_w = segm_pred.size()
with torch.no_grad():
downsampled_masks = F.interpolate(
gt_masks.unsqueeze(0), (mask_h, mask_w),
mode='bilinear',
align_corners=False).squeeze(0)
downsampled_masks = downsampled_masks.gt(0.5).float()
segm_targets = torch.zeros_like(segm_pred, requires_grad=False)
for obj_idx in range(downsampled_masks.size(0)):
segm_targets[gt_labels[obj_idx] - 1] = torch.max(
segm_targets[gt_labels[obj_idx] - 1],
downsampled_masks[obj_idx])
return segm_targets
@HEADS.register_module()
class YOLACTProtonet(nn.Module):
"""YOLACT mask head used in https://arxiv.org/abs/1904.02689.
This head outputs the mask prototypes for YOLACT.
Args:
in_channels (int): Number of channels in the input feature map.
proto_channels (tuple[int]): Output channels of protonet convs.
proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs.
include_last_relu (Bool): If keep the last relu of protonet.
num_protos (int): Number of prototypes.
num_classes (int): Number of categories excluding the background
category.
loss_mask_weight (float): Reweight the mask loss by this factor.
max_masks_to_train (int): Maximum number of masks to train for
each image.
"""
def __init__(self,
num_classes,
in_channels=256,
proto_channels=(256, 256, 256, None, 256, 32),
proto_kernel_sizes=(3, 3, 3, -2, 3, 1),
include_last_relu=True,
num_protos=32,
loss_mask_weight=1.0,
max_masks_to_train=100):
super(YOLACTProtonet, self).__init__()
self.in_channels = in_channels
self.proto_channels = proto_channels
self.proto_kernel_sizes = proto_kernel_sizes
self.include_last_relu = include_last_relu
self.protonet = self._init_layers()
self.loss_mask_weight = loss_mask_weight
self.num_protos = num_protos
self.num_classes = num_classes
self.max_masks_to_train = max_masks_to_train
self.fp16_enabled = False
def _init_layers(self):
"""A helper function to take a config setting and turn it into a
network."""
# Possible patterns:
# ( 256, 3) -> conv
# ( 256,-2) -> deconv
# (None,-2) -> bilinear interpolate
in_channels = self.in_channels
protonets = nn.ModuleList()
for num_channels, kernel_size in zip(self.proto_channels,
self.proto_kernel_sizes):
if kernel_size > 0:
layer = nn.Conv2d(
in_channels,
num_channels,
kernel_size,
padding=kernel_size // 2)
else:
if num_channels is None:
layer = InterpolateModule(
scale_factor=-kernel_size,
mode='bilinear',
align_corners=False)
else:
layer = nn.ConvTranspose2d(
in_channels,
num_channels,
-kernel_size,
padding=kernel_size // 2)
protonets.append(layer)
protonets.append(nn.ReLU(inplace=True))
in_channels = num_channels if num_channels is not None \
else in_channels
if not self.include_last_relu:
protonets = protonets[:-1]
return nn.Sequential(*protonets)
def init_weights(self):
"""Initialize weights of the head."""
for m in self.protonet:
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None):
"""Forward feature from the upstream network to get prototypes and
linearly combine the prototypes, using masks coefficients, into
instance masks. Finally, crop the instance masks with given bboxes.
Args:
x (Tensor): Feature from the upstream network, which is
a 4D-tensor.
coeff_pred (list[Tensor]): Mask coefficients for each scale
level with shape (N, num_anchors * num_protos, H, W).
bboxes (list[Tensor]): Box used for cropping with shape
(N, num_anchors * 4, H, W). During training, they are
ground truth boxes. During testing, they are predicted
boxes.
img_meta (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
sampling_results (List[:obj:``SamplingResult``]): Sampler results
for each image.
Returns:
list[Tensor]: Predicted instance segmentation masks.
"""
prototypes = self.protonet(x)
prototypes = prototypes.permute(0, 2, 3, 1).contiguous()
num_imgs = x.size(0)
# Training state
if self.training:
coeff_pred_list = []
for coeff_pred_per_level in coeff_pred:
coeff_pred_per_level = \
coeff_pred_per_level.permute(0, 2, 3, 1)\
.reshape(num_imgs, -1, self.num_protos)
coeff_pred_list.append(coeff_pred_per_level)
coeff_pred = torch.cat(coeff_pred_list, dim=1)
mask_pred_list = []
for idx in range(num_imgs):
cur_prototypes = prototypes[idx]
cur_coeff_pred = coeff_pred[idx]
cur_bboxes = bboxes[idx]
cur_img_meta = img_meta[idx]
# Testing state
if not self.training:
bboxes_for_cropping = cur_bboxes
else:
cur_sampling_results = sampling_results[idx]
pos_assigned_gt_inds = \
cur_sampling_results.pos_assigned_gt_inds
bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone()
pos_inds = cur_sampling_results.pos_inds
cur_coeff_pred = cur_coeff_pred[pos_inds]
# Linearly combine the prototypes with the mask coefficients
mask_pred = cur_prototypes @ cur_coeff_pred.t()
mask_pred = torch.sigmoid(mask_pred)
h, w = cur_img_meta['img_shape'][:2]
bboxes_for_cropping[:, 0] /= w
bboxes_for_cropping[:, 1] /= h
bboxes_for_cropping[:, 2] /= w
bboxes_for_cropping[:, 3] /= h
mask_pred = self.crop(mask_pred, bboxes_for_cropping)
mask_pred = mask_pred.permute(2, 0, 1).contiguous()
mask_pred_list.append(mask_pred)
return mask_pred_list
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results):
"""Compute loss of the head.
Args:
mask_pred (list[Tensor]): Predicted prototypes with shape
(num_classes, H, W).
gt_masks (list[Tensor]): Ground truth masks for each image with
the same shape of the input image.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
img_meta (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
sampling_results (List[:obj:``SamplingResult``]): Sampler results
for each image.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
loss_mask = []
num_imgs = len(mask_pred)
total_pos = 0
for idx in range(num_imgs):
cur_mask_pred = mask_pred[idx]
cur_gt_masks = gt_masks[idx].float()
cur_gt_bboxes = gt_bboxes[idx]
cur_img_meta = img_meta[idx]
cur_sampling_results = sampling_results[idx]
pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds
num_pos = pos_assigned_gt_inds.size(0)
# Since we're producing (near) full image masks,
# it'd take too much vram to backprop on every single mask.
# Thus we select only a subset.
if num_pos > self.max_masks_to_train:
perm = torch.randperm(num_pos)
select = perm[:self.max_masks_to_train]
cur_mask_pred = cur_mask_pred[select]
pos_assigned_gt_inds = pos_assigned_gt_inds[select]
num_pos = self.max_masks_to_train
total_pos += num_pos
gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds]
mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks,
pos_assigned_gt_inds)
if num_pos == 0:
loss = cur_mask_pred.sum() * 0.
elif mask_targets is None:
loss = F.binary_cross_entropy(cur_mask_pred,
torch.zeros_like(cur_mask_pred),
torch.zeros_like(cur_mask_pred))
else:
cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1)
loss = F.binary_cross_entropy(
cur_mask_pred, mask_targets,
reduction='none') * self.loss_mask_weight
h, w = cur_img_meta['img_shape'][:2]
gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] -
gt_bboxes_for_reweight[:, 0]) / w
gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] -
gt_bboxes_for_reweight[:, 1]) / h
loss = loss.mean(dim=(1,
2)) / gt_bboxes_width / gt_bboxes_height
loss = torch.sum(loss)
loss_mask.append(loss)
if total_pos == 0:
total_pos += 1 # avoid nan
loss_mask = [x / total_pos for x in loss_mask]
return dict(loss_mask=loss_mask)
def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):
"""Compute instance segmentation targets for each image.
Args:
mask_pred (Tensor): Predicted prototypes with shape
(num_classes, H, W).
gt_masks (Tensor): Ground truth masks for each image with
the same shape of the input image.
pos_assigned_gt_inds (Tensor): GT indices of the corresponding
positive samples.
Returns:
Tensor: Instance segmentation targets with shape
(num_instances, H, W).
"""
if gt_masks.size(0) == 0:
return None
mask_h, mask_w = mask_pred.shape[-2:]
gt_masks = F.interpolate(
gt_masks.unsqueeze(0), (mask_h, mask_w),
mode='bilinear',
align_corners=False).squeeze(0)
gt_masks = gt_masks.gt(0.5).float()
mask_targets = gt_masks[pos_assigned_gt_inds]
return mask_targets
def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):
"""Resize, binarize, and format the instance mask predictions.
Args:
mask_pred (Tensor): shape (N, H, W).
label_pred (Tensor): shape (N, ).
img_meta (dict): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If rescale is False, then returned masks will
fit the scale of imgs[0].
Returns:
list[ndarray]: Mask predictions grouped by their predicted classes.
"""
ori_shape = img_meta['ori_shape']
scale_factor = img_meta['scale_factor']
if rescale:
img_h, img_w = ori_shape[:2]
else:
img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32)
img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32)
cls_segms = [[] for _ in range(self.num_classes)]
if mask_pred.size(0) == 0:
return cls_segms
mask_pred = F.interpolate(
mask_pred.unsqueeze(0), (img_h, img_w),
mode='bilinear',
align_corners=False).squeeze(0) > 0.5
mask_pred = mask_pred.cpu().numpy().astype(np.uint8)
for m, l in zip(mask_pred, label_pred):
cls_segms[l].append(m)
return cls_segms
def crop(self, masks, boxes, padding=1):
"""Crop predicted masks by zeroing out everything not in the predicted
bbox.
Args:
masks (Tensor): shape [H, W, N].
boxes (Tensor): bbox coords in relative point form with
shape [N, 4].
Return:
Tensor: The cropped masks.
"""
h, w, n = masks.size()
x1, x2 = self.sanitize_coordinates(
boxes[:, 0], boxes[:, 2], w, padding, cast=False)
y1, y2 = self.sanitize_coordinates(
boxes[:, 1], boxes[:, 3], h, padding, cast=False)
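        # Build per-pixel index grids along x (rows) and y (cols), compare them
        # with the box bounds, and multiply the four comparisons into a binary
        # mask that is 1 inside each box and 0 elsewhere.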
rows = torch.arange(
w, device=masks.device, dtype=x1.dtype).view(1, -1,
1).expand(h, w, n)
cols = torch.arange(
h, device=masks.device, dtype=x1.dtype).view(-1, 1,
1).expand(h, w, n)
masks_left = rows >= x1.view(1, 1, -1)
masks_right = rows < x2.view(1, 1, -1)
masks_up = cols >= y1.view(1, 1, -1)
masks_down = cols < y2.view(1, 1, -1)
crop_mask = masks_left * masks_right * masks_up * masks_down
return masks * crop_mask.float()
def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):
"""Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0,
and x2 <= image_size. Also converts from relative to absolute
coordinates and casts the results to long tensors.
Warning: this does things in-place behind the scenes so
copy if necessary.
Args:
            x1 (Tensor): shape (N, ).
            x2 (Tensor): shape (N, ).
img_size (int): Size of the input image.
padding (int): x1 >= padding, x2 <= image_size-padding.
cast (bool): If cast is false, the result won't be cast to longs.
Returns:
tuple:
                x1 (Tensor): Sanitized x1.
                x2 (Tensor): Sanitized x2.
"""
x1 = x1 * img_size
x2 = x2 * img_size
if cast:
x1 = x1.long()
x2 = x2.long()
        # Take min/max of the original pair so that a swapped box keeps both ends.
        x1, x2 = torch.min(x1, x2), torch.max(x1, x2)
x1 = torch.clamp(x1 - padding, min=0)
x2 = torch.clamp(x2 + padding, max=img_size)
return x1, x2
class InterpolateModule(nn.Module):
"""This is a module version of F.interpolate.
Any arguments you give it just get passed along for the ride.
"""
def __init__(self, *args, **kwargs):
super().__init__()
self.args = args
self.kwargs = kwargs
def forward(self, x):
"""Forward features from the upstream network."""
return F.interpolate(x, *self.args, **self.kwargs)
| insightface/detection/scrfd/mmdet/models/dense_heads/yolact_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/yolact_head.py",
"repo_id": "insightface",
"token_count": 20809
} | 111 |
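A minimal sketch of the mask assembly used by `YOLACTProtonet.forward` above: instance masks are the sigmoid of a linear combination of shared prototypes with per-box coefficients, then cropped to a relative-coordinate box. All shapes, coefficients and the box are illustrative.
import torch
h, w, num_protos = 34, 34, 8
prototypes = torch.randn(h, w, num_protos)  # shared per-image prototypes
coeffs = torch.randn(3, num_protos)  # one coefficient vector per detected box
masks = torch.sigmoid(prototypes @ coeffs.t())  # (h, w, 3): one soft mask per box
# Crop: zero everything outside the box (the same box is reused for all three
# masks here, only to keep the sketch short).
x1, y1, x2, y2 = 0.25, 0.25, 0.75, 0.75
cols = torch.arange(w).view(1, -1, 1).expand(h, w, 3)
rows = torch.arange(h).view(-1, 1, 1).expand(h, w, 3)
inside = (cols >= x1 * w) & (cols < x2 * w) & (rows >= y1 * h) & (rows < y2 * h)
print((masks * inside.float()).shape)  # torch.Size([34, 34, 3])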
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None):
super(MaskRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
| insightface/detection/scrfd/mmdet/models/detectors/mask_rcnn.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/detectors/mask_rcnn.py",
"repo_id": "insightface",
"token_count": 390
} | 112 |
import torch.nn as nn
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
                under this threshold are considered incorrect. Defaults to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
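# Illustrative usage (not part of the original file, assuming `torch` is imported):
#   pred = torch.rand(16, 10)              # (N, num_class) scores
#   target = torch.randint(0, 10, (16,))   # (N,) class indices
#   top1, top5 = accuracy(pred, target, topk=(1, 5))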
| insightface/detection/scrfd/mmdet/models/losses/accuracy.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/losses/accuracy.py",
"repo_id": "insightface",
"token_count": 1177
} | 113 |
import torch.nn as nn
from mmcv.cnn import ConvModule, xavier_init
from ..builder import NECKS
@NECKS.register_module()
class ChannelMapper(nn.Module):
r"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
act_cfg (dict, optional): Config dict for activation layer in
ConvModule. Default: dict(type='ReLU').
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU')):
super(ChannelMapper, self).__init__()
assert isinstance(in_channels, list)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
"""Initialize the weights of ChannelMapper module."""
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
return tuple(outs)
| insightface/detection/scrfd/mmdet/models/necks/channel_mapper.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/necks/channel_mapper.py",
"repo_id": "insightface",
"token_count": 1308
} | 114 |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
from mmcv.runner import force_fp32
from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
@HEADS.register_module()
class SABLHead(nn.Module):
"""Side-Aware Boundary Localization (SABL) for RoI-Head.
Side-Aware features are extracted by conv layers
with an attention mechanism.
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented in BucketingBBoxCoder.
Please refer to https://arxiv.org/abs/1912.04260 for more details.
Args:
cls_in_channels (int): Input channels of cls RoI feature. \
Defaults to 256.
reg_in_channels (int): Input channels of reg RoI feature. \
Defaults to 256.
roi_feat_size (int): Size of RoI features. Defaults to 7.
reg_feat_up_ratio (int): Upsample ratio of reg features. \
Defaults to 2.
reg_pre_kernel (int): Kernel of 2D conv layers before \
attention pooling. Defaults to 3.
reg_post_kernel (int): Kernel of 1D conv layers after \
attention pooling. Defaults to 3.
reg_pre_num (int): Number of pre convs. Defaults to 2.
reg_post_num (int): Number of post convs. Defaults to 1.
num_classes (int): Number of classes in dataset. Defaults to 80.
cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
reg_offset_out_channels (int): Hidden and output channel \
of reg offset branch. Defaults to 256.
reg_cls_out_channels (int): Hidden and output channel \
of reg cls branch. Defaults to 256.
num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0.
        reg_class_agnostic (bool): Class agnostic regression or not. \
Defaults to True.
norm_cfg (dict): Config of norm layers. Defaults to None.
        bbox_coder (dict): Config of bbox coder. Defaults to 'BucketingBBoxCoder'.
loss_cls (dict): Config of classification loss.
loss_bbox_cls (dict): Config of classification loss for bbox branch.
loss_bbox_reg (dict): Config of regression loss for bbox branch.
"""
def __init__(self,
num_classes,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder',
num_buckets=14,
scale_factor=1.7),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=0.1, loss_weight=1.0)):
super(SABLHead, self).__init__()
self.cls_in_channels = cls_in_channels
self.reg_in_channels = reg_in_channels
self.roi_feat_size = roi_feat_size
self.reg_feat_up_ratio = int(reg_feat_up_ratio)
self.num_buckets = bbox_coder['num_buckets']
assert self.reg_feat_up_ratio // 2 >= 1
self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
assert self.up_reg_feat_size == bbox_coder['num_buckets']
self.reg_pre_kernel = reg_pre_kernel
self.reg_post_kernel = reg_post_kernel
self.reg_pre_num = reg_pre_num
self.reg_post_num = reg_post_num
self.num_classes = num_classes
self.cls_out_channels = cls_out_channels
self.reg_offset_out_channels = reg_offset_out_channels
self.reg_cls_out_channels = reg_cls_out_channels
self.num_cls_fcs = num_cls_fcs
self.num_reg_fcs = num_reg_fcs
self.reg_class_agnostic = reg_class_agnostic
assert self.reg_class_agnostic
self.norm_cfg = norm_cfg
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox_cls = build_loss(loss_bbox_cls)
self.loss_bbox_reg = build_loss(loss_bbox_reg)
self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
self.cls_in_channels,
self.roi_feat_size,
self.cls_out_channels)
self.side_num = int(np.ceil(self.num_buckets / 2))
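        # Each of the 4 box sides is described by `side_num` buckets (half of
        # `num_buckets`, rounded up), so bucket targets and predictions carry
        # 4 * side_num channels per box.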
if self.reg_feat_up_ratio > 1:
self.upsample_x = nn.ConvTranspose1d(
reg_in_channels,
reg_in_channels,
self.reg_feat_up_ratio,
stride=self.reg_feat_up_ratio)
self.upsample_y = nn.ConvTranspose1d(
reg_in_channels,
reg_in_channels,
self.reg_feat_up_ratio,
stride=self.reg_feat_up_ratio)
self.reg_pre_convs = nn.ModuleList()
for i in range(self.reg_pre_num):
reg_pre_conv = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=reg_pre_kernel,
padding=reg_pre_kernel // 2,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_pre_convs.append(reg_pre_conv)
self.reg_post_conv_xs = nn.ModuleList()
for i in range(self.reg_post_num):
reg_post_conv_x = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=(1, reg_post_kernel),
padding=(0, reg_post_kernel // 2),
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_post_conv_xs.append(reg_post_conv_x)
self.reg_post_conv_ys = nn.ModuleList()
for i in range(self.reg_post_num):
reg_post_conv_y = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=(reg_post_kernel, 1),
padding=(reg_post_kernel // 2, 0),
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_post_conv_ys.append(reg_post_conv_y)
self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
self.relu = nn.ReLU(inplace=True)
self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
self.reg_in_channels, 1,
self.reg_cls_out_channels)
self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
self.reg_in_channels, 1,
self.reg_offset_out_channels)
self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
fc_out_channels):
in_channels = in_channels * roi_feat_size * roi_feat_size
branch_fcs = nn.ModuleList()
for i in range(num_branch_fcs):
fc_in_channels = (in_channels if i == 0 else fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
return branch_fcs
def init_weights(self):
for module_list in [
self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs
]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
xavier_init(m, distribution='uniform')
if self.reg_feat_up_ratio > 1:
kaiming_init(self.upsample_x, distribution='normal')
kaiming_init(self.upsample_y, distribution='normal')
normal_init(self.reg_conv_att_x, 0, 0.01)
normal_init(self.reg_conv_att_y, 0, 0.01)
normal_init(self.fc_reg_offset, 0, 0.001)
normal_init(self.fc_reg_cls, 0, 0.01)
normal_init(self.fc_cls, 0, 0.01)
def cls_forward(self, cls_x):
cls_x = cls_x.view(cls_x.size(0), -1)
for fc in self.cls_fcs:
cls_x = self.relu(fc(cls_x))
cls_score = self.fc_cls(cls_x)
return cls_score
def attention_pool(self, reg_x):
"""Extract direction-specific features fx and fy with attention
methanism."""
reg_fx = reg_x
reg_fy = reg_x
reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
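        # Normalize each attention map over the dimension that is pooled away
        # below, so fx is a weighted average over the height dim and fy over
        # the width dim.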
reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
return reg_fx, reg_fy
def side_aware_feature_extractor(self, reg_x):
"""Refine and extract side-aware features without split them."""
for reg_pre_conv in self.reg_pre_convs:
reg_x = reg_pre_conv(reg_x)
reg_fx, reg_fy = self.attention_pool(reg_x)
if self.reg_post_num > 0:
reg_fx = reg_fx.unsqueeze(2)
reg_fy = reg_fy.unsqueeze(3)
for i in range(self.reg_post_num):
reg_fx = self.reg_post_conv_xs[i](reg_fx)
reg_fy = self.reg_post_conv_ys[i](reg_fy)
reg_fx = reg_fx.squeeze(2)
reg_fy = reg_fy.squeeze(3)
if self.reg_feat_up_ratio > 1:
reg_fx = self.relu(self.upsample_x(reg_fx))
reg_fy = self.relu(self.upsample_y(reg_fy))
reg_fx = torch.transpose(reg_fx, 1, 2)
reg_fy = torch.transpose(reg_fy, 1, 2)
return reg_fx.contiguous(), reg_fy.contiguous()
    def reg_pred(self, x, offset_fcs, cls_fcs):
        """Predict bucketing estimation (cls_pred) and fine regression
        (offset_pred) with side-aware features."""
        x_offset = x.view(-1, self.reg_in_channels)
        x_cls = x.view(-1, self.reg_in_channels)
        for fc in offset_fcs:
x_offset = self.relu(fc(x_offset))
for fc in cls_fcs:
x_cls = self.relu(fc(x_cls))
offset_pred = self.fc_reg_offset(x_offset)
cls_pred = self.fc_reg_cls(x_cls)
offset_pred = offset_pred.view(x.size(0), -1)
cls_pred = cls_pred.view(x.size(0), -1)
return offset_pred, cls_pred
def side_aware_split(self, feat):
"""Split side-aware features aligned with orders of bucketing
targets."""
l_end = int(np.ceil(self.up_reg_feat_size / 2))
r_start = int(np.floor(self.up_reg_feat_size / 2))
feat_fl = feat[:, :l_end]
feat_fr = feat[:, r_start:].flip(dims=(1, ))
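        # The right-hand half is flipped so that, like the left half, its
        # buckets run from the box boundary inwards, mirroring the target layout.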
feat_fl = feat_fl.contiguous()
feat_fr = feat_fr.contiguous()
feat = torch.cat([feat_fl, feat_fr], dim=-1)
return feat
def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
"""Split batch bbox prediction back to each image."""
bucket_cls_preds, bucket_offset_preds = bbox_pred
bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
bucket_offset_preds = bucket_offset_preds.split(
num_proposals_per_img, 0)
bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
return bbox_pred
def reg_forward(self, reg_x):
outs = self.side_aware_feature_extractor(reg_x)
edge_offset_preds = []
edge_cls_preds = []
reg_fx = outs[0]
reg_fy = outs[1]
offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
self.reg_cls_fcs)
offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
self.reg_cls_fcs)
offset_pred_x = self.side_aware_split(offset_pred_x)
offset_pred_y = self.side_aware_split(offset_pred_y)
cls_pred_x = self.side_aware_split(cls_pred_x)
cls_pred_y = self.side_aware_split(cls_pred_y)
edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
return (edge_cls_preds, edge_offset_preds)
def forward(self, x):
bbox_pred = self.reg_forward(x)
cls_score = self.cls_forward(x)
return cls_score, bbox_pred
def get_targets(self, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,
pos_gt_bboxes, pos_gt_labels,
rcnn_train_cfg)
(labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
return (labels, label_weights, (bucket_cls_targets,
bucket_offset_targets),
(bucket_cls_weights, bucket_offset_weights))
def bucket_target(self,
pos_proposals_list,
neg_proposals_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
rcnn_train_cfg,
concat=True):
(labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights) = multi_apply(
self._bucket_target_single,
pos_proposals_list,
neg_proposals_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights)
def _bucket_target_single(self, pos_proposals, neg_proposals,
pos_gt_bboxes, pos_gt_labels, cfg):
"""Compute bucketing estimation targets and fine regression targets for
a single image.
Args:
pos_proposals (Tensor): positive proposals of a single image,
Shape (n_pos, 4)
neg_proposals (Tensor): negative proposals of a single image,
Shape (n_neg, 4).
pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
of a single image, Shape (n_pos, 4).
pos_gt_labels (Tensor): gt labels assigned to positive proposals
of a single image, Shape (n_pos, ).
cfg (dict): Config of calculating targets
Returns:
tuple:
- labels (Tensor): Labels in a single image. \
Shape (n,).
- label_weights (Tensor): Label weights in a single image.\
Shape (n,)
- bucket_cls_targets (Tensor): Bucket cls targets in \
a single image. Shape (n, num_buckets*2).
- bucket_cls_weights (Tensor): Bucket cls weights in \
a single image. Shape (n, num_buckets*2).
- bucket_offset_targets (Tensor): Bucket offset targets \
in a single image. Shape (n, num_buckets*2).
                - bucket_offset_weights (Tensor): Bucket offset weights \
in a single image. Shape (n, num_buckets*2).
"""
num_pos = pos_proposals.size(0)
num_neg = neg_proposals.size(0)
num_samples = num_pos + num_neg
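        # Targets are laid out with the num_pos positive samples first and the
        # negatives after them; each bucket tensor has 4 * side_num channels
        # (4 box sides, side_num buckets per side).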
labels = pos_gt_bboxes.new_full((num_samples, ),
self.num_classes,
dtype=torch.long)
label_weights = pos_proposals.new_zeros(num_samples)
bucket_cls_targets = pos_proposals.new_zeros(num_samples,
4 * self.side_num)
bucket_cls_weights = pos_proposals.new_zeros(num_samples,
4 * self.side_num)
bucket_offset_targets = pos_proposals.new_zeros(
num_samples, 4 * self.side_num)
bucket_offset_weights = pos_proposals.new_zeros(
num_samples, 4 * self.side_num)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
label_weights[:num_pos] = 1.0
(pos_bucket_offset_targets, pos_bucket_offset_weights,
pos_bucket_cls_targets,
pos_bucket_cls_weights) = self.bbox_coder.encode(
pos_proposals, pos_gt_bboxes)
bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights)
def loss(self,
cls_score,
bbox_pred,
rois,
labels,
label_weights,
bbox_targets,
bbox_weights,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bucket_cls_preds, bucket_offset_preds = bbox_pred
bucket_cls_targets, bucket_offset_targets = bbox_targets
bucket_cls_weights, bucket_offset_weights = bbox_weights
# edge cls
bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
losses['loss_bbox_cls'] = self.loss_bbox_cls(
bucket_cls_preds,
bucket_cls_targets,
bucket_cls_weights,
avg_factor=bucket_cls_targets.size(0),
reduction_override=reduction_override)
losses['loss_bbox_reg'] = self.loss_bbox_reg(
bucket_offset_preds,
bucket_offset_targets,
bucket_offset_weights,
avg_factor=bucket_offset_targets.size(0),
reduction_override=reduction_override)
return losses
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def get_bboxes(self,
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
if bbox_pred is not None:
bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
img_shape)
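            # `confids` are the bucketing confidences returned by the coder;
            # they are used as NMS score_factors below (bucketing-guided rescoring).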
else:
bboxes = rois[:, 1:].clone()
confids = None
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
if rescale and bboxes.size(0) > 0:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
if cfg is None:
return bboxes, scores
else:
det_bboxes, det_labels = multiclass_nms(
bboxes,
scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=confids)
return det_bboxes, det_labels
@force_fp32(apply_to=('bbox_preds', ))
def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
"""Refine bboxes during training.
Args:
rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
and bs is the sampled RoIs per image.
labels (Tensor): Shape (n*bs, ).
bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \
(n*bs, num_buckets*2)].
pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
is a gt bbox.
img_metas (list[dict]): Meta info of each image.
Returns:
list[Tensor]: Refined bboxes of each image in a mini-batch.
"""
img_ids = rois[:, 0].long().unique(sorted=True)
assert img_ids.numel() == len(img_metas)
bboxes_list = []
for i in range(len(img_metas)):
inds = torch.nonzero(
rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
num_rois = inds.numel()
bboxes_ = rois[inds, 1:]
label_ = labels[inds]
edge_cls_preds, edge_offset_preds = bbox_preds
edge_cls_preds_ = edge_cls_preds[inds]
edge_offset_preds_ = edge_offset_preds[inds]
bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]
img_meta_ = img_metas[i]
pos_is_gts_ = pos_is_gts[i]
bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
img_meta_)
# filter gt bboxes
pos_keep = 1 - pos_is_gts_
keep_inds = pos_is_gts_.new_ones(num_rois)
keep_inds[:len(pos_is_gts_)] = pos_keep
bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
return bboxes_list
@force_fp32(apply_to=('bbox_pred', ))
def regress_by_class(self, rois, label, bbox_pred, img_meta):
"""Regress the bbox for the predicted class. Used in Cascade R-CNN.
Args:
rois (Tensor): shape (n, 4) or (n, 5)
label (Tensor): shape (n, )
bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \
(n, num_buckets *2)]
img_meta (dict): Image meta info.
Returns:
Tensor: Regressed bboxes, the same shape as input rois.
"""
assert rois.size(1) == 4 or rois.size(1) == 5
if rois.size(1) == 4:
new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
img_meta['img_shape'])
else:
bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
img_meta['img_shape'])
new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
return new_rois
| insightface/detection/scrfd/mmdet/models/roi_heads/bbox_heads/sabl_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/bbox_heads/sabl_head.py",
"repo_id": "insightface",
"token_count": 13373
} | 115 |
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
import torch
import torch.nn.functional as F
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks
from .. import builder
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class PointRendRoIHead(StandardRoIHead):
"""`PointRend <https://arxiv.org/abs/1912.08193>`_."""
def __init__(self, point_head, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.with_bbox and self.with_mask
self.init_point_head(point_head)
def init_point_head(self, point_head):
"""Initialize ``point_head``"""
self.point_head = builder.build_head(point_head)
def init_weights(self, pretrained):
"""Initialize the weights in head.
Args:
pretrained (str, optional): Path to pre-trained weights.
"""
super().init_weights(pretrained)
self.point_head.init_weights()
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for mask head and point head
in training."""
mask_results = super()._mask_forward_train(x, sampling_results,
bbox_feats, gt_masks,
img_metas)
if mask_results['loss_mask'] is not None:
loss_point = self._mask_point_forward_train(
x, sampling_results, mask_results['mask_pred'], gt_masks,
img_metas)
mask_results['loss_mask'].update(loss_point)
return mask_results
def _mask_point_forward_train(self, x, sampling_results, mask_pred,
gt_masks, img_metas):
"""Run forward function and calculate loss for point head in
training."""
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
rel_roi_points = self.point_head.get_roi_rel_points_train(
mask_pred, pos_labels, cfg=self.train_cfg)
rois = bbox2roi([res.pos_bboxes for res in sampling_results])
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, rois, rel_roi_points, img_metas)
coarse_point_feats = point_sample(mask_pred, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
mask_point_target = self.point_head.get_targets(
rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)
loss_mask_point = self.point_head.loss(mask_point_pred,
mask_point_target, pos_labels)
return loss_mask_point
def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
img_metas):
"""Sample fine grained feats from each level feature map and
concatenate them together."""
num_imgs = len(img_metas)
fine_grained_feats = []
for idx in range(self.mask_roi_extractor.num_inputs):
feats = x[idx]
spatial_scale = 1. / float(
self.mask_roi_extractor.featmap_strides[idx])
point_feats = []
for batch_ind in range(num_imgs):
# unravel batch dim
feat = feats[batch_ind].unsqueeze(0)
inds = (rois[:, 0].long() == batch_ind)
if inds.any():
rel_img_points = rel_roi_point_to_rel_img_point(
rois[inds], rel_roi_points[inds], feat.shape[2:],
spatial_scale).unsqueeze(0)
point_feat = point_sample(feat, rel_img_points)
point_feat = point_feat.squeeze(0).transpose(0, 1)
point_feats.append(point_feat)
fine_grained_feats.append(torch.cat(point_feats, dim=0))
return torch.cat(fine_grained_feats, dim=1)
def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
img_metas):
"""Mask refining process with point head in testing."""
refined_mask_pred = mask_pred.clone()
for subdivision_step in range(self.test_cfg.subdivision_steps):
refined_mask_pred = F.interpolate(
refined_mask_pred,
scale_factor=self.test_cfg.scale_factor,
mode='bilinear',
align_corners=False)
            # If `subdivision_num_points` is larger than or equal to the
            # resolution of the next step, then we can skip this step
num_rois, channels, mask_height, mask_width = \
refined_mask_pred.shape
if (self.test_cfg.subdivision_num_points >=
self.test_cfg.scale_factor**2 * mask_height * mask_width
and
subdivision_step < self.test_cfg.subdivision_steps - 1):
continue
point_indices, rel_roi_points = \
self.point_head.get_roi_rel_points_test(
refined_mask_pred, label_pred, cfg=self.test_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, rois, rel_roi_points, img_metas)
coarse_point_feats = point_sample(mask_pred, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
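            # Scatter the refined per-point logits back into the flattened mask,
            # so only the sampled (most uncertain) positions are updated at this
            # subdivision step.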
refined_mask_pred = refined_mask_pred.reshape(
num_rois, channels, mask_height * mask_width)
refined_mask_pred = refined_mask_pred.scatter_(
2, point_indices, mask_point_pred)
refined_mask_pred = refined_mask_pred.view(num_rois, channels,
mask_height, mask_width)
return refined_mask_pred
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Obtain mask prediction without augmentation."""
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
num_imgs = len(det_bboxes)
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
segm_results = [[[] for _ in range(self.mask_head.num_classes)]
for _ in range(num_imgs)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i][:, :4]
for i in range(len(det_bboxes))
]
mask_rois = bbox2roi(_bboxes)
mask_results = self._mask_forward(x, mask_rois)
# split batch mask prediction back to each image
mask_pred = mask_results['mask_pred']
num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
mask_rois = mask_rois.split(num_mask_roi_per_img, 0)
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[] for _ in range(self.mask_head.num_classes)])
else:
x_i = [xx[[i]] for xx in x]
mask_rois_i = mask_rois[i]
mask_rois_i[:, 0] = 0 # TODO: remove this hack
mask_pred_i = self._mask_point_forward_test(
x_i, mask_rois_i, det_labels[i], mask_preds[i],
[img_metas])
segm_result = self.mask_head.get_seg_masks(
mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg,
ori_shapes[i], scale_factors[i], rescale)
segm_results.append(segm_result)
return segm_results
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
"""Test for mask head with test time augmentation."""
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
mask_results = self._mask_forward(x, mask_rois)
mask_results['mask_pred'] = self._mask_point_forward_test(
x, mask_rois, det_labels, mask_results['mask_pred'],
img_metas)
# convert to numpy array to save memory
aug_masks.append(
mask_results['mask_pred'].sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg,
ori_shape,
scale_factor=1.0,
rescale=False)
return segm_result
| insightface/detection/scrfd/mmdet/models/roi_heads/point_rend_roi_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/point_rend_roi_head.py",
"repo_id": "insightface",
"token_count": 5446
} | 116 |
# This file is added for back-compatibility. Thus, downstream codebase
# could still use and import mmdet.ops.
# yapf: disable
from mmcv.ops import (ContextBlock, Conv2d, ConvTranspose2d, ConvWS2d,
CornerPool, DeformConv, DeformConvPack, DeformRoIPooling,
DeformRoIPoolingPack, GeneralizedAttention, Linear,
MaskedConv2d, MaxPool2d, ModulatedDeformConv,
ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack,
NonLocal2D, RoIAlign, RoIPool, SAConv2d,
SigmoidFocalLoss, SimpleRoIAlign, batched_nms,
build_plugin_layer, conv_ws_2d, deform_conv,
deform_roi_pooling, get_compiler_version,
get_compiling_cuda_version, modulated_deform_conv, nms,
nms_match, point_sample, rel_roi_point_to_rel_img_point,
roi_align, roi_pool, sigmoid_focal_loss, soft_nms)
# yapf: enable
__all__ = [
'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool',
'DeformConv', 'DeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack',
'ModulatedDeformRoIPoolingPack', 'ModulatedDeformConv',
'ModulatedDeformConvPack', 'deform_conv', 'modulated_deform_conv',
'deform_roi_pooling', 'SigmoidFocalLoss', 'sigmoid_focal_loss',
'MaskedConv2d', 'ContextBlock', 'GeneralizedAttention', 'NonLocal2D',
'get_compiler_version', 'get_compiling_cuda_version', 'ConvWS2d',
'conv_ws_2d', 'build_plugin_layer', 'batched_nms', 'Conv2d',
'ConvTranspose2d', 'MaxPool2d', 'Linear', 'nms_match', 'CornerPool',
'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
'SAConv2d'
]
| insightface/detection/scrfd/mmdet/ops/__init__.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/ops/__init__.py",
"repo_id": "insightface",
"token_count": 859
} | 117 |
import os
import os.path as osp
import io
import numpy as np
import argparse
import datetime
import importlib
import configparser
from tqdm import tqdm
from mmdet.models import build_detector
import torch
import autotorch as at
from mmcv import Config
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
@at.obj(
block=at.Choice('BasicBlock', 'Bottleneck'),
base_channels=at.Int(8, 64),
stage_blocks=at.List(
at.Int(1,10),
at.Int(1,10),
at.Int(1,10),
at.Int(1,10),
),
stage_planes_ratio=at.List(
at.Real(1.0,4.0),
at.Real(1.0,4.0),
at.Real(1.0,4.0),
),
)
class GenConfigBackbone:
def __init__(self, **kwargs):
d = {}
d.update(**kwargs)
for k, v in d.items():
setattr(self, k, v)
self.m = 1.0
def stage_blocks_multi(self, m):
self.m = m
def merge_cfg(self, det_cfg):
base_channels = max(8, int(self.base_channels*self.m)//8 * 8)
stage_planes = [base_channels]
for ratio in self.stage_planes_ratio:
planes = int(stage_planes[-1] * ratio) //8 * 8
stage_planes.append(planes)
stage_blocks = [max(1, int(x*self.m)) for x in self.stage_blocks]
#print('Blocks:', stage_blocks)
#print('Planes:', stage_planes)
block_cfg=dict(block=self.block, stage_blocks=tuple(stage_blocks), stage_planes=stage_planes)
det_cfg['model']['backbone']['block_cfg'] = block_cfg
det_cfg['model']['backbone']['base_channels'] = base_channels
neck_in_planes = stage_planes if self.block=='BasicBlock' else [4*x for x in stage_planes]
det_cfg['model']['neck']['in_channels'] = neck_in_planes
return det_cfg
@at.obj(
stage_blocks_ratio=at.Real(0.5, 3.0),
base_channels_ratio=at.Real(0.5, 3.0),
fpn_channel=at.Int(8,128),
head_channel=at.Int(8,256),
head_stack=at.Int(1,4),
)
class GenConfigAll:
def __init__(self, **kwargs):
d = {}
d.update(**kwargs)
for k, v in d.items():
setattr(self, k, v)
self.m = 1
def merge_cfg(self, det_cfg):
block_cfg = det_cfg['model']['backbone']['block_cfg']
stage_blocks = tuple([int(np.round(x*self.stage_blocks_ratio)) for x in block_cfg['stage_blocks']])
block_cfg['stage_blocks'] = stage_blocks
stage_planes = [int(np.round(x*self.base_channels_ratio))//8*8 for x in block_cfg['stage_planes']]
block_cfg['stage_planes'] = stage_planes
det_cfg['model']['backbone']['block_cfg'] = block_cfg
det_cfg['model']['backbone']['base_channels'] = stage_planes[0]
neck_in_planes = stage_planes if block_cfg['block']=='BasicBlock' else [4*x for x in stage_planes]
det_cfg['model']['neck']['in_channels'] = neck_in_planes
fpn_channel = self.fpn_channel//8*8
head_channel = self.head_channel//8*8
head_stack = self.head_stack
det_cfg['model']['neck']['out_channels'] = fpn_channel
det_cfg['model']['bbox_head']['in_channels'] = fpn_channel
det_cfg['model']['bbox_head']['feat_channels'] = head_channel
det_cfg['model']['bbox_head']['stacked_convs'] = head_stack
gn_num_groups = 8
for _gn_num_groups in [8, 16, 32, 64]:
if head_channel%_gn_num_groups!=0:
break
gn_num_groups = _gn_num_groups
det_cfg['model']['bbox_head']['norm_cfg']['num_groups'] = gn_num_groups
return det_cfg
def get_args():
parser = argparse.ArgumentParser(description='Auto-SCRFD')
# config files
parser.add_argument('--group', type=str, default='configs/scrfdgen2.5g', help='configs work dir')
parser.add_argument('--template', type=int, default=0, help='template config index')
parser.add_argument('--gflops', type=float, default=2.5, help='expected flops')
    parser.add_argument('--mode', type=int, default=1, help='generation mode: 1 to search the backbone, 2 to search the whole model')
# target flops
parser.add_argument('--eps', type=float, default=2e-2,
help='eps for expected flops')
# num configs
parser.add_argument('--num-configs', type=int, default=64, help='num of expected configs')
parser = parser
args = parser.parse_args()
return args
def is_config_valid(cfg, target_flops, input_shape, eps):
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is not currently supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape, print_per_layer_stat=False, as_strings=False)
print('FLOPs:', flops/1e9)
return flops <= (1. + eps) * target_flops and \
flops >= (1. - eps) * target_flops
def get_flops(cfg, input_shape):
model = build_detector(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
#if torch.cuda.is_available():
# model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is not currently supported with {}'.
format(model.__class__.__name__))
buf = io.StringIO()
all_flops, params = get_model_complexity_info(model, input_shape, print_per_layer_stat=True, as_strings=False, ost=buf)
buf = buf.getvalue()
#print(buf)
lines = buf.split("\n")
names = ['(stem)', '(layer1)', '(layer2)', '(layer3)', '(layer4)', '(neck)', '(bbox_head)']
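    # Parse the textual per-layer complexity report: this assumes the module
    # names above appear in this order, and that the GFLOPs value is the third
    # comma-separated field on the line following each matched name.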
name_ptr = 0
line_num = 0
_flops = []
while name_ptr<len(names):
line = lines[line_num].strip()
name = names[name_ptr]
if line.startswith(name):
flops = float(lines[line_num+1].split(',')[2].strip().split(' ')[0])
_flops.append(flops)
name_ptr+=1
line_num+=1
backbone_flops = np.array(_flops[:-2], dtype=np.float32)
neck_flops = _flops[-2]
head_flops = _flops[-1]
return all_flops/1e9, backbone_flops, neck_flops, head_flops
def is_flops_valid(flops, target_flops, eps):
return flops <= (1. + eps) * target_flops and \
flops >= (1. - eps) * target_flops
def main():
args = get_args()
print(datetime.datetime.now())
input_shape = (3,480,640)
runtime_input_shape = input_shape
flops_mult = 1.0
assert osp.exists(args.group)
group_name = args.group.split('/')[-1]
assert len(group_name)>0
input_template = osp.join(args.group, "%s_%d.py"%(group_name, args.template))
assert osp.exists(input_template)
write_index = args.template+1
while True:
output_cfg = osp.join(args.group, "%s_%d.py"%(group_name, write_index))
if not osp.exists(output_cfg):
break
write_index+=1
print('write-index from:', write_index)
if args.mode==1:
gen = GenConfigBackbone()
elif args.mode==2:
gen = GenConfigAll()
det_cfg = Config.fromfile(input_template)
_, template_backbone_flops, _, _= get_flops(det_cfg, runtime_input_shape)
template_backbone_ratios = list(map(lambda x:x/template_backbone_flops[0], template_backbone_flops))
print('template_backbone_ratios:', template_backbone_ratios)
pp = 0
write_count = 0
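    # Random search loop: keep sampling configs and only dump those whose total
    # FLOPs (and, in mode 2, per-stage FLOPs ratios) match the target within eps.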
while write_count < args.num_configs:
pp+=1
det_cfg = Config.fromfile(input_template)
config = gen.rand
det_cfg = config.merge_cfg(det_cfg)
all_flops, backbone_flops, neck_flops, head_flops = get_flops(det_cfg, runtime_input_shape)
assert len(backbone_flops)==5
all_flops *= flops_mult
backbone_flops *= flops_mult
neck_flops *= flops_mult
head_flops *= flops_mult
is_valid = True
if pp%10==0:
print(pp, all_flops, backbone_flops, neck_flops, head_flops, datetime.datetime.now())
if args.mode==2:
backbone_ratios = list(map(lambda x:x/backbone_flops[0], backbone_flops))
#if head_flops*0.8<neck_flops:
# continue
for i in range(1,5):
if not is_flops_valid(template_backbone_ratios[i], backbone_ratios[i], args.eps*5):
is_valid = False
break
if not is_valid:
continue
#if args.mode==1:
# if np.argmax(backbone_flops)!=1:
# continue
# if np.mean(backbone_flops[1:3])*0.8<np.mean(backbone_flops[-2:]):
# continue
if not is_flops_valid(all_flops, args.gflops, args.eps):
continue
output_cfg_file = osp.join(args.group, "%s_%d.py"%(group_name, write_index))
det_cfg.dump(output_cfg_file)
print('SUCC', write_index, all_flops, backbone_flops, neck_flops, head_flops, datetime.datetime.now())
write_index += 1
write_count += 1
if __name__ == '__main__':
main()
| insightface/detection/scrfd/search_tools/generate_configs_2.5g.py/0 | {
"file_path": "insightface/detection/scrfd/search_tools/generate_configs_2.5g.py",
"repo_id": "insightface",
"token_count": 4333
} | 118 |
import argparse
import os.path as osp
import numpy as np
import onnx
import os
#import onnxruntime as rt
import torch
from mmdet.core import (build_model_from_cfg, generate_inputs_and_wrap_model,
preprocess_example_input)
#from mmdet.models import build
def pytorch2onnx(config_path,
checkpoint_path,
input_img,
input_shape,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
simplify = True,
dynamic = True,
normalize_cfg=None,
dataset='coco',
test_img=None):
input_config = {
'input_shape': input_shape,
'input_path': input_img,
'normalize_cfg': normalize_cfg
}
checkpoint = torch.load(checkpoint_path, map_location='cpu')
tmp_ckpt_file = None
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
tmp_ckpt_file = checkpoint_path+"_slim.pth"
torch.save(checkpoint, tmp_ckpt_file)
print('remove optimizer params and save to', tmp_ckpt_file)
checkpoint_path = tmp_ckpt_file
model, tensor_data = generate_inputs_and_wrap_model(
config_path, checkpoint_path, input_config)
if tmp_ckpt_file is not None:
os.remove(tmp_ckpt_file)
if simplify or dynamic:
ori_output_file = output_file.split('.')[0]+"_ori.onnx"
else:
ori_output_file = output_file
# Define input and outputs names, which are required to properly define
# dynamic axes
input_names = ['input.1']
output_names = ['score_8', 'score_16', 'score_32',
'bbox_8', 'bbox_16', 'bbox_32',
]
# If model graph contains keypoints strides add keypoints to outputs
if 'stride_kps' in str(model):
output_names += ['kps_8', 'kps_16', 'kps_32']
# Define dynamic axes for export
dynamic_axes = None
if dynamic:
dynamic_axes = {out: {0: '?', 1: '?'} for out in output_names}
dynamic_axes[input_names[0]] = {
0: '?',
2: '?',
3: '?'
}
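        # Batch and spatial dims of the input, and the first two dims of every
        # output, stay symbolic ('?'), so one exported ONNX file can serve
        # arbitrary input sizes.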
torch.onnx.export(
model,
tensor_data,
ori_output_file,
keep_initializers_as_inputs=False,
verbose=False,
input_names=input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
opset_version=opset_version)
if simplify or dynamic:
model = onnx.load(ori_output_file)
if simplify:
from onnxsim import simplify
#print(model.graph.input[0])
if dynamic:
input_shapes = {model.graph.input[0].name : list(input_shape)}
model, check = simplify(model, input_shapes=input_shapes, dynamic_input_shape=True)
else:
model, check = simplify(model)
assert check, "Simplified ONNX model could not be validated"
onnx.save(model, output_file)
os.remove(ori_output_file)
print(f'Successfully exported ONNX model: {output_file}')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--input-img', type=str, help='Images for input')
parser.add_argument('--show', action='store_true', help='show onnx graph')
parser.add_argument('--output-file', type=str, default='')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--test-img', type=str, default=None, help='Images for test')
parser.add_argument(
'--dataset', type=str, default='coco', help='Dataset name')
parser.add_argument(
'--verify',
action='store_true',
help='verify the onnx model output against pytorch output')
parser.add_argument(
'--shape',
type=int,
nargs='+',
#default=[640, 640],
#default=[384, 384],
default=[-1, -1],
help='input image size')
parser.add_argument(
'--mean',
type=float,
nargs='+',
default=[127.5, 127.5, 127.5],
help='mean value used for preprocess input data')
parser.add_argument(
'--std',
type=float,
nargs='+',
default=[128.0, 128.0, 128.0],
help='variance value used for preprocess input data')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
    assert args.opset_version == 11, 'MMDet only supports opset 11 now'
if not args.input_img:
args.input_img = osp.join(
osp.dirname(__file__), '../tests/data/t1.jpg')
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (1, 3) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
assert len(args.mean) == 3
assert len(args.std) == 3
simplify = True
dynamic = False
if input_shape[2]<=0 or input_shape[3]<=0:
input_shape = (1,3,640,640)
dynamic = True
#simplify = False
print('set to dynamic input with dummy shape:', input_shape)
normalize_cfg = {'mean': args.mean, 'std': args.std}
if len(args.output_file)==0:
output_dir = osp.join(osp.dirname(__file__), '../onnx')
if not osp.exists(output_dir):
os.makedirs(output_dir)
cfg_name = args.config.split('/')[-1]
pos = cfg_name.rfind('.')
cfg_name = cfg_name[:pos]
if dynamic:
args.output_file = osp.join(output_dir, "%s.onnx"%cfg_name)
else:
args.output_file = osp.join(output_dir, "%s_shape%dx%d.onnx"%(cfg_name,input_shape[2],input_shape[3]))
# convert model to onnx file
pytorch2onnx(
args.config,
args.checkpoint,
args.input_img,
input_shape,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify,
simplify = simplify,
dynamic = dynamic,
normalize_cfg=normalize_cfg,
dataset=args.dataset,
test_img=args.test_img)
| insightface/detection/scrfd/tools/scrfd2onnx.py/0 | {
"file_path": "insightface/detection/scrfd/tools/scrfd2onnx.py",
"repo_id": "insightface",
"token_count": 3028
} | 119 |
# InsightFace Generation Projects
| insightface/generation/README.md/0 | {
"file_path": "insightface/generation/README.md",
"repo_id": "insightface",
"token_count": 6
} | 120 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author : Qingping Zheng
@Contact : qingpingzheng2014@gmail.com
@File : logging.py
@Time : 10/01/21 00:00 PM
@Desc :
@License : Licensed under the Apache License, Version 2.0 (the "License");
@Copyright : Copyright 2022 The Authors. All Rights Reserved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch.distributed as dist
logger_initialized = {}
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get root logger.
Args:
log_file (str, optional): File path of log. Defaults to None.
log_level (int, optional): The level of logger.
Defaults to logging.INFO.
Returns:
:obj:`logging.Logger`: The obtained logger
"""
logger = get_logger(name='face_parsing', log_file=log_file, log_level=log_level)
return logger
def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'):
"""Initialize and get a logger by name.
If the logger has not been initialized, this method will initialize the
logger by adding one or two handlers, otherwise the initialized logger will
be directly returned. During initialization, a StreamHandler will always be
added. If `log_file` is specified and the process rank is 0, a FileHandler
will also be added.
Args:
name (str): Logger name.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the logger.
log_level (int): The logger level. Note that only the process of
rank 0 is affected, and other processes will set the level to
"Error" thus be silent most of the time.
file_mode (str): The file mode used in opening log file.
Defaults to 'w'.
Returns:
logging.Logger: The expected logger.
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
# handle hierarchical names
# e.g., logger "a" is initialized, then logger "a.b" will skip the
# initialization since it is a child of "a".
for logger_name in logger_initialized:
if name.startswith(logger_name):
return logger
# handle duplicate logs to the console
# Starting in 1.8.0, PyTorch DDP attaches a StreamHandler <stderr> (NOTSET)
# to the root logger. As logger.propagate is True by default, this root
# level handler causes logging messages from rank>0 processes to
# unexpectedly show up on the console, creating much unwanted clutter.
# To fix this issue, we set the root logger's StreamHandler, if any, to log
# at the ERROR level.
for handler in logger.root.handlers:
if type(handler) is logging.StreamHandler:
handler.setLevel(logging.ERROR)
stream_handler = logging.StreamHandler()
handlers = [stream_handler]
if dist.is_available() and dist.is_initialized():
rank = dist.get_rank()
else:
rank = 0
# only rank 0 will add a FileHandler
if rank == 0 and log_file is not None:
# Here, the default behaviour of the official logger is 'a'. Thus, we
# provide an interface to change the file mode to the default
# behaviour.
file_handler = logging.FileHandler(log_file, file_mode)
handlers.append(file_handler)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(log_level)
logger.addHandler(handler)
if rank == 0:
logger.setLevel(log_level)
else:
logger.setLevel(logging.ERROR)
logger_initialized[name] = True
return logger
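# Illustrative usage (not part of the original file):
#   logger = get_root_logger(log_file='work_dirs/train.log')
#   logger.info('only rank 0 writes this line to both console and file')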
def print_log(msg, logger=None, level=logging.INFO):
"""Print a log message.
Args:
msg (str): The message to be logged.
logger (logging.Logger | str | None): The logger to be used.
Some special loggers are:
- "silent": no message will be printed.
- other str: the logger obtained with `get_root_logger(logger)`.
- None: The `print()` method will be used to print log messages.
level (int): Logging level. Only available when `logger` is a Logger
object or "root".
"""
if logger is None:
print(msg)
elif isinstance(logger, logging.Logger):
logger.log(level, msg)
elif logger == 'silent':
pass
elif isinstance(logger, str):
_logger = get_logger(logger)
_logger.log(level, msg)
else:
raise TypeError(
'logger should be either a logging.Logger object, str, '
f'"silent" or None, but got {type(logger)}')
| insightface/parsing/dml_csr/utils/logging.py/0 | {
"file_path": "insightface/parsing/dml_csr/utils/logging.py",
"repo_id": "insightface",
"token_count": 1840
} | 121 |
from .image import get_image
from .pickle_object import get_object
| insightface/python-package/insightface/data/__init__.py/0 | {
"file_path": "insightface/python-package/insightface/data/__init__.py",
"repo_id": "insightface",
"token_count": 19
} | 122 |
"""
This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/model_store.py
"""
from __future__ import print_function
__all__ = ['get_model_file']
import os
import zipfile
import glob
from ..utils import download, check_sha1
_model_sha1 = {
name: checksum
for checksum, name in [
('95be21b58e29e9c1237f229dae534bd854009ce0', 'arcface_r100_v1'),
('', 'arcface_mfn_v1'),
('39fd1e087a2a2ed70a154ac01fecaa86c315d01b', 'retinaface_r50_v1'),
('2c9de8116d1f448fd1d4661f90308faae34c990a', 'retinaface_mnet025_v1'),
('0db1d07921d005e6c9a5b38e059452fc5645e5a4', 'retinaface_mnet025_v2'),
('7dd8111652b7aac2490c5dcddeb268e53ac643e6', 'genderage_v1'),
]
}
base_repo_url = 'https://insightface.ai/files/'
_url_format = '{repo_url}models/{file_name}.zip'
def short_hash(name):
if name not in _model_sha1:
raise ValueError(
'Pretrained model for {name} is not available.'.format(name=name))
return _model_sha1[name][:8]
def find_params_file(dir_path):
if not os.path.exists(dir_path):
return None
paths = glob.glob("%s/*.params" % dir_path)
if len(paths) == 0:
return None
paths = sorted(paths)
return paths[-1]
def get_model_file(name, root=os.path.join('~', '.insightface', 'models')):
r"""Return location for the pretrained on local file system.
This function will download from online model zoo when model cannot be found or has mismatch.
The root directory will be created if it doesn't exist.
Parameters
----------
name : str
Name of the model.
    root : str, default '~/.insightface/models'
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file.
"""
file_name = name
root = os.path.expanduser(root)
dir_path = os.path.join(root, name)
file_path = find_params_file(dir_path)
#file_path = os.path.join(root, file_name + '.params')
sha1_hash = _model_sha1[name]
if file_path is not None:
if check_sha1(file_path, sha1_hash):
return file_path
else:
print(
'Mismatch in the content of model file detected. Downloading again.'
)
else:
print('Model file is not found. Downloading.')
if not os.path.exists(root):
os.makedirs(root)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
zip_file_path = os.path.join(root, file_name + '.zip')
repo_url = base_repo_url
if repo_url[-1] != '/':
repo_url = repo_url + '/'
download(_url_format.format(repo_url=repo_url, file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(dir_path)
os.remove(zip_file_path)
file_path = find_params_file(dir_path)
if check_sha1(file_path, sha1_hash):
return file_path
else:
raise ValueError(
'Downloaded file has different hash. Please try again.')
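# Illustrative usage (not part of the original file):
#   path = get_model_file('arcface_r100_v1')
#   # downloads and unzips into ~/.insightface/models/arcface_r100_v1 if needed,
#   # then returns the path of a .params file inside that directory.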
| insightface/python-package/insightface/model_zoo/model_store.py/0 | {
"file_path": "insightface/python-package/insightface/model_zoo/model_store.py",
"repo_id": "insightface",
"token_count": 1393
} | 123 |
"""
This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/utils/download.py
"""
import os
import hashlib
import requests
from tqdm import tqdm
def check_sha1(filename, sha1_hash):
"""Check whether the sha1 hash of the file content matches the expected hash.
Parameters
----------
filename : str
Path to the file.
sha1_hash : str
Expected sha1 hash in hexadecimal digits.
Returns
-------
bool
Whether the file content matches the expected hash.
"""
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
sha1_file = sha1.hexdigest()
l = min(len(sha1_file), len(sha1_hash))
return sha1.hexdigest()[0:l] == sha1_hash[0:l]
def download_file(url, path=None, overwrite=False, sha1_hash=None):
"""Download an given URL
Parameters
----------
url : str
URL to download
path : str, optional
Destination path to store downloaded file. By default stores to the
current directory with same name as in url.
overwrite : bool, optional
Whether to overwrite destination file if already exists.
sha1_hash : str, optional
Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
but doesn't match.
Returns
-------
str
The file path of the downloaded file.
"""
if path is None:
fname = url.split('/')[-1]
else:
path = os.path.expanduser(path)
if os.path.isdir(path):
fname = os.path.join(path, url.split('/')[-1])
else:
fname = path
if overwrite or not os.path.exists(fname) or (
sha1_hash and not check_sha1(fname, sha1_hash)):
dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
if not os.path.exists(dirname):
os.makedirs(dirname)
print('Downloading %s from %s...' % (fname, url))
r = requests.get(url, stream=True)
if r.status_code != 200:
raise RuntimeError("Failed downloading url %s" % url)
total_length = r.headers.get('content-length')
with open(fname, 'wb') as f:
if total_length is None: # no content length header
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
else:
total_length = int(total_length)
for chunk in tqdm(r.iter_content(chunk_size=1024),
total=int(total_length / 1024. + 0.5),
unit='KB',
unit_scale=False,
dynamic_ncols=True):
f.write(chunk)
if sha1_hash and not check_sha1(fname, sha1_hash):
raise UserWarning('File {} is downloaded but the content hash does not match. ' \
'The repo may be outdated or download may be incomplete. ' \
'If the "repo_url" is overridden, consider switching to ' \
'the default repo.'.format(fname))
return fname
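# Illustrative usage (not part of the original file):
#   fname = download_file(url, path='~/.insightface/models/model.zip', sha1_hash='...')
#   # downloads only when the file is missing, overwrite=True, or the given
#   # sha1 hash does not match the existing file.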
| insightface/python-package/insightface/utils/download.py/0 | {
"file_path": "insightface/python-package/insightface/utils/download.py",
"repo_id": "insightface",
"token_count": 1597
} | 124 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from easydict import EasyDict as edict
import time
import sys
import numpy as np
import argparse
import struct
import cv2
import sklearn
from sklearn.preprocessing import normalize
import mxnet as mx
from mxnet import ndarray as nd
def read_img(image_path):
    img = cv2.imread(image_path, cv2.IMREAD_COLOR)
return img
def get_feature(imgs, nets):
count = len(imgs)
data = mx.nd.zeros(shape=(count * 2, 3, imgs[0].shape[0],
imgs[0].shape[1]))
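    # The batch holds each image twice: indices [0, count) are the originals and
    # [count, 2*count) their horizontal flips; the two embeddings are summed
    # (flip test-time augmentation) and then L2-normalized below.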
for idx, img in enumerate(imgs):
img = img[:, :, ::-1] #to rgb
img = np.transpose(img, (2, 0, 1))
for flipid in [0, 1]:
_img = np.copy(img)
if flipid == 1:
_img = _img[:, :, ::-1]
_img = nd.array(_img)
data[count * flipid + idx] = _img
F = []
for net in nets:
db = mx.io.DataBatch(data=(data, ))
net.model.forward(db, is_train=False)
x = net.model.get_outputs()[0].asnumpy()
embedding = x[0:count, :] + x[count:, :]
embedding = sklearn.preprocessing.normalize(embedding)
#print('emb', embedding.shape)
F.append(embedding)
F = np.concatenate(F, axis=1)
F = sklearn.preprocessing.normalize(F)
#print('F', F.shape)
return F
def write_bin(path, feature):
feature = list(feature)
with open(path, 'wb') as f:
f.write(struct.pack('4i', len(feature), 1, 4, 5))
f.write(struct.pack("%df" % len(feature), *feature))
def get_and_write(buffer, nets):
imgs = []
for k in buffer:
imgs.append(k[0])
features = get_feature(imgs, nets)
#print(np.linalg.norm(feature))
assert features.shape[0] == len(buffer)
for ik, k in enumerate(buffer):
out_path = k[1]
feature = features[ik].flatten()
write_bin(out_path, feature)
def main(args):
print(args)
gpuid = args.gpu
ctx = mx.gpu(gpuid)
nets = []
image_shape = [int(x) for x in args.image_size.split(',')]
for model in args.model.split('|'):
vec = model.split(',')
assert len(vec) > 1
prefix = vec[0]
epoch = int(vec[1])
print('loading', prefix, epoch)
net = edict()
net.ctx = ctx
net.sym, net.arg_params, net.aux_params = mx.model.load_checkpoint(
prefix, epoch)
all_layers = net.sym.get_internals()
net.sym = all_layers['fc1_output']
net.model = mx.mod.Module(symbol=net.sym,
context=net.ctx,
label_names=None)
net.model.bind(data_shapes=[('data', (1, 3, image_shape[1],
image_shape[2]))])
net.model.set_params(net.arg_params, net.aux_params)
nets.append(net)
facescrub_out = os.path.join(args.output, 'facescrub')
megaface_out = os.path.join(args.output, 'megaface')
i = 0
succ = 0
buffer = []
for line in open(args.facescrub_lst, 'r'):
if i % 1000 == 0:
print("writing fs", i, succ)
i += 1
image_path = line.strip()
_path = image_path.split('/')
a, b = _path[-2], _path[-1]
out_dir = os.path.join(facescrub_out, a)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
image_path = os.path.join(args.facescrub_root, image_path)
img = read_img(image_path)
if img is None:
print('read error:', image_path)
continue
out_path = os.path.join(out_dir, b + "_%s.bin" % (args.algo))
item = (img, out_path)
buffer.append(item)
if len(buffer) == args.batch_size:
get_and_write(buffer, nets)
buffer = []
succ += 1
if len(buffer) > 0:
get_and_write(buffer, nets)
buffer = []
print('fs stat', i, succ)
i = 0
succ = 0
buffer = []
for line in open(args.megaface_lst, 'r'):
if i % 1000 == 0:
print("writing mf", i, succ)
i += 1
image_path = line.strip()
_path = image_path.split('/')
a1, a2, b = _path[-3], _path[-2], _path[-1]
out_dir = os.path.join(megaface_out, a1, a2)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
#continue
#print(landmark)
image_path = os.path.join(args.megaface_root, image_path)
img = read_img(image_path)
if img is None:
print('read error:', image_path)
continue
out_path = os.path.join(out_dir, b + "_%s.bin" % (args.algo))
item = (img, out_path)
buffer.append(item)
if len(buffer) == args.batch_size:
get_and_write(buffer, nets)
buffer = []
succ += 1
if len(buffer) > 0:
get_and_write(buffer, nets)
buffer = []
print('mf stat', i, succ)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, help='', default=8)
parser.add_argument('--image_size', type=str, help='', default='3,112,112')
parser.add_argument('--gpu', type=int, help='', default=0)
parser.add_argument('--algo', type=str, help='', default='insightface')
parser.add_argument('--facescrub-lst',
type=str,
help='',
default='./data/facescrub_lst')
parser.add_argument('--megaface-lst',
type=str,
help='',
default='./data/megaface_lst')
parser.add_argument('--facescrub-root',
type=str,
help='',
default='./data/facescrub_images')
parser.add_argument('--megaface-root',
type=str,
help='',
default='./data/megaface_images')
parser.add_argument('--output', type=str, help='', default='./feature_out')
parser.add_argument('--model', type=str, help='', default='')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| insightface/recognition/_evaluation_/megaface/gen_megaface.py/0 | {
"file_path": "insightface/recognition/_evaluation_/megaface/gen_megaface.py",
"repo_id": "insightface",
"token_count": 3203
} | 125 |
# InsightFace in OneFlow
[English](README.md) **|** [简体中文](README_CH.md)
This document describes how to train InsightFace in OneFlow and how to validate the trained networks on the validation datasets.
## Contents
- [InsightFace in OneFlow](#insightface-in-oneflow)
- [Contents](#contents)
- [Background](#background)
- [The InsightFace open-source project](#the-insightface-open-source-project)
- [InsightFace in OneFlow](#insightface-in-oneflow-1)
- [Preparation](#preparation)
- [Install OneFlow](#install-oneflow)
- [Prepare the datasets](#prepare-the-datasets)
- [1. Download the datasets](#1-download-the-datasets)
- [2. Convert the MS1M training set from recordio to OFRecord](#2-convert-the-ms1m-training-set-from-recordio-to-ofrecord)
- [Training and validation](#training-and-validation)
- [Training](#training)
- [Validation](#validation)
- [OneFlow2ONNX](#oneflow2onnx)
## Background
### The InsightFace open-source project
The [original InsightFace repository](https://github.com/deepinsight/insightface) is an open-source face recognition research project based on MXNet.
It integrates:
* Datasets commonly used in face recognition research such as CASIA-Webface, MS1M and VGG2 (provided in the binary format supported by MXNet; detailed descriptions and download links are available [here](https://github.com/deepinsight/insightface/wiki/Dataset-Zoo)).
* Face recognition models that use deep networks such as ResNet, MobileFaceNet and InceptionResNet_v2 as the backbone.
* Implementations of several loss functions, such as SphereFace Loss and Softmax Loss.
### InsightFace in OneFlow
Building on the existing work of the InsightFace project, OneFlow has ported the basic InsightFace face recognition models. The features implemented so far include:
* Training with MS1M and Glint360k as training sets and Lfw, Cfp_fp and Agedb_30 as validation sets, together with scripts for training and validating the networks.
* ResNet100 and MobileFaceNet as backbone networks.
* Softmax Loss and Margin Softmax Loss (including Nsoftmax, Arcface, Cosface and Combined Loss).
* Model parallelism and the Partial FC optimization.
* Conversion of MXNet models.
Planned future work:
* More dataset conversions.
* More backbone networks.
* More complete loss function implementations.
* Instructions for distributed execution.
PRs from all developers are welcome; feel free to contribute new implementations and join the discussion.
## Preparation
Before running, please make sure that you have:
1. Installed OneFlow.
2. Prepared the OFRecord datasets for training and validation.
### Install OneFlow
Follow the steps in [Install OneFlow](https://github.com/Oneflow-Inc/oneflow#install-oneflow) to install the latest master wheel package.
```
python3 -m pip install oneflow -f https://oneflow-staging.oss-cn-beijing.aliyuncs.com/branch/master/cu102/6aa719d70119b65837b25cc5f186eb19ef2b7891/index.html --user
```
### Prepare the datasets
Follow [Loading and preparing OFRecord datasets](https://docs.oneflow.org/extended_topics/how_to_make_ofdataset.html) to prepare the OFRecord datasets used for the InsightFace test.
The [original InsightFace repository](https://github.com/deepinsight/insightface) provides a series of datasets for face recognition tasks, with preprocessing such as face alignment already done. Please download the corresponding dataset from [here](https://github.com/deepinsight/insightface/wiki/Dataset-Zoo) and convert it into the OFRecord format that OneFlow can read. Since the conversion is tedious, you can also directly download the already converted OFRecord datasets:
[MS1M-ArcFace (face_emore)](http://oneflow-public.oss-cn-beijing.aliyuncs.com/face_dataset/train_ofrecord.tar.gz)
[MS1MV3](https://oneflow-public.oss-cn-beijing.aliyuncs.com/facedata/MS1V3/oneflow/ms1m-retinaface-t1.zip)
The following uses the MS1M-ArcFace dataset as an example to show how to convert the downloaded dataset into OFRecord format.
#### 1. Download the datasets
The downloaded MS1M-ArcFace dataset contains the following files:
```
faces_emore/
train.idx
train.rec
property
lfw.bin
cfp_fp.bin
agedb_30.bin
```
The first three files are the MXNet recordio-format files of the MS1M training set; the last three `.bin` files are three different validation datasets.
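Each validation `.bin` file is a pickled pair of raw JPEG bytes and same/not-same labels. A minimal sketch for inspecting one of them (not part of the original README; assumes Python 3 with Pillow and that the archive was extracted to `faces_emore/`):
```python
import pickle
from io import BytesIO
from PIL import Image

with open("faces_emore/lfw.bin", "rb") as f:
    bins, issame_list = pickle.load(f, encoding="bytes")

print(len(bins), "images,", len(issame_list), "pairs")
img = Image.open(BytesIO(bins[0]))   # first aligned face, typically 112x112
print(img.size, img.mode)
```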
#### 2. Convert the MS1M training set from recordio to OFRecord
There are two ways to convert the training set: (section 2.1) use a Python script to directly generate n shuffled data parts, or (section 2.2) use a Python script to generate a single part and then shuffle and partition it with Spark as needed.
2.1 Using the Python script directly
Run:
```
python tools/dataset_convert/mx_recordio_2_ofrecord_shuffled_npart.py --data_dir datasets/faces_emore --output_filepath faces_emore/ofrecord/train --num_part 16
```
On success you will get `num_part` OFRecord part files, 16 in this example, listed as follows:
```
tree ofrecord/test/
ofrecord/test/
|-- _SUCCESS
|-- part-00000
|-- part-00001
|-- part-00002
|-- part-00003
|-- part-00004
|-- part-00005
|-- part-00006
|-- part-00007
|-- part-00008
|-- part-00009
|-- part-00010
|-- part-00011
|-- part-00012
|-- part-00013
|-- part-00014
`-- part-00015
0 directories, 17 files
```
2.2 Python script + Spark shuffle + Spark partition
Run:
```
python tools/dataset_convert/mx_recordio_2_ofrecord.py --data_dir datasets/faces_emore --output_filepath faces_emore/ofrecord/train
```
On success you will get a single OFRecord file (`part-0`) containing all the data, which then needs to be shuffled and partitioned with Spark.
After Spark is installed and deployed, you need to:
1. Download the helper jar package
You can download the Spark-oneflow-connector-assembly-0.1.0.jar file from [Github](https://github.com/Oneflow-Inc/spark-oneflow-connector) or [OSS](https://oneflow-public.oss-cn-beijing.aliyuncs.com/spark-oneflow-connector/spark-oneflow-connector-assembly-0.1.1.jar).
2. Run the Spark commands
Run:
```
//Start Spark
./Spark-2.4.3-bin-hadoop2.7/bin/Spark-shell --jars ~/Spark-oneflow-connector-assembly-0.1.0.jar --driver-memory=64G --conf Spark.local.dir=/tmp/
// shuffle and partition in 16 parts
import org.oneflow.Spark.functions._
Spark.read.chunk("data_path").shuffle().repartition(16).write.chunk("new_data_path")
sc.formatFilenameAsOneflowStyle("new_data_path")
```
You will then get an OFRecord dataset with 16 parts, listed as follows:
```
tree ofrecord/test/
ofrecord/test/
|-- _SUCCESS
|-- part-00000
|-- part-00001
|-- part-00002
|-- part-00003
|-- part-00004
|-- part-00005
|-- part-00006
|-- part-00007
|-- part-00008
|-- part-00009
|-- part-00010
|-- part-00011
|-- part-00012
|-- part-00013
|-- part-00014
`-- part-00015
0 directories, 17 files
```
## Training and validation
### Training
To reduce the migration cost for users, the OneFlow scripts have been adjusted to the style of the Torch implementation; parameters can be modified directly in configs/*.py.
Run the scripts:
#### eager
```
./train_ddp.sh
```
#### Graph
```
train_graph_distributed.sh
```
### Validation
In addition, to make it easy to check the accuracy of a saved pretrained model, we provide a script that runs only the validation process on the validation datasets.
Run:
```
./val.sh
```
## OneFlow2ONNX
```
pip install oneflow-onnx==0.5.1
./convert.sh
``` | insightface/recognition/arcface_oneflow/README_CH.md/0 | {
"file_path": "insightface/recognition/arcface_oneflow/README_CH.md",
"repo_id": "insightface",
"token_count": 4088
} | 126 |
import logging
import os
import time
from typing import List
import oneflow as flow
from eval import verification
from utils.utils_logging import AverageMeter
class CallBackVerification(object):
def __init__(
self,
frequent,
rank,
val_targets,
rec_prefix,
image_size=(112, 112),
world_size=1,
is_consistent=False,
):
self.frequent: int = frequent
self.rank: int = rank
self.highest_acc: float = 0.0
self.highest_acc_list: List[float] = [0.0] * len(val_targets)
self.ver_list: List[object] = []
self.ver_name_list: List[str] = []
self.world_size = world_size
self.is_consistent = is_consistent
if self.is_consistent:
self.init_dataset(
val_targets=val_targets, data_dir=rec_prefix, image_size=image_size
)
else:
            if self.rank == 0:
self.init_dataset(
val_targets=val_targets, data_dir=rec_prefix, image_size=image_size
)
def ver_test(self, backbone: flow.nn.Module, global_step: int):
results = []
for i in range(len(self.ver_list)):
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(
self.ver_list[i], backbone, 10, 10, self.is_consistent
)
logging.info(
"[%s][%d]XNorm: %f" % (
self.ver_name_list[i], global_step, xnorm)
)
logging.info(
"[%s][%d]Accuracy-Flip: %1.5f+-%1.5f"
% (self.ver_name_list[i], global_step, acc2, std2)
)
if acc2 > self.highest_acc_list[i]:
self.highest_acc_list[i] = acc2
logging.info(
"[%s][%d]Accuracy-Highest: %1.5f"
% (self.ver_name_list[i], global_step, self.highest_acc_list[i])
)
results.append(acc2)
def init_dataset(self, val_targets, data_dir, image_size):
for name in val_targets:
path = os.path.join(data_dir, "val", name + ".bin")
if os.path.exists(path):
data_set = verification.load_bin_cv(path, image_size)
self.ver_list.append(data_set)
self.ver_name_list.append(name)
if len(self.ver_list) == 0:
logging.info("Val targets is None !")
def __call__(self, num_update, backbone: flow.nn.Module, backbone_graph=None):
if self.is_consistent:
if num_update > 0 and num_update % self.frequent == 0:
backbone.eval()
self.ver_test(backbone_graph, num_update)
backbone.train()
else:
            if self.rank == 0 and num_update > 0 and num_update % self.frequent == 0:
backbone.eval()
self.ver_test(backbone, num_update)
backbone.train()
class CallBackLogging(object):
def __init__(self, frequent, rank, total_step, batch_size, world_size, writer=None):
self.frequent: int = frequent
self.rank: int = rank
self.time_start = time.time()
self.total_step: int = total_step
self.batch_size: int = batch_size
self.world_size: int = world_size
self.writer = writer
self.init = False
self.tic = 0
def __call__(
self,
global_step: int,
loss: AverageMeter,
epoch: int,
fp16: bool,
learning_rate: float,
grad_scaler=None,
):
if self.rank == 0 and global_step % self.frequent == 0:
if self.init:
try:
speed: float = self.frequent * self.batch_size / (
time.time() - self.tic
)
speed_total = speed * self.world_size
except ZeroDivisionError:
speed_total = float("inf")
time_now = (time.time() - self.time_start) / 3600
time_total = time_now / ((global_step + 1) / self.total_step)
time_for_end = time_total - time_now
if self.writer is not None:
self.writer.add_scalar(
"time_for_end", time_for_end, global_step)
self.writer.add_scalar(
"learning_rate", learning_rate, global_step)
self.writer.add_scalar("loss", loss.avg, global_step)
                if fp16:
                    # the format string expects a grad-scale value; fall back to 0 if the
                    # scaler is missing or does not expose get_scale()
                    grad_scale = grad_scaler.get_scale() if hasattr(grad_scaler, "get_scale") else 0
                    msg = (
                        "Speed %.2f samples/sec Loss %.4f LearningRate %.4f Epoch: %d Global Step: %d "
                        "Fp16 Grad Scale: %2.f Required: %1.f hours"
                        % (
                            speed_total,
                            loss.avg,
                            learning_rate,
                            epoch,
                            global_step,
                            grad_scale,
                            time_for_end,
                        )
                    )
else:
msg = (
"Speed %.2f samples/sec Loss %.4f LearningRate %.4f Epoch: %d Global Step: %d "
"Required: %1.f hours"
% (
speed_total,
loss.avg,
learning_rate,
epoch,
global_step,
time_for_end,
)
)
logging.info(msg)
loss.reset()
self.tic = time.time()
else:
self.init = True
self.tic = time.time()
class CallBackModelCheckpoint(object):
def __init__(self, rank, output="./"):
self.rank: int = rank
self.output: str = output
def __call__(self, global_step, epoch, backbone, is_consistent=False):
if global_step > 100 and backbone is not None:
path_module = os.path.join(self.output, "epoch_%d" % (epoch))
if is_consistent:
flow.save(backbone.state_dict(),
path_module, consistent_dst_rank=0)
else:
if self.rank == 0:
flow.save(backbone.state_dict(), path_module)
logging.info("oneflow Model Saved in '{}'".format(path_module))
| insightface/recognition/arcface_oneflow/utils/utils_callbacks.py/0 | {
"file_path": "insightface/recognition/arcface_oneflow/utils/utils_callbacks.py",
"repo_id": "insightface",
"token_count": 3668
} | 127 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import paddle
import os
import cv2
import six
import random
import numpy as np
import logging
from PIL import Image
from io import BytesIO
from datasets.kv_helper import read_img_from_bin
def transform(img):
# random horizontal flip
if random.randint(0, 1) == 0:
img = cv2.flip(img, 1)
# normalize to mean 0.5, std 0.5
img = (img - 127.5) * 0.00784313725
# BGR2RGB
img = img[:, :, ::-1]
img = img.transpose((2, 0, 1))
return img
class CommonDataset(paddle.io.Dataset):
def __init__(self, root_dir, label_file, fp16=False, is_bin=True):
super(CommonDataset, self).__init__()
self.root_dir = root_dir
self.label_file = label_file
self.fp16 = fp16
with open(label_file, "r") as fin:
self.full_lines = fin.readlines()
self.delimiter = "\t"
self.is_bin = is_bin
self.num_samples = len(self.full_lines)
logging.info("read label file finished, total num: {}"
.format(self.num_samples))
def __getitem__(self, idx):
line = self.full_lines[idx]
img_path, label = line.strip().split(self.delimiter)
img_path = os.path.join(self.root_dir, img_path)
if self.is_bin:
img = read_img_from_bin(img_path)
else:
img = cv2.imread(img_path)
img = transform(img)
img = img.astype('float16' if self.fp16 else 'float32')
label = np.int32(label)
return img, label
def __len__(self):
return self.num_samples
class SyntheticDataset(paddle.io.Dataset):
def __init__(self, num_classes, fp16=False):
super(SyntheticDataset, self).__init__()
self.num_classes = num_classes
self.fp16 = fp16
self.label_list = np.random.randint(
0, num_classes, (5179510, ), dtype=np.int32)
self.num_samples = len(self.label_list)
def __getitem__(self, idx):
label = self.label_list[idx]
img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.uint8)
img = transform(img)
img = img.astype('float16' if self.fp16 else 'float32')
label = np.int32(label)
return img, label
def __len__(self):
return self.num_samples
# returns data as numpy arrays
def load_bin(path, image_size):
if six.PY2:
bins, issame_list = pickle.load(open(path, 'rb'))
else:
bins, issame_list = pickle.load(open(path, 'rb'), encoding='bytes')
data_list = []
for flip in [0, 1]:
data = np.empty(
(len(issame_list) * 2, 3, image_size[0], image_size[1]))
data_list.append(data)
for i in range(len(issame_list) * 2):
_bin = bins[i]
if six.PY2:
if not isinstance(_bin, six.string_types):
_bin = _bin.tostring()
            img_ori = Image.open(six.StringIO(_bin))  # StringIO itself is not imported; six provides it
else:
img_ori = Image.open(BytesIO(_bin))
for flip in [0, 1]:
img = img_ori.copy()
if flip == 1:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.array(img).astype('float32').transpose((2, 0, 1))
img = (img - 127.5) * 0.00784313725
data_list[flip][i][:] = img
if i % 1000 == 0:
print('loading bin', i)
print(data_list[0].shape)
return data_list, issame_list
| insightface/recognition/arcface_paddle/datasets/common_dataset.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/datasets/common_dataset.py",
"repo_id": "insightface",
"token_count": 1865
} | 128 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import numpy as np
import paddle
from .utils.io import Checkpoint
from . import backbones
from .static_model import StaticModel
def export_onnx(path_prefix, feed_vars, fetch_vars, executor, program):
from paddle2onnx.graph import PaddleGraph, ONNXGraph
from paddle2onnx.passes import PassManager
opset_version = 10
enable_onnx_checker = True
verbose = False
paddle_graph = PaddleGraph.build_from_program(program, feed_vars,
fetch_vars,
paddle.fluid.global_scope())
onnx_graph = ONNXGraph.build(paddle_graph, opset_version, verbose)
onnx_graph = PassManager.run_pass(onnx_graph, ['inplace_node_pass'])
onnx_proto = onnx_graph.export_proto(enable_onnx_checker)
try:
# mkdir may conflict if pserver and trainer are running on the same machine
dirname = os.path.dirname(path_prefix)
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
model_path = path_prefix + ".onnx"
if os.path.isdir(model_path):
raise ValueError("'{}' is an existing directory.".format(model_path))
with open(model_path, 'wb') as f:
f.write(onnx_proto.SerializeToString())
def export(args):
checkpoint = Checkpoint(
rank=0,
world_size=1,
embedding_size=args.embedding_size,
num_classes=None,
checkpoint_dir=args.checkpoint_dir, )
test_program = paddle.static.Program()
startup_program = paddle.static.Program()
test_model = StaticModel(
main_program=test_program,
startup_program=startup_program,
backbone_class_name=args.backbone,
embedding_size=args.embedding_size,
mode='test', )
gpu_id = int(os.getenv("FLAGS_selected_gpus", 0))
place = paddle.CUDAPlace(gpu_id)
exe = paddle.static.Executor(place)
exe.run(startup_program)
checkpoint.load(program=test_program, for_train=False, dtype='float32')
print("Load checkpoint from '{}'.".format(args.checkpoint_dir))
path = os.path.join(args.output_dir, args.backbone)
if args.export_type == 'onnx':
feed_vars = [test_model.backbone.input_dict['image'].name]
fetch_vars = [test_model.backbone.output_dict['feature']]
export_onnx(path, feed_vars, fetch_vars, exe, program=test_program)
else:
feed_vars = [test_model.backbone.input_dict['image']]
fetch_vars = [test_model.backbone.output_dict['feature']]
paddle.static.save_inference_model(
path, feed_vars, fetch_vars, exe, program=test_program)
print("Save exported model to '{}'.".format(args.output_dir))
| insightface/recognition/arcface_paddle/static/export.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/static/export.py",
"repo_id": "insightface",
"token_count": 1358
} | 129 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
sys.path.insert(0, os.path.abspath('.'))
import paddle
from configs import argparser as parser
from utils.logging import init_logging
if __name__ == '__main__':
args = parser.parse_args()
if args.is_static:
from static.train import train
paddle.enable_static()
else:
from dynamic.train import train
rank = int(os.getenv("PADDLE_TRAINER_ID", 0))
os.makedirs(args.output, exist_ok=True)
init_logging(rank, args.output)
parser.print_args(args)
train(args)
| insightface/recognition/arcface_paddle/tools/train.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/tools/train.py",
"repo_id": "insightface",
"token_count": 374
} | 130 |
## 1. Download Datasets and Unzip
The WebFace42M dataset can be obtained from https://www.face-benchmark.org/download.html.
Upon extraction, the raw data of WebFace42M consists of 10 directories, denoted 0 to 9. The subsets map onto these directories: WebFace4M corresponds to directory 0, and WebFace12M to directories 0, 1, and 2.
## 2. Create Shuffled Rec File for DALI
Shuffled .rec files are crucial for DALI; training on an unshuffled .rec file degrades performance. Original .rec files generated in the InsightFace style are not compatible with Nvidia DALI, so use the [mxnet.tools.im2rec](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) command to generate a shuffled .rec file.
```shell
# directory layout of your dataset
/WebFace42M_Root
├── 0_0_0000000
│ ├── 0_0.jpg
│ ├── 0_1.jpg
│ ├── 0_2.jpg
│ ├── 0_3.jpg
│ └── 0_4.jpg
├── 0_0_0000001
│ ├── 0_5.jpg
│ ├── 0_6.jpg
│ ├── 0_7.jpg
│ ├── 0_8.jpg
│ └── 0_9.jpg
├── 0_0_0000002
│ ├── 0_10.jpg
│ ├── 0_11.jpg
│ ├── 0_12.jpg
│ ├── 0_13.jpg
│ ├── 0_14.jpg
│ ├── 0_15.jpg
│ ├── 0_16.jpg
│ └── 0_17.jpg
├── 0_0_0000003
│ ├── 0_18.jpg
│ ├── 0_19.jpg
│ └── 0_20.jpg
├── 0_0_0000004
# 0) Dependencies installation
pip install opencv-python
apt-get update
apt-get install ffmpeg libsm6 libxext6  -y
# 1) create train.lst using follow command
python -m mxnet.tools.im2rec --list --recursive train WebFace42M_Root
# 2) create train.rec and train.idx using train.lst using following command
python -m mxnet.tools.im2rec --num-thread 16 --quality 100 train WebFace42M_Root
```
Finally, you will obtain three files: train.lst, train.rec, and train.idx, where train.idx and train.rec are utilized for training.
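As a quick sanity check that the shuffled `.rec`/`.idx` pair is readable (a minimal sketch, not part of the original guide; assumes MXNet and OpenCV are installed and the files are in the working directory):
```python
import mxnet as mx

rec = mx.recordio.MXIndexedRecordIO("train.idx", "train.rec", "r")
header, img = mx.recordio.unpack_img(rec.read_idx(rec.keys[0]))
print(header.label, img.shape)   # label id and decoded HxWxC image
rec.close()
```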
| insightface/recognition/arcface_torch/docs/prepare_webface42m.md/0 | {
"file_path": "insightface/recognition/arcface_torch/docs/prepare_webface42m.md",
"repo_id": "insightface",
"token_count": 684
} | 131 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from backbones import get_model
from dataset import get_dataloader
from losses import CombinedMarginLoss
from lr_scheduler import PolynomialLRWarmup
from partial_fc_v2 import PartialFC_V2
from torch import distributed
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from utils.utils_callbacks import CallBackLogging, CallBackVerification
from utils.utils_config import get_config
from utils.utils_distributed_sampler import setup_seed
from utils.utils_logging import AverageMeter, init_logging
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook
assert torch.__version__ >= "1.12.0", "In order to enjoy the features of the new torch, \
we have upgraded the torch to 1.12.0. torch before than 1.12.0 may not work in the future."
try:
rank = int(os.environ["RANK"])
local_rank = int(os.environ["LOCAL_RANK"])
world_size = int(os.environ["WORLD_SIZE"])
distributed.init_process_group("nccl")
except KeyError:
rank = 0
local_rank = 0
world_size = 1
distributed.init_process_group(
backend="nccl",
init_method="tcp://127.0.0.1:12584",
rank=rank,
world_size=world_size,
)
def main(args):
# get config
cfg = get_config(args.config)
# global control random seed
setup_seed(seed=cfg.seed, cuda_deterministic=False)
torch.cuda.set_device(local_rank)
os.makedirs(cfg.output, exist_ok=True)
init_logging(rank, cfg.output)
summary_writer = (
SummaryWriter(log_dir=os.path.join(cfg.output, "tensorboard"))
if rank == 0
else None
)
wandb_logger = None
if cfg.using_wandb:
import wandb
# Sign in to wandb
try:
wandb.login(key=cfg.wandb_key)
except Exception as e:
print("WandB Key must be provided in config file (base.py).")
print(f"Config Error: {e}")
# Initialize wandb
run_name = datetime.now().strftime("%y%m%d_%H%M") + f"_GPU{rank}"
run_name = run_name if cfg.suffix_run_name is None else run_name + f"_{cfg.suffix_run_name}"
try:
wandb_logger = wandb.init(
entity = cfg.wandb_entity,
project = cfg.wandb_project,
sync_tensorboard = True,
resume=cfg.wandb_resume,
name = run_name,
notes = cfg.notes) if rank == 0 or cfg.wandb_log_all else None
if wandb_logger:
wandb_logger.config.update(cfg)
except Exception as e:
print("WandB Data (Entity and Project name) must be provided in config file (base.py).")
print(f"Config Error: {e}")
train_loader = get_dataloader(
cfg.rec,
local_rank,
cfg.batch_size,
cfg.dali,
cfg.dali_aug,
cfg.seed,
cfg.num_workers
)
backbone = get_model(
cfg.network, dropout=0.0, fp16=cfg.fp16, num_features=cfg.embedding_size).cuda()
backbone = torch.nn.parallel.DistributedDataParallel(
module=backbone, broadcast_buffers=False, device_ids=[local_rank], bucket_cap_mb=16,
find_unused_parameters=True)
backbone.register_comm_hook(None, fp16_compress_hook)
backbone.train()
# FIXME using gradient checkpoint if there are some unused parameters will cause error
backbone._set_static_graph()
margin_loss = CombinedMarginLoss(
64,
cfg.margin_list[0],
cfg.margin_list[1],
cfg.margin_list[2],
cfg.interclass_filtering_threshold
)
if cfg.optimizer == "sgd":
module_partial_fc = PartialFC_V2(
margin_loss, cfg.embedding_size, cfg.num_classes,
cfg.sample_rate, False)
module_partial_fc.train().cuda()
# TODO the params of partial fc must be last in the params list
opt = torch.optim.SGD(
params=[{"params": backbone.parameters()}, {"params": module_partial_fc.parameters()}],
lr=cfg.lr, momentum=0.9, weight_decay=cfg.weight_decay)
elif cfg.optimizer == "adamw":
module_partial_fc = PartialFC_V2(
margin_loss, cfg.embedding_size, cfg.num_classes,
cfg.sample_rate, False)
module_partial_fc.train().cuda()
opt = torch.optim.AdamW(
params=[{"params": backbone.parameters()}, {"params": module_partial_fc.parameters()}],
lr=cfg.lr, weight_decay=cfg.weight_decay)
    else:
        raise ValueError("Unsupported optimizer: {}".format(cfg.optimizer))
cfg.total_batch_size = cfg.batch_size * world_size
cfg.warmup_step = cfg.num_image // cfg.total_batch_size * cfg.warmup_epoch
cfg.total_step = cfg.num_image // cfg.total_batch_size * cfg.num_epoch
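    # warmup_step/total_step are counted in optimizer steps: images per epoch divided
    # by the global batch size, times the number of warmup / total epochs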
lr_scheduler = PolynomialLRWarmup(
optimizer=opt,
warmup_iters=cfg.warmup_step,
total_iters=cfg.total_step)
start_epoch = 0
global_step = 0
if cfg.resume:
dict_checkpoint = torch.load(os.path.join(cfg.output, f"checkpoint_gpu_{rank}.pt"))
start_epoch = dict_checkpoint["epoch"]
global_step = dict_checkpoint["global_step"]
backbone.module.load_state_dict(dict_checkpoint["state_dict_backbone"])
module_partial_fc.load_state_dict(dict_checkpoint["state_dict_softmax_fc"])
opt.load_state_dict(dict_checkpoint["state_optimizer"])
lr_scheduler.load_state_dict(dict_checkpoint["state_lr_scheduler"])
del dict_checkpoint
for key, value in cfg.items():
num_space = 25 - len(key)
logging.info(": " + key + " " * num_space + str(value))
callback_verification = CallBackVerification(
val_targets=cfg.val_targets, rec_prefix=cfg.rec,
summary_writer=summary_writer, wandb_logger = wandb_logger
)
callback_logging = CallBackLogging(
frequent=cfg.frequent,
total_step=cfg.total_step,
batch_size=cfg.batch_size,
start_step = global_step,
writer=summary_writer
)
loss_am = AverageMeter()
amp = torch.cuda.amp.grad_scaler.GradScaler(growth_interval=100)
for epoch in range(start_epoch, cfg.num_epoch):
if isinstance(train_loader, DataLoader):
train_loader.sampler.set_epoch(epoch)
for _, (img, local_labels) in enumerate(train_loader):
global_step += 1
local_embeddings = backbone(img)
loss: torch.Tensor = module_partial_fc(local_embeddings, local_labels)
if cfg.fp16:
amp.scale(loss).backward()
if global_step % cfg.gradient_acc == 0:
amp.unscale_(opt)
torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
amp.step(opt)
amp.update()
opt.zero_grad()
else:
loss.backward()
if global_step % cfg.gradient_acc == 0:
torch.nn.utils.clip_grad_norm_(backbone.parameters(), 5)
opt.step()
opt.zero_grad()
lr_scheduler.step()
with torch.no_grad():
if wandb_logger:
wandb_logger.log({
'Loss/Step Loss': loss.item(),
'Loss/Train Loss': loss_am.avg,
'Process/Step': global_step,
'Process/Epoch': epoch
})
loss_am.update(loss.item(), 1)
callback_logging(global_step, loss_am, epoch, cfg.fp16, lr_scheduler.get_last_lr()[0], amp)
if global_step % cfg.verbose == 0 and global_step > 0:
callback_verification(global_step, backbone)
if cfg.save_all_states:
checkpoint = {
"epoch": epoch + 1,
"global_step": global_step,
"state_dict_backbone": backbone.module.state_dict(),
"state_dict_softmax_fc": module_partial_fc.state_dict(),
"state_optimizer": opt.state_dict(),
"state_lr_scheduler": lr_scheduler.state_dict()
}
torch.save(checkpoint, os.path.join(cfg.output, f"checkpoint_gpu_{rank}.pt"))
if rank == 0:
path_module = os.path.join(cfg.output, "model.pt")
torch.save(backbone.module.state_dict(), path_module)
if wandb_logger and cfg.save_artifacts:
artifact_name = f"{run_name}_E{epoch}"
model = wandb.Artifact(artifact_name, type='model')
model.add_file(path_module)
wandb_logger.log_artifact(model)
if cfg.dali:
train_loader.reset()
if rank == 0:
path_module = os.path.join(cfg.output, "model.pt")
torch.save(backbone.module.state_dict(), path_module)
if wandb_logger and cfg.save_artifacts:
artifact_name = f"{run_name}_Final"
model = wandb.Artifact(artifact_name, type='model')
model.add_file(path_module)
wandb_logger.log_artifact(model)
if __name__ == "__main__":
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(
description="Distributed Arcface Training in Pytorch")
parser.add_argument("config", type=str, help="py config file")
main(parser.parse_args())
| insightface/recognition/arcface_torch/train_v2.py/0 | {
"file_path": "insightface/recognition/arcface_torch/train_v2.py",
"repo_id": "insightface",
"token_count": 4532
} | 132 |
import torch
from torch import nn
import torch.nn.functional as F
class IDMMD(nn.Module):
def __init__(self, kernel_type='rbf', kernel_mul=2.0, kernel_num=5):
super(IDMMD, self).__init__()
self.kernel_num = kernel_num
self.kernel_mul = kernel_mul
self.fix_sigma = None
self.kernel_type = kernel_type
def get_centers_by_id(self, x_rgb, x_ir, targets):
centers_rgb = []
centers_ir = []
batch_y_set = set(targets.data.cpu().numpy())
for _, l in enumerate(batch_y_set):
feat1 = x_rgb[targets==l]
feat2 = x_ir[targets==l]
centers_rgb.append(feat1.mean(dim=0).unsqueeze(0))
centers_ir.append(feat2.mean(dim=0).unsqueeze(0))
centers_rgb = torch.cat(centers_rgb, 0).cuda()
centers_ir = torch.cat(centers_ir, 0).cuda()
return centers_rgb, centers_ir
def forward(self, x_rgb, x_ir, targets):
centers_rgb, centers_ir = self.get_centers_by_id(x_rgb, x_ir, targets)
if self.kernel_type == 'linear':
loss = self.linear_mmd(centers_rgb, centers_ir) # domain-level loss
elif self.kernel_type == 'rbf':
B = centers_rgb.size(0)
kernels = self.guassian_kernel(centers_rgb, centers_ir)
XX = kernels[:B, :B]
YY = kernels[B:, B:]
XY = kernels[:B, B:]
YX = kernels[B:, :B]
loss = (XX + YY - XY - YX).mean()
return loss
def linear_mmd(self, center_rgb, center_ir):
def compute_dist_(x_rgb, x_ir):
n = x_rgb.size(0)
dist1 = torch.pow(x_rgb, 2).sum(dim=1, keepdim=True).expand(n, n)
dist2 = torch.pow(x_ir, 2).sum(dim=1, keepdim=True).expand(n, n)
dist = dist1 + dist2.t()
dist.addmm_(mat1=x_rgb, mat2=x_ir.t(), beta=1, alpha=-2)
dist = dist.clamp(min=1e-12) # for numerical stability
return dist
matrix = compute_dist_(center_rgb, center_ir)
loss = matrix.diag()
return loss.mean()
def guassian_kernel(self, x_rgb, x_ir):
total = torch.cat([x_rgb, x_ir], dim=0)
N = total.size(0)
total0 = total.unsqueeze(0).expand(
int(total.size(0)), int(total.size(0)), int(total.size(1)))
total1 = total.unsqueeze(1).expand(
int(total.size(0)), int(total.size(0)), int(total.size(1)))
dists = ((total0-total1)**2).sum(2)
if self.fix_sigma:
bandwidth = self.fix_sigma
else:
bandwidth = torch.sum(dists.data) / (N**2-N)
bandwidth /= self.kernel_mul ** (self.kernel_num // 2)
bandwidth_list = [bandwidth * (self.kernel_mul**i)
for i in range(self.kernel_num)]
kernel_val = [torch.exp(-dists / bandwidth_temp)
for bandwidth_temp in bandwidth_list]
return sum(kernel_val)
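# Illustrative use of the IDMMD criterion (shapes and names are examples only):
#   criterion = IDMMD(kernel_type='rbf')
#   feat_rgb, feat_ir: (N, D) CUDA embeddings of the two modalities for the same
#   N samples; targets: (N,) identity labels.
#   loss = criterion(feat_rgb, feat_ir, targets)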
class CosFace(torch.nn.Module):
def __init__(self, s=64.0, m=0.40):
super(CosFace, self).__init__()
self.s = s
self.m = m
def forward(self, logits, labels):
one_hot = torch.zeros_like(logits).scatter_(1, labels.view(-1, 1), 1.0).cuda()
phi = logits - self.m
output = torch.where(one_hot==1, phi, logits)
output *= self.s
return output | insightface/recognition/idmmd/losses.py/0 | {
"file_path": "insightface/recognition/idmmd/losses.py",
"repo_id": "insightface",
"token_count": 1773
} | 133 |
from easydict import EasyDict as edict
config = edict()
# loss
config.embedding_size = 512
config.bn_mom = 0.9
config.workspace = 256
config.net_se = 0
config.net_act = 'prelu'
config.net_unit = 3
config.net_input = 1
config.net_output = 'FC'
config.frequent = 20
config.verbose = 2000
config.image_size = 112
config.memonger = False
config.debug = 0
config.fp16 = False
config.batch_size = 64
config.backbone_lr = 0.1
config.memory_bank_lr = config.backbone_lr
config.sample_ratio = 1.0
def generate_config(loss_name, dataset, network):
# loss
if loss_name == 'arcface':
config.loss_s = 64.0
config.loss_m1 = 1.0
config.loss_m2 = 0.5
config.loss_m3 = 0.0
elif loss_name == 'cosface':
config.loss_s = 64.0
config.loss_m1 = 1.0
config.loss_m2 = 0.0
config.loss_m3 = 0.4
# dataset
if dataset == 'webface':
config.lr_steps = '20000,28000'
config.val_targets = ['lfw', 'cfp_fp', 'agedb_30']
config.rec = '/anxiang/datasets/webface/train.rec'
config.rec = '/train_tmp/webface/train.rec'
config.num_classes = 10575
config.max_update = 32000
# glint360k 17091657
# md5sum:
# 5d9cd9f262ec87a5ca2eac5e703f7cdf train.idx
# 8483be5af6f9906e19f85dee49132f8e train.rec
# make training faster
# our RAM is 256G
# mount -t tmpfs -o size=140G tmpfs /train_tmp
elif dataset == 'glint360k_8GPU':
config.lr_steps = '200000,400000,500000,550000'
config.val_targets = [
'agedb_30', 'calfw', 'cfp_ff', 'cfp_fp', 'cplfw', 'lfw', 'vgg2_fp'
]
config.rec = '/train_tmp/glint360k/train.rec'
config.num_classes = 360232
config.batch_size = 64
config.max_update = 600000
elif dataset == 'glint360k_16GPU':
config.lr_steps = '200000,280000,360000'
config.val_targets = ['agedb_30', 'cfp_fp', 'lfw']
config.rec = '/train_tmp/glint360k/train.rec'
config.num_classes = 360232
config.max_update = 400000
elif dataset == 'emore':
config.lr_steps = '100000,160000'
config.val_targets = ['agedb_30', 'cfp_fp', 'lfw']
config.rec = '/anxiang/datasets/faces_emore/train.rec'
config.rec = '/train_tmp/faces_emore/train.rec'
config.num_classes = 85742
config.batch_size = 64
config.max_update = 180000
elif dataset == '100w':
config.debug = 1
config.num_classes = 100 * 10000
config.lr_steps = '20000,28000'
config.max_update = 32000
elif dataset == '1000w':
config.debug = 1
config.num_classes = 1000 * 10000
config.lr_steps = '20000,28000'
config.max_update = 32000
elif dataset == '2000w':
config.debug = 1
config.num_classes = 2000 * 10000
config.lr_steps = '20000,28000'
config.max_update = 32000
elif dataset == '3000w':
config.debug = 1
config.num_classes = 3000 * 10000
config.lr_steps = '20000,28000'
config.max_update = 32000
elif dataset == '10000w':
config.debug = 1
config.num_classes = 10000 * 10000
config.lr_steps = '20000,28000'
config.max_update = 32000
# network
if network == 'r100':
config.net_name = 'resnet'
config.num_layers = 100
elif network == 'r122':
config.net_name = 'resnet'
config.num_layers = 122
elif network == 'r50':
config.net_name = 'resnet'
config.num_layers = 50
elif network == 'rx101':
config.net_name = 'fresnext'
config.num_layers = 101
elif network == 'rx50':
config.net_name = 'fresnext'
config.num_layers = 50
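# Example (illustrative): populate the shared config for an arcface/emore/r100 run.
#   generate_config('arcface', 'emore', 'r100')
#   print(config.num_classes, config.lr_steps, config.net_name, config.num_layers)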
| insightface/recognition/partial_fc/mxnet/default.py/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/default.py",
"repo_id": "insightface",
"token_count": 1771
} | 134 |
# Docker run
```
# master
docker run -it \
--network=host \
--gpus all \
-v /mnt:/mnt \
-v /anxiang:/anxiang \
-v /data:/data \
-v /anxiang/share/ssh/:/root/.ssh \
partical_fc:0.1 /bin/bash
# other
docker run -it \
--network=host \
-v /mnt:/mnt \
-v /anxiang:/anxiang \
-v /data:/data \
-v /anxiang/share/ssh/:/root/.ssh \
partical_fc:0.1 \
bash -c "/usr/sbin/sshd -p 12345; sleep infinity"
``` | insightface/recognition/partial_fc/mxnet/setup-utils/README.md/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/setup-utils/README.md",
"repo_id": "insightface",
"token_count": 184
} | 135 |
'''
@author: insightface
'''
import os
import sys
import math
import random
import logging
import pickle
import sklearn
import numpy as np
from image_iter import FaceImageIter
import mxnet as mx
from mxnet import ndarray as nd
import argparse
import mxnet.optimizer as optimizer
from config import config, default, generate_config
sys.path.append(os.path.join(os.path.dirname(__file__), 'symbol'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
import verification
import fresnet
import fmobilefacenet
import fmobilenet
import fmnasnet
import fdensenet
import vargfacenet
logger = logging.getLogger()
logger.setLevel(logging.INFO)
args = None
def parse_args():
parser = argparse.ArgumentParser(description='Train parall face network')
# general
parser.add_argument('--dataset',
default=default.dataset,
help='dataset config')
parser.add_argument('--network',
default=default.network,
help='network config')
parser.add_argument('--loss', default=default.loss, help='loss config')
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset, args.loss)
parser.add_argument('--models-root',
default=default.models_root,
help='root directory to save model.')
parser.add_argument('--pretrained',
default=default.pretrained,
help='pretrained model to load')
parser.add_argument('--pretrained-epoch',
type=int,
default=default.pretrained_epoch,
help='pretrained epoch to load')
parser.add_argument(
'--ckpt',
type=int,
default=default.ckpt,
help=
'checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save'
)
parser.add_argument(
'--verbose',
type=int,
default=default.verbose,
help='do verification testing and model saving every verbose batches')
parser.add_argument('--lr',
type=float,
default=default.lr,
help='start learning rate')
parser.add_argument('--lr-steps',
type=str,
default=default.lr_steps,
help='steps of lr changing')
parser.add_argument('--wd',
type=float,
default=default.wd,
help='weight decay')
parser.add_argument('--mom',
type=float,
default=default.mom,
help='momentum')
parser.add_argument('--frequent',
type=int,
default=default.frequent,
help='')
parser.add_argument('--per-batch-size',
type=int,
default=default.per_batch_size,
help='batch size in each context')
parser.add_argument('--kvstore',
type=str,
default=default.kvstore,
help='kvstore setting')
parser.add_argument('--worker-id',
type=int,
default=0,
help='worker id for dist training, starts from 0')
parser.add_argument('--extra-model-name',
type=str,
default='',
help='extra model name')
args = parser.parse_args()
return args
def get_symbol_embedding():
embedding = eval(config.net_name).get_symbol()
all_label = mx.symbol.Variable('softmax_label')
#embedding = mx.symbol.BlockGrad(embedding)
all_label = mx.symbol.BlockGrad(all_label)
out_list = [embedding, all_label]
out = mx.symbol.Group(out_list)
return out
def get_symbol_arcface(args):
embedding = mx.symbol.Variable('data')
all_label = mx.symbol.Variable('softmax_label')
gt_label = all_label
is_softmax = True
#print('call get_sym_arcface with', args, config)
if config.loss_name == 'margin_softmax':
_weight = mx.symbol.Variable("fc7_%d_weight" % args._ctxid,
shape=(args.ctx_num_classes *
config.loss_K, config.emb_size),
lr_mult=config.fc7_lr_mult,
wd_mult=config.fc7_wd_mult)
nweight = mx.symbol.L2Normalization(_weight, mode='instance')
nembedding = mx.symbol.L2Normalization(embedding,
mode='instance',
name='fc1n_%d' % args._ctxid)
fc7 = mx.sym.FullyConnected(data=nembedding,
weight=nweight,
no_bias=True,
num_hidden=args.ctx_num_classes *
config.loss_K,
name='fc7_%d' % args._ctxid)
if config.loss_K > 1:
sim_s3 = mx.symbol.reshape(
fc7, (-1, args.ctx_num_classes, config.loss_K))
sim = mx.symbol.max(sim_s3, axis=2)
fc7 = sim
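        # Combined margin: replace the target logit cos(theta) with
        # cos(m1*theta + m2) - m3, then scale by s (ArcFace/CosFace-style margins).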
if config.loss_m1 != 1.0 or config.loss_m2 != 0.0 or config.loss_m3 != 0.0:
gt_one_hot = mx.sym.one_hot(gt_label,
depth=args.ctx_num_classes,
on_value=1.0,
off_value=0.0)
if config.loss_m1 == 1.0 and config.loss_m2 == 0.0:
_one_hot = gt_one_hot * args.margin_b
fc7 = fc7 - _one_hot
else:
fc7_onehot = fc7 * gt_one_hot
cos_t = fc7_onehot
t = mx.sym.arccos(cos_t)
if config.loss_m1 != 1.0:
t = t * config.loss_m1
if config.loss_m2 != 0.0:
t = t + config.loss_m2
margin_cos = mx.sym.cos(t)
if config.loss_m3 != 0.0:
margin_cos = margin_cos - config.loss_m3
margin_fc7 = margin_cos
margin_fc7_onehot = margin_fc7 * gt_one_hot
diff = margin_fc7_onehot - fc7_onehot
fc7 = fc7 + diff
fc7 = fc7 * config.loss_s
out_list = []
out_list.append(fc7)
if config.loss_name == 'softmax': #softmax
out_list.append(gt_label)
out = mx.symbol.Group(out_list)
return out
def train_net(args):
#_seed = 727
#random.seed(_seed)
#np.random.seed(_seed)
#mx.random.seed(_seed)
ctx = []
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if len(cvd) > 0:
for i in range(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx) == 0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
if len(args.extra_model_name) == 0:
prefix = os.path.join(
args.models_root,
'%s-%s-%s' % (args.network, args.loss, args.dataset), 'model')
else:
prefix = os.path.join(
args.models_root, '%s-%s-%s-%s' %
(args.network, args.loss, args.dataset, args.extra_model_name),
'model')
prefix_dir = os.path.dirname(prefix)
print('prefix', prefix)
if not os.path.exists(prefix_dir):
os.makedirs(prefix_dir)
args.ctx_num = len(ctx)
if args.per_batch_size == 0:
args.per_batch_size = 128
args.batch_size = args.per_batch_size * args.ctx_num
args.rescale_threshold = 0
args.image_channel = config.image_shape[2]
config.batch_size = args.batch_size
config.per_batch_size = args.per_batch_size
data_dir = config.dataset_path
path_imgrec = None
path_imglist = None
image_size = config.image_shape[0:2]
assert len(image_size) == 2
assert image_size[0] == image_size[1]
print('image_size', image_size)
print('num_classes', config.num_classes)
path_imgrec = os.path.join(data_dir, "train.rec")
data_shape = (args.image_channel, image_size[0], image_size[1])
num_workers = config.num_workers
global_num_ctx = num_workers * args.ctx_num
if config.num_classes % global_num_ctx == 0:
args.ctx_num_classes = config.num_classes // global_num_ctx
else:
args.ctx_num_classes = config.num_classes // global_num_ctx + 1
args.local_num_classes = args.ctx_num_classes * args.ctx_num
args.local_class_start = args.local_num_classes * args.worker_id
#if len(args.partial)==0:
# local_classes_range = (0, args.num_classes)
#else:
# _vec = args.partial.split(',')
# local_classes_range = (int(_vec[0]), int(_vec[1]))
#args.partial_num_classes = local_classes_range[1] - local_classes_range[0]
#args.partial_start = local_classes_range[0]
print('Called with argument:', args, config)
mean = None
begin_epoch = 0
base_lr = args.lr
base_wd = args.wd
base_mom = args.mom
arg_params = None
aux_params = None
if len(args.pretrained) == 0:
esym = get_symbol_embedding()
asym = get_symbol_arcface
else:
assert False
if config.num_workers == 1:
from parall_module_local_v1 import ParallModule
else:
from parall_module_dist import ParallModule
model = ParallModule(
context=ctx,
symbol=esym,
data_names=['data'],
label_names=['softmax_label'],
asymbol=asym,
args=args,
)
val_dataiter = None
train_dataiter = FaceImageIter(
batch_size=args.batch_size,
data_shape=data_shape,
path_imgrec=path_imgrec,
shuffle=True,
rand_mirror=config.data_rand_mirror,
mean=mean,
cutoff=config.data_cutoff,
color_jittering=config.data_color,
images_filter=config.data_images_filter,
)
if config.net_name == 'fresnet' or config.net_name == 'fmobilefacenet':
initializer = mx.init.Xavier(rnd_type='gaussian',
factor_type="out",
magnitude=2) #resnet style
else:
initializer = mx.init.Xavier(rnd_type='uniform',
factor_type="in",
magnitude=2)
_rescale = 1.0 / args.batch_size
opt = optimizer.SGD(learning_rate=base_lr,
momentum=base_mom,
wd=base_wd,
rescale_grad=_rescale)
_cb = mx.callback.Speedometer(args.batch_size, args.frequent)
ver_list = []
ver_name_list = []
for name in config.val_targets:
path = os.path.join(data_dir, name + ".bin")
if os.path.exists(path):
data_set = verification.load_bin(path, image_size)
ver_list.append(data_set)
ver_name_list.append(name)
print('ver', name)
def ver_test(nbatch):
results = []
for i in range(len(ver_list)):
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(
ver_list[i], model, args.batch_size, 10, None, None)
print('[%s][%d]XNorm: %f' % (ver_name_list[i], nbatch, xnorm))
#print('[%s][%d]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc1, std1))
print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' %
(ver_name_list[i], nbatch, acc2, std2))
results.append(acc2)
return results
highest_acc = [0.0, 0.0] #lfw and target
#for i in range(len(ver_list)):
# highest_acc.append(0.0)
global_step = [0]
save_step = [0]
lr_steps = [int(x) for x in args.lr_steps.split(',')]
print('lr_steps', lr_steps)
def _batch_callback(param):
#global global_step
global_step[0] += 1
mbatch = global_step[0]
for step in lr_steps:
if mbatch == step:
opt.lr *= 0.1
print('lr change to', opt.lr)
break
_cb(param)
if mbatch % 1000 == 0:
print('lr-batch-epoch:', opt.lr, param.nbatch, param.epoch)
if mbatch >= 0 and mbatch % args.verbose == 0:
acc_list = ver_test(mbatch)
save_step[0] += 1
msave = save_step[0]
do_save = False
is_highest = False
if len(acc_list) > 0:
#lfw_score = acc_list[0]
#if lfw_score>highest_acc[0]:
# highest_acc[0] = lfw_score
# if lfw_score>=0.998:
# do_save = True
score = sum(acc_list)
if acc_list[-1] >= highest_acc[-1]:
if acc_list[-1] > highest_acc[-1]:
is_highest = True
else:
if score >= highest_acc[0]:
is_highest = True
highest_acc[0] = score
highest_acc[-1] = acc_list[-1]
#if lfw_score>=0.99:
# do_save = True
if is_highest:
do_save = True
if args.ckpt == 0:
do_save = False
elif args.ckpt == 2:
do_save = True
elif args.ckpt == 3:
msave = 1
if do_save:
print('saving', msave)
if config.ckpt_embedding:
arg, aux = model.get_export_params()
else:
arg, aux = model.get_params()
all_layers = model.symbol.get_internals()
_sym = all_layers['fc1_output']
mx.model.save_checkpoint(prefix, msave, _sym, arg, aux)
print('[%d]Accuracy-Highest: %1.5f' % (mbatch, highest_acc[-1]))
if config.max_steps > 0 and mbatch > config.max_steps:
sys.exit(0)
epoch_cb = None
train_dataiter = mx.io.PrefetchingIter(train_dataiter)
model.fit(
train_dataiter,
begin_epoch=begin_epoch,
num_epoch=999999,
eval_data=val_dataiter,
#eval_metric = eval_metrics,
kvstore=args.kvstore,
optimizer=opt,
#optimizer_params = optimizer_params,
initializer=initializer,
arg_params=arg_params,
aux_params=aux_params,
allow_missing=True,
batch_end_callback=_batch_callback,
epoch_end_callback=epoch_cb)
def main():
global args
args = parse_args()
train_net(args)
if __name__ == '__main__':
main()
| insightface/recognition/subcenter_arcface/train_parall.py/0 | {
"file_path": "insightface/recognition/subcenter_arcface/train_parall.py",
"repo_id": "insightface",
"token_count": 7903
} | 136 |
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python3 -u -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py $@
#ps -ef | grep "train" | grep -v grep | awk '{print "kill -9 "$2}' | sh
| insightface/recognition/vpl/run.sh/0 | {
"file_path": "insightface/recognition/vpl/run.sh",
"repo_id": "insightface",
"token_count": 113
} | 137 |
import sys
sys.path.append('../code')
import argparse
import GPUtil
import os
from pyhocon import ConfigFactory
import torch
import numpy as np
import cvxpy as cp
from PIL import Image
import math
import utils.general as utils
import utils.plots as plt
from utils import rend_util
def evaluate(**kwargs):
torch.set_default_dtype(torch.float32)
conf = ConfigFactory.parse_file(kwargs['conf'])
exps_folder_name = kwargs['exps_folder_name']
evals_folder_name = kwargs['evals_folder_name']
eval_rendering = kwargs['eval_rendering']
eval_animation = kwargs['eval_animation']
expname = conf.get_string('train.expname') + kwargs['expname']
scan_id = kwargs['scan_id'] if kwargs['scan_id'] != -1 else conf.get_int('dataset.scan_id', default=-1)
if scan_id != -1:
expname = expname + '_{0}'.format(scan_id)
if kwargs['timestamp'] == 'latest':
if os.path.exists(os.path.join('../', kwargs['exps_folder_name'], expname)):
timestamps = os.listdir(os.path.join('../', kwargs['exps_folder_name'], expname))
if (len(timestamps)) == 0:
print('WRONG EXP FOLDER')
exit()
else:
timestamp = sorted(timestamps)[-1]
else:
print('WRONG EXP FOLDER')
exit()
else:
timestamp = kwargs['timestamp']
utils.mkdir_ifnotexists(os.path.join('../', evals_folder_name))
expdir = os.path.join('../', exps_folder_name, expname)
evaldir = os.path.join('../', evals_folder_name, expname)
utils.mkdir_ifnotexists(evaldir)
dataset_conf = conf.get_config('dataset')
model = utils.get_class(conf.get_string('train.model_class'))(conf=conf.get_config('model'),\
id=scan_id, datadir=dataset_conf['data_dir'])
if torch.cuda.is_available():
model.cuda()
if kwargs['scan_id'] != -1:
dataset_conf['scan_id'] = kwargs['scan_id']
eval_dataset = utils.get_class(conf.get_string('train.dataset_class'))(False, **dataset_conf)
if eval_rendering:
eval_dataloader = torch.utils.data.DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
collate_fn=eval_dataset.collate_fn
)
total_pixels = eval_dataset.total_pixels
img_res = eval_dataset.img_res
old_checkpnts_dir = os.path.join(expdir, timestamp, 'checkpoints')
saved_model_state = torch.load(os.path.join(old_checkpnts_dir, 'ModelParameters', str(kwargs['checkpoint']) + ".pth"))
model.load_state_dict(saved_model_state["model_state_dict"])
epoch = saved_model_state['epoch']
####################################################################################################################
print("evaluating...")
model.eval()
detail_3dmm, detail_3dmm_subdivision_full = plt.get_displacement_mesh(model)
detail_3dmm.export('{0}/Detailed_3dmm_{1}.obj'.format(evaldir, epoch), 'obj')
detail_3dmm_subdivision_full.export('{0}/Subdivide_full_{1}.obj'.format(evaldir, epoch), 'obj')
if eval_animation:
sdf_np0, sdf_np1 = plt.get_displacement_animation(model)
np.save('{0}/Cropped_Detailed_sdf_{1}.npy'.format(evaldir, epoch), sdf_np0)
np.save('{0}/Cropped_Subdivide_full_{1}.npy'.format(evaldir, epoch), sdf_np1)
if eval_rendering:
images_dir = '{0}/rendering'.format(evaldir)
utils.mkdir_ifnotexists(images_dir)
psnrs = []
for data_index, (indices, model_input, ground_truth) in enumerate(eval_dataloader):
model_input["intrinsics"] = model_input["intrinsics"].cuda()
model_input["uv"] = model_input["uv"].cuda()
model_input["object_mask"] = model_input["object_mask"].cuda()
model_input['pose'] = model_input['pose'].cuda()
split = utils.split_input(model_input, total_pixels)
res = []
for s in split:
out = model(s)
res.append({
'rgb_values': out['rgb_values'].detach(),
'diffuse_values': out['diffuse_values'].detach(),
'specular_values': out['specular_values'].detach(),
'albedo_values': out['albedo_values'].detach(),
})
batch_size = ground_truth['rgb'].shape[0]
model_outputs = utils.merge_output(res, total_pixels, batch_size)
rgb_eval = model_outputs['rgb_values']
rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)
rgb_eval = (rgb_eval + 1.) / 2.
rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
rgb_eval = rgb_eval.transpose(1, 2, 0)
img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
img.save('{0}/eval_{1}.png'.format(images_dir,'%03d' % indices[0]))
diffuse_eval = model_outputs['diffuse_values']
diffuse_eval = diffuse_eval.reshape(batch_size, total_pixels, 3)
diffuse_eval = (diffuse_eval + 1.) / 2.
diffuse_eval = plt.lin2img(diffuse_eval, img_res).detach().cpu().numpy()[0]
diffuse_eval = diffuse_eval.transpose(1, 2, 0)
img = Image.fromarray((diffuse_eval * 255).astype(np.uint8))
img.save('{0}/eval_{1}_diffuse.png'.format(images_dir, '%03d' % indices[0]))
specular_eval = model_outputs['specular_values']
specular_eval = specular_eval.reshape(batch_size, total_pixels, 3)
specular_eval = (specular_eval + 1.) / 2.
specular_eval = plt.lin2img(specular_eval, img_res).detach().cpu().numpy()[0]
specular_eval = specular_eval.transpose(1, 2, 0)
img = Image.fromarray((specular_eval * 255).astype(np.uint8))
img.save('{0}/eval_{1}_specular.png'.format(images_dir, '%03d' % indices[0]))
albedo_eval = model_outputs['albedo_values']
albedo_eval = albedo_eval.reshape(batch_size, total_pixels, 3)
albedo_eval = (albedo_eval + 1.) / 2.
albedo_eval = plt.lin2img(albedo_eval, img_res).detach().cpu().numpy()[0]
albedo_eval = albedo_eval.transpose(1, 2, 0)
img = Image.fromarray((albedo_eval * 255).astype(np.uint8))
img.save('{0}/eval_{1}_albedo.png'.format(images_dir, '%03d' % indices[0]))
rgb_gt = ground_truth['rgb']
rgb_gt = (rgb_gt + 1.) / 2.
rgb_gt = plt.lin2img(rgb_gt, img_res).numpy()[0]
rgb_gt = rgb_gt.transpose(1, 2, 0)
mask = model_input['object_mask']
mask = plt.lin2img(mask.unsqueeze(-1), img_res).cpu().numpy()[0]
mask = mask.transpose(1, 2, 0)
rgb_eval_masked = rgb_eval * mask
rgb_gt_masked = rgb_gt * mask
psnr = calculate_psnr(rgb_eval_masked, rgb_gt_masked, mask)
psnrs.append(psnr)
psnrs = np.array(psnrs).astype(np.float64)
print("RENDERING EVALUATION {2}: psnr mean = {0} ; psnr std = {1}".format("%.2f" % psnrs.mean(), "%.2f" % psnrs.std(), scan_id))
def calculate_psnr(img1, img2, mask):
# img1 and img2 have range [0, 1]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
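    # MSE over the masked (foreground) pixels only: np.mean averages over all H*W
    # pixels, so rescale by H*W / mask.sum() to normalize by the mask area.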
mse = np.mean((img1 - img2)**2) * (img2.shape[0] * img2.shape[1]) / mask.sum()
if mse == 0:
return float('inf')
return 20 * math.log10(1.0 / math.sqrt(mse))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='./confs/test.conf')
parser.add_argument('--expname', type=str, default='', help='The experiment name to be evaluated.')
parser.add_argument('--exps_folder', type=str, default='exps', help='The experiments folder name.')
parser.add_argument('--gpu', type=str, default='auto', help='GPU to use [default: GPU auto]')
    parser.add_argument('--timestamp', default='latest', type=str, help='The experiment timestamp to test.')
parser.add_argument('--checkpoint', default='latest',type=str,help='The trained model checkpoint to test')
parser.add_argument('--scan_id', type=int, default=0, help='If set, taken to be the scan id.')
parser.add_argument('--resolution', default=512, type=int, help='Grid resolution for marching cube')
parser.add_argument('--is_uniform_grid', default=False, action="store_true", help='If set, evaluate marching cube with uniform grid.')
parser.add_argument('--eval_rendering', default=False, action="store_true",help='If set, evaluate rendering quality.')
    parser.add_argument('--eval_animation', default=False, action="store_true",help='If set, export the displacement animation data.')
opt = parser.parse_args()
if opt.gpu == "auto":
deviceIDs = GPUtil.getAvailable(order='memory', limit=1, maxLoad=0.5, maxMemory=0.5, includeNan=False, excludeID=[], excludeUUID=[])
gpu = deviceIDs[0]
else:
gpu = opt.gpu
if (not gpu == 'ignore'):
os.environ["CUDA_VISIBLE_DEVICES"] = '{0}'.format(gpu)
evaluate(conf=opt.conf,
expname=opt.expname,
exps_folder_name=opt.exps_folder,
evals_folder_name='evals',
timestamp=opt.timestamp,
checkpoint=opt.checkpoint,
scan_id=opt.scan_id,
resolution=opt.resolution,
eval_rendering=opt.eval_rendering,
eval_animation=opt.eval_animation
)
| insightface/reconstruction/PBIDR/code/evaluation/eval.py/0 | {
"file_path": "insightface/reconstruction/PBIDR/code/evaluation/eval.py",
"repo_id": "insightface",
"token_count": 4566
} | 138 |
from easydict import EasyDict as edict
config = edict()
config.dataset = "wcpa"
config.root_dir = '/data/insightface/wcpa'
config.cache_dir = './cache_align'
#config.num_classes = 617970
#config.num_classes = 2000000
#config.num_classes = 80000000
#config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
#config.val_targets = ["lfw"]
#config.val_targets = []
config.verbose = 20000
#config.network = 'resnet34d'
config.network = 'resnet_jmlr'
config.input_size = 256
#config.width_mult = 1.0
#config.dropout = 0.0
#config.loss = 'cosface'
#config.embedding_size = 512
#config.sample_rate = 0.2
config.fp16 = 0
config.tf32 = True
config.weight_decay = 5e-4
config.batch_size = 64
config.lr = 0.1 # lr when batch size is 512
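# Note (assumption, not part of the original config): the comment above suggests
# the usual linear LR scaling rule, i.e. the effective rate for another global
# batch size would be roughly config.lr * global_batch_size / 512.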
config.aug_modes = ['1']
config.num_epochs = 40
config.warmup_epochs = 1
config.max_warmup_steps = 1000
#def lr_step_func(epoch):
# return ((epoch + 1) / (4 + 1)) ** 2 if epoch < -1 else 0.1 ** len(
# [m for m in [20, 30, 38] if m - 1 <= epoch])
#config.lr_func = lr_step_func
config.task = 0
config.save_every_epochs = False
config.lossw_verts3d = 16.0
config.align_face = True
config.use_trainval = True
#config.use_rtloss = True
config.loss_bone3d = True
config.lossw_bone3d = 2.0
| insightface/reconstruction/jmlr/configs/s1.py/0 | {
"file_path": "insightface/reconstruction/jmlr/configs/s1.py",
"repo_id": "insightface",
"token_count": 508
} | 139 |
import tensorflow as tf
import numpy as np
slim = tf.contrib.slim
# custom layers
def deconv_layer(net, up_scale, n_channel, method='transpose'):
nh = tf.shape(net)[-3] * up_scale
nw = tf.shape(net)[-2] * up_scale
if method == 'transpose':
net = slim.conv2d_transpose(net, n_channel, (up_scale, up_scale), (
up_scale, up_scale), activation_fn=None, padding='VALID')
elif method == 'transpose+conv':
net = slim.conv2d_transpose(net, n_channel, (up_scale, up_scale), (
up_scale, up_scale), activation_fn=None, padding='VALID')
net = slim.conv2d(net, n_channel, (3, 3), (1, 1))
elif method == 'transpose+conv+relu':
net = slim.conv2d_transpose(net, n_channel, (up_scale, up_scale), (
up_scale, up_scale), padding='VALID')
net = slim.conv2d(net, n_channel, (3, 3), (1, 1))
elif method == 'bilinear':
net = tf.image.resize_images(net, [nh, nw])
else:
raise Exception('Unrecognised Deconvolution Method: %s' % method)
return net
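# Minimal usage sketch (assumption, not in the original file): upsampling a
# feature map by 2x with two of the supported methods. Tensor names below are
# hypothetical; this only illustrates the `method` switch above.
#
#   x = tf.placeholder(tf.float32, [None, 64, 64, 256])
#   up_bilinear = deconv_layer(x, 2, 256, method='bilinear')
#   up_learned = deconv_layer(x, 2, 256, method='transpose+conv+relu')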
# arg scopes
def hourglass_arg_scope_torch(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
"""Defines the default ResNet arg scope.
Args:
is_training: Whether or not we are training the parameters in the batch
normalization layers of the model.
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=None,
normalizer_fn=None,
normalizer_params=None):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def hourglass_arg_scope_tf(weight_decay=0.0001,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
"""Defines the default ResNet arg scope.
Args:
is_training: Whether or not we are training the parameters in the batch
normalization layers of the model.
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: The moving average decay when estimating layer activation
statistics in batch normalization.
batch_norm_epsilon: Small constant to prevent division by zero when
normalizing activations by their variance in batch normalization.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the resnet models.
"""
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'updates_collections': tf.GraphKeys.UPDATE_OPS,
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
return arg_sc
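# Usage sketch (assumption): the arg scope is meant to wrap layer-building code
# so every slim.conv2d picks up the regularizer / batch-norm defaults, e.g.
#
#   with slim.arg_scope(hourglass_arg_scope_tf(weight_decay=1e-4)):
#       net = slim.conv2d(images, 64, (7, 7), 2)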
# bottleneck_inception_SE
def bottleneck_inception_SE_module(
inputs,
out_channel=256,
res=None,
scope='inception_block'):
min_channel = out_channel // 8
with tf.variable_scope(scope):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, min_channel * 3,
[1, 1], scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(inputs, min_channel * 3 // 2,
                                   [1, 1], scope='Conv2d_1x1')
branch_1 = slim.conv2d(
branch_1, min_channel * 3, [3, 3], scope='Conv2d_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, min_channel //
3, [1, 1], scope='Conv2d_1x1')
branch_2 = slim.conv2d(
branch_2, min_channel, [3, 3], scope='Conv2d_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(inputs, [3, 3], 1, scope='MaxPool_3x3')
branch_3 = slim.conv2d(
branch_3, min_channel, [1, 1], scope='Conv2d_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
se_branch = tf.reduce_mean(net, axis=[1, 2])
se_branch = slim.fully_connected(se_branch, out_channel // 16)
se_branch = slim.fully_connected(
se_branch, out_channel, activation_fn=tf.sigmoid)
net = net * se_branch[:,None,None,:]
if res:
            inputs = slim.conv2d(inputs, res, (1, 1),
                                 scope='bn_res')
net += inputs
return net
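# Note on the SE branch above (illustrative, not from the original authors'
# comments): the two fully-connected layers implement a squeeze-and-excitation
# gate -- global average pooling over H, W, a bottleneck of out_channel // 16
# units, then a sigmoid whose output rescales each concatenated channel.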
# bottleneck modules
def bottleneck_inception_module(
inputs,
out_channel=256,
res=None,
scope='inception_block'):
min_channel = out_channel // 8
with tf.variable_scope(scope):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(inputs, min_channel * 3,
[1, 1], scope='Conv2d_1x1')
with tf.variable_scope('Branch_1'):
            branch_1 = slim.conv2d(inputs, min_channel * 3 // 2,
                                   [1, 1], scope='Conv2d_1x1')
branch_1 = slim.conv2d(
branch_1, min_channel * 3, [3, 3], scope='Conv2d_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(inputs, min_channel //
3, [1, 1], scope='Conv2d_1x1')
branch_2 = slim.conv2d(
branch_2, min_channel, [3, 3], scope='Conv2d_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(inputs, [3, 3], 1, scope='MaxPool_3x3')
branch_3 = slim.conv2d(
branch_3, min_channel, [1, 1], scope='Conv2d_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
if res:
            inputs = slim.conv2d(inputs, res, (1, 1),
                                 scope='bn_res')
net += inputs
return net
def bottleneck_module(inputs, out_channel=256, res=None, scope=''):
with tf.variable_scope(scope):
net = slim.stack(inputs, slim.conv2d, [
(out_channel // 2, [1, 1]), (out_channel // 2, [3, 3]), (out_channel, [1, 1])], scope='conv')
if res:
            inputs = slim.conv2d(inputs, res, (1, 1),
                                 scope='bn_res')
net += inputs
return net
# recursive hourglass definition
def hourglass_module(inputs, depth=0, deconv='bilinear', bottleneck='bottleneck'):
bm_fn = globals()['%s_module' % bottleneck]
with tf.variable_scope('depth_{}'.format(depth)):
        # bottom-up layers
net = slim.max_pool2d(inputs, [2, 2], scope='pool')
net = slim.stack(net, bm_fn, [
(256, None), (256, None), (256, None)], scope='buttom_up')
# connecting layers
if depth > 0:
net = hourglass_module(net, depth=depth - 1, deconv=deconv)
else:
net = bm_fn(
net, out_channel=512, res=512, scope='connecting')
# top down layers
net = bm_fn(net, out_channel=512,
res=512, scope='top_down')
net = deconv_layer(net, 2, 512, method=deconv)
# residual layers
net += slim.stack(inputs, bm_fn,
[(256, None), (256, None), (512, 512)], scope='res')
return net
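# Recursion sketch (illustrative): with depth=4 the module nests five pooling
# levels; each level is  pool -> 3 bottlenecks -> (recurse, or the 'connecting'
# block at the deepest level) -> 'top_down' block -> 2x upsample, plus a skip
# branch ('res') that is added back at the level's original resolution.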
def hourglass(inputs,
scale=1,
regression_channels=2,
classification_channels=22,
deconv='bilinear',
bottleneck='bottleneck'):
"""Defines a lightweight resnet based model for dense estimation tasks.
Args:
inputs: A `Tensor` with dimensions [num_batches, height, width, depth].
scale: A scalar which denotes the factor to subsample the current image.
output_channels: The number of output channels. E.g., for human pose
estimation this equals 13 channels.
Returns:
A `Tensor` of dimensions [num_batches, height, width, output_channels]."""
out_shape = tf.shape(inputs)[1:3]
if scale > 1:
inputs = tf.pad(inputs, ((0, 0), (1, 1), (1, 1), (0, 0)))
inputs = slim.layers.avg_pool2d(
inputs, (3, 3), (scale, scale), padding='VALID')
output_channels = regression_channels + classification_channels
with slim.arg_scope(hourglass_arg_scope_tf()):
# D1
net = slim.conv2d(inputs, 64, (7, 7), 2, scope='conv1')
net = bottleneck_module(net, out_channel=128,
res=128, scope='bottleneck1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
# D2
net = slim.stack(net, bottleneck_module, [
(128, None), (128, None), (256, 256)], scope='conv2')
# hourglasses (D3,D4,D5)
with tf.variable_scope('hourglass'):
net = hourglass_module(
net, depth=4, deconv=deconv, bottleneck=bottleneck)
# final layers (D6, D7)
net = slim.stack(net, slim.conv2d, [(512, [1, 1]), (256, [1, 1]),
(output_channels, [1, 1])
], scope='conv3')
net = deconv_layer(net, 4, output_channels, method=deconv)
net = slim.conv2d(net, output_channels, 1, scope='conv_last')
regression = slim.conv2d(
net, regression_channels, 1, activation_fn=None
) if regression_channels else None
logits = slim.conv2d(
net, classification_channels, 1, activation_fn=None
) if classification_channels else None
return regression, logits
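# End-to-end usage sketch (assumption; shapes and names are hypothetical):
#
#   images = tf.placeholder(tf.float32, [None, 256, 256, 3])
#   regression, logits = hourglass(images,
#                                  regression_channels=2,
#                                  classification_channels=22)
#   # `regression` carries 2 output maps and `logits` 22, roughly at the input
#   # resolution when scale=1.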
def StackedHourglassTorch(inputs, out_channels=16, deconv='bilinear'):
net = inputs
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.Sequential'):
net = tf.pad(net, np.array([[0, 0], [3, 3], [3, 3], [0, 0]]))
net = slim.conv2d(net, 64, (7, 7), (2, 2),
activation_fn=None, padding='VALID')
net = slim.batch_norm(net)
net = slim.nn.relu(net)
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0 = net
with tf.name_scope('nn.Sequential'):
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.conv2d(
net0, 64, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0 = slim.conv2d(
net0, 64, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.conv2d(
net0, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net1 = net
with tf.name_scope('nn.Sequential'):
net1 = tf.pad(net1, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net1 = slim.conv2d(
net1, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net = tf.add_n([net0, net1])
net = tf.pad(net, np.array([[0, 0], [0, 0], [0, 0], [0, 0]]))
net = slim.max_pool2d(net, (2, 2), (2, 2))
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0 = net
with tf.name_scope('nn.Sequential'):
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.conv2d(
net0, 64, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0 = slim.conv2d(
net0, 64, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.conv2d(
net0, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net1 = net
net = tf.add_n([net0, net1])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0 = net
with tf.name_scope('nn.Sequential'):
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.conv2d(
net0, 64, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0 = slim.conv2d(
net0, 64, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.conv2d(
net0, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net1 = net
net = tf.add_n([net0, net1])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0 = net
with tf.name_scope('nn.Sequential'):
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.conv2d(
net0, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0 = slim.conv2d(
net0, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0 = slim.batch_norm(net0)
net0 = slim.nn.relu(net0)
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.conv2d(
net0, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net1 = net
with tf.name_scope('nn.Sequential'):
net1 = tf.pad(net1, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net1 = slim.conv2d(
net1, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net = tf.add_n([net0, net1])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0 = net
with tf.name_scope('nn.Sequential'):
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = slim.max_pool2d(net0, (2, 2), (2, 2))
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00 = net0
with tf.name_scope('nn.Sequential'):
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.conv2d(
net00, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00 = slim.conv2d(
net00, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.conv2d(
net00, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net01 = net0
net0 = tf.add_n([net00, net01])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00 = net0
with tf.name_scope('nn.Sequential'):
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.conv2d(
net00, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00 = slim.conv2d(
net00, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.conv2d(
net00, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net01 = net0
net0 = tf.add_n([net00, net01])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00 = net0
with tf.name_scope('nn.Sequential'):
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.conv2d(
net00, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00 = slim.conv2d(
net00, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.conv2d(
net00, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net01 = net0
net0 = tf.add_n([net00, net01])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00 = net0
with tf.name_scope('nn.Sequential'):
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.max_pool2d(
net00, (2, 2), (2, 2))
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net000 = net00
with tf.name_scope('nn.Sequential'):
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.conv2d(
net000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net000 = slim.conv2d(
net000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.conv2d(
net000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net001 = net00
net00 = tf.add_n([net000, net001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net000 = net00
with tf.name_scope('nn.Sequential'):
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.conv2d(
net000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net000 = slim.conv2d(
net000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.conv2d(
net000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net001 = net00
net00 = tf.add_n([net000, net001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net000 = net00
with tf.name_scope('nn.Sequential'):
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.conv2d(
net000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net000 = slim.conv2d(
net000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.conv2d(
net000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net001 = net00
net00 = tf.add_n([net000, net001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net000 = net00
with tf.name_scope('nn.Sequential'):
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.max_pool2d(
net000, (2, 2), (2, 2))
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0000 = net000
with tf.name_scope('nn.Sequential'):
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.conv2d(
net0000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0000 = slim.conv2d(
net0000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.conv2d(
net0000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0001 = net000
net000 = tf.add_n(
[net0000, net0001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0000 = net000
with tf.name_scope('nn.Sequential'):
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.conv2d(
net0000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0000 = slim.conv2d(
net0000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.conv2d(
net0000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0001 = net000
net000 = tf.add_n(
[net0000, net0001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0000 = net000
with tf.name_scope('nn.Sequential'):
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.conv2d(
net0000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0000 = slim.conv2d(
net0000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.conv2d(
net0000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0001 = net000
net000 = tf.add_n(
[net0000, net0001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0000 = net000
with tf.name_scope('nn.Sequential'):
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.max_pool2d(
net0000, (2, 2), (2, 2))
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00000 = net0000
with tf.name_scope('nn.Sequential'):
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00000 = slim.conv2d(
net00000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00001 = net0000
net0000 = tf.add_n(
[net00000, net00001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00000 = net0000
with tf.name_scope('nn.Sequential'):
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00000 = slim.conv2d(
net00000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00001 = net0000
net0000 = tf.add_n(
[net00000, net00001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00000 = net0000
with tf.name_scope('nn.Sequential'):
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00000 = slim.conv2d(
net00000, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00001 = net0000
net0000 = tf.add_n(
[net00000, net00001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00000 = net0000
with tf.name_scope('nn.Sequential'):
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00000 = slim.conv2d(
net00000, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00001 = net0000
with tf.name_scope('nn.Sequential'):
net00001 = tf.pad(net00001, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00001 = slim.conv2d(
net00001, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0000 = tf.add_n(
[net00000, net00001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00000 = net0000
with tf.name_scope('nn.Sequential'):
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00000 = slim.conv2d(
net00000, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00000 = slim.batch_norm(
net00000)
net00000 = slim.nn.relu(
net00000)
net00000 = tf.pad(net00000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00000 = slim.conv2d(
net00000, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00001 = net0000
net0000 = tf.add_n(
[net00000, net00001])
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = deconv_layer(
net0000, 2, 512, method=deconv)
net0001 = net000
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00010 = net0001
with tf.name_scope('nn.Sequential'):
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00010 = slim.conv2d(
net00010, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00010 = slim.conv2d(
net00010, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00010 = slim.conv2d(
net00010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00011 = net0001
net0001 = tf.add_n(
[net00010, net00011])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00010 = net0001
with tf.name_scope('nn.Sequential'):
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00010 = slim.conv2d(
net00010, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00010 = slim.conv2d(
net00010, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00010 = slim.conv2d(
net00010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00011 = net0001
net0001 = tf.add_n(
[net00010, net00011])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00010 = net0001
with tf.name_scope('nn.Sequential'):
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00010 = slim.conv2d(
net00010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00010 = slim.conv2d(
net00010, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00010 = slim.batch_norm(
net00010)
net00010 = slim.nn.relu(
net00010)
net00010 = tf.pad(net00010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00010 = slim.conv2d(
net00010, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00011 = net0001
with tf.name_scope('nn.Sequential'):
net00011 = tf.pad(net00011, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00011 = slim.conv2d(
net00011, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0001 = tf.add_n(
[net00010, net00011])
net000 = tf.add_n(
[net0000, net0001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0000 = net000
with tf.name_scope('nn.Sequential'):
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.conv2d(
net0000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0000 = slim.conv2d(
net0000, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0000 = slim.batch_norm(
net0000)
net0000 = slim.nn.relu(
net0000)
net0000 = tf.pad(net0000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0000 = slim.conv2d(
net0000, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0001 = net000
net000 = tf.add_n(
[net0000, net0001])
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = deconv_layer(
net000, 2, 512, method=deconv)
net001 = net00
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0010 = net001
with tf.name_scope('nn.Sequential'):
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0010 = slim.conv2d(
net0010, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0010 = slim.conv2d(
net0010, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0010 = slim.conv2d(
net0010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0011 = net001
net001 = tf.add_n(
[net0010, net0011])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0010 = net001
with tf.name_scope('nn.Sequential'):
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0010 = slim.conv2d(
net0010, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0010 = slim.conv2d(
net0010, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0010 = slim.conv2d(
net0010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0011 = net001
net001 = tf.add_n(
[net0010, net0011])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net0010 = net001
with tf.name_scope('nn.Sequential'):
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0010 = slim.conv2d(
net0010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net0010 = slim.conv2d(
net0010, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net0010 = slim.batch_norm(
net0010)
net0010 = slim.nn.relu(
net0010)
net0010 = tf.pad(net0010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0010 = slim.conv2d(
net0010, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net0011 = net001
with tf.name_scope('nn.Sequential'):
net0011 = tf.pad(net0011, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0011 = slim.conv2d(
net0011, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net001 = tf.add_n(
[net0010, net0011])
net00 = tf.add_n([net000, net001])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net000 = net00
with tf.name_scope('nn.Sequential'):
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(
net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.conv2d(
net000, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(
net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net000 = slim.conv2d(
net000, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net000 = slim.batch_norm(
net000)
net000 = slim.nn.relu(
net000)
net000 = tf.pad(net000, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net000 = slim.conv2d(
net000, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net001 = net00
net00 = tf.add_n([net000, net001])
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = deconv_layer(
net00, 2, 512, method=deconv)
net01 = net0
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net010 = net01
with tf.name_scope('nn.Sequential'):
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net010 = slim.conv2d(
net010, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net010 = slim.conv2d(
net010, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net010 = slim.conv2d(
net010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net011 = net01
net01 = tf.add_n([net010, net011])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net010 = net01
with tf.name_scope('nn.Sequential'):
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net010 = slim.conv2d(
net010, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net010 = slim.conv2d(
net010, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net010 = slim.conv2d(
net010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net011 = net01
net01 = tf.add_n([net010, net011])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net010 = net01
with tf.name_scope('nn.Sequential'):
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net010 = slim.conv2d(
net010, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net010 = slim.conv2d(
net010, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net010 = slim.batch_norm(
net010)
net010 = slim.nn.relu(net010)
net010 = tf.pad(net010, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net010 = slim.conv2d(
net010, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net011 = net01
with tf.name_scope('nn.Sequential'):
net011 = tf.pad(net011, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net011 = slim.conv2d(
net011, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net01 = tf.add_n([net010, net011])
net0 = tf.add_n([net00, net01])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net00 = net0
with tf.name_scope('nn.Sequential'):
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.conv2d(
net00, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net00 = slim.conv2d(
net00, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net00 = slim.batch_norm(net00)
net00 = slim.nn.relu(net00)
net00 = tf.pad(net00, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net00 = slim.conv2d(
net00, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net01 = net0
net0 = tf.add_n([net00, net01])
net0 = tf.pad(net0, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net0 = deconv_layer(net0, 2, 512, method=deconv)
net1 = net
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net10 = net1
with tf.name_scope('nn.Sequential'):
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net10 = slim.conv2d(
net10, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net10 = slim.conv2d(
net10, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net10 = slim.conv2d(
net10, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net11 = net1
net1 = tf.add_n([net10, net11])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net10 = net1
with tf.name_scope('nn.Sequential'):
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net10 = slim.conv2d(
net10, 128, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net10 = slim.conv2d(
net10, 128, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net10 = slim.conv2d(
net10, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net11 = net1
net1 = tf.add_n([net10, net11])
with tf.name_scope('nn.Sequential'):
with tf.name_scope('nn.ConcatTable'):
net10 = net1
with tf.name_scope('nn.Sequential'):
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net10 = slim.conv2d(
net10, 256, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [1, 1], [1, 1], [0, 0]]))
net10 = slim.conv2d(
net10, 256, (3, 3), (1, 1), activation_fn=None, padding='VALID')
net10 = slim.batch_norm(net10)
net10 = slim.nn.relu(net10)
net10 = tf.pad(net10, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net10 = slim.conv2d(
net10, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net11 = net1
with tf.name_scope('nn.Sequential'):
net11 = tf.pad(net11, np.array(
[[0, 0], [0, 0], [0, 0], [0, 0]]))
net11 = slim.conv2d(
net11, 512, (1, 1), (1, 1), activation_fn=None, padding='VALID')
net1 = tf.add_n([net10, net11])
net = tf.add_n([net0, net1])
net = tf.pad(net, np.array([[0, 0], [0, 0], [0, 0], [0, 0]]))
net = slim.conv2d(net, 512, (1, 1), (1, 1),
activation_fn=None, padding='VALID')
net = slim.batch_norm(net)
net = slim.nn.relu(net)
net = tf.pad(net, np.array([[0, 0], [0, 0], [0, 0], [0, 0]]))
net = slim.conv2d(net, 256, (1, 1), (1, 1),
activation_fn=None, padding='VALID')
net = slim.batch_norm(net)
net = slim.nn.relu(net)
net = tf.pad(net, np.array([[0, 0], [0, 0], [0, 0], [0, 0]]))
net = slim.conv2d(net, out_channels, (1, 1), (1, 1),
activation_fn=None, padding='VALID')
net = tf.pad(net, np.array([[0, 0], [0, 0], [0, 0], [0, 0]]))
net = deconv_layer(net, 4, out_channels, method=deconv)
return net
| insightface/reconstruction/ostec/external/landmark_detector/models.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/landmark_detector/models.py",
"repo_id": "insightface",
"token_count": 71976
} | 140 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Helper for managing networks."""
import types
import inspect
import re
import uuid
import sys
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import Any, List, Tuple, Union
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
_import_module_src = dict() # Source code for temporary modules created during pickle import.
def import_handler(handler_func):
"""Function decorator for declaring custom import handlers."""
_import_handlers.append(handler_func)
return handler_func
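# Illustrative sketch (not part of the original file): a custom import handler
# is a function that takes the pickled `state` dict and returns a possibly
# modified one; it is registered by applying the decorator above, e.g.
#
#   @import_handler
#   def _upgrade_old_pickles(state):
#       state.setdefault("components", {})
#       return state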
class Network:
"""Generic network abstraction.
Acts as a convenience wrapper for a parameterized network construction
function, providing several utility methods and convenient access to
the inputs/outputs/weights.
Network objects can be safely pickled and unpickled for long-term
archival purposes. The pickling works reliably as long as the underlying
network construction function is defined in a standalone Python module
that has no side effects or application-specific imports.
Args:
name: Network name. Used to select TensorFlow name and variable scopes.
func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
static_kwargs: Keyword arguments to be passed in to the network construction function.
Attributes:
name: User-specified name, defaults to build func name if None.
scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name.
static_kwargs: Arguments passed to the user-supplied build func.
components: Container for sub-networks. Passed to the build func, and retained between calls.
num_inputs: Number of input tensors.
num_outputs: Number of output tensors.
input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension.
output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension.
input_shape: Short-hand for input_shapes[0].
output_shape: Short-hand for output_shapes[0].
input_templates: Input placeholders in the template graph.
output_templates: Output tensors in the template graph.
input_names: Name string for each input.
output_names: Name string for each output.
own_vars: Variables defined by this network (local_name => var), excluding sub-networks.
vars: All variables (local_name => var).
trainables: All trainable variables (local_name => var).
var_global_to_local: Mapping from variable global names to local names.
"""
def __init__(self, name: str = None, func_name: Any = None, **static_kwargs):
tfutil.assert_tf_initialized()
assert isinstance(name, str) or name is None
assert func_name is not None
assert isinstance(func_name, str) or util.is_top_level_function(func_name)
assert util.is_pickleable(static_kwargs)
self._init_fields()
self.name = name
self.static_kwargs = util.EasyDict(static_kwargs)
# Locate the user-specified network build function.
if util.is_top_level_function(func_name):
func_name = util.get_top_level_function_name(func_name)
module, self._build_func_name = util.get_module_from_obj_name(func_name)
self._build_func = util.get_obj_from_module(module, self._build_func_name)
assert callable(self._build_func)
# Dig up source code for the module containing the build function.
self._build_module_src = _import_module_src.get(module, None)
if self._build_module_src is None:
self._build_module_src = inspect.getsource(module)
# Init TensorFlow graph.
self._init_graph()
self.reset_own_vars()
def _init_fields(self) -> None:
self.name = None
self.scope = None
self.static_kwargs = util.EasyDict()
self.components = util.EasyDict()
self.num_inputs = 0
self.num_outputs = 0
self.input_shapes = [[]]
self.output_shapes = [[]]
self.input_shape = []
self.output_shape = []
self.input_templates = []
self.output_templates = []
self.input_names = []
self.output_names = []
self.own_vars = OrderedDict()
self.vars = OrderedDict()
self.trainables = OrderedDict()
self.var_global_to_local = OrderedDict()
self._build_func = None # User-supplied build function that constructs the network.
self._build_func_name = None # Name of the build function.
self._build_module_src = None # Full source code of the module containing the build function.
self._run_cache = dict() # Cached graph data for Network.run().
def _init_graph(self) -> None:
# Collect inputs.
self.input_names = []
for param in inspect.signature(self._build_func).parameters.values():
if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
self.input_names.append(param.name)
self.num_inputs = len(self.input_names)
assert self.num_inputs >= 1
# Choose name and scope.
if self.name is None:
self.name = self._build_func_name
assert re.match("^[A-Za-z0-9_.\\-]*$", self.name)
with tf.name_scope(None):
self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True)
# Finalize build func kwargs.
build_kwargs = dict(self.static_kwargs)
build_kwargs["is_template_graph"] = True
build_kwargs["components"] = self.components
# Build template graph.
with tfutil.absolute_variable_scope(self.scope, reuse=False), tfutil.absolute_name_scope(self.scope): # ignore surrounding scopes
assert tf.get_variable_scope().name == self.scope
assert tf.get_default_graph().get_name_scope() == self.scope
with tf.control_dependencies(None): # ignore surrounding control dependencies
self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
out_expr = self._build_func(*self.input_templates, **build_kwargs)
# Collect outputs.
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
self.num_outputs = len(self.output_templates)
assert self.num_outputs >= 1
assert all(tfutil.is_tf_expression(t) for t in self.output_templates)
# Perform sanity checks.
if any(t.shape.ndims is None for t in self.input_templates):
raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
if any(t.shape.ndims is None for t in self.output_templates):
raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
if any(not isinstance(comp, Network) for comp in self.components.values()):
raise ValueError("Components of a Network must be Networks themselves.")
if len(self.components) != len(set(comp.name for comp in self.components.values())):
raise ValueError("Components of a Network must have unique names.")
# List inputs and outputs.
self.input_shapes = [t.shape.as_list() for t in self.input_templates]
self.output_shapes = [t.shape.as_list() for t in self.output_templates]
self.input_shape = self.input_shapes[0]
self.output_shape = self.output_shapes[0]
self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]
# List variables.
self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
self.vars = OrderedDict(self.own_vars)
self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items())
self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
def reset_own_vars(self) -> None:
"""Re-initialize all variables of this network, excluding sub-networks."""
tfutil.run([var.initializer for var in self.own_vars.values()])
def reset_vars(self) -> None:
"""Re-initialize all variables of this network, including sub-networks."""
tfutil.run([var.initializer for var in self.vars.values()])
def reset_trainables(self) -> None:
"""Re-initialize all trainable variables of this network, including sub-networks."""
tfutil.run([var.initializer for var in self.trainables.values()])
def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
"""Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s)."""
assert len(in_expr) == self.num_inputs
assert not all(expr is None for expr in in_expr)
# Finalize build func kwargs.
build_kwargs = dict(self.static_kwargs)
build_kwargs.update(dynamic_kwargs)
build_kwargs["is_template_graph"] = False
build_kwargs["components"] = self.components
# Build TensorFlow graph to evaluate the network.
with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
assert tf.get_variable_scope().name == self.scope
valid_inputs = [expr for expr in in_expr if expr is not None]
final_inputs = []
for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
if expr is not None:
expr = tf.identity(expr, name=name)
else:
expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
final_inputs.append(expr)
out_expr = self._build_func(*final_inputs, **build_kwargs)
# Propagate input shapes back to the user-specified expressions.
for expr, final in zip(in_expr, final_inputs):
if isinstance(expr, tf.Tensor):
expr.set_shape(final.shape)
# Express outputs in the desired format.
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
if return_as_list:
out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
return out_expr
def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
"""Get the local name of a given variable, without any surrounding name scopes."""
assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name
return self.var_global_to_local[global_name]
def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
"""Find variable by local or global name."""
assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
return self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name
def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
"""Get the value of a given variable as NumPy array.
Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
return self.find_var(var_or_local_name).eval()
def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
"""Set the value of a given variable based on the given NumPy array.
Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
tfutil.set_vars({self.find_var(var_or_local_name): new_value})
def __getstate__(self) -> dict:
"""Pickle export."""
state = dict()
state["version"] = 4
state["name"] = self.name
state["static_kwargs"] = dict(self.static_kwargs)
state["components"] = dict(self.components)
state["build_module_src"] = self._build_module_src
state["build_func_name"] = self._build_func_name
state["variables"] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values()))))
return state
def __setstate__(self, state: dict) -> None:
"""Pickle import."""
# pylint: disable=attribute-defined-outside-init
tfutil.assert_tf_initialized()
self._init_fields()
# Execute custom import handlers.
for handler in _import_handlers:
state = handler(state)
# Set basic fields.
assert state["version"] in [2, 3, 4]
self.name = state["name"]
self.static_kwargs = util.EasyDict(state["static_kwargs"])
self.components = util.EasyDict(state.get("components", {}))
self._build_module_src = state["build_module_src"]
self._build_func_name = state["build_func_name"]
# Create temporary module from the imported source code.
module_name = "_tflib_network_import_" + uuid.uuid4().hex
module = types.ModuleType(module_name)
sys.modules[module_name] = module
_import_module_src[module] = self._build_module_src
exec(self._build_module_src, module.__dict__) # pylint: disable=exec-used
# Locate network build function in the temporary module.
self._build_func = util.get_obj_from_module(module, self._build_func_name)
assert callable(self._build_func)
# Init TensorFlow graph.
self._init_graph()
self.reset_own_vars()
tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]})
def clone(self, name: str = None, **new_static_kwargs) -> "Network":
"""Create a clone of this network with its own copy of the variables."""
# pylint: disable=protected-access
net = object.__new__(Network)
net._init_fields()
net.name = name if name is not None else self.name
net.static_kwargs = util.EasyDict(self.static_kwargs)
net.static_kwargs.update(new_static_kwargs)
net._build_module_src = self._build_module_src
net._build_func_name = self._build_func_name
net._build_func = self._build_func
net._init_graph()
net.copy_vars_from(self)
return net
def copy_own_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network, excluding sub-networks."""
names = [name for name in self.own_vars.keys() if name in src_net.own_vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network, including sub-networks."""
names = [name for name in self.vars.keys() if name in src_net.vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_trainables_from(self, src_net: "Network") -> None:
"""Copy the values of all trainable variables from the given network, including sub-networks."""
names = [name for name in self.trainables.keys() if name in src_net.trainables]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
"""Create new network with the given parameters, and copy all variables from this network."""
if new_name is None:
new_name = self.name
static_kwargs = dict(self.static_kwargs)
static_kwargs.update(new_static_kwargs)
net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
net.copy_vars_from(self)
return net
def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
"""Construct a TensorFlow op that updates the variables of this network
to be slightly closer to those of the given network."""
with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
ops = []
for name, var in self.vars.items():
if name in src_net.vars:
cur_beta = beta if name in self.trainables else beta_nontrainable
new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
ops.append(var.assign(new_value))
return tf.group(*ops)
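    # Usage sketch (not part of the original class; the names G, Gs and Gs_beta are
    # hypothetical): a training loop typically keeps an exponential moving average
    # copy of the generator and runs the returned op after every optimization step.
    #
    #     Gs = G.clone("Gs")
    #     Gs_update_op = Gs.setup_as_moving_average_of(G, beta=Gs_beta)
    #     ...
    #     tfutil.run(Gs_update_op)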
def run(self,
*in_arrays: Tuple[Union[np.ndarray, None], ...],
input_transform: dict = None,
output_transform: dict = None,
return_as_list: bool = False,
print_progress: bool = False,
minibatch_size: int = None,
num_gpus: int = 1,
assume_frozen: bool = False,
custom_inputs: Any = None,
**dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
"""Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
Args:
input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
The dict must contain a 'func' field that points to a top-level function. The function is called with the input
TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
The dict must contain a 'func' field that points to a top-level function. The function is called with the output
TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
print_progress: Print progress to the console? Useful for very large input arrays.
minibatch_size: Maximum minibatch size to use, None = disable batching.
num_gpus: Number of GPUs to use.
            assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls.
            custom_inputs: Allow using custom TensorFlow expressions as inputs instead of the default placeholders.
dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
"""
assert len(in_arrays) == self.num_inputs
assert not all(arr is None for arr in in_arrays)
assert input_transform is None or util.is_top_level_function(input_transform["func"])
assert output_transform is None or util.is_top_level_function(output_transform["func"])
output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
num_items = in_arrays[0].shape[0]
if minibatch_size is None:
minibatch_size = num_items
# Construct unique hash key from all arguments that affect the TensorFlow graph.
key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
def unwind_key(obj):
if isinstance(obj, dict):
return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
if callable(obj):
return util.get_top_level_function_name(obj)
return obj
key = repr(unwind_key(key))
# Build graph.
if key not in self._run_cache:
with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
if custom_inputs is not None:
with tf.device("/gpu:0"):
in_expr = [input_builder(name) for input_builder, name in zip(custom_inputs, self.input_names)]
in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
else:
with tf.device("/cpu:0"):
in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
out_split = []
for gpu in range(num_gpus):
with tf.device("/gpu:%d" % gpu):
net_gpu = self.clone() if assume_frozen else self
in_gpu = in_split[gpu]
if input_transform is not None:
in_kwargs = dict(input_transform)
in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)
assert len(in_gpu) == self.num_inputs
out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
if output_transform is not None:
out_kwargs = dict(output_transform)
out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)
assert len(out_gpu) == self.num_outputs
out_split.append(out_gpu)
with tf.device("/cpu:0"):
out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
self._run_cache[key] = in_expr, out_expr
# Run minibatches.
in_expr, out_expr = self._run_cache[key]
out_arrays = [np.empty([num_items] + expr.shape.as_list()[1:], expr.dtype.name) for expr in out_expr]
for mb_begin in range(0, num_items, minibatch_size):
if print_progress:
print("\r%d / %d" % (mb_begin, num_items), end="")
mb_end = min(mb_begin + minibatch_size, num_items)
mb_num = mb_end - mb_begin
mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
for dst, src in zip(out_arrays, mb_out):
dst[mb_begin: mb_end] = src
# Done.
if print_progress:
print("\r%d / %d" % (num_items, num_items))
if not return_as_list:
out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
return out_arrays
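    # Usage sketch (assumed names, not part of the original file): run a pre-trained
    # generator on a batch of latents and convert the raw output to uint8 images via
    # the output_transform mechanism documented above.
    #
    #     latents = np.random.randn(8, *Gs.input_shape[1:])
    #     fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    #     images = Gs.run(latents, None, output_transform=fmt, minibatch_size=4)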
def list_ops(self) -> List[TfExpression]:
include_prefix = self.scope + "/"
exclude_prefix = include_prefix + "_"
ops = tf.get_default_graph().get_operations()
ops = [op for op in ops if op.name.startswith(include_prefix)]
ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
return ops
def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
"""Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
individual layers of the network. Mainly intended to be used for reporting."""
layers = []
def recurse(scope, parent_ops, parent_vars, level):
# Ignore specific patterns.
if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
return
# Filter ops and vars by scope.
global_prefix = scope + "/"
local_prefix = global_prefix[len(self.scope) + 1:]
cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]]
cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]]
if not cur_ops and not cur_vars:
return
# Filter out all ops related to variables.
for var in [op for op in cur_ops if op.type.startswith("Variable")]:
var_prefix = var.name + "/"
cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)]
# Scope does not contain ops as immediate children => recurse deeper.
contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type not in ["Identity", "Cast", "Transpose"] for op in cur_ops)
if (level == 0 or not contains_direct_ops) and (len(cur_ops) + len(cur_vars)) > 1:
visited = set()
for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]:
token = rel_name.split("/")[0]
if token not in visited:
recurse(global_prefix + token, cur_ops, cur_vars, level + 1)
visited.add(token)
return
# Report layer.
layer_name = scope[len(self.scope) + 1:]
layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1]
layer_trainables = [var for _name, var in cur_vars if var.trainable]
layers.append((layer_name, layer_output, layer_trainables))
recurse(self.scope, self.list_ops(), list(self.vars.items()), 0)
return layers
def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
"""Print a summary table of the network structure."""
rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]]
rows += [["---"] * 4]
total_params = 0
for layer_name, layer_output, layer_trainables in self.list_layers():
num_params = sum(int(np.prod(var.shape.as_list())) for var in layer_trainables)
weights = [var for var in layer_trainables if var.name.endswith("/weight:0")]
weights.sort(key=lambda x: len(x.name))
if len(weights) == 0 and len(layer_trainables) == 1:
weights = layer_trainables
total_params += num_params
if not hide_layers_with_no_params or num_params != 0:
num_params_str = str(num_params) if num_params > 0 else "-"
output_shape_str = str(layer_output.shape)
weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-"
rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
rows += [["---"] * 4]
rows += [["Total", str(total_params), "", ""]]
widths = [max(len(cell) for cell in column) for column in zip(*rows)]
print()
for row in rows:
print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
print()
def setup_weight_histograms(self, title: str = None) -> None:
"""Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
if title is None:
title = self.name
with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
for local_name, var in self.trainables.items():
if "/" in local_name:
p = local_name.split("/")
name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
else:
name = title + "_toplevel/" + local_name
tf.summary.histogram(name, var)
#----------------------------------------------------------------------------
# Backwards-compatible emulation of legacy output transformation in Network.run().
_print_legacy_warning = True
def _handle_legacy_output_transforms(output_transform, dynamic_kwargs):
global _print_legacy_warning
legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"]
if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs):
return output_transform, dynamic_kwargs
if _print_legacy_warning:
_print_legacy_warning = False
print()
print("WARNING: Old-style output transformations in Network.run() are deprecated.")
print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'")
print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.")
print()
assert output_transform is None
new_kwargs = dict(dynamic_kwargs)
new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs}
new_transform["func"] = _legacy_output_transform_func
return new_transform, new_kwargs
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
if out_mul != 1.0:
expr = [x * out_mul for x in expr]
if out_add != 0.0:
expr = [x + out_add for x in expr]
if out_shrink > 1:
ksize = [1, 1, out_shrink, out_shrink]
expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
if out_dtype is not None:
if tf.as_dtype(out_dtype).is_integer:
expr = [tf.round(x) for x in expr]
expr = [tf.saturate_cast(x, out_dtype) for x in expr]
return expr
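# Note (illustrative, derived from the handlers above): a legacy call that passed
# `out_mul=127.5, out_add=127.5, out_dtype=np.uint8` is rewritten by
# _handle_legacy_output_transforms() into
# `output_transform=dict(func=_legacy_output_transform_func, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)`,
# which scales, offsets and saturate-casts the outputs exactly as _legacy_output_transform_func() does.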
| insightface/reconstruction/ostec/external/stylegan2/dnnlib/tflib/network.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/dnnlib/tflib/network.py",
"repo_id": "insightface",
"token_count": 12832
} | 141 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Inception Score (IS)."""
import numpy as np
import tensorflow as tf
import dnnlib.tflib as tflib
from metrics import metric_base
from training import misc
#----------------------------------------------------------------------------
class IS(metric_base.MetricBase):
def __init__(self, num_images, num_splits, minibatch_per_gpu, **kwargs):
super().__init__(**kwargs)
self.num_images = num_images
self.num_splits = num_splits
self.minibatch_per_gpu = minibatch_per_gpu
def _evaluate(self, Gs, Gs_kwargs, num_gpus):
minibatch_size = num_gpus * self.minibatch_per_gpu
inception = misc.load_pkl('https://drive.google.com/uc?id=1Mz9zQnIrusm3duZB91ng_aUIePFNI6Jx') # inception_v3_softmax.pkl
activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32)
# Construct TensorFlow graph.
result_expr = []
for gpu_idx in range(num_gpus):
with tf.device('/gpu:%d' % gpu_idx):
Gs_clone = Gs.clone()
inception_clone = inception.clone()
latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:])
labels = self._get_random_labels_tf(self.minibatch_per_gpu)
images = Gs_clone.get_output_for(latents, labels, **Gs_kwargs)
images = tflib.convert_images_to_uint8(images)
result_expr.append(inception_clone.get_output_for(images))
# Calculate activations for fakes.
for begin in range(0, self.num_images, minibatch_size):
self._report_progress(begin, self.num_images)
end = min(begin + minibatch_size, self.num_images)
activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin]
# Calculate IS.
scores = []
for i in range(self.num_splits):
part = activations[i * self.num_images // self.num_splits : (i + 1) * self.num_images // self.num_splits]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
self._report_result(np.mean(scores), suffix='_mean')
self._report_result(np.std(scores), suffix='_std')
#----------------------------------------------------------------------------
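# A self-contained sketch (not part of the original metric) of the Inception Score
# arithmetic performed in _evaluate() above. `probs` stands for a hypothetical
# [num_images, num_classes] NumPy array of softmax activations; the per-split KL-based
# score mirrors the loop above.
def _inception_score_sketch(probs, num_splits=10):
    num = probs.shape[0]
    scores = []
    for i in range(num_splits):
        part = probs[i * num // num_splits : (i + 1) * num // num_splits]
        kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, axis=0), 0)))
        scores.append(np.exp(np.mean(np.sum(kl, axis=1))))
    return np.mean(scores), np.std(scores)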
| insightface/reconstruction/ostec/external/stylegan2/metrics/inception_score.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/metrics/inception_score.py",
"repo_id": "insightface",
"token_count": 1119
} | 142 |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""Miscellaneous utility functions."""
import os
import pickle
import numpy as np
import PIL.Image
import PIL.ImageFont
import dnnlib
#----------------------------------------------------------------------------
# Convenience wrappers for pickle that are able to load data produced by
# older versions of the code, and from external URLs.
def open_file_or_url(file_or_url):
if dnnlib.util.is_url(file_or_url):
return dnnlib.util.open_url(file_or_url, cache_dir='.stylegan2-cache')
return open(file_or_url, 'rb')
def load_pkl(file_or_url):
with open_file_or_url(file_or_url) as file:
return pickle.load(file, encoding='latin1')
def save_pkl(obj, filename):
with open(filename, 'wb') as file:
pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)
#----------------------------------------------------------------------------
# Image utils.
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
def create_image_grid(images, grid_size=None):
assert images.ndim == 3 or images.ndim == 4
num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2]
if grid_size is not None:
grid_w, grid_h = tuple(grid_size)
else:
grid_w = max(int(np.ceil(np.sqrt(num))), 1)
grid_h = max((num - 1) // grid_w + 1, 1)
grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype)
for idx in range(num):
x = (idx % grid_w) * img_w
y = (idx // grid_w) * img_h
grid[..., y : y + img_h, x : x + img_w] = images[idx]
return grid
def convert_to_pil_image(image, drange=[0,1]):
assert image.ndim == 2 or image.ndim == 3
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0] # grayscale CHW => HW
else:
image = image.transpose(1, 2, 0) # CHW -> HWC
image = adjust_dynamic_range(image, drange, [0,255])
image = np.rint(image).clip(0, 255).astype(np.uint8)
fmt = 'RGB' if image.ndim == 3 else 'L'
return PIL.Image.fromarray(image, fmt)
def save_image_grid(images, filename, drange=[0,1], grid_size=None):
convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename)
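# Usage sketch (assumed values, not from the original file): save an 8-image batch of
# CHW images in [-1, 1] as a PNG grid.
#
#     save_image_grid(fake_images, 'fakes.png', drange=[-1, 1], grid_size=(4, 2))
#
# With grid_size=None, create_image_grid() picks grid_w = ceil(sqrt(8)) = 3 and
# grid_h = (8 - 1) // 3 + 1 = 3 for the same batch.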
def apply_mirror_augment(minibatch):
mask = np.random.rand(minibatch.shape[0]) < 0.5
minibatch = np.array(minibatch)
minibatch[mask] = minibatch[mask, :, :, ::-1]
return minibatch
#----------------------------------------------------------------------------
# Loading data from previous training runs.
def parse_config_for_previous_run(run_dir):
with open(os.path.join(run_dir, 'submit_config.pkl'), 'rb') as f:
data = pickle.load(f)
data = data.get('run_func_kwargs', {})
return dict(train=data, dataset=data.get('dataset_args', {}))
#----------------------------------------------------------------------------
# Size and contents of the image snapshot grids that are exported
# periodically during training.
def setup_snapshot_image_grid(training_set,
size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.
layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.
# Select size.
gw = 1; gh = 1
if size == '1080p':
gw = np.clip(1920 // training_set.shape[2], 3, 32)
gh = np.clip(1080 // training_set.shape[1], 2, 32)
if size == '4k':
gw = np.clip(3840 // training_set.shape[2], 7, 32)
gh = np.clip(2160 // training_set.shape[1], 4, 32)
if size == '8k':
gw = np.clip(7680 // training_set.shape[2], 7, 32)
gh = np.clip(4320 // training_set.shape[1], 4, 32)
# Initialize data arrays.
reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)
labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)
# Random layout.
if layout == 'random':
reals[:], labels[:] = training_set.get_minibatch_np(gw * gh)
# Class-conditional layouts.
class_layouts = dict(row_per_class=[gw,1], col_per_class=[1,gh], class4x4=[4,4])
if layout in class_layouts:
bw, bh = class_layouts[layout]
nw = (gw - 1) // bw + 1
nh = (gh - 1) // bh + 1
blocks = [[] for _i in range(nw * nh)]
for _iter in range(1000000):
real, label = training_set.get_minibatch_np(1)
idx = np.argmax(label[0])
while idx < len(blocks) and len(blocks[idx]) >= bw * bh:
idx += training_set.label_size
if idx < len(blocks):
blocks[idx].append((real, label))
if all(len(block) >= bw * bh for block in blocks):
break
for i, block in enumerate(blocks):
for j, (real, label) in enumerate(block):
x = (i % nw) * bw + j % bw
y = (i // nw) * bh + j // bw
if x < gw and y < gh:
reals[x + y * gw] = real[0]
labels[x + y * gw] = label[0]
return (gw, gh), reals, labels
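# Worked example (illustrative): with 256x256 training images and size='1080p',
# gw = clip(1920 // 256, 3, 32) = 7 and gh = clip(1080 // 256, 2, 32) = 4,
# i.e. a 7x4 snapshot grid.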
#----------------------------------------------------------------------------
| insightface/reconstruction/ostec/external/stylegan2/training/misc.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/stylegan2/training/misc.py",
"repo_id": "insightface",
"token_count": 2418
} | 143 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Encoding">
<file url="file://$PROJECT_DIR$/src/main/java" charset="UTF-8" />
</component>
</project> | mybatis-native-demo/.idea/encodings.xml/0 | {
"file_path": "mybatis-native-demo/.idea/encodings.xml",
"repo_id": "mybatis-native-demo",
"token_count": 71
} | 144 |
package com.example.nativedemo;
import com.baomidou.mybatisplus.annotation.IEnum;
import com.baomidou.mybatisplus.core.MybatisParameterHandler;
import com.baomidou.mybatisplus.core.MybatisXMLLanguageDriver;
import com.baomidou.mybatisplus.core.conditions.AbstractWrapper;
import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.conditions.update.LambdaUpdateWrapper;
import com.baomidou.mybatisplus.core.conditions.update.UpdateWrapper;
import com.baomidou.mybatisplus.core.handlers.CompositeEnumTypeHandler;
import com.baomidou.mybatisplus.core.handlers.MybatisEnumTypeHandler;
import com.baomidou.mybatisplus.core.toolkit.support.SFunction;
import com.baomidou.mybatisplus.core.toolkit.support.SerializedLambda;
import com.baomidou.mybatisplus.extension.handlers.FastjsonTypeHandler;
import com.baomidou.mybatisplus.extension.handlers.GsonTypeHandler;
import com.baomidou.mybatisplus.extension.handlers.JacksonTypeHandler;
import com.baomidou.mybatisplus.extension.spring.MybatisSqlSessionFactoryBean;
import org.apache.commons.logging.LogFactory;
import org.apache.ibatis.annotations.DeleteProvider;
import org.apache.ibatis.annotations.InsertProvider;
import org.apache.ibatis.annotations.SelectProvider;
import org.apache.ibatis.annotations.UpdateProvider;
import org.apache.ibatis.cache.decorators.FifoCache;
import org.apache.ibatis.cache.decorators.LruCache;
import org.apache.ibatis.cache.decorators.SoftCache;
import org.apache.ibatis.cache.decorators.WeakCache;
import org.apache.ibatis.cache.impl.PerpetualCache;
import org.apache.ibatis.executor.Executor;
import org.apache.ibatis.executor.parameter.ParameterHandler;
import org.apache.ibatis.executor.resultset.ResultSetHandler;
import org.apache.ibatis.executor.statement.BaseStatementHandler;
import org.apache.ibatis.executor.statement.RoutingStatementHandler;
import org.apache.ibatis.executor.statement.StatementHandler;
import org.apache.ibatis.javassist.util.proxy.ProxyFactory;
import org.apache.ibatis.javassist.util.proxy.RuntimeSupport;
import org.apache.ibatis.logging.Log;
import org.apache.ibatis.logging.commons.JakartaCommonsLoggingImpl;
import org.apache.ibatis.logging.jdk14.Jdk14LoggingImpl;
import org.apache.ibatis.logging.log4j2.Log4j2Impl;
import org.apache.ibatis.logging.nologging.NoLoggingImpl;
import org.apache.ibatis.logging.slf4j.Slf4jImpl;
import org.apache.ibatis.logging.stdout.StdOutImpl;
import org.apache.ibatis.mapping.BoundSql;
import org.apache.ibatis.reflection.TypeParameterResolver;
import org.apache.ibatis.scripting.defaults.RawLanguageDriver;
import org.apache.ibatis.scripting.xmltags.XMLLanguageDriver;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.mapper.MapperFactoryBean;
import org.mybatis.spring.mapper.MapperScannerConfigurer;
import org.springframework.aot.hint.MemberCategory;
import org.springframework.aot.hint.RuntimeHints;
import org.springframework.aot.hint.RuntimeHintsRegistrar;
import org.springframework.beans.PropertyValue;
import org.springframework.beans.factory.BeanFactory;
import org.springframework.beans.factory.BeanFactoryAware;
import org.springframework.beans.factory.aot.BeanFactoryInitializationAotContribution;
import org.springframework.beans.factory.aot.BeanFactoryInitializationAotProcessor;
import org.springframework.beans.factory.aot.BeanRegistrationExcludeFilter;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.ConfigurableBeanFactory;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.config.ConstructorArgumentValues;
import org.springframework.beans.factory.support.MergedBeanDefinitionPostProcessor;
import org.springframework.beans.factory.support.RegisteredBean;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.ImportRuntimeHints;
import org.springframework.core.ResolvableType;
import org.springframework.util.ClassUtils;
import org.springframework.util.ReflectionUtils;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
* This configuration will move to mybatis-spring-native.
*/
@Configuration(proxyBeanMethods = false)
@ImportRuntimeHints(MyBatisNativeConfiguration.MyBaitsRuntimeHintsRegistrar.class)
public class MyBatisNativeConfiguration {
@Bean
MyBatisBeanFactoryInitializationAotProcessor myBatisBeanFactoryInitializationAotProcessor() {
return new MyBatisBeanFactoryInitializationAotProcessor();
}
@Bean
static MyBatisMapperFactoryBeanPostProcessor myBatisMapperFactoryBeanPostProcessor() {
return new MyBatisMapperFactoryBeanPostProcessor();
}
static class MyBaitsRuntimeHintsRegistrar implements RuntimeHintsRegistrar {
@Override
public void registerHints(RuntimeHints hints, ClassLoader classLoader) {
Stream.of(RawLanguageDriver.class,
              // TODO Added MybatisXMLLanguageDriver.class
XMLLanguageDriver.class, MybatisXMLLanguageDriver.class,
RuntimeSupport.class,
ProxyFactory.class,
Slf4jImpl.class,
Log.class,
JakartaCommonsLoggingImpl.class,
Log4j2Impl.class,
Jdk14LoggingImpl.class,
StdOutImpl.class,
NoLoggingImpl.class,
SqlSessionFactory.class,
PerpetualCache.class,
FifoCache.class,
LruCache.class,
SoftCache.class,
WeakCache.class,
              // TODO Added MybatisSqlSessionFactoryBean.class
SqlSessionFactoryBean.class, MybatisSqlSessionFactoryBean.class,
ArrayList.class,
HashMap.class,
TreeSet.class,
HashSet.class
).forEach(x -> hints.reflection().registerType(x, MemberCategory.values()));
Stream.of(
"org/apache/ibatis/builder/xml/*.dtd",
"org/apache/ibatis/builder/xml/*.xsd"
).forEach(hints.resources()::registerPattern);
hints.serialization().registerType(SerializedLambda.class);
hints.serialization().registerType(SFunction.class);
hints.serialization().registerType(java.lang.invoke.SerializedLambda.class);
hints.reflection().registerType(SFunction.class);
hints.reflection().registerType(SerializedLambda.class);
hints.reflection().registerType(java.lang.invoke.SerializedLambda.class);
hints.proxies().registerJdkProxy(StatementHandler.class);
hints.proxies().registerJdkProxy(Executor.class);
hints.proxies().registerJdkProxy(ResultSetHandler.class);
hints.proxies().registerJdkProxy(ParameterHandler.class);
// hints.reflection().registerType(MybatisPlusInterceptor.class);
hints.reflection().registerType(AbstractWrapper.class,MemberCategory.values());
hints.reflection().registerType(LambdaQueryWrapper.class,MemberCategory.values());
hints.reflection().registerType(LambdaUpdateWrapper.class,MemberCategory.values());
hints.reflection().registerType(UpdateWrapper.class,MemberCategory.values());
hints.reflection().registerType(QueryWrapper.class,MemberCategory.values());
hints.reflection().registerType(BoundSql.class,MemberCategory.DECLARED_FIELDS);
hints.reflection().registerType(RoutingStatementHandler.class,MemberCategory.DECLARED_FIELDS);
hints.reflection().registerType(BaseStatementHandler.class,MemberCategory.DECLARED_FIELDS);
hints.reflection().registerType(MybatisParameterHandler.class,MemberCategory.DECLARED_FIELDS);
hints.reflection().registerType(IEnum.class,MemberCategory.INVOKE_PUBLIC_METHODS);
// register typeHandler
hints.reflection().registerType(CompositeEnumTypeHandler.class, MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS);
hints.reflection().registerType(FastjsonTypeHandler.class, MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS);
hints.reflection().registerType(GsonTypeHandler.class, MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS);
hints.reflection().registerType(JacksonTypeHandler.class, MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS);
hints.reflection().registerType(MybatisEnumTypeHandler.class, MemberCategory.INVOKE_PUBLIC_CONSTRUCTORS);
}
}
static class MyBatisBeanFactoryInitializationAotProcessor
implements BeanFactoryInitializationAotProcessor, BeanRegistrationExcludeFilter {
private final Set<Class<?>> excludeClasses = new HashSet<>();
MyBatisBeanFactoryInitializationAotProcessor() {
excludeClasses.add(MapperScannerConfigurer.class);
}
@Override public boolean isExcludedFromAotProcessing(RegisteredBean registeredBean) {
return excludeClasses.contains(registeredBean.getBeanClass());
}
@Override
public BeanFactoryInitializationAotContribution processAheadOfTime(ConfigurableListableBeanFactory beanFactory) {
String[] beanNames = beanFactory.getBeanNamesForType(MapperFactoryBean.class);
if (beanNames.length == 0) {
return null;
}
return (context, code) -> {
RuntimeHints hints = context.getRuntimeHints();
for (String beanName : beanNames) {
BeanDefinition beanDefinition = beanFactory.getBeanDefinition(beanName.substring(1));
PropertyValue mapperInterface = beanDefinition.getPropertyValues().getPropertyValue("mapperInterface");
if (mapperInterface != null && mapperInterface.getValue() != null) {
Class<?> mapperInterfaceType = (Class<?>) mapperInterface.getValue();
if (mapperInterfaceType != null) {
registerReflectionTypeIfNecessary(mapperInterfaceType, hints);
hints.proxies().registerJdkProxy(mapperInterfaceType);
hints.resources()
.registerPattern(mapperInterfaceType.getName().replace('.', '/').concat(".xml"));
registerMapperRelationships(mapperInterfaceType, hints);
}
}
}
};
}
private void registerMapperRelationships(Class<?> mapperInterfaceType, RuntimeHints hints) {
Method[] methods = ReflectionUtils.getAllDeclaredMethods(mapperInterfaceType);
for (Method method : methods) {
if (method.getDeclaringClass() != Object.class) {
ReflectionUtils.makeAccessible(method);
registerSqlProviderTypes(method, hints, SelectProvider.class, SelectProvider::value, SelectProvider::type);
registerSqlProviderTypes(method, hints, InsertProvider.class, InsertProvider::value, InsertProvider::type);
registerSqlProviderTypes(method, hints, UpdateProvider.class, UpdateProvider::value, UpdateProvider::type);
registerSqlProviderTypes(method, hints, DeleteProvider.class, DeleteProvider::value, DeleteProvider::type);
Class<?> returnType = MyBatisMapperTypeUtils.resolveReturnClass(mapperInterfaceType, method);
registerReflectionTypeIfNecessary(returnType, hints);
MyBatisMapperTypeUtils.resolveParameterClasses(mapperInterfaceType, method)
.forEach(x -> registerReflectionTypeIfNecessary(x, hints));
}
}
}
@SafeVarargs
private <T extends Annotation> void registerSqlProviderTypes(
Method method, RuntimeHints hints, Class<T> annotationType, Function<T, Class<?>>... providerTypeResolvers) {
for (T annotation : method.getAnnotationsByType(annotationType)) {
for (Function<T, Class<?>> providerTypeResolver : providerTypeResolvers) {
registerReflectionTypeIfNecessary(providerTypeResolver.apply(annotation), hints);
}
}
}
private void registerReflectionTypeIfNecessary(Class<?> type, RuntimeHints hints) {
if (!type.isPrimitive() && !type.getName().startsWith("java")) {
hints.reflection().registerType(type, MemberCategory.values());
}
}
}
static class MyBatisMapperTypeUtils {
private MyBatisMapperTypeUtils() {
// NOP
}
static Class<?> resolveReturnClass(Class<?> mapperInterface, Method method) {
Type resolvedReturnType = TypeParameterResolver.resolveReturnType(method, mapperInterface);
return typeToClass(resolvedReturnType, method.getReturnType());
}
static Set<Class<?>> resolveParameterClasses(Class<?> mapperInterface, Method method) {
return Stream.of(TypeParameterResolver.resolveParamTypes(method, mapperInterface))
.map(x -> typeToClass(x, x instanceof Class ? (Class<?>) x : Object.class)).collect(Collectors.toSet());
}
private static Class<?> typeToClass(Type src, Class<?> fallback) {
Class<?> result = null;
if (src instanceof Class<?>) {
if (((Class<?>) src).isArray()) {
result = ((Class<?>) src).getComponentType();
} else {
result = (Class<?>) src;
}
} else if (src instanceof ParameterizedType) {
ParameterizedType parameterizedType = (ParameterizedType) src;
int index = (parameterizedType.getRawType() instanceof Class
&& Map.class.isAssignableFrom((Class<?>) parameterizedType.getRawType())
&& parameterizedType.getActualTypeArguments().length > 1) ? 1 : 0;
Type actualType = parameterizedType.getActualTypeArguments()[index];
result = typeToClass(actualType, fallback);
}
if (result == null) {
result = fallback;
}
return result;
}
}
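  // Illustrative note (not part of the original source): typeToClass() above resolves the
  // class that actually needs reflection hints. Assuming a hypothetical Message entity,
  // a mapper method returning List<Message> resolves to Message.class, Map<String, Message>
  // resolves to the value type Message.class (type-argument index 1), and Message[]
  // resolves to its component type Message.class.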
static class MyBatisMapperFactoryBeanPostProcessor implements MergedBeanDefinitionPostProcessor, BeanFactoryAware {
private static final org.apache.commons.logging.Log LOG = LogFactory.getLog(
MyBatisMapperFactoryBeanPostProcessor.class);
private static final String MAPPER_FACTORY_BEAN = "org.mybatis.spring.mapper.MapperFactoryBean";
private ConfigurableBeanFactory beanFactory;
@Override
public void setBeanFactory(BeanFactory beanFactory) {
this.beanFactory = (ConfigurableBeanFactory) beanFactory;
}
@Override
public void postProcessMergedBeanDefinition(RootBeanDefinition beanDefinition, Class<?> beanType, String beanName) {
if (ClassUtils.isPresent(MAPPER_FACTORY_BEAN, this.beanFactory.getBeanClassLoader())) {
resolveMapperFactoryBeanTypeIfNecessary(beanDefinition);
}
}
private void resolveMapperFactoryBeanTypeIfNecessary(RootBeanDefinition beanDefinition) {
if (!beanDefinition.hasBeanClass() || !MapperFactoryBean.class.isAssignableFrom(beanDefinition.getBeanClass())) {
return;
}
if (beanDefinition.getResolvableType().hasUnresolvableGenerics()) {
Class<?> mapperInterface = getMapperInterface(beanDefinition);
if (mapperInterface != null) {
          // Expose generic type information to the context to prevent early initialization.
ConstructorArgumentValues constructorArgumentValues = new ConstructorArgumentValues();
constructorArgumentValues.addGenericArgumentValue(mapperInterface);
beanDefinition.setConstructorArgumentValues(constructorArgumentValues);
beanDefinition.setTargetType(ResolvableType.forClassWithGenerics(beanDefinition.getBeanClass(), mapperInterface));
}
}
}
private Class<?> getMapperInterface(RootBeanDefinition beanDefinition) {
try {
return (Class<?>) beanDefinition.getPropertyValues().get("mapperInterface");
}
catch (Exception e) {
LOG.debug("Fail getting mapper interface type.", e);
return null;
}
}
}
}
| mybatis-native-demo/src/main/java/com/example/nativedemo/MyBatisNativeConfiguration.java/0 | {
"file_path": "mybatis-native-demo/src/main/java/com/example/nativedemo/MyBatisNativeConfiguration.java",
"repo_id": "mybatis-native-demo",
"token_count": 5694
} | 145 |
CREATE TABLE messages
(
id bigint auto_increment,
message text,
user_id bigint,
message_type varchar(20),
create_user varchar(50),
update_user varchar(50)
);
CREATE TABLE sys_user
(
id bigint auto_increment,
username varchar(50)
); | mybatis-native-demo/target/classes/schema.sql/0 | {
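-- Illustrative sample rows (hypothetical values, not part of the original schema);
-- kept commented out so this script remains DDL-only.
-- INSERT INTO sys_user (username) VALUES ('alice');
-- INSERT INTO messages (message, user_id, message_type, create_user, update_user)
--     VALUES ('hello', 1, 'TEXT', 'alice', 'alice');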
"file_path": "mybatis-native-demo/target/classes/schema.sql",
"repo_id": "mybatis-native-demo",
"token_count": 139
} | 146 |
{
"bundles": [
{
"name": "i18n.General"
},
{
"name": "i18n.Parsing"
},
{
"name": "i18n.Scalars"
},
{
"name": "i18n.Validation"
}
],
"resources": {
"includes": []
}
}
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.graphql-java/graphql-java/19.2/resource-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/com.graphql-java/graphql-java/19.2/resource-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 141
} | 147 |
[
{
"condition": {
"typeReachable": "io.grpc.netty.NettyServer"
},
"name": "io.netty.buffer.AbstractByteBufAllocator",
"allDeclaredMethods": true
},
{
"condition": {
"typeReachable": "io.grpc.netty.NettyChannelBuilder"
},
"name": "io.netty.channel.socket.nio.NioSocketChannel",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.grpc/grpc-netty/1.51.0/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.grpc/grpc-netty/1.51.0/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 208
} | 148 |
[
{
"latest": true,
"metadata-version": "2.16.11",
"module": "io.nats:jnats",
"tested-versions": [
"2.16.11"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.nats/jnats/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.nats/jnats/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 80
} | 149 |
[
{
"condition": {
"typeReachable": "io.opentelemetry.exporter.jaeger.JaegerGrpcSpanExporter"
},
"name": "io.opentelemetry.exporter.jaeger.JaegerGrpcSpanExporter"
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-jaeger/1.19.0/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-exporter-jaeger/1.19.0/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 91
} | 150 |
[
{
"latest": false,
"override": true,
"metadata-version": "1.19.0",
"module": "io.opentelemetry:opentelemetry-sdk-trace",
"tested-versions": [
"1.19.0"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-sdk-trace/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/io.opentelemetry/opentelemetry-sdk-trace/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 98
} | 151 |
[
"jni-config.json",
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/net.java.dev.jna/jna/5.8.0/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/net.java.dev.jna/jna/5.8.0/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 32
} | 152 |
[
{
"latest": true,
"metadata-version": "2.28.0",
"module": "org.apache.activemq:artemis-jms-client",
"tested-versions": [
"2.28.0"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.activemq/artemis-jms-client/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.activemq/artemis-jms-client/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 87
} | 153 |
[
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.tomcat.embed/tomcat-embed-core/10.0.20/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.apache.tomcat.embed/tomcat-embed-core/10.0.20/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 22
} | 154 |
[
{
"condition": {
"typeReachable": "org.eclipse.jetty.server.ForwardedRequestCustomizer"
},
"name": "org.eclipse.jetty.server.ForwardedRequestCustomizer$Forwarded",
"allDeclaredConstructors": true,
"allDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.common.decoders.StringDecoder"
},
"name": "org.eclipse.jetty.websocket.jakarta.common.decoders.StringDecoder",
"allDeclaredConstructors": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.StandardDescriptorProcessor"
},
"name": "[Ljava.lang.String;"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.ArrayUtil"
},
"name": "[Lorg.eclipse.jetty.servlet.FilterHolder;"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.xml.XmlParser"
},
"name": "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.security.SecurityHandler"
},
"name": "jakarta.servlet.GenericServlet",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.servlet.ServletHolder"
},
"name": "jakarta.servlet.GenericServlet",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.security.SecurityHandler"
},
"name": "jakarta.servlet.http.HttpServlet",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.servlet.ServletHolder"
},
"name": "jakarta.servlet.http.HttpServlet",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.TypeUtil"
},
"name": "java.lang.Boolean",
"queriedMethods": [
{
"name": "valueOf",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.TypeUtil"
},
"name": "java.lang.Byte",
"queriedMethods": [
{
"name": "valueOf",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.TypeUtil"
},
"name": "java.lang.Double",
"queriedMethods": [
{
"name": "valueOf",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.TypeUtil"
},
"name": "java.lang.Float",
"queriedMethods": [
{
"name": "valueOf",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.TypeUtil"
},
"name": "java.lang.Integer",
"queriedMethods": [
{
"name": "valueOf",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.TypeUtil"
},
"name": "java.lang.Long",
"queriedMethods": [
{
"name": "valueOf",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.server.HttpChannel"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.server.HttpConnection"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.server.Server"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.server.handler.ContextHandler"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.thread.QueuedThreadPool"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.thread.strategy.AdaptiveExecutionStrategy"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.client.impl.JettyClientUpgradeRequest"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.common.JettyWebSocketFrameHandlerFactory"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.core.internal.util.ReflectUtils"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.core.server.internal.AbstractHandshaker"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.core.server.internal.CreatorNegotiator"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.core.server.internal.HandshakerSelector"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.common.JakartaWebSocketFrameHandlerFactory"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.server.internal.JakartaWebSocketServerContainer"
},
"name": "java.lang.Object",
"queryAllDeclaredMethods": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.server.internal.JakartaWebSocketServerFrameHandlerFactory"
},
"name": "java.lang.Object"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.TypeUtil"
},
"name": "java.lang.Short",
"queriedMethods": [
{
"name": "valueOf",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.Uptime$DefaultImpl"
},
"name": "java.lang.management.ManagementFactory",
"methods": [
{
"name": "getRuntimeMXBean",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.Uptime$DefaultImpl"
},
"name": "java.lang.management.RuntimeMXBean",
"methods": [
{
"name": "getUptime",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.server.session.DefaultSessionIdManager"
},
"name": "java.security.SecureRandomParameters"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.ssl.SslContextFactory"
},
"name": "java.security.SecureRandomParameters"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.thread.QueuedThreadPool"
},
"name": "java.security.SecureRandomParameters"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.core.internal.WebSocketConnection"
},
"name": "java.security.SecureRandomParameters"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.annotations.AnnotationConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.JndiConfiguration"
},
"name": "org.eclipse.jetty.jndi.InitialContextFactory"
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.plus.webapp.EnvConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.plus.webapp.PlusConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.servlet.ServletContextHandler"
},
"name": "org.eclipse.jetty.security.ConstraintSecurityHandler",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.servlet.ServletHolder"
},
"name": "org.eclipse.jetty.servlet.DefaultServlet",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.servlet.ServletHolder"
},
"name": "org.eclipse.jetty.servlet.NoJspServlet",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.thread.QueuedThreadPool"
},
"name": "org.eclipse.jetty.servlet.ServletHandler$Default404Servlet",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.servlet.ListenerHolder"
},
"name": "org.eclipse.jetty.servlet.listener.ELContextCleaner",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.servlet.ListenerHolder"
},
"name": "org.eclipse.jetty.servlet.listener.IntrospectorCleaner",
"allDeclaredFields": true,
"queryAllDeclaredMethods": true,
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.TypeUtil"
},
"name": "org.eclipse.jetty.util.TypeUtil",
"methods": [
{
"name": "getClassLoaderLocation",
"parameterTypes": [
"java.lang.Class"
]
},
{
"name": "getCodeSourceLocation",
"parameterTypes": [
"java.lang.Class"
]
},
{
"name": "getModuleLocation",
"parameterTypes": [
"java.lang.Class"
]
},
{
"name": "getSystemClassLoaderLocation",
"parameterTypes": [
"java.lang.Class"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.IncludeExcludeSet"
},
"name": "org.eclipse.jetty.webapp.ClassMatcher$ByLocationOrModule",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.IncludeExcludeSet"
},
"name": "org.eclipse.jetty.webapp.ClassMatcher$ByPackageOrName",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.webapp.FragmentConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.webapp.JettyWebXmlConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.webapp.JndiConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.webapp.MetaInfConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.IterativeDescriptorProcessor"
},
"name": "org.eclipse.jetty.webapp.StandardDescriptorProcessor",
"methods": [
{
"name": "visitListener",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitLocaleEncodingList",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitSecurityConstraint",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitServlet",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitServletMapping",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitSessionConfig",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitWelcomeFileList",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.StandardDescriptorProcessor"
},
"name": "org.eclipse.jetty.webapp.StandardDescriptorProcessor",
"queriedMethods": [
{
"name": "visitContextParam",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitDefaultContextPath",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitDenyUncoveredHttpMethods",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitDisplayName",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitErrorPage",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitFilter",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitFilterMapping",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitJspConfig",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitListener",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitLocaleEncodingList",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitLoginConfig",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitMimeMapping",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitRequestCharacterEncoding",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitResponseCharacterEncoding",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitSecurityConstraint",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitSecurityRole",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitServlet",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitServletMapping",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitSessionConfig",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitTagLib",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
},
{
"name": "visitWelcomeFileList",
"parameterTypes": [
"org.eclipse.jetty.webapp.WebAppContext",
"org.eclipse.jetty.webapp.Descriptor",
"org.eclipse.jetty.xml.XmlParser$Node"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.webapp.WebAppConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.webapp.WebInfConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.webapp.WebXmlConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.websocket.client.config.JettyWebSocketClientConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.common.JettyWebSocketFrameHandlerFactory"
},
"name": "org.eclipse.jetty.websocket.core.internal.messages.ByteArrayMessageSink",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.eclipse.jetty.websocket.core.CoreSession",
"java.lang.invoke.MethodHandle"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.common.JettyWebSocketFrameHandlerFactory"
},
"name": "org.eclipse.jetty.websocket.core.internal.messages.StringMessageSink",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.eclipse.jetty.websocket.core.CoreSession",
"java.lang.invoke.MethodHandle"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.common.JakartaWebSocketFrameHandlerFactory"
},
"name": "org.eclipse.jetty.websocket.jakarta.common.JakartaWebSocketSession",
"methods": [
{
"name": "filterReturnType",
"parameterTypes": [
"java.lang.Object"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.common.JakartaWebSocketFrameHandlerFactory"
},
"name": "org.eclipse.jetty.websocket.jakarta.common.messages.DecodedBinaryMessageSink",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.eclipse.jetty.websocket.core.CoreSession",
"java.lang.invoke.MethodHandle",
"java.util.List"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.common.messages.DecodedBinaryMessageSink"
},
"name": "org.eclipse.jetty.websocket.jakarta.common.messages.DecodedBinaryMessageSink",
"methods": [
{
"name": "onWholeMessage",
"parameterTypes": [
"java.nio.ByteBuffer"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.common.JakartaWebSocketFrameHandlerFactory"
},
"name": "org.eclipse.jetty.websocket.jakarta.common.messages.DecodedTextMessageSink",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"org.eclipse.jetty.websocket.core.CoreSession",
"java.lang.invoke.MethodHandle",
"java.util.List"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.common.messages.DecodedTextMessageSink"
},
"name": "org.eclipse.jetty.websocket.jakarta.common.messages.DecodedTextMessageSink",
"methods": [
{
"name": "onMessage",
"parameterTypes": [
"java.lang.String"
]
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.websocket.jakarta.server.config.JakartaWebSocketConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.webapp.Configurations"
},
"name": "org.eclipse.jetty.websocket.server.config.JettyWebSocketConfiguration",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.annotations.AnnotationParser"
},
"name": "org.objectweb.asm.Opcodes",
"allPublicFields": true
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.server.session.DefaultSessionIdManager"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.ssl.SslContextFactory"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.thread.QueuedThreadPool"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.core.internal.WebSocketConnection"
},
"name": "sun.security.provider.NativePRNG",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.core.internal.WebSocketCore"
},
"name": "sun.security.provider.SHA",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.ssl.SslContextFactory"
},
"name": "sun.security.ssl.SSLContextImpl$TLSContext",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.util.ssl.SslContextFactory"
},
"name": "sun.security.ssl.TrustManagerFactoryImpl$PKIXFactory",
"methods": [
{
"name": "<init>",
"parameterTypes": []
}
]
},
{
"condition": {
"typeReachable": "org.eclipse.jetty.websocket.jakarta.client.internal.JakartaWebSocketClientContainer"
},
"name": "org.eclipse.jetty.server.handler.ContextHandler",
"methods": [
{
"name": "getCurrentContext",
"parameterTypes": []
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jetty/jetty-server/11.0.12/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.eclipse.jetty/jetty-server/11.0.12/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 13039
} | 155 |
[
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.lang.Boolean",
"methods": [
{
"name": "<init>",
"parameterTypes": [
"boolean"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.jsr107.Eh107CacheManager"
},
"name": "java.util.Arrays",
"methods": [
{
"name": "asList",
"parameterTypes": [
"java.lang.Object[]"
]
}
]
},
{
"condition": {
"typeReachable": "org.ehcache.sizeof.impl.JvmInformation"
},
"name": "java.util.Arrays",
"methods": [
{
"name": "asList",
"parameterTypes": [
"java.lang.Object[]"
]
}
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/jni-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.ehcache/ehcache/3.10.8-jakarta/jni-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 432
} | 156 |
{
"bundles": [],
"resources": {
"includes": [
{
"pattern": "\\Qfreemarker/ext/beans/DefaultMemberAccessPolicy-rules\\E",
"condition": {
"typeReachable": "freemarker.ext.beans.DefaultMemberAccessPolicy"
}
},
{
"pattern": "\\Qfreemarker/version.properties\\E",
"condition": {
"typeReachable": "freemarker.template.utility.ClassUtil"
}
}
]
}
}
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.freemarker/freemarker/2.3.31/resource-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.freemarker/freemarker/2.3.31/resource-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 218
} | 157 |
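The two "pattern" values in the FreeMarker resource-config record above are Java regular expressions in which \Q...\E quotes the enclosed resource path as a literal. A minimal sketch (the class name is illustrative and not part of the demo project) showing that java.util.regex.Pattern.quote produces exactly this form:

import java.util.regex.Pattern;

// Sketch only: the "pattern" fields in a GraalVM resource-config.json are Java
// regular expressions, and \Q...\E marks the enclosed resource path as a literal,
// which is the same form that Pattern.quote(...) produces.
public class ResourcePatternCheck {
    public static void main(String[] args) {
        String quoted = Pattern.quote("freemarker/version.properties");
        System.out.println(quoted);                                                   // \Qfreemarker/version.properties\E
        System.out.println(Pattern.matches(quoted, "freemarker/version.properties")); // true
    }
}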
[
{
"latest": true,
"metadata-version": "6.2.0.Final",
"module": "org.hibernate.orm:hibernate-core",
"tested-versions": [
"6.2.0.Final",
"6.2.2.Final"
]
},
{
"metadata-version": "6.1.1.Final",
"default-for": "6\\.1\\..*",
"module": "org.hibernate.orm:hibernate-core",
"tested-versions": [
"6.1.1.Final"
]
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate.orm/hibernate-core/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate.orm/hibernate-core/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 202
} | 158 |
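In the hibernate-core index record above, the entry marked "latest" covers the tested 6.2.x releases, while "default-for" attaches the 6.1.1.Final metadata to a range of versions. A small sketch (illustrative class name), assuming "default-for" holds an ordinary Java regex over artifact versions, as the escaped value suggests:

import java.util.regex.Pattern;

// Sketch only, under the assumption that "default-for" is a plain Java regex:
// the 6.1.1.Final metadata would be selected for 6.1.x releases but not for 6.2.x.
public class DefaultForCheck {
    public static void main(String[] args) {
        Pattern defaultFor = Pattern.compile("6\\.1\\..*");
        System.out.println(defaultFor.matcher("6.1.1.Final").matches()); // true
        System.out.println(defaultFor.matcher("6.2.0.Final").matches()); // false
    }
}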
[
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.AutoFlushEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.DeleteEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.DirtyCheckEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.EvictEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.FlushEntityEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.FlushEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.InitializeCollectionEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.LoadEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.LockEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.MergeEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.PersistEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.PostDeleteEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.PostInsertEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.PostLoadEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.PostUpdateEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.PreLoadEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.RefreshEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.ReplicateEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.ResolveNaturalIdEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.event.service.internal.EventListenerGroupImpl"},
"name":"[Lorg.hibernate.event.spi.SaveOrUpdateEventListener;"
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"antlr.CommonToken",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.annotations.common.util.impl.LoggerFactory"},
"name":"org.hibernate.annotations.common.util.impl.Log_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.internal.MetadataBuilderImpl"},
"name":"org.hibernate.boot.cfgxml.internal.CfgXmlAccessServiceImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.boot.internal.MetadataImpl"},
"name":"org.hibernate.boot.internal.DefaultSessionFactoryBuilderService",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.boot.internal.MetadataBuilderImpl$MetadataBuildingOptionsImpl$4"},
"name":"org.hibernate.boot.model.naming.ImplicitNamingStrategyJpaCompliantImpl",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.bytecode.enhance.spi.interceptor.BytecodeInterceptorLogging"},
"name":"org.hibernate.bytecode.enhance.spi.interceptor.BytecodeInterceptorLogging_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.internal.SessionFactoryBuilderImpl"},
"name":"org.hibernate.bytecode.internal.none.BytecodeProviderImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.tuple.entity.PojoEntityTuplizer"},
"name":"org.hibernate.bytecode.internal.none.NoProxyFactoryFactory",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.internal.SessionFactoryImpl"},
"name":"org.hibernate.cache.internal.DisabledCaching",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.boot.internal.MetadataBuilderImpl$MetadataBuildingOptionsImpl"},
"name":"org.hibernate.cache.internal.NoCachingRegionFactory",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.cfg.beanvalidation.BeanValidationIntegrator"},
"name":"org.hibernate.cfg.beanvalidation.TypeSafeActivator",
"methods":[{"name":"activate","parameterTypes":["org.hibernate.cfg.beanvalidation.ActivationContext"] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.H2Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl"},
"name":"org.hibernate.dialect.H2Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MariaDB102Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MariaDB102Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MariaDB103Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MariaDB103Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl"},
"name":"org.hibernate.dialect.MariaDB103Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MariaDB106Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MariaDB106Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MariaDB10Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MariaDB10Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MariaDB53Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MariaDB53Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MariaDBDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MariaDBDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MySQL55Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MySQL55Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MySQL57Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MySQL57Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MySQL5Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MySQL5Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MySQL8Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MySQL8Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl"},
"name":"org.hibernate.dialect.MySQL8Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.MySQLDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.MySQLDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.Oracle10gDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.Oracle10gDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl"},
"name":"org.hibernate.dialect.Oracle10gDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.Oracle12cDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.Oracle12cDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.Oracle8iDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.Oracle8iDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.Oracle9Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.Oracle9Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.Oracle9iDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.Oracle9iDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.OracleDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.OracleDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL10Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL10Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL81Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL81Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL82Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL82Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL91Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL91Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL92Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL92Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL93Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL93Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL94Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL94Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL95Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL95Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl"},
"name":"org.hibernate.dialect.PostgreSQL95Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgreSQL9Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgreSQL9Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.PostgresPlusDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.PostgresPlusDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.SQLServer2005Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.SQLServer2005Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.SQLServer2008Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.SQLServer2008Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.SQLServer2012Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.SQLServer2012Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl"},
"name":"org.hibernate.dialect.SQLServer2012Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.SQLServer2016Dialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.SQLServer2016Dialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.classloading.internal.ClassLoaderServiceImpl"},
"name":"org.hibernate.dialect.SQLServerDialect"
},
{
"condition":{"typeReachable":"org.hibernate.boot.registry.selector.internal.StrategySelectorImpl"},
"name":"org.hibernate.dialect.SQLServerDialect",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.internal.MetadataBuilderImpl$MetadataBuildingOptionsImpl"},
"name":"org.hibernate.engine.config.internal.ConfigurationServiceImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.internal.JdbcCoordinatorImpl"},
"name":"org.hibernate.engine.jdbc.batch.internal.BatchBuilderImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.internal.SessionFactoryImpl"},
"name":"org.hibernate.engine.jdbc.connections.internal.DriverManagerConnectionProviderImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.env.internal.JdbcEnvironmentInitiator"},
"name":"org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.dialect.internal.DialectFactoryImpl"},
"name":"org.hibernate.engine.jdbc.dialect.internal.DialectResolverSet",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.engine.jdbc.env.internal.JdbcEnvironmentImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.boot.model.relational.Database"},
"name":"org.hibernate.engine.jdbc.internal.JdbcServicesImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.internal.SessionFactoryImpl"},
"name":"org.hibernate.engine.jndi.internal.JndiServiceImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.engine.query.spi.QueryPlanCache"},
"name":"org.hibernate.engine.query.internal.NativeQueryInterpreterStandardImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.internal.SessionFactoryImpl"},
"name":"org.hibernate.engine.transaction.jta.platform.internal.NoJtaPlatform",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.engine.transaction.jta.platform.internal.JtaPlatformInitiator"},
"name":"org.hibernate.engine.transaction.jta.platform.internal.StandardJtaPlatformResolver",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.event.spi.EventType$1"},
"name":"org.hibernate.event.spi.EventType",
"allDeclaredFields":true
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.ast.util.TokenPrinters"},
"name":"org.hibernate.hql.internal.antlr.HqlTokenTypes",
"allPublicFields":true
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.ast.util.ASTUtil"},
"name":"org.hibernate.hql.internal.antlr.SqlTokenTypes",
"allPublicFields":true
},
{
"condition":{"typeReachable":"org.hibernate.engine.query.spi.HQLQueryPlan"},
"name":"org.hibernate.hql.internal.ast.ASTQueryTranslatorFactory",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.ast.HqlLexer"},
"name":"org.hibernate.hql.internal.ast.HqlToken",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.HqlToken",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.AggregateNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.BetweenOperatorNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.BinaryArithmeticOperatorNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.BinaryLogicOperatorNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.BooleanLiteralNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.CastFunctionNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.CollectionFunction",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.ConstructorNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.CountNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.DeleteStatement",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.DotNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.FromClause",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.FromElement",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.HqlSqlWalkerNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.IdentNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.ImpliedFromElement",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.InLogicOperatorNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.IndexNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.InsertStatement",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.IntoClause",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.IsNotNullLogicOperatorNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.IsNullLogicOperatorNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.JavaConstantNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.LiteralNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.MapEntryNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.MapKeyNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.MapValueNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.MethodNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.antlr.HqlBaseParser"},
"name":"org.hibernate.hql.internal.ast.tree.Node",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.Node",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.NullNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.OrderByClause",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.ParameterNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.QueryNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.ResultVariableRefNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.SearchedCaseNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.SelectClause",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.SelectExpressionImpl",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.SimpleCaseNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.SqlFragment",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.SqlNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.UnaryArithmeticNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.UnaryLogicOperatorNode",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.hql.internal.ast.tree.UpdateStatement",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.Assigned",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.ForeignGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.GUIDGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.IdentityGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.IncrementGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.SelectGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.SequenceHiLoGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.SequenceIdentityGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.UUIDGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.UUIDHexGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.OptimizerFactory"},
"name":"org.hibernate.id.enhanced.HiLoOptimizer",
"methods":[{"name":"<init>","parameterTypes":["java.lang.Class","int"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.OptimizerFactory"},
"name":"org.hibernate.id.enhanced.LegacyHiLoAlgorithmOptimizer",
"methods":[{"name":"<init>","parameterTypes":["java.lang.Class","int"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.OptimizerFactory"},
"name":"org.hibernate.id.enhanced.NoopOptimizer",
"methods":[{"name":"<init>","parameterTypes":["java.lang.Class","int"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.OptimizerFactory"},
"name":"org.hibernate.id.enhanced.PooledLoOptimizer",
"methods":[{"name":"<init>","parameterTypes":["java.lang.Class","int"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.OptimizerFactory"},
"name":"org.hibernate.id.enhanced.PooledLoThreadLocalOptimizer",
"methods":[{"name":"<init>","parameterTypes":["java.lang.Class","int"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.OptimizerFactory"},
"name":"org.hibernate.id.enhanced.PooledOptimizer",
"methods":[{"name":"<init>","parameterTypes":["java.lang.Class","int"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.enhanced.SequenceStyleGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.id.enhanced.TableGenerator",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.boot.internal.InFlightMetadataCollectorImpl"},
"name":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.MultiTenancyStrategy"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.Version"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.CollectionSecondPass"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.Ejb3Column"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.Environment"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.PropertyContainer"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.annotations.CollectionBinder"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.annotations.EntityBinder"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.annotations.PropertyBinder"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.annotations.QueryBinder"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.annotations.SimpleValueBinder"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.annotations.TableBinder"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.beanvalidation.BeanValidationIntegrator"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.beanvalidation.TypeSafeActivator"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.dialect.H2Dialect"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.dialect.Oracle9Dialect"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.dialect.OracleDialect"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.dialect.function.TemplateRenderer"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.config.internal.ConfigurationServiceImpl"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.internal.Collections"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.internal.StatefulPersistenceContext"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.internal.TwoPhaseLoad"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.internal.Versioning"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.batch.internal.AbstractBatchImpl"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.batch.internal.NonBatchingBatch"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.env.internal.JdbcEnvironmentInitiator"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.env.internal.LobCreatorBuilderImpl"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jdbc.spi.SqlExceptionHelper"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.jndi.internal.JndiServiceImpl"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.spi.CascadingActions"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.engine.transaction.jta.platform.internal.JtaPlatformInitiator"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.event.internal.AbstractFlushingEventListener"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.event.internal.DefaultAutoFlushEventListener"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.event.internal.DefaultLockEventListener"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.antlr.HqlSqlBaseWalker"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.ast.ErrorTracker"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.ast.QueryTranslatorImpl"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.ast.util.LiteralProcessor"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.OptimizerFactory"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.PooledLoOptimizer"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.PooledLoThreadLocalOptimizer"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.PooledOptimizer"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.id.enhanced.TableGenerator"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.internal.CoreLogging"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.persister.collection.AbstractCollectionPersister"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.stat.internal.StatisticsInitiator"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.type.CollectionType"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.type.DbTimestampType"},
"name":"org.hibernate.internal.CoreMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.internal.HEMLogging"},
"name":"org.hibernate.internal.EntityManagerMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.internal.util.LogHelper"},
"name":"org.hibernate.internal.EntityManagerMessageLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.internal.log.ConnectionAccessLogger"},
"name":"org.hibernate.internal.log.ConnectionAccessLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.internal.log.ConnectionPoolingLogger"},
"name":"org.hibernate.internal.log.ConnectionPoolingLogger_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.internal.log.UrlMessageBundle_$logger",
"methods":[{"name":"<init>","parameterTypes":["org.jboss.logging.Logger"] }]
},
{
"condition":{"typeReachable":"org.hibernate.service.internal.AbstractServiceRegistryImpl"},
"name":"org.hibernate.jmx.internal.DisabledJmxServiceImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.persister.internal.PersisterFactoryImpl"},
"name":"org.hibernate.persister.collection.BasicCollectionPersister",
"methods":[{"name":"<init>","parameterTypes":["org.hibernate.mapping.Collection","org.hibernate.cache.spi.access.CollectionDataAccess","org.hibernate.persister.spi.PersisterCreationContext"] }]
},
{
"condition":{"typeReachable":"org.hibernate.persister.internal.PersisterFactoryImpl"},
"name":"org.hibernate.persister.collection.OneToManyPersister",
"methods":[{"name":"<init>","parameterTypes":["org.hibernate.mapping.Collection","org.hibernate.cache.spi.access.CollectionDataAccess","org.hibernate.persister.spi.PersisterCreationContext"] }]
},
{
"condition":{"typeReachable":"org.hibernate.persister.internal.PersisterFactoryImpl"},
"name":"org.hibernate.persister.entity.SingleTableEntityPersister",
"methods":[{"name":"<init>","parameterTypes":["org.hibernate.mapping.PersistentClass","org.hibernate.cache.spi.access.EntityDataAccess","org.hibernate.cache.spi.access.NaturalIdDataAccess","org.hibernate.persister.spi.PersisterCreationContext"] }]
},
{
"condition":{"typeReachable":"org.hibernate.metamodel.internal.MetamodelImpl"},
"name":"org.hibernate.persister.internal.PersisterFactoryImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.persister.internal.PersisterFactoryImpl"},
"name":"org.hibernate.persister.internal.StandardPersisterClassResolver",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.tuple.PropertyFactory"},
"name":"org.hibernate.property.access.internal.PropertyAccessStrategyResolverStandardImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.id.factory.internal.DefaultIdentifierGeneratorFactory"},
"name":"org.hibernate.resource.beans.internal.ManagedBeanRegistryImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.boot.internal.SessionFactoryOptionsBuilder"},
"name":"org.hibernate.resource.transaction.backend.jdbc.internal.JdbcResourceLocalTransactionCoordinatorBuilderImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.resource.transaction.internal.TransactionCoordinatorBuilderInitiator"},
"name":"org.hibernate.resource.transaction.backend.jdbc.internal.JdbcResourceLocalTransactionCoordinatorBuilderImpl",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.internal.SessionFactoryImpl"},
"name":"org.hibernate.service.internal.SessionFactoryServiceRegistryFactoryImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.sql.ordering.antlr.CollationSpecification",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.hql.internal.ast.util.TokenPrinters"},
"name":"org.hibernate.sql.ordering.antlr.GeneratedOrderByFragmentRendererTokenTypes",
"allPublicFields":true
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.sql.ordering.antlr.NodeSupport",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.sql.ordering.antlr.OrderByFragment",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.sql.ordering.antlr.OrderingSpecification",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.sql.ordering.antlr.SortKey",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.jpa.HibernatePersistenceProvider"},
"name":"org.hibernate.sql.ordering.antlr.SortSpecification",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.internal.SessionFactoryImpl"},
"name":"org.hibernate.stat.internal.StatisticsImpl",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.tool.schema.internal.SchemaCreatorImpl"},
"name":"org.hibernate.tool.hbm2ddl.SingleLineSqlCommandExtractor",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.tool.schema.spi.SchemaManagementToolCoordinator"},
"name":"org.hibernate.tool.schema.internal.HibernateSchemaManagementTool",
"queryAllPublicMethods":true
},
{
"condition":{"typeReachable":"org.hibernate.tuple.entity.EntityTuplizerFactory"},
"name":"org.hibernate.tuple.entity.PojoEntityTuplizer",
"methods":[{"name":"<init>","parameterTypes":["org.hibernate.tuple.entity.EntityMetamodel","org.hibernate.mapping.PersistentClass"] }]
},
{
"condition":{"typeReachable":"org.hibernate.cfg.annotations.SimpleValueBinder"},
"name":"org.hibernate.type.EnumType"
},
{
"condition":{"typeReachable":"org.hibernate.type.TypeFactory"},
"name":"org.hibernate.type.EnumType",
"methods":[{"name":"<init>","parameterTypes":[] }]
},
{
"condition":{"typeReachable":"org.hibernate.type.TypeResolver"},
"name":"org.hibernate.type.EnumType"
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate/hibernate-core/5.6.14.Final/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.hibernate/hibernate-core/5.6.14.Final/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 24730
} | 159 |
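Each entry in the reflect-config record above registers a class for reflective access only while its typeReachable condition holds, most often so that a no-argument constructor ("<init>") can be invoked at run time in a native image. A hedged sketch of the kind of call such a registration keeps working (the dialect class comes from the record; the class name DialectLookup and the lookup code are illustrative, and hibernate-core 5.6.14.Final is assumed to be on the classpath):

// Sketch only: shows the reflective no-arg construction that an "<init>"
// registration in the record above allows inside a GraalVM native image.
public class DialectLookup {
    public static void main(String[] args) throws Exception {
        Class<?> dialect = Class.forName("org.hibernate.dialect.H2Dialect");
        Object instance = dialect.getDeclaredConstructor().newInstance();
        System.out.println(instance.getClass().getName()); // org.hibernate.dialect.H2Dialect
    }
}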
[
"jni-config.json",
"proxy-config.json",
"reflect-config.json",
"resource-config.json"
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jline/jline/3.21.0/index.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.jline/jline/3.21.0/index.json",
"repo_id": "mybatis-native-demo",
"token_count": 41
} | 160 |
{
"resources":{
"includes":[]},
"bundles":[{
"name":"org.opengauss.translation.messages",
"classNames":["org.opengauss.translation.messages_zh_CN"]
}]
}
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.opengauss/opengauss-jdbc/3.1.0-og/resource-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.opengauss/opengauss-jdbc/3.1.0-og/resource-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 74
} | 161 |
[
{
"name": "org.thymeleaf.extras.springsecurity6.util.Spring6VersionSpecificUtility",
"allDeclaredConstructors": true,
"condition": {
"typeReachable": "org.thymeleaf.extras.springsecurity6.util.SpringVersionSpecificUtils"
}
},
{
"name": "org.springframework.web.servlet.View",
"condition": {
"typeReachable": "org.springframework.web.servlet.View"
}
},
{
"name": "org.springframework.web.reactive.result.view.View",
"condition": {
"typeReachable": "org.springframework.web.reactive.result.view.View"
}
}
]
| mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf.extras/thymeleaf-extras-springsecurity6/3.1.0.M1/reflect-config.json/0 | {
"file_path": "mybatis-native-demo/target/graalvm-reachability-metadata/5c2bbae17873953cfd284011e1cedc9ce1396f45/org.thymeleaf.extras/thymeleaf-extras-springsecurity6/3.1.0.M1/reflect-config.json",
"repo_id": "mybatis-native-demo",
"token_count": 233
} | 162 |
com/example/nativedemo/MyMetaObjectHandler.class
com/example/nativedemo/MyBatisNativeConfiguration$MyBaitsRuntimeHintsRegistrar.class
com/example/nativedemo/dao/MessagesDao.class
com/example/nativedemo/mapper/MessagesMapper.class
com/example/nativedemo/MyBatisNativeConfiguration$MyBatisMapperFactoryBeanPostProcessor.class
com/example/nativedemo/dao/SysUserDao.class
com/example/nativedemo/entity/Messages.class
com/example/nativedemo/NativeDemoApplication.class
com/example/nativedemo/MyBatisNativeConfiguration$MyBatisBeanFactoryInitializationAotProcessor.class
com/example/nativedemo/enmus/MessageType.class
com/example/nativedemo/controller/DemoController.class
com/example/nativedemo/MyBatisNativeConfiguration.class
com/example/nativedemo/mapper/SysUserMapper.class
com/example/nativedemo/entity/SysUser.class
com/example/nativedemo/MyBatisNativeConfiguration$MyBatisMapperTypeUtils.class
com/example/nativedemo/entity/MessageDto.class
com/example/nativedemo/MybatisPlusConfig.class
com/example/nativedemo/LambdaRegistrationFeature.class
| mybatis-native-demo/target/maven-status/maven-compiler-plugin/compile/default-compile/createdFiles.lst/0 | {
"file_path": "mybatis-native-demo/target/maven-status/maven-compiler-plugin/compile/default-compile/createdFiles.lst",
"repo_id": "mybatis-native-demo",
"token_count": 336
} | 163 |
package com.baomidou.mybatisplus.autoconfigure;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link MybatisPlusProperties}.
*/
public class MybatisPlusProperties__BeanDefinitions {
/**
* Get the bean definition for 'mybatisPlusProperties'.
*/
public static BeanDefinition getMybatisPlusPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(MybatisPlusProperties.class);
beanDefinition.setInstanceSupplier(MybatisPlusProperties::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/com/baomidou/mybatisplus/autoconfigure/MybatisPlusProperties__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/com/baomidou/mybatisplus/autoconfigure/MybatisPlusProperties__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 193
} | 164 |
package org.springframework.aop.framework.autoproxy;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link InfrastructureAdvisorAutoProxyCreator}.
*/
public class InfrastructureAdvisorAutoProxyCreator__BeanDefinitions {
/**
* Get the bean definition for 'internalAutoProxyCreator'.
*/
public static BeanDefinition getInternalAutoProxyCreatorBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(InfrastructureAdvisorAutoProxyCreator.class);
beanDefinition.setRole(BeanDefinition.ROLE_INFRASTRUCTURE);
beanDefinition.getPropertyValues().addPropertyValue("order", -2147483648);
beanDefinition.getPropertyValues().addPropertyValue("proxyTargetClass", true);
beanDefinition.setInstanceSupplier(InfrastructureAdvisorAutoProxyCreator::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/aop/framework/autoproxy/InfrastructureAdvisorAutoProxyCreator__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/aop/framework/autoproxy/InfrastructureAdvisorAutoProxyCreator__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 264
} | 165 |
package org.springframework.boot.autoconfigure.jdbc;
import com.zaxxer.hikari.HikariDataSource;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link DataSourceConfiguration}.
*/
public class DataSourceConfiguration__BeanDefinitions {
/**
* Bean definitions for {@link DataSourceConfiguration.Hikari}.
*/
public static class Hikari {
/**
* Get the bean definition for 'hikari'.
*/
public static BeanDefinition getHikariBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceConfiguration.Hikari.class);
beanDefinition.setInstanceSupplier(DataSourceConfiguration.Hikari::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'jdbcConnectionDetailsHikariBeanPostProcessor'.
*/
private static BeanInstanceSupplier<HikariJdbcConnectionDetailsBeanPostProcessor> getJdbcConnectionDetailsHikariBeanPostProcessorInstanceSupplier(
) {
return BeanInstanceSupplier.<HikariJdbcConnectionDetailsBeanPostProcessor>forFactoryMethod(DataSourceConfiguration.Hikari.class, "jdbcConnectionDetailsHikariBeanPostProcessor", ObjectProvider.class)
.withGenerator((registeredBean, args) -> DataSourceConfiguration.Hikari.jdbcConnectionDetailsHikariBeanPostProcessor(args.get(0)));
}
/**
* Get the bean definition for 'jdbcConnectionDetailsHikariBeanPostProcessor'.
*/
public static BeanDefinition getJdbcConnectionDetailsHikariBeanPostProcessorBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DataSourceConfiguration.Hikari.class);
beanDefinition.setTargetType(HikariJdbcConnectionDetailsBeanPostProcessor.class);
beanDefinition.setInstanceSupplier(getJdbcConnectionDetailsHikariBeanPostProcessorInstanceSupplier());
return beanDefinition;
}
/**
* Get the bean instance supplier for 'dataSource'.
*/
private static BeanInstanceSupplier<HikariDataSource> getDataSourceInstanceSupplier() {
return BeanInstanceSupplier.<HikariDataSource>forFactoryMethod(DataSourceConfiguration.Hikari.class, "dataSource", DataSourceProperties.class, JdbcConnectionDetails.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(DataSourceConfiguration.Hikari.class).dataSource(args.get(0), args.get(1)));
}
/**
* Get the bean definition for 'dataSource'.
*/
public static BeanDefinition getDataSourceBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(HikariDataSource.class);
beanDefinition.setDestroyMethodNames("close");
beanDefinition.setInstanceSupplier(getDataSourceInstanceSupplier());
return beanDefinition;
}
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/jdbc/DataSourceConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 963
} | 166 |
package org.springframework.boot.autoconfigure.task;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link TaskExecutionProperties}.
*/
public class TaskExecutionProperties__BeanDefinitions {
/**
* Get the bean definition for 'taskExecutionProperties'.
*/
public static BeanDefinition getTaskExecutionPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskExecutionProperties.class);
beanDefinition.setInstanceSupplier(TaskExecutionProperties::new);
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/task/TaskExecutionProperties__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/boot/autoconfigure/task/TaskExecutionProperties__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 184
} | 167 |
package org.springframework.cloud.autoconfigure;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.cloud.context.properties.ConfigurationPropertiesBeans;
import org.springframework.cloud.context.properties.ConfigurationPropertiesRebinder;
/**
* Bean definitions for {@link ConfigurationPropertiesRebinderAutoConfiguration}.
*/
public class ConfigurationPropertiesRebinderAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'configurationPropertiesRebinderAutoConfiguration'.
*/
public static BeanDefinition getConfigurationPropertiesRebinderAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ConfigurationPropertiesRebinderAutoConfiguration.class);
beanDefinition.setInstanceSupplier(ConfigurationPropertiesRebinderAutoConfiguration::new);
return beanDefinition;
}
/**
* Get the bean definition for 'configurationPropertiesBeans'.
*/
public static BeanDefinition getConfigurationPropertiesBeansBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ConfigurationPropertiesRebinderAutoConfiguration.class);
beanDefinition.setTargetType(ConfigurationPropertiesBeans.class);
beanDefinition.setInstanceSupplier(BeanInstanceSupplier.<ConfigurationPropertiesBeans>forFactoryMethod(ConfigurationPropertiesRebinderAutoConfiguration.class, "configurationPropertiesBeans").withGenerator((registeredBean) -> ConfigurationPropertiesRebinderAutoConfiguration.configurationPropertiesBeans()));
return beanDefinition;
}
/**
* Get the bean instance supplier for 'configurationPropertiesRebinder'.
*/
private static BeanInstanceSupplier<ConfigurationPropertiesRebinder> getConfigurationPropertiesRebinderInstanceSupplier(
) {
return BeanInstanceSupplier.<ConfigurationPropertiesRebinder>forFactoryMethod(ConfigurationPropertiesRebinderAutoConfiguration.class, "configurationPropertiesRebinder", ConfigurationPropertiesBeans.class)
.withGenerator((registeredBean, args) -> registeredBean.getBeanFactory().getBean(ConfigurationPropertiesRebinderAutoConfiguration.class).configurationPropertiesRebinder(args.get(0)));
}
/**
* Get the bean definition for 'configurationPropertiesRebinder'.
*/
public static BeanDefinition getConfigurationPropertiesRebinderBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ConfigurationPropertiesRebinder.class);
beanDefinition.setInstanceSupplier(getConfigurationPropertiesRebinderInstanceSupplier());
return beanDefinition;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/autoconfigure/ConfigurationPropertiesRebinderAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/autoconfigure/ConfigurationPropertiesRebinderAutoConfiguration__BeanDefinitions.java",
"repo_id": "mybatis-native-demo",
"token_count": 690
} | 168 |
package org.springframework.cloud.commons.util;
import org.springframework.beans.factory.aot.AutowiredFieldValueResolver;
import org.springframework.beans.factory.support.RegisteredBean;
/**
* Autowiring for {@link InetUtilsProperties}.
*/
public class InetUtilsProperties__Autowiring {
/**
* Apply the autowiring.
*/
public static InetUtilsProperties apply(RegisteredBean registeredBean,
InetUtilsProperties instance) {
AutowiredFieldValueResolver.forRequiredField("timeoutSeconds").resolveAndSet(registeredBean, instance);
return instance;
}
}
| mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/commons/util/InetUtilsProperties__Autowiring.java/0 | {
"file_path": "mybatis-native-demo/target/spring-aot/main/sources/org/springframework/cloud/commons/util/InetUtilsProperties__Autowiring.java",
"repo_id": "mybatis-native-demo",
"token_count": 185
} | 169 |
# spring
server.servlet.contextPath=${SERVER_SERVLET_CONTEXTPATH:/nacos}
server.contextPath=/nacos
server.port=${NACOS_APPLICATION_PORT:8848}
server.tomcat.accesslog.max-days=30
server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{User-Agent}i %{Request-Source}i
server.tomcat.accesslog.enabled=${TOMCAT_ACCESSLOG_ENABLED:false}
server.error.include-message=ALWAYS
# default current work dir
server.tomcat.basedir=file:.
#*************** Config Module Related Configurations ***************#
### Deprecated configuration property; it is recommended to use `spring.sql.init.platform` instead.
#spring.datasource.platform=${SPRING_DATASOURCE_PLATFORM:}
spring.sql.init.platform=${SPRING_DATASOURCE_PLATFORM:}
nacos.cmdb.dumpTaskInterval=3600
nacos.cmdb.eventTaskInterval=10
nacos.cmdb.labelTaskInterval=300
nacos.cmdb.loadDataAtStart=false
db.num=${MYSQL_DATABASE_NUM:1}
db.url.0=jdbc:mysql://${MYSQL_SERVICE_HOST}:${MYSQL_SERVICE_PORT:3306}/${MYSQL_SERVICE_DB_NAME}?${MYSQL_SERVICE_DB_PARAM:characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useSSL=false}
db.user.0=${MYSQL_SERVICE_USER}
db.password.0=${MYSQL_SERVICE_PASSWORD}
## DB connection pool settings
db.pool.config.connectionTimeout=${DB_POOL_CONNECTION_TIMEOUT:30000}
db.pool.config.validationTimeout=10000
db.pool.config.maximumPoolSize=20
db.pool.config.minimumIdle=2
### The auth system to use; currently only 'nacos' and 'ldap' are supported:
nacos.core.auth.system.type=${NACOS_AUTH_SYSTEM_TYPE:nacos}
### worked when nacos.core.auth.system.type=nacos
### The token expiration in seconds:
nacos.core.auth.plugin.nacos.token.expire.seconds=${NACOS_AUTH_TOKEN_EXPIRE_SECONDS:18000}
### The default token:
nacos.core.auth.plugin.nacos.token.secret.key=${NACOS_AUTH_TOKEN:}
### Turn on/off caching of auth information. By turning on this switch, updates to auth information take effect with a 15-second delay.
nacos.core.auth.caching.enabled=${NACOS_AUTH_CACHE_ENABLE:false}
nacos.core.auth.enable.userAgentAuthWhite=${NACOS_AUTH_USER_AGENT_AUTH_WHITE_ENABLE:false}
nacos.core.auth.server.identity.key=${NACOS_AUTH_IDENTITY_KEY:}
nacos.core.auth.server.identity.value=${NACOS_AUTH_IDENTITY_VALUE:}
## spring security config
### turn off security
nacos.security.ignore.urls=${NACOS_SECURITY_IGNORE_URLS:/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-fe/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**}
# metrics for elastic search
management.metrics.export.elastic.enabled=false
management.metrics.export.influx.enabled=false
nacos.naming.distro.taskDispatchThreadCount=10
nacos.naming.distro.taskDispatchPeriod=200
nacos.naming.distro.batchSyncKeyCount=1000
nacos.naming.distro.initDataRatio=0.9
nacos.naming.distro.syncRetryDelay=5000
nacos.naming.data.warmup=true
nacos.console.ui.enabled=true
nacos.core.param.check.enabled=true
| nacos-docker/build/conf/application.properties/0 | {
"file_path": "nacos-docker/build/conf/application.properties",
"repo_id": "nacos-docker",
"token_count": 1130
} | 170 |
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'prometheus'
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ['localhost:9090']
- job_name: 'nacos'
metrics_path: '/nacos/actuator/prometheus'
static_configs:
- targets: ["nacos1:8848","nacos2:8848","nacos3:8848"] | nacos-docker/example/prometheus/prometheus-cluster.yaml/0 | {
"file_path": "nacos-docker/example/prometheus/prometheus-cluster.yaml",
"repo_id": "nacos-docker",
"token_count": 371
} | 171 |
2024-06-12 10:12:20,070 INFO [dump] add formal task. groupKey=remote.yml+DEFAULT_GROUP+pgvector
2024-06-12 10:12:20,162 INFO [dump] process formal task. groupKey=remote.yml+DEFAULT_GROUP+pgvector
2024-06-12 10:12:20,169 INFO [dump] md5 changed, save to disk cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=9bf4f8d111bb4bbc63de484dc04158f5,oldMd5=83267b8356f558ebb72b0da2ebdde5f0
2024-06-12 10:12:20,169 INFO [dump] md5 changed, update md5 and timestamp in jvm cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=9bf4f8d111bb4bbc63de484dc04158f5,oldMd5=83267b8356f558ebb72b0da2ebdde5f0,lastModifiedTs=1718158340034
2024-06-12 10:13:11,960 INFO [dump] add formal task. groupKey=remote.yml+DEFAULT_GROUP+pgvector
2024-06-12 10:13:12,030 INFO [dump] process formal task. groupKey=remote.yml+DEFAULT_GROUP+pgvector
2024-06-12 10:13:12,031 INFO [dump] md5 changed, save to disk cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=87694188aa4ef4e7b518d586a9aee34b,oldMd5=9bf4f8d111bb4bbc63de484dc04158f5
2024-06-12 10:13:12,032 INFO [dump] md5 changed, update md5 and timestamp in jvm cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=87694188aa4ef4e7b518d586a9aee34b,oldMd5=9bf4f8d111bb4bbc63de484dc04158f5,lastModifiedTs=1718158391958
2024-06-12 10:37:56,671 INFO [dump] add formal task. groupKey=remote.yml+DEFAULT_GROUP+pgvector
2024-06-12 10:37:56,754 INFO [dump] process formal task. groupKey=remote.yml+DEFAULT_GROUP+pgvector
2024-06-12 10:37:56,757 INFO [dump] md5 changed, save to disk cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=83267b8356f558ebb72b0da2ebdde5f0,oldMd5=87694188aa4ef4e7b518d586a9aee34b
2024-06-12 10:37:56,757 INFO [dump] md5 changed, update md5 and timestamp in jvm cache ,groupKey=remote.yml+DEFAULT_GROUP+pgvector, newMd5=83267b8356f558ebb72b0da2ebdde5f0,oldMd5=87694188aa4ef4e7b518d586a9aee34b,lastModifiedTs=1718159876654
| nacos-docker/example/standalone-logs/config-dump.log/0 | {
"file_path": "nacos-docker/example/standalone-logs/config-dump.log",
"repo_id": "nacos-docker",
"token_count": 840
} | 172 |
version: "3.8"
services:
nacos:
image: nacos/nacos-server:${NACOS_VERSION}
container_name: nacos-standalone-mysql
env_file:
- ../env/custom-application-config.env
volumes:
- ./standalone-logs/:/home/nacos/logs
- ./init.d/application.properties:/home/nacos/conf/application.properties
ports:
- "8848:8848"
- "9848:9848"
depends_on:
mysql:
condition: service_healthy
restart: on-failure
mysql:
container_name: mysql
build:
context: .
dockerfile: ./image/mysql/5.7/Dockerfile
image: example/mysql:5.7
env_file:
- ../env/mysql.env
volumes:
- ./mysql:/var/lib/mysql
ports:
- "3306:3306"
healthcheck:
test: [ "CMD", "mysqladmin" ,"ping", "-h", "localhost" ]
interval: 5s
timeout: 10s
retries: 10
| nacos-docker/example/standalone-mysql-5.7.yaml/0 | {
"file_path": "nacos-docker/example/standalone-mysql-5.7.yaml",
"repo_id": "nacos-docker",
"token_count": 408
} | 173 |
#Thu Apr 18 11:28:02 CST 2024
gradle.version=8.7
| pgvector/.gradle/buildOutputCleanup/cache.properties/0 | {
"file_path": "pgvector/.gradle/buildOutputCleanup/cache.properties",
"repo_id": "pgvector",
"token_count": 21
} | 174 |
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="RemoteTargetsManager">
<targets>
<target name="ubuntu@172.16.14.8:50022" type="ssh/sftp" uuid="eebe911c-5318-468c-9aba-b94a1795e44c">
<config>
<option name="projectRootOnTarget" value="/home/ubuntu/pgvector" />
<option name="serverName" value="ubuntu@172.16.14.8:50022 password" />
</config>
<ContributedStateBase type="GradleRuntime">
<config />
</ContributedStateBase>
<ContributedStateBase type="JavaLanguageRuntime">
<config>
<option name="homePath" value="C:\Users\hukai\.jdks\corretto-21.0.2" />
</config>
</ContributedStateBase>
</target>
</targets>
</component>
</project> | pgvector/.idea/remote-targets.xml/0 | {
"file_path": "pgvector/.idea/remote-targets.xml",
"repo_id": "pgvector",
"token_count": 361
} | 175 |
package org.springframework.boot.autoconfigure.availability;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
import org.springframework.boot.availability.ApplicationAvailabilityBean;
/**
* Bean definitions for {@link ApplicationAvailabilityAutoConfiguration}.
*/
@Generated
public class ApplicationAvailabilityAutoConfiguration__BeanDefinitions {
/**
* Get the bean definition for 'applicationAvailabilityAutoConfiguration'.
*/
public static BeanDefinition getApplicationAvailabilityAutoConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ApplicationAvailabilityAutoConfiguration.class);
beanDefinition.setInstanceSupplier(ApplicationAvailabilityAutoConfiguration::new);
return beanDefinition;
}
/**
* Get the bean instance supplier for 'applicationAvailability'.
*/
private static BeanInstanceSupplier<ApplicationAvailabilityBean> getApplicationAvailabilityInstanceSupplier(
) {
return BeanInstanceSupplier.<ApplicationAvailabilityBean>forFactoryMethod(ApplicationAvailabilityAutoConfiguration.class, "applicationAvailability")
.withGenerator((registeredBean) -> registeredBean.getBeanFactory().getBean(ApplicationAvailabilityAutoConfiguration.class).applicationAvailability());
}
/**
* Get the bean definition for 'applicationAvailability'.
*/
public static BeanDefinition getApplicationAvailabilityBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(ApplicationAvailabilityBean.class);
beanDefinition.setInstanceSupplier(getApplicationAvailabilityInstanceSupplier());
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/availability/ApplicationAvailabilityAutoConfiguration__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/availability/ApplicationAvailabilityAutoConfiguration__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 451
} | 176 |
package org.springframework.boot.autoconfigure.orm.jpa;
import javax.sql.DataSource;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.config.ConfigurableListableBeanFactory;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link HibernateJpaConfiguration}.
*/
@Generated
public class HibernateJpaConfiguration__BeanDefinitions {
/**
* Get the bean instance supplier for 'org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaConfiguration'.
*/
private static BeanInstanceSupplier<HibernateJpaConfiguration> getHibernateJpaConfigurationInstanceSupplier(
) {
return BeanInstanceSupplier.<HibernateJpaConfiguration>forConstructor(DataSource.class, JpaProperties.class, ConfigurableListableBeanFactory.class, ObjectProvider.class, HibernateProperties.class, ObjectProvider.class, ObjectProvider.class, ObjectProvider.class, ObjectProvider.class, ObjectProvider.class)
.withGenerator((registeredBean, args) -> new HibernateJpaConfiguration(args.get(0), args.get(1), args.get(2), args.get(3), args.get(4), args.get(5), args.get(6), args.get(7), args.get(8), args.get(9)));
}
/**
* Get the bean definition for 'hibernateJpaConfiguration'.
*/
public static BeanDefinition getHibernateJpaConfigurationBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(HibernateJpaConfiguration.class);
beanDefinition.setInstanceSupplier(getHibernateJpaConfigurationInstanceSupplier());
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/HibernateJpaConfiguration__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/orm/jpa/HibernateJpaConfiguration__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 549
} | 177 |
package org.springframework.boot.autoconfigure.task;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link TaskSchedulingProperties}.
*/
@Generated
public class TaskSchedulingProperties__BeanDefinitions {
/**
* Get the bean definition for 'taskSchedulingProperties'.
*/
public static BeanDefinition getTaskSchedulingPropertiesBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(TaskSchedulingProperties.class);
beanDefinition.setInstanceSupplier(TaskSchedulingProperties::new);
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/task/TaskSchedulingProperties__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/task/TaskSchedulingProperties__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 210
} | 178 |
package org.springframework.boot.autoconfigure.web.servlet;
import java.util.List;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.aot.AutowiredMethodArgumentsResolver;
import org.springframework.beans.factory.support.RegisteredBean;
/**
* Autowiring for {@link WebMvcAutoConfiguration.EnableWebMvcConfiguration}.
*/
@Generated
public class WebMvcAutoConfiguration_EnableWebMvcConfiguration__Autowiring {
/**
* Apply the autowiring.
*/
public static WebMvcAutoConfiguration.EnableWebMvcConfiguration apply(
RegisteredBean registeredBean, WebMvcAutoConfiguration.EnableWebMvcConfiguration instance) {
AutowiredMethodArgumentsResolver.forMethod("setConfigurers", List.class).resolve(registeredBean, args -> instance.setConfigurers(args.get(0)));
return instance;
}
}
| pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration_EnableWebMvcConfiguration__Autowiring.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/boot/autoconfigure/web/servlet/WebMvcAutoConfiguration_EnableWebMvcConfiguration__Autowiring.java",
"repo_id": "pgvector",
"token_count": 253
} | 179 |
package org.springframework.data.jpa.repository.support;
import java.util.Set;
import org.springframework.aot.generate.Generated;
import org.springframework.beans.factory.aot.BeanInstanceSupplier;
import org.springframework.beans.factory.config.BeanDefinition;
import org.springframework.beans.factory.support.RootBeanDefinition;
/**
* Bean definitions for {@link DefaultJpaContext}.
*/
@Generated
public class DefaultJpaContext__BeanDefinitions {
/**
* Get the bean instance supplier for 'jpaContext'.
*/
private static BeanInstanceSupplier<DefaultJpaContext> getJpaContextInstanceSupplier() {
return BeanInstanceSupplier.<DefaultJpaContext>forConstructor(Set.class)
.withGenerator((registeredBean, args) -> new DefaultJpaContext(args.get(0)));
}
/**
* Get the bean definition for 'jpaContext'.
*/
public static BeanDefinition getJpaContextBeanDefinition() {
RootBeanDefinition beanDefinition = new RootBeanDefinition(DefaultJpaContext.class);
beanDefinition.setLazyInit(true);
beanDefinition.setInstanceSupplier(getJpaContextInstanceSupplier());
return beanDefinition;
}
}
| pgvector/build/generated/aotSources/org/springframework/data/jpa/repository/support/DefaultJpaContext__BeanDefinitions.java/0 | {
"file_path": "pgvector/build/generated/aotSources/org/springframework/data/jpa/repository/support/DefaultJpaContext__BeanDefinitions.java",
"repo_id": "pgvector",
"token_count": 351
} | 180 |
[
{
"name": "org.apache.commons.compress.archivers.zip.AsiExtraField",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.JarMarker",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.ResourceAlignmentExtraField",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.UnicodeCommentExtraField",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.UnicodePathExtraField",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.X000A_NTFS",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.X0014_X509Certificates",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.X0015_CertificateIdForFile",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.X0016_CertificateIdForCentralDirectory",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.X0017_StrongEncryptionHeader",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.X0019_EncryptionRecipientCertificateList",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.X5455_ExtendedTimestamp",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.X7875_NewUnix",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
},
{
"name": "org.apache.commons.compress.archivers.zip.Zip64ExtendedInformationExtraField",
"condition": {
"typeReachable": "org.apache.commons.compress.archivers.zip.ExtraFieldUtils"
},
"methods": [
{
"name": "<init>",
"parameterTypes": [
]
}
]
}
]
| pgvector/build/native-reachability-metadata/META-INF/native-image/org.apache.commons/commons-compress/1.24.0/reflect-config.json/0 | {
"file_path": "pgvector/build/native-reachability-metadata/META-INF/native-image/org.apache.commons/commons-compress/1.24.0/reflect-config.json",
"repo_id": "pgvector",
"token_count": 2077
} | 181 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/conceptual_physics/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/conceptual_physics/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 182 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/high_school_psychology/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/high_school_psychology/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 183 |
MIT | swift/benchmarks/modelscope_mmlu/mmlu/nutrition/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE/0 | {
"file_path": "swift/benchmarks/modelscope_mmlu/mmlu/nutrition/1.0.0/fedfb5e4f551779e93567fbaaa992d74323de5ed8041b2a38b33dc9af632e3f5/LICENSE",
"repo_id": "swift",
"token_count": 1
} | 184 |
# Installation and Usage
## Installing the Wheel Package
You can install with pip:
```shell
# Full capabilities
pip install 'ms-swift[all]' -U
# LLM only
pip install 'ms-swift[llm]' -U
# AIGC only
pip install 'ms-swift[aigc]' -U
# Adapters only
pip install ms-swift -U
```
## Installing from Source
```shell
git clone https://github.com/modelscope/swift.git
cd swift
pip install -e '.[all]'
```
## Notebook Environment
Most of the models that Swift supports for training can run on an `A10` GPU, and you can use the free GPU resources provided by ModelScope:
1. Go to the official [ModelScope](https://www.modelscope.cn) website and log in
2. Click `My Notebook` on the left and start a free GPU instance
3. Enjoy the free A10 GPU
## Building the Documentation
Swift ships with full API documentation. From the swift root directory, run:
```shell
make docs
```
When the build finishes, open `docs/build/html/index.html`.
| swift/docs/source/GetStarted/SWIFT安装.md/0 | {
"file_path": "swift/docs/source/GetStarted/SWIFT安装.md",
"repo_id": "swift",
"token_count": 511
} | 185 |
# Internlm-Xcomposer2 Best Practice
## Table of Contents
- [Environment Setup](#environment-setup)
- [Inference](#inference)
- [Fine-tuning](#fine-tuning)
- [Inference After Fine-tuning](#inference-after-fine-tuning)
## Environment Setup
```shell
pip install 'ms-swift[llm]' -U
```
## Inference
Run inference with [internlm-xcomposer2-7b-chat](https://modelscope.cn/models/Shanghai_AI_Laboratory/internlm-xcomposer2-7b/summary):
```shell
# Experimental environment: A10, 3090, V100, ...
# 21GB GPU memory
CUDA_VISIBLE_DEVICES=0 swift infer --model_type internlm-xcomposer2-7b-chat
```
Output: (local paths or URLs can be passed in)
```python
"""
<<< 你是谁?
我是浦语·灵笔,一个由上海人工智能实验室开发的语言模型。我能理解并流畅地使用英语和中文与你对话。
--------------------------------------------------
<<< <img>http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/animal.png</img><img>http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png</img>这两张图片有什么区别
这两张图片没有直接的关联,它们分别展示了两个不同的场景。第一幅图是一张卡通画,描绘了一群羊在草地上,背景是蓝天和山脉。第二幅图则是一张猫的照片,猫正看着镜头,背景模糊不清。
--------------------------------------------------
<<< clear
<<< <img>http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/animal.png</img>图中有几只羊
图中有4只羊
--------------------------------------------------
<<< <img>http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/math.png</img>计算结果是多少
1452 + 45304 = 46756
--------------------------------------------------
<<< <img>http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/poem.png</img>根据图片中的内容写首诗
夜色苍茫月影斜,
湖面平静如明镜。
小舟轻荡波光里,
灯火微摇映水乡。
--------------------------------------------------
<<< clear
<<< <img>https://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/ocr.png</img>对图片进行OCR
很抱歉,我无法对您提供的图片进行OCR。如果您需要文本识别服务,您可以上传图片到其他支持OCR服务的平台,或者您可以尝试使用一些在线OCR工具。
"""
```
The sample images are shown below:
cat:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/cat.png" width="250" style="display: inline-block;">
animal:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/animal.png" width="250" style="display: inline-block;">
math:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/math.png" width="250" style="display: inline-block;">
poem:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/poem.png" width="250" style="display: inline-block;">
ocr:
<img src="https://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/ocr.png" width="250" style="display: inline-block;">
**Single-sample inference**
```python
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
from swift.llm import (
get_model_tokenizer, get_template, inference, ModelType,
get_default_template_type, inference_stream
)
from swift.utils import seed_everything
import torch
model_type = ModelType.internlm_xcomposer2_7b_chat
template_type = get_default_template_type(model_type)
print(f'template_type: {template_type}')
model, tokenizer = get_model_tokenizer(model_type, torch.float16,
model_kwargs={'device_map': 'auto'})
model.generation_config.max_new_tokens = 256
template = get_template(template_type, tokenizer)
seed_everything(42)
query = """<img>http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/road.png</img>距离各城市多远?"""
response, history = inference(model, template, query)
print(f'query: {query}')
print(f'response: {response}')
# streaming inference
query = '距离最远的城市是哪?'
gen = inference_stream(model, template, query, history)
print_idx = 0
print(f'query: {query}\nresponse: ', end='')
for response, history in gen:
delta = response[print_idx:]
print(delta, end='', flush=True)
print_idx = len(response)
print()
print(f'history: {history}')
"""
query: <img>http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/road.png</img>距离各城市多远?
response: 马鞍山距离阳江62公里,广州距离广州293公里。
query: 距离最远的城市是哪?
response: 距离最最远的城市是广州,距离广州293公里。
history: [['<img>http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/road.png</img>距离各城市多远?', ' 马鞍山距离阳江62公里,广州距离广州293公里。'], ['距离最远的城市是哪?', ' 距离最远的城市是广州,距离广州293公里。']]
"""
```
The sample images are shown below:
road:
<img src="http://modelscope-open.oss-cn-hangzhou.aliyuncs.com/images/road.png" width="250" style="display: inline-block;">
## Fine-tuning
Fine-tuning of multimodal LLMs usually uses a **custom dataset**. Here is a demo that can be run directly:
(By default, LoRA fine-tuning is applied only to the qkv projections of the LLM part. `--lora_target_modules ALL` is not supported. Full-parameter fine-tuning is supported.)
```shell
# Experimental environment: A10, 3090, V100, ...
# 21GB GPU memory
CUDA_VISIBLE_DEVICES=0 swift sft \
--model_type internlm-xcomposer2-7b-chat \
    --dataset coco-en-mini
```
[Custom datasets](../LLM/自定义与拓展.md#-推荐命令行参数的形式) support the json and jsonl formats. Below is an example of a custom dataset, followed by a sketch of how such a file might be passed to training:
(Multi-turn conversations are supported; each turn may contain multiple images or none; local paths or URLs can be passed in. This model does not support merge-lora.)
```json
[
{"conversations": [
{"from": "user", "value": "<img>img_path</img>11111"},
{"from": "assistant", "value": "22222"}
]},
{"conversations": [
{"from": "user", "value": "<img>img_path</img><img>img_path2</img><img>img_path3</img>aaaaa"},
{"from": "assistant", "value": "bbbbb"},
{"from": "user", "value": "<img>img_path</img>ccccc"},
{"from": "assistant", "value": "ddddd"}
]},
{"conversations": [
{"from": "user", "value": "AAAAA"},
{"from": "assistant", "value": "BBBBB"},
{"from": "user", "value": "CCCCC"},
{"from": "assistant", "value": "DDDDD"}
]}
]
```
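Once such a file is saved locally, it can be passed to `swift sft` through the custom-dataset arguments described in the linked customization doc. The flag name (`--custom_train_dataset_path`) and the file name `my_dataset.json` below are assumptions for illustration; check the customization doc for the exact argument supported by your ms-swift version.
```shell
# Minimal sketch (assumed flag and file name): fine-tune on a local custom dataset in the format above.
CUDA_VISIBLE_DEVICES=0 swift sft \
    --model_type internlm-xcomposer2-7b-chat \
    --custom_train_dataset_path my_dataset.json
```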
## Inference After Fine-tuning
```shell
CUDA_VISIBLE_DEVICES=0 swift infer \
--ckpt_dir output/internlm-xcomposer2-7b-chat/vx-xxx/checkpoint-xxx \
    --load_dataset_config true
```
| swift/docs/source/Multi-Modal/internlm-xcomposer2最佳实践.md/0 | {
"file_path": "swift/docs/source/Multi-Modal/internlm-xcomposer2最佳实践.md",
"repo_id": "swift",
"token_count": 3223
} | 186 |
swift.tuners
==============
.. automodule:: swift.tuners
.. currentmodule:: swift.tuners
.. autosummary::
:toctree: generated
:nosignatures:
:template: classtemplate.rst
adapter.AdapterConfig
base.SwiftModel
base.Swift
lora.LoRAConfig
prompt.PromptConfig
restuning.ResTuningConfig
side.SideConfig
utils.SwiftConfig
utils.SwiftOutput
| swift/docs/source/api/swift.tuners.rst/0 | {
"file_path": "swift/docs/source/api/swift.tuners.rst",
"repo_id": "swift",
"token_count": 155
} | 187 |