text | id | metadata | __index_level_0__
---|---|---|---|
Global:
  reader_config: configs/rtdetr_reader.yml
  include_nms: True
  Evaluation: True
  model_dir: ./rtdetr_hgnetv2_l_6x_coco/
  model_filename: model.pdmodel
  params_filename: model.pdiparams

Distillation:
  alpha: 1.0
  loss: soft_label

QuantAware:
  onnx_format: true
  activation_quantize_type: 'moving_average_abs_max'
  quantize_op_types:
  - conv2d
  - depthwise_conv2d
  - matmul_v2

TrainConfig:
  train_iter: 200
  eval_iter: 50
  learning_rate:
    type: CosineAnnealingDecay
    learning_rate: 0.00003
    T_max: 10000
  optimizer_builder:
    optimizer:
      type: SGD
    weight_decay: 4.0e-05
| PaddleDetection/deploy/auto_compression/configs/rtdetr_hgnetv2_l_qat_dis.yaml/0 | {
"file_path": "PaddleDetection/deploy/auto_compression/configs/rtdetr_hgnetv2_l_qat_dis.yaml",
"repo_id": "PaddleDetection",
"token_count": 266
} | 54 |
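For orientation, a config like the one above is normally passed to the auto-compression entry script under `deploy/auto_compression`. The snippet below is only an illustrative sketch (not part of the repository) that loads the YAML and inspects the quantization and training settings; the relative path assumes it is run from that directory.

```python
# Illustrative sketch: load the QAT/distillation config above and inspect it.
import yaml

with open("configs/rtdetr_hgnetv2_l_qat_dis.yaml") as f:
    cfg = yaml.safe_load(f)

# Which ops are quantized, and which activation quantizer is used.
print(cfg["QuantAware"]["quantize_op_types"])         # ['conv2d', 'depthwise_conv2d', 'matmul_v2']
print(cfg["QuantAware"]["activation_quantize_type"])  # moving_average_abs_max

# Training schedule used during quant-aware training with distillation.
print(cfg["TrainConfig"]["train_iter"], cfg["TrainConfig"]["learning_rate"]["type"])
```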
#!/bin/bash
# All rights `PaddleDetection` reserved
model_dir=$1
model_name=$2
export img_dir="demo"
export log_path="output_pipeline"
echo "model_dir : ${model_dir}"
echo "img_dir: ${img_dir}"
# TODO: support batch size>1
for run_mode in "trt_int8"; do
echo "${model_name} ${model_dir}, run_mode: ${run_mode}"
python deploy/python/infer.py \
--model_dir=${model_dir} \
--run_benchmark=True \
--device=GPU \
--run_mode=${run_mode} \
--image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_gpu_runmode_${run_mode}_bs1_infer.log
done
| PaddleDetection/deploy/benchmark/benchmark_quant.sh/0 | {
"file_path": "PaddleDetection/deploy/benchmark/benchmark_quant.sh",
"repo_id": "PaddleDetection",
"token_count": 235
} | 55 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The code is based on:
// https://github.com/CnybTseng/JDE/blob/master/platforms/common/jdetracker.h
// The copyright of CnybTseng/JDE is as follows:
// MIT License
#pragma once
#include <map>
#include <vector>
#include <opencv2/opencv.hpp>
#include "trajectory.h"
namespace PaddleDetection {
typedef std::map<int, int> Match;
typedef std::map<int, int>::iterator MatchIterator;
struct Track
{
int id;
float score;
cv::Vec4f ltrb;
};
class JDETracker
{
public:
static JDETracker *instance(void);
virtual bool update(const cv::Mat &dets, const cv::Mat &emb, std::vector<Track> &tracks);
private:
JDETracker(void);
virtual ~JDETracker(void) {}
cv::Mat motion_distance(const TrajectoryPtrPool &a, const TrajectoryPool &b);
void linear_assignment(const cv::Mat &cost, float cost_limit, Match &matches,
std::vector<int> &mismatch_row, std::vector<int> &mismatch_col);
void remove_duplicate_trajectory(TrajectoryPool &a, TrajectoryPool &b, float iou_thresh=0.15f);
private:
static JDETracker *me;
int timestamp;
TrajectoryPool tracked_trajectories;
TrajectoryPool lost_trajectories;
TrajectoryPool removed_trajectories;
int max_lost_time;
float lambda;
float det_thresh;
};
} // namespace PaddleDetection
| PaddleDetection/deploy/cpp/include/tracker.h/0 | {
"file_path": "PaddleDetection/deploy/cpp/include/tracker.h",
"repo_id": "PaddleDetection",
"token_count": 672
} | 56 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "include/utils.h"
namespace PaddleDetection {
void nms(std::vector<ObjectResult> &input_boxes, float nms_threshold) {
std::sort(input_boxes.begin(),
input_boxes.end(),
[](ObjectResult a, ObjectResult b) { return a.confidence > b.confidence; });
std::vector<float> vArea(input_boxes.size());
for (int i = 0; i < int(input_boxes.size()); ++i) {
vArea[i] = (input_boxes.at(i).rect[2] - input_boxes.at(i).rect[0] + 1)
* (input_boxes.at(i).rect[3] - input_boxes.at(i).rect[1] + 1);
}
for (int i = 0; i < int(input_boxes.size()); ++i) {
for (int j = i + 1; j < int(input_boxes.size());) {
float xx1 = (std::max)(input_boxes[i].rect[0], input_boxes[j].rect[0]);
float yy1 = (std::max)(input_boxes[i].rect[1], input_boxes[j].rect[1]);
float xx2 = (std::min)(input_boxes[i].rect[2], input_boxes[j].rect[2]);
float yy2 = (std::min)(input_boxes[i].rect[3], input_boxes[j].rect[3]);
float w = (std::max)(float(0), xx2 - xx1 + 1);
float h = (std::max)(float(0), yy2 - yy1 + 1);
float inter = w * h;
float ovr = inter / (vArea[i] + vArea[j] - inter);
if (ovr >= nms_threshold) {
input_boxes.erase(input_boxes.begin() + j);
vArea.erase(vArea.begin() + j);
}
else {
j++;
}
}
}
}
} // namespace PaddleDetection
| PaddleDetection/deploy/cpp/src/utils.cc/0 | {
"file_path": "PaddleDetection/deploy/cpp/src/utils.cc",
"repo_id": "PaddleDetection",
"token_count": 782
} | 57 |
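For reference, the greedy NMS in `utils.cc` above maps onto the following minimal NumPy sketch (an illustrative rewrite, not part of the repository), using the same `[x1, y1, x2, y2]` box layout and the same `+1` pixel area convention.

```python
import numpy as np

def nms(boxes, scores, nms_threshold):
    """Greedy NMS; boxes is an (N, 4) array of [x1, y1, x2, y2]; returns kept indices."""
    order = scores.argsort()[::-1]  # sort by confidence, highest first
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # Overlap of the current top-scoring box with all remaining boxes.
        xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Drop boxes whose overlap reaches the threshold, keep the rest.
        order = order[1:][ovr < nms_threshold]
    return keep
```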
import cv2
import os
import fastdeploy as fd
def parse_arguments():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir", required=True, help="Path of PaddleDetection model.")
parser.add_argument(
"--image_file", type=str, required=True, help="Path of test image file.")
return parser.parse_args()
args = parse_arguments()
runtime_option = fd.RuntimeOption()
runtime_option.use_ascend()
if args.model_dir is None:
model_dir = fd.download_model(name='ppyoloe_crn_l_300e_coco')
else:
model_dir = args.model_dir
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
# setting for runtime
model = fd.vision.detection.PPYOLOE(
model_file, params_file, config_file, runtime_option=runtime_option)
# predict
if args.image_file is None:
image_file = fd.utils.get_detection_test_image()
else:
image_file = args.image_file
im = cv2.imread(image_file)
result = model.predict(im)
print(result)
# visualize
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result save in ./visualized_result.jpg")
| PaddleDetection/deploy/fastdeploy/ascend/python/infer.py/0 | {
"file_path": "PaddleDetection/deploy/fastdeploy/ascend/python/infer.py",
"repo_id": "PaddleDetection",
"token_count": 487
} | 58 |
[English](README.md) | Simplified Chinese
# PaddleDetection Kunlunxin XPU C++ Deployment Example
This directory provides `infer.cc` as an example of quickly deploying PPYOLOE models with Kunlunxin XPU acceleration.
## 1. Description
PaddleDetection models can be quickly deployed with FastDeploy on NVIDIA GPU, X86 CPU, Phytium CPU, ARM CPU, and Intel GPU (discrete/integrated) hardware. The model families currently supported by FastDeploy include, but are not limited to, `PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `PPYOLO`, `FasterRCNN`, `SSD`, `PaddleYOLOv5`, `PaddleYOLOv6`, `PaddleYOLOv7`, `RTMDet`, `CascadeRCNN`, `PSSDet`, `RetinaNet`, `PPYOLOESOD`, `FCOS`, `TTFNet`, `TOOD`, and `GFL`. The constructors and prediction functions of all these classes take exactly the same parameters, so any of them can be called by simply following the PPYOLOE example.
## 2. Deployment Environment Preparation
Before deployment, you need to compile the prediction library for Kunlunxin XPU yourself; see [Compiling and installing the Kunlunxin XPU deployment environment](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install#自行编译安装).
## 3. Deployment Model Preparation
Before deployment, prepare the inference model you want to run. You can use a [pre-exported inference model](../README.md) or [export a PaddleDetection deployment model yourself](../README.md).
## 4. Running the Deployment Example
Taking inference on Linux as an example, run the following commands in this directory to build and test. FastDeploy 1.0.4 or later (x.x.x>=1.0.4) is required for this model.
### 4.1 Object Detection Example
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection/deploy/fastdeploy/kunlunxin/cpp
# Note: if the fastdeploy test code below cannot be found on the current branch, switch to the develop branch
# git checkout develop
# Build the deployment example
mkdir build
cd build
# Build infer_demo against the compiled FastDeploy library
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-kunlunxin
make -j
# Download the PPYOLOE model files and a test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
tar xvf ppyoloe_crn_l_300e_coco.tgz
# Run the deployment example
./infer_demo ./ppyoloe_crn_l_300e_coco 000000014439.jpg
```
The visualized result is shown below:
<div align="center">
<img src="https://user-images.githubusercontent.com/19339784/184326520-7075e907-10ed-4fad-93f8-52d0e35d4964.jpg", width=480px, height=320px />
</div>
### 4.2 Keypoint Detection Example
```bash
# Download the FastDeploy precompiled library; choose a suitable version from the `FastDeploy precompiled libraries` mentioned above
wget https://bj.bcebos.com/fastdeploy/release/cpp/fastdeploy-linux-x64-gpu-x.x.x.tgz
tar xvf fastdeploy-linux-x64-gpu-x.x.x.tgz
# Download the deployment example code
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection/deploy/fastdeploy/kunlunxin/cpp
# Note: if the fastdeploy test code below cannot be found on the current branch, switch to the develop branch
# git checkout develop
# Build the deployment example
mkdir build && cd build
mv ../fastdeploy-linux-x64-gpu-x.x.x .
cmake .. -DFASTDEPLOY_INSTALL_DIR=${PWD}/fastdeploy-linux-x64-gpu-x.x.x
make -j
# Download the PP-TinyPose model files and a test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_TinyPose_256x192_infer.tgz
tar -xvf PP_TinyPose_256x192_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg
# Run the deployment example
./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg
```
The visualized result is shown below:
<div align="center">
<img src="https://user-images.githubusercontent.com/16222477/196386764-dd51ad56-c410-4c54-9580-643f282f5a83.jpeg", width=359px, height=423px />
</div>
For multi-person keypoint detection, please refer to the [PPTinyPose Pipeline example](./det_keypoint_unite/).
- For how to use other inference backends and hardware with FastDeploy, see: [How to switch the model inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md)
## 5. PaddleDetection C++ Interface
The model families currently supported by FastDeploy include, but are not limited to, `PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `PPYOLO`, `FasterRCNN`, `SSD`, `PaddleYOLOv5`, `PaddleYOLOv6`, `PaddleYOLOv7`, `RTMDet`, `CascadeRCNN`, `PSSDet`, `RetinaNet`, `PPYOLOESOD`, `FCOS`, `TTFNet`, `TOOD`, and `GFL`. The constructors and prediction functions of all these classes take exactly the same parameters, so any of them can be called by simply following the PPYOLOE example.
### 5.1 Object Detection and Instance Segmentation Models
```c++
fastdeploy::vision::detection::PicoDet(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::SOLOv2(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PPYOLOE(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PPYOLO(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::YOLOv3(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PaddleYOLOX(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::FasterRCNN(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::MaskRCNN(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::SSD(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PaddleYOLOv5(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PaddleYOLOv6(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PaddleYOLOv7(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PaddleYOLOv8(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::CascadeRCNN(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PSSDet(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::RetinaNet(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::PPYOLOESOD(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::FCOS(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::TOOD(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
fastdeploy::vision::detection::GFL(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
```
### 5.2 Keypoint Detection Model
```C++
fastdeploy::vision::keypointdetection::PPTinyPose(const string& model_file, const string& params_file, const string& config_file, const RuntimeOption& runtime_option = RuntimeOption(), const ModelFormat& model_format = ModelFormat::PADDLE);
```
PaddleDetection model loading and initialization: `model_file` and `params_file` are the exported Paddle deployment model files, and `config_file` is the deployment configuration YAML file exported alongside them by PaddleDetection.
## 6. More Guides
- [PaddleDetection C++ API documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/namespacefastdeploy_1_1vision_1_1detection.html)
- [Overview of deploying PaddleDetection models with FastDeploy](../../)
- [Python deployment](../python)
## 7. FAQ
- [How to switch the model inference backend](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/how_to_change_backend.md)
- [Using Intel GPU (discrete/integrated graphics)](https://github.com/PaddlePaddle/FastDeploy/blob/develop/tutorials/intel_gpu/README.md)
- [Build the CPU deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/cpu.md)
- [Build the GPU deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/gpu.md)
- [Build the Jetson deployment library](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/build_and_install/jetson.md) | PaddleDetection/deploy/fastdeploy/kunlunxin/cpp/README.md/0 | {
"file_path": "PaddleDetection/deploy/fastdeploy/kunlunxin/cpp/README.md",
"repo_id": "PaddleDetection",
"token_count": 4524
} | 59 |
[English](README.md) | Simplified Chinese
# PaddleDetection RKNPU2 Python Deployment Example
This directory shows how to deploy PaddleDetection models on RKNPU2, using PPYOLOE as the example.
## 1. Deployment Environment Preparation
Before deployment, confirm the following:
- 1. The software and hardware environment meets the requirements; for RKNPU2 environment setup, refer to the [FastDeploy environment requirements](https://github.com/PaddlePaddle/FastDeploy/blob/develop/docs/cn/faq/rknpu2/rknpu2.md)
## 2. Deployment Model Preparation
For the model conversion code, refer to the [model conversion documentation](../README.md).
## 3. Running the Deployment Example
This directory provides `infer.py` to quickly deploy PPYOLOE on RKNPU. Run the following script to complete it:
```bash
# Download the deployment example code
git clone https://github.com/PaddlePaddle/PaddleDetection.git
cd PaddleDetection/deploy/fastdeploy/rockchip/rknpu2/python
# Note: if the fastdeploy test code below cannot be found on the current branch, switch to the develop branch
# git checkout develop
# Download the model and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/rknpu2/ppyoloe_plus_crn_s_80e_coco.zip
unzip ppyoloe_plus_crn_s_80e_coco.zip
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# Run the deployment example
python3 infer.py --model_file ./ppyoloe_plus_crn_s_80e_coco/ppyoloe_plus_crn_s_80e_coco_rk3588_quantized.rknn \
                 --config_file ./ppyoloe_plus_crn_s_80e_coco/infer_cfg.yml \
                 --image_file 000000014439.jpg
```
## 4. More Guides
RKNPU requires model inputs in NHWC format, and the image normalization step is embedded into the model during RKNN conversion. Therefore, when deploying with FastDeploy, first call `DisableNormalizeAndPermute` (C++) or `disable_normalize_and_permute` (Python) to disable normalization and the data-layout conversion in the preprocessing stage.
- [C++ deployment](../cpp)
- [Converting PaddleDetection models to RKNN](../README.md) | PaddleDetection/deploy/fastdeploy/rockchip/rknpu2/python/README.md/0 | {
"file_path": "PaddleDetection/deploy/fastdeploy/rockchip/rknpu2/python/README.md",
"repo_id": "PaddleDetection",
"token_count": 1119
} | 60 |
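The note in section 4 above implies that the Python example must strip normalization and layout permutation from host-side preprocessing, because both are baked into the RKNN model during conversion. The sketch below is an assumption-laden illustration: the method name `disable_normalize_and_permute` is taken from the README, but where it is exposed (on the model or its preprocessor) can differ between FastDeploy versions, so verify against `infer.py` in this directory.

```python
# Assumed sketch, following the README above; the exact FastDeploy API surface
# (model method vs. preprocessor methods) may vary by version.
import fastdeploy as fd

runtime_option = fd.RuntimeOption()
# Configure the RKNPU2 backend on runtime_option here (see infer.py in this directory).

model = fd.vision.detection.PPYOLOE(
    "./ppyoloe_plus_crn_s_80e_coco/ppyoloe_plus_crn_s_80e_coco_rk3588_quantized.rknn",
    "",  # RKNN models carry their weights, so no separate params file
    "./ppyoloe_plus_crn_s_80e_coco/infer_cfg.yml",
    runtime_option=runtime_option,
    model_format=fd.ModelFormat.RKNN)

# Normalization and HWC->CHW permute are embedded in the RKNN model,
# so disable them in host-side preprocessing (method name as given in the README).
model.disable_normalize_and_permute()
```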
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
import os
import fastdeploy as fd
# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
# You must parse model_config. JSON string is not parsed here
self.model_config = json.loads(args['model_config'])
print("model_config:", self.model_config)
self.input_names = []
for input_config in self.model_config["input"]:
self.input_names.append(input_config["name"])
print("preprocess input names:", self.input_names)
self.output_names = []
self.output_dtype = []
for output_config in self.model_config["output"]:
self.output_names.append(output_config["name"])
# dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
# self.output_dtype.append(dtype)
self.output_dtype.append(output_config["data_type"])
print("preprocess output names:", self.output_names)
        # init PaddleDetPreprocessor class
yaml_path = os.path.abspath(os.path.dirname(
__file__)) + "/infer_cfg.yml"
self.preprocess_ = fd.vision.detection.PaddleDetPreprocessor(yaml_path)
def execute(self, requests):
"""`execute` must be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference is requested
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
Python model, must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse.
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
responses = []
for request in requests:
data = pb_utils.get_input_tensor_by_name(request,
self.input_names[0])
data = data.as_numpy()
outputs = self.preprocess_.run(data)
output_tensors = []
for idx, name in enumerate(self.output_names):
dlpack_tensor = outputs[idx].to_dlpack()
output_tensor = pb_utils.Tensor.from_dlpack(name,
dlpack_tensor)
output_tensors.append(output_tensor)
inference_response = pb_utils.InferenceResponse(
output_tensors=output_tensors)
responses.append(inference_response)
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is optional. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
| PaddleDetection/deploy/fastdeploy/serving/models/preprocess/1/model.py/0 | {
"file_path": "PaddleDetection/deploy/fastdeploy/serving/models/preprocess/1/model.py",
"repo_id": "PaddleDetection",
"token_count": 1927
} | 61 |
import yaml
import json
import sys
yamlf = sys.argv[1]
assert yamlf.endswith(".yml")
with open(yamlf, 'r') as rf:
yaml_data = yaml.safe_load(rf)
jsonf = yamlf[:-4] + ".json"
with open(jsonf, 'w') as wf:
json.dump(yaml_data, wf, indent=4)
| PaddleDetection/deploy/lite/convert_yml_to_json.py/0 | {
"file_path": "PaddleDetection/deploy/lite/convert_yml_to_json.py",
"repo_id": "PaddleDetection",
"token_count": 122
} | 62 |
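A brief usage note for the converter above (paths are assumed for illustration): the script writes an `infer_cfg.json` next to the input YAML, which can then be read without a YAML parser.

```python
# Assumed usage:
#   python convert_yml_to_json.py output_inference/ppyoloe_crn_l_300e_coco/infer_cfg.yml
# which writes infer_cfg.json next to the YAML file.
import json

with open("output_inference/ppyoloe_crn_l_300e_coco/infer_cfg.json") as f:
    cfg = json.load(f)

# Typical keys found in PaddleDetection deployment configs.
print(cfg.get("arch"), len(cfg.get("Preprocess", [])), len(cfg.get("label_list", [])))
```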
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include <thread>
#include <vector>
#include "include/preprocess_op.h"
namespace PaddleDetection {
void InitInfo::Run(cv::Mat* im, ImageBlob* data) {
data->im_shape_ = {static_cast<float>(im->rows),
static_cast<float>(im->cols)};
data->scale_factor_ = {1., 1.};
data->in_net_shape_ = {static_cast<float>(im->rows),
static_cast<float>(im->cols)};
}
void NormalizeImage::Run(cv::Mat* im, ImageBlob* data) {
double e = 1.0;
if (is_scale_) {
e *= 1./255.0;
}
(*im).convertTo(*im, CV_32FC3, e);
for (int h = 0; h < im->rows; h++) {
for (int w = 0; w < im->cols; w++) {
im->at<cv::Vec3f>(h, w)[0] =
(im->at<cv::Vec3f>(h, w)[0] - mean_[0]) / scale_[0];
im->at<cv::Vec3f>(h, w)[1] =
(im->at<cv::Vec3f>(h, w)[1] - mean_[1]) / scale_[1];
im->at<cv::Vec3f>(h, w)[2] =
(im->at<cv::Vec3f>(h, w)[2] - mean_[2]) / scale_[2];
}
}
}
void Permute::Run(cv::Mat* im, ImageBlob* data) {
(*im).convertTo(*im, CV_32FC3);
int rh = im->rows;
int rw = im->cols;
int rc = im->channels();
(data->im_data_).resize(rc * rh * rw);
float* base = (data->im_data_).data();
for (int i = 0; i < rc; ++i) {
cv::extractChannel(*im, cv::Mat(rh, rw, CV_32FC1, base + i * rh * rw), i);
}
}
void Resize::Run(cv::Mat* im, ImageBlob* data) {
auto resize_scale = GenerateScale(*im);
data->im_shape_ = {static_cast<float>(im->cols * resize_scale.first),
static_cast<float>(im->rows * resize_scale.second)};
data->in_net_shape_ = {static_cast<float>(im->cols * resize_scale.first),
static_cast<float>(im->rows * resize_scale.second)};
cv::resize(
*im, *im, cv::Size(), resize_scale.first, resize_scale.second, interp_);
data->im_shape_ = {
static_cast<float>(im->rows), static_cast<float>(im->cols),
};
data->scale_factor_ = {
resize_scale.second, resize_scale.first,
};
}
std::pair<float, float> Resize::GenerateScale(const cv::Mat& im) {
std::pair<float, float> resize_scale;
int origin_w = im.cols;
int origin_h = im.rows;
if (keep_ratio_) {
int im_size_max = std::max(origin_w, origin_h);
int im_size_min = std::min(origin_w, origin_h);
int target_size_max =
*std::max_element(target_size_.begin(), target_size_.end());
int target_size_min =
*std::min_element(target_size_.begin(), target_size_.end());
float scale_min =
static_cast<float>(target_size_min) / static_cast<float>(im_size_min);
float scale_max =
static_cast<float>(target_size_max) / static_cast<float>(im_size_max);
float scale_ratio = std::min(scale_min, scale_max);
resize_scale = {scale_ratio, scale_ratio};
} else {
resize_scale.first =
static_cast<float>(target_size_[1]) / static_cast<float>(origin_w);
resize_scale.second =
static_cast<float>(target_size_[0]) / static_cast<float>(origin_h);
}
return resize_scale;
}
void PadStride::Run(cv::Mat* im, ImageBlob* data) {
if (stride_ <= 0) {
return;
}
int rc = im->channels();
int rh = im->rows;
int rw = im->cols;
int nh = (rh / stride_) * stride_ + (rh % stride_ != 0) * stride_;
int nw = (rw / stride_) * stride_ + (rw % stride_ != 0) * stride_;
cv::copyMakeBorder(
*im, *im, 0, nh - rh, 0, nw - rw, cv::BORDER_CONSTANT, cv::Scalar(0));
data->in_net_shape_ = {
static_cast<float>(im->rows), static_cast<float>(im->cols),
};
}
void TopDownEvalAffine::Run(cv::Mat* im, ImageBlob* data) {
cv::resize(*im, *im, cv::Size(trainsize_[0], trainsize_[1]), 0, 0, interp_);
// todo: Simd::ResizeBilinear();
data->in_net_shape_ = {
static_cast<float>(trainsize_[1]), static_cast<float>(trainsize_[0]),
};
}
// Preprocessor op running order
const std::vector<std::string> Preprocessor::RUN_ORDER = {"InitInfo",
"TopDownEvalAffine",
"Resize",
"NormalizeImage",
"PadStride",
"Permute"};
void Preprocessor::Run(cv::Mat* im, ImageBlob* data) {
for (const auto& name : RUN_ORDER) {
if (ops_.find(name) != ops_.end()) {
ops_[name]->Run(im, data);
}
}
}
void CropImg(cv::Mat& img,
cv::Mat& crop_img,
std::vector<int>& area,
std::vector<float>& center,
std::vector<float>& scale,
float expandratio) {
int crop_x1 = std::max(0, area[0]);
int crop_y1 = std::max(0, area[1]);
int crop_x2 = std::min(img.cols - 1, area[2]);
int crop_y2 = std::min(img.rows - 1, area[3]);
int center_x = (crop_x1 + crop_x2) / 2.;
int center_y = (crop_y1 + crop_y2) / 2.;
int half_h = (crop_y2 - crop_y1) / 2.;
int half_w = (crop_x2 - crop_x1) / 2.;
if (half_h * 3 > half_w * 4) {
half_w = static_cast<int>(half_h * 0.75);
} else {
half_h = static_cast<int>(half_w * 4 / 3);
}
crop_x1 =
std::max(0, center_x - static_cast<int>(half_w * (1 + expandratio)));
crop_y1 =
std::max(0, center_y - static_cast<int>(half_h * (1 + expandratio)));
crop_x2 = std::min(img.cols - 1,
static_cast<int>(center_x + half_w * (1 + expandratio)));
crop_y2 = std::min(img.rows - 1,
static_cast<int>(center_y + half_h * (1 + expandratio)));
crop_img =
img(cv::Range(crop_y1, crop_y2 + 1), cv::Range(crop_x1, crop_x2 + 1));
center.clear();
center.emplace_back((crop_x1 + crop_x2) / 2);
center.emplace_back((crop_y1 + crop_y2) / 2);
scale.clear();
scale.emplace_back((crop_x2 - crop_x1));
scale.emplace_back((crop_y2 - crop_y1));
}
} // namespace PaddleDetection
| PaddleDetection/deploy/lite/src/preprocess_op.cc/0 | {
"file_path": "PaddleDetection/deploy/lite/src/preprocess_op.cc",
"repo_id": "PaddleDetection",
"token_count": 3045
} | 63 |
import ast
import yaml
import copy
import argparse
from argparse import ArgumentParser, RawDescriptionHelpFormatter
class ArgsParser(ArgumentParser):
def __init__(self):
super(ArgsParser, self).__init__(
formatter_class=RawDescriptionHelpFormatter)
self.add_argument(
"-o", "--opt", nargs='*', help="set configuration options")
def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
assert args.config is not None, \
"Please specify --config=configure_file_path."
args.opt = self._parse_opt(args.opt)
return args
def _parse_opt(self, opts):
config = {}
if not opts:
return config
for s in opts:
s = s.strip()
k, v = s.split('=', 1)
if '.' not in k:
config[k] = yaml.load(v, Loader=yaml.Loader)
else:
keys = k.split('.')
if keys[0] not in config:
config[keys[0]] = {}
cur = config[keys[0]]
for idx, key in enumerate(keys[1:]):
if idx == len(keys) - 2:
cur[key] = yaml.load(v, Loader=yaml.Loader)
else:
cur[key] = {}
cur = cur[key]
return config
def argsparser():
parser = ArgsParser()
parser.add_argument(
"--config",
type=str,
default=None,
help=("Path of configure"),
required=True)
parser.add_argument(
"--image_file", type=str, default=None, help="Path of image file.")
parser.add_argument(
"--image_dir",
type=str,
default=None,
help="Dir of image file, `image_file` has a higher priority.")
parser.add_argument(
"--video_file",
type=str,
default=None,
help="Path of video file, `video_file` or `camera_id` has a highest priority."
)
parser.add_argument(
"--video_dir",
type=str,
default=None,
help="Dir of video file, `video_file` has a higher priority.")
parser.add_argument(
"--rtsp",
type=str,
nargs='+',
default=None,
help="list of rtsp inputs, for one or multiple rtsp input.")
parser.add_argument(
"--camera_id",
type=int,
default=-1,
help="device id of camera to predict.")
parser.add_argument(
"--output_dir",
type=str,
default="output",
help="Directory of output visualization files.")
parser.add_argument(
"--pushurl",
type=str,
default="",
help="url of output visualization stream.")
parser.add_argument(
"--run_mode",
type=str,
default='paddle',
help="mode of running(paddle/trt_fp32/trt_fp16/trt_int8)")
parser.add_argument(
"--device",
type=str,
default='cpu',
help="Choose the device you want to run, it can be: CPU/GPU/XPU, default is CPU."
)
parser.add_argument(
"--enable_mkldnn",
type=ast.literal_eval,
default=False,
help="Whether use mkldnn with CPU.")
parser.add_argument(
"--cpu_threads", type=int, default=1, help="Num of threads with CPU.")
parser.add_argument(
"--trt_min_shape", type=int, default=1, help="min_shape for TensorRT.")
parser.add_argument(
"--trt_max_shape",
type=int,
default=1280,
help="max_shape for TensorRT.")
parser.add_argument(
"--trt_opt_shape",
type=int,
default=640,
help="opt_shape for TensorRT.")
parser.add_argument(
"--trt_calib_mode",
type=bool,
default=False,
help="If the model is produced by TRT offline quantitative "
"calibration, trt_calib_mode need to set True.")
parser.add_argument(
"--do_entrance_counting",
action='store_true',
help="Whether counting the numbers of identifiers entering "
"or getting out from the entrance. Note that only support single-class MOT."
)
parser.add_argument(
"--do_break_in_counting",
action='store_true',
help="Whether counting the numbers of identifiers break in "
"the area. Note that only support single-class MOT and "
"the video should be taken by a static camera.")
parser.add_argument(
"--illegal_parking_time",
type=int,
default=-1,
help="illegal parking time which units are seconds, default is -1 which means not recognition illegal parking"
)
parser.add_argument(
"--region_type",
type=str,
default='horizontal',
help="Area type for entrance counting or break in counting, 'horizontal' and "
"'vertical' used when do entrance counting. 'custom' used when do break in counting. "
"Note that only support single-class MOT, and the video should be taken by a static camera."
)
parser.add_argument(
'--region_polygon',
nargs='+',
type=int,
default=[],
help="Clockwise point coords (x0,y0,x1,y1...) of polygon of area when "
"do_break_in_counting. Note that only support single-class MOT and "
"the video should be taken by a static camera.")
parser.add_argument(
"--secs_interval",
type=int,
default=2,
help="The seconds interval to count after tracking")
parser.add_argument(
"--draw_center_traj",
action='store_true',
help="Whether drawing the trajectory of center")
return parser
def merge_cfg(args):
# load config
with open(args.config) as f:
pred_config = yaml.safe_load(f)
def merge(cfg, arg):
# update cfg from arg directly
merge_cfg = copy.deepcopy(cfg)
for k, v in cfg.items():
if k in arg:
merge_cfg[k] = arg[k]
else:
if isinstance(v, dict):
merge_cfg[k] = merge(v, arg)
return merge_cfg
def merge_opt(cfg, arg):
merge_cfg = copy.deepcopy(cfg)
# merge opt
if 'opt' in arg.keys() and arg['opt']:
for name, value in arg['opt'].items(
): # example: {'MOT': {'batch_size': 3}}
if name not in merge_cfg.keys():
print("No", name, "in config file!")
continue
for sub_k, sub_v in value.items():
if sub_k not in merge_cfg[name].keys():
print("No", sub_k, "in config file of", name, "!")
continue
merge_cfg[name][sub_k] = sub_v
return merge_cfg
args_dict = vars(args)
pred_config = merge(pred_config, args_dict)
pred_config = merge_opt(pred_config, args_dict)
return pred_config
def print_arguments(cfg):
print('----------- Running Arguments -----------')
buffer = yaml.dump(cfg)
print(buffer)
print('------------------------------------------')
| PaddleDetection/deploy/pipeline/cfg_utils.py/0 | {
"file_path": "PaddleDetection/deploy/pipeline/cfg_utils.py",
"repo_id": "PaddleDetection",
"token_count": 3348
} | 64 |
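To illustrate how `ArgsParser._parse_opt` above expands dotted `-o` options into nested dictionaries, here is a small usage sketch (assuming the module is importable as `cfg_utils` and the config path exists in the repository):

```python
# Usage sketch: dotted "-o" keys become nested dicts on args.opt.
from cfg_utils import argsparser

args = argsparser().parse_args([
    "--config", "deploy/pipeline/config/infer_cfg_pphuman.yml",
    "-o", "MOT.batch_size=3", "visual=False",
])
print(args.opt)  # {'MOT': {'batch_size': 3}, 'visual': False}
```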
from typing import Optional
from pgvector.sqlalchemy import Vector
from sqlalchemy import Column
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
class Base(DeclarativeBase):
pass
class Frame(Base):
__tablename__ = "frame"
id: Mapped[int] = mapped_column(primary_key=True)
frame_id: Mapped[int]
trace_id: Mapped[str]
crop_image: Mapped[str]
feature = mapped_column(Vector(256))
    quality: Mapped[float]
class Trace(Base):
__tablename__ = "trace"
trace_id: Mapped[str] = mapped_column(primary_key=True)
reid: Mapped[Optional[int]]
best_crop_image: Mapped[str]
frame_range = Column(JSON)
mean_feature = mapped_column(Vector(256))
class InOutRecord(Base):
__tablename__ = "in_out_record"
record_id: Mapped[int] = mapped_column(primary_key=True)
trace_id: Mapped[str]
action: Mapped[str]
frame: Mapped[int]
| PaddleDetection/deploy/pipeline/database_model.py/0 | {
"file_path": "PaddleDetection/deploy/pipeline/database_model.py",
"repo_id": "PaddleDetection",
"token_count": 364
} | 65 |
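A minimal usage sketch for the ORM models above, assuming a local PostgreSQL instance with the pgvector extension enabled; the DSN and file paths are placeholders:

```python
# Minimal sketch: create the tables and insert one Frame row.
import numpy as np
from sqlalchemy import create_engine
from sqlalchemy.orm import Session

from database_model import Base, Frame  # assumes the module above is importable

engine = create_engine("postgresql+psycopg2://user:pass@localhost/ppvehicle")  # placeholder DSN
Base.metadata.create_all(engine)  # requires CREATE EXTENSION vector on the database

with Session(engine) as session:
    session.add(Frame(
        frame_id=0,
        trace_id="trace-0001",
        crop_image="crops/trace-0001_0.jpg",
        feature=np.random.rand(256).tolist(),  # 256-d embedding matching Vector(256)
        quality=0.9,
    ))
    session.commit()
```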
English | [简体中文](ppvehicle_attribute.md)
# Attribute Recognition Module of PP-Vehicle
Vehicle attribute recognition is widely used in smart cities, smart transportation and other scenarios. In PP-Vehicle, a vehicle attribute recognition module is integrated, which can identify vehicle color and model.
| Task | Algorithm | Precision | Inference Speed | Download |
|-----------|------|-----------|----------|---------------------|
| Vehicle Detection/Tracking | PP-YOLOE | mAP 63.9 | 38.67ms | [Inference and Deployment Model](https://bj.bcebos.com/v1/paddledet/models/pipeline/mot_ppyoloe_l_36e_ppvehicle.zip) |
| Vehicle Attribute Recognition | PPLCNet | 90.81 | 7.31 ms | [Inference and Deployment Model](https://bj.bcebos.com/v1/paddledet/models/pipeline/vehicle_attribute_model.zip) |
Note:
1. The inference speed of the attribute model was measured on an NVIDIA T4 with TensorRT FP16. The time includes data pre-processing, model inference, and post-processing.
2. For an introduction to the model, please refer to the [PP-LCNet Series](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/en/models/PP-LCNet_en.md); the related PP-LCNet paper is also available.
3. The vehicle attribute recognition model is both trained and tested on the [VeRi dataset](https://www.v7labs.com/open-datasets/veri-dataset).
- The provided pre-trained model supports 10 colors and 9 vehicle models, matching the VeRi dataset. The details are as follows:
```yaml
# Vehicle Colors
- "yellow"
- "orange"
- "green"
- "gray"
- "red"
- "blue"
- "white"
- "golden"
- "brown"
- "black"
# Vehicle Models
- "sedan"
- "suv"
- "van"
- "hatchback"
- "mpv"
- "pickup"
- "bus"
- "truck"
- "estate"
```
## Instructions
### Description of Configuration
Parameters related to vehicle attribute recognition in the [config file](../../config/infer_cfg_ppvehicle.yml) are as follows:
```yaml
VEHICLE_ATTR:
model_dir: output_inference/vehicle_attribute_infer/ # Path of the model
batch_size: 8 # The size of the inference batch
  color_threshold: 0.5     # Threshold of color. The confidence must reach this threshold to determine the specific attribute, otherwise it will be 'Unknown'.
  type_threshold: 0.5      # Threshold of vehicle model. The confidence must reach this threshold to determine the specific attribute, otherwise it will be 'Unknown'.
enable: False # Whether to enable this function
```
### How to Use
1. Download models `Vehicle Detection/Tracking` and `Vehicle Attribute Recognition` from the links in `Model Zoo` and unzip them to ```./output_inference```. The models are automatically downloaded by default. If you download them manually, you need to modify the `model_dir` as the model storage path to use this function.
2. Set the "enable: True" of `VEHICLE_ATTR` in infer_cfg_ppvehicle.yml.
3. For image input, please run these commands. (For a description of more parameters, please refer to [QUICK_STARTED - Parameter_Description](./PPVehicle_QUICK_STARTED.md).)
```bash
# For single image
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
--image_file=test_image.jpg \
--device=gpu
# For folder contains one or multiple images
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
--image_dir=images/ \
--device=gpu
```
4. For video input, please run these commands.
```bash
# For single video
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
--video_file=test_video.mp4 \
--device=gpu
# For folder contains one or multiple videos
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
--video_dir=test_videos/ \
--device=gpu
```
5. There are two ways to modify the model path:
- Method 1:Set paths of each model in `./deploy/pipeline/config/infer_cfg_ppvehicle.yml`. For vehicle attribute recognition, the path should be modified under the `VEHICLE_ATTR` field.
- Method 2: Directly add `-o` in command line to override the default model path in the configuration file:
```bash
python deploy/pipeline/pipeline.py --config deploy/pipeline/config/infer_cfg_ppvehicle.yml \
--video_file=test_video.mp4 \
--device=gpu \
-o VEHICLE_ATTR.model_dir=output_inference/vehicle_attribute_infer
```
The result is shown as follow:
<div width="600" align="center">
<img src="https://user-images.githubusercontent.com/22989727/205599146-56abd72f-6e0a-4a21-bd11-f8bb421f2887.gif"/>
</div>
### Features of the Solution
The vehicle attribute recognition model adopts PULC, Practical Ultra Lightweight image Classification from [PaddleClas](https://github.com/PaddlePaddle/PaddleClas). For details on data preparation, training, and testing of the model, please refer to [PULC Recognition Model of Vehicle Attribute](https://github.com/PaddlePaddle/PaddleClas/blob/release/2.4/docs/en/PULC/PULC_vehicle_attribute_en.md).
The vehicle attribute recognition model adopts the lightweight and high-precision PPLCNet. On top of PPLCNet, the model is further optimized via:
- About 0.5 percentage points of accuracy gained by using the SSLD pre-trained model, without changing the inference speed.
- A further 0.52 percentage points gained by integrating the EDA data augmentation strategy.
- Another 0.23 percentage points gained by using SKL-UGI knowledge distillation.
| PaddleDetection/deploy/pipeline/docs/tutorials/ppvehicle_attribute_en.md/0 | {
"file_path": "PaddleDetection/deploy/pipeline/docs/tutorials/ppvehicle_attribute_en.md",
"repo_id": "PaddleDetection",
"token_count": 2228
} | 66 |
import os
import sys
import cv2
import numpy as np
import argparse
def argsparser():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--video_file",
type=str,
default=None,
help="Path of video file, `video_file` or `camera_id` has a highest priority."
)
parser.add_argument(
'--region_polygon',
nargs='+',
type=int,
default=[],
help="Clockwise point coords (x0,y0,x1,y1...) of polygon of area when "
"do_break_in_counting. Note that only support single-class MOT and "
"the video should be taken by a static camera.")
return parser
def get_video_info(video_file, region_polygon):
entrance = []
assert len(region_polygon
) % 2 == 0, "region_polygon should be pairs of coords points."
for i in range(0, len(region_polygon), 2):
entrance.append([region_polygon[i], region_polygon[i + 1]])
if not os.path.exists(video_file):
print("video path '{}' not exists".format(video_file))
sys.exit(-1)
capture = cv2.VideoCapture(video_file)
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
print("video width: %d, height: %d" % (width, height))
np_masks = np.zeros((height, width, 1), np.uint8)
entrance = np.array(entrance)
cv2.fillPoly(np_masks, [entrance], 255)
fps = int(capture.get(cv2.CAP_PROP_FPS))
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print("video fps: %d, frame_count: %d" % (fps, frame_count))
cnt = 0
    while True:
        ret, frame = capture.read()
        cnt += 1
        if cnt == 3:
            break
alpha = 0.3
img = np.array(frame).astype('float32')
mask = np_masks[:, :, 0]
color_mask = [0, 0, 255]
idx = np.nonzero(mask)
color_mask = np.array(color_mask)
img[idx[0], idx[1], :] *= 1.0 - alpha
img[idx[0], idx[1], :] += alpha * color_mask
cv2.imwrite('region_vis.jpg', img)
if __name__ == "__main__":
parser = argsparser()
FLAGS = parser.parse_args()
get_video_info(FLAGS.video_file, FLAGS.region_polygon)
# python get_video_info.py --video_file=demo.mp4 --region_polygon 200 200 400 200 300 400 100 400
| PaddleDetection/deploy/pipeline/tools/get_video_info.py/0 | {
"file_path": "PaddleDetection/deploy/pipeline/tools/get_video_info.py",
"repo_id": "PaddleDetection",
"token_count": 1003
} | 67 |
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <ctime>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "paddle_inference_api.h" // NOLINT
#include "include/config_parser.h"
#include "include/preprocess_op.h"
#include "include/utils.h"
using namespace paddle_infer; // NOLINT
namespace PaddleDetection {
class SDEPredictor {
public:
explicit SDEPredictor(const std::string& device,
const std::string& det_model_dir = "",
const std::string& reid_model_dir = "",
const double threshold = -1.,
const std::string& run_mode = "paddle",
const int gpu_id = 0,
const bool use_mkldnn = false,
const int cpu_threads = 1,
bool trt_calib_mode = false,
const int min_box_area = 200) {
this->device_ = device;
this->gpu_id_ = gpu_id;
this->use_mkldnn_ = use_mkldnn;
this->cpu_math_library_num_threads_ = cpu_threads;
this->trt_calib_mode_ = trt_calib_mode;
this->min_box_area_ = min_box_area;
det_config_.load_config(det_model_dir);
this->min_subgraph_size_ = det_config_.min_subgraph_size_;
det_preprocessor_.Init(det_config_.preprocess_info_);
reid_config_.load_config(reid_model_dir);
reid_preprocessor_.Init(reid_config_.preprocess_info_);
LoadModel(det_model_dir, reid_model_dir, run_mode);
this->conf_thresh_ = det_config_.conf_thresh_;
}
// Load Paddle inference model
void LoadModel(const std::string& det_model_dir,
const std::string& reid_model_dir,
const std::string& run_mode = "paddle");
// Run predictor
void Predict(const std::vector<cv::Mat> imgs,
const double threshold = 0.5,
MOTResult* result = nullptr,
std::vector<double>* times = nullptr);
private:
std::string device_ = "CPU";
float threhold = 0.5;
int gpu_id_ = 0;
bool use_mkldnn_ = false;
int cpu_math_library_num_threads_ = 1;
int min_subgraph_size_ = 3;
bool trt_calib_mode_ = false;
// Preprocess image and copy data to input buffer
void Preprocess(const cv::Mat& image_mat);
// Postprocess result
void Postprocess(const cv::Mat dets, const cv::Mat emb, MOTResult* result);
std::shared_ptr<Predictor> det_predictor_;
std::shared_ptr<Predictor> reid_predictor_;
Preprocessor det_preprocessor_;
Preprocessor reid_preprocessor_;
ImageBlob inputs_;
std::vector<float> bbox_data_;
std::vector<float> emb_data_;
double threshold_;
ConfigPaser det_config_;
ConfigPaser reid_config_;
float min_box_area_ = 200;
float conf_thresh_;
};
} // namespace PaddleDetection
| PaddleDetection/deploy/pptracking/cpp/include/sde_predictor.h/0 | {
"file_path": "PaddleDetection/deploy/pptracking/cpp/include/sde_predictor.h",
"repo_id": "PaddleDetection",
"token_count": 1423
} | 68 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/noahcao/OC_SORT/blob/master/trackers/ocsort_tracker/association.py
"""
import os
import numpy as np
def iou_batch(bboxes1, bboxes2):
"""
From SORT: Computes IOU between two bboxes in the form [x1,y1,x2,y2]
"""
bboxes2 = np.expand_dims(bboxes2, 0)
bboxes1 = np.expand_dims(bboxes1, 1)
xx1 = np.maximum(bboxes1[..., 0], bboxes2[..., 0])
yy1 = np.maximum(bboxes1[..., 1], bboxes2[..., 1])
xx2 = np.minimum(bboxes1[..., 2], bboxes2[..., 2])
yy2 = np.minimum(bboxes1[..., 3], bboxes2[..., 3])
w = np.maximum(0., xx2 - xx1)
h = np.maximum(0., yy2 - yy1)
wh = w * h
o = wh / ((bboxes1[..., 2] - bboxes1[..., 0]) *
(bboxes1[..., 3] - bboxes1[..., 1]) +
(bboxes2[..., 2] - bboxes2[..., 0]) *
(bboxes2[..., 3] - bboxes2[..., 1]) - wh)
return (o)
def speed_direction_batch(dets, tracks):
tracks = tracks[..., np.newaxis]
CX1, CY1 = (dets[:, 0] + dets[:, 2]) / 2.0, (dets[:, 1] + dets[:, 3]) / 2.0
CX2, CY2 = (tracks[:, 0] + tracks[:, 2]) / 2.0, (
tracks[:, 1] + tracks[:, 3]) / 2.0
dx = CX1 - CX2
dy = CY1 - CY2
norm = np.sqrt(dx**2 + dy**2) + 1e-6
dx = dx / norm
dy = dy / norm
return dy, dx # size: num_track x num_det
def linear_assignment(cost_matrix):
try:
import lap
_, x, y = lap.lapjv(cost_matrix, extend_cost=True)
match = np.array([[y[i], i] for i in x if i >= 0])
return match
except ImportError:
from scipy.optimize import linear_sum_assignment
x, y = linear_sum_assignment(cost_matrix)
return np.array(list(zip(x, y)))
def associate(detections, trackers, iou_threshold, velocities, previous_obs,
vdc_weight):
if (len(trackers) == 0):
return np.empty(
(0, 2), dtype=int), np.arange(len(detections)), np.empty(
(0, 5), dtype=int)
Y, X = speed_direction_batch(detections, previous_obs)
inertia_Y, inertia_X = velocities[:, 0], velocities[:, 1]
inertia_Y = np.repeat(inertia_Y[:, np.newaxis], Y.shape[1], axis=1)
inertia_X = np.repeat(inertia_X[:, np.newaxis], X.shape[1], axis=1)
diff_angle_cos = inertia_X * X + inertia_Y * Y
diff_angle_cos = np.clip(diff_angle_cos, a_min=-1, a_max=1)
diff_angle = np.arccos(diff_angle_cos)
diff_angle = (np.pi / 2.0 - np.abs(diff_angle)) / np.pi
valid_mask = np.ones(previous_obs.shape[0])
valid_mask[np.where(previous_obs[:, 4] < 0)] = 0
iou_matrix = iou_batch(detections, trackers)
scores = np.repeat(
detections[:, -1][:, np.newaxis], trackers.shape[0], axis=1)
    # iou_matrix = iou_matrix * scores # a trick that sometimes works; we don't encourage this
valid_mask = np.repeat(valid_mask[:, np.newaxis], X.shape[1], axis=1)
angle_diff_cost = (valid_mask * diff_angle) * vdc_weight
angle_diff_cost = angle_diff_cost.T
angle_diff_cost = angle_diff_cost * scores
if min(iou_matrix.shape) > 0:
a = (iou_matrix > iou_threshold).astype(np.int32)
if a.sum(1).max() == 1 and a.sum(0).max() == 1:
matched_indices = np.stack(np.where(a), axis=1)
else:
matched_indices = linear_assignment(-(iou_matrix + angle_diff_cost))
else:
matched_indices = np.empty(shape=(0, 2))
unmatched_detections = []
for d, det in enumerate(detections):
if (d not in matched_indices[:, 0]):
unmatched_detections.append(d)
unmatched_trackers = []
for t, trk in enumerate(trackers):
if (t not in matched_indices[:, 1]):
unmatched_trackers.append(t)
# filter out matched with low IOU
matches = []
for m in matched_indices:
if (iou_matrix[m[0], m[1]] < iou_threshold):
unmatched_detections.append(m[0])
unmatched_trackers.append(m[1])
else:
matches.append(m.reshape(1, 2))
if (len(matches) == 0):
matches = np.empty((0, 2), dtype=int)
else:
matches = np.concatenate(matches, axis=0)
return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
def associate_only_iou(detections, trackers, iou_threshold):
if (len(trackers) == 0):
return np.empty(
(0, 2), dtype=int), np.arange(len(detections)), np.empty(
(0, 5), dtype=int)
iou_matrix = iou_batch(detections, trackers)
if min(iou_matrix.shape) > 0:
a = (iou_matrix > iou_threshold).astype(np.int32)
if a.sum(1).max() == 1 and a.sum(0).max() == 1:
matched_indices = np.stack(np.where(a), axis=1)
else:
matched_indices = linear_assignment(-iou_matrix)
else:
matched_indices = np.empty(shape=(0, 2))
unmatched_detections = []
for d, det in enumerate(detections):
if (d not in matched_indices[:, 0]):
unmatched_detections.append(d)
unmatched_trackers = []
for t, trk in enumerate(trackers):
if (t not in matched_indices[:, 1]):
unmatched_trackers.append(t)
# filter out matched with low IOU
matches = []
for m in matched_indices:
if (iou_matrix[m[0], m[1]] < iou_threshold):
unmatched_detections.append(m[0])
unmatched_trackers.append(m[1])
else:
matches.append(m.reshape(1, 2))
if (len(matches) == 0):
matches = np.empty((0, 2), dtype=int)
else:
matches = np.concatenate(matches, axis=0)
return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
| PaddleDetection/deploy/pptracking/python/mot/matching/ocsort_matching.py/0 | {
"file_path": "PaddleDetection/deploy/pptracking/python/mot/matching/ocsort_matching.py",
"repo_id": "PaddleDetection",
"token_count": 2837
} | 69 |
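A small numeric illustration of how `iou_batch` and `associate_only_iou` above behave on toy boxes in `[x1, y1, x2, y2, score]` format (assuming the module is importable as `ocsort_matching`):

```python
import numpy as np
from ocsort_matching import iou_batch, associate_only_iou  # assumes module on PYTHONPATH

dets = np.array([[10, 10, 50, 50, 0.9],
                 [60, 60, 90, 90, 0.8]], dtype=np.float32)
trks = np.array([[12, 12, 52, 52, 1.0],
                 [200, 200, 240, 240, 1.0]], dtype=np.float32)

print(iou_batch(dets, trks))  # high IoU for det 0 vs. track 0, ~0 elsewhere
matches, unmatched_dets, unmatched_trks = associate_only_iou(dets, trks, iou_threshold=0.3)
print(matches)                         # [[0 0]]
print(unmatched_dets, unmatched_trks)  # [1] [1]
```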
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
def decode_image(im_file, im_info):
"""read rgb image
Args:
im_file (str|np.ndarray): input can be image path or np.ndarray
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
if isinstance(im_file, str):
with open(im_file, 'rb') as f:
im_read = f.read()
data = np.frombuffer(im_read, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
else:
im = im_file
im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return im, im_info
class Resize(object):
"""resize image by target_size and max_size
Args:
target_size (int): the target size of image
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): method of resize
"""
def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
self.keep_ratio = keep_ratio
self.interp = interp
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, im):
"""
Args:
im (np.ndarray): image (np.ndarray)
Returns:
im_scale_x: the resize ratio of X
im_scale_y: the resize ratio of Y
"""
origin_shape = im.shape[:2]
im_c = im.shape[2]
if self.keep_ratio:
im_size_min = np.min(origin_shape)
im_size_max = np.max(origin_shape)
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = float(target_size_min) / float(im_size_min)
if np.round(im_scale * im_size_max) > target_size_max:
im_scale = float(target_size_max) / float(im_size_max)
im_scale_x = im_scale
im_scale_y = im_scale
else:
resize_h, resize_w = self.target_size
im_scale_y = resize_h / float(origin_shape[0])
im_scale_x = resize_w / float(origin_shape[1])
return im_scale_y, im_scale_x
class NormalizeImage(object):
"""normalize image
Args:
mean (list): im - mean
std (list): im / std
is_scale (bool): whether need im / 255
"""
def __init__(self, mean, std, is_scale=True):
self.mean = mean
self.std = std
self.is_scale = is_scale
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.astype(np.float32, copy=False)
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
if self.is_scale:
im = im / 255.0
im -= mean
im /= std
return im, im_info
class Permute(object):
"""permute image
Args:
to_bgr (bool): whether convert RGB to BGR
channel_first (bool): whether convert HWC to CHW
"""
def __init__(self, ):
super(Permute, self).__init__()
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.transpose((2, 0, 1)).copy()
return im, im_info
class PadStride(object):
""" padding image for model with FPN, instead PadBatch(pad_to_stride) in original config
Args:
stride (bool): model with FPN need image shape % stride == 0
"""
def __init__(self, stride=0):
self.coarsest_stride = stride
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
coarsest_stride = self.coarsest_stride
if coarsest_stride <= 0:
return im, im_info
im_c, im_h, im_w = im.shape
pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
padding_im[:, :im_h, :im_w] = im
return padding_im, im_info
class LetterBoxResize(object):
def __init__(self, target_size):
"""
Resize image to target size, convert normalized xywh to pixel xyxy
format ([x_center, y_center, width, height] -> [x0, y0, x1, y1]).
Args:
target_size (int|list): image target size.
"""
super(LetterBoxResize, self).__init__()
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
def letterbox(self, img, height, width, color=(127.5, 127.5, 127.5)):
# letterbox: resize a rectangular image to a padded rectangular
shape = img.shape[:2] # [height, width]
ratio_h = float(height) / shape[0]
ratio_w = float(width) / shape[1]
ratio = min(ratio_h, ratio_w)
new_shape = (round(shape[1] * ratio),
round(shape[0] * ratio)) # [width, height]
padw = (width - new_shape[0]) / 2
padh = (height - new_shape[1]) / 2
top, bottom = round(padh - 0.1), round(padh + 0.1)
left, right = round(padw - 0.1), round(padw + 0.1)
img = cv2.resize(
img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color) # padded rectangular
return img, ratio, padw, padh
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
height, width = self.target_size
h, w = im.shape[:2]
im, ratio, padw, padh = self.letterbox(im, height=height, width=width)
new_shape = [round(h * ratio), round(w * ratio)]
im_info['im_shape'] = np.array(new_shape, dtype=np.float32)
im_info['scale_factor'] = np.array([ratio, ratio], dtype=np.float32)
return im, im_info
class Pad(object):
def __init__(self, size, fill_value=[114.0, 114.0, 114.0]):
"""
Pad image to a specified size.
Args:
size (list[int]): image target size
fill_value (list[float]): rgb value of pad area, default (114.0, 114.0, 114.0)
"""
super(Pad, self).__init__()
if isinstance(size, int):
size = [size, size]
self.size = size
self.fill_value = fill_value
def __call__(self, im, im_info):
im_h, im_w = im.shape[:2]
h, w = self.size
if h == im_h and w == im_w:
im = im.astype(np.float32)
return im, im_info
canvas = np.ones((h, w, 3), dtype=np.float32)
canvas *= np.array(self.fill_value, dtype=np.float32)
canvas[0:im_h, 0:im_w, :] = im.astype(np.float32)
im = canvas
return im, im_info
def preprocess(im, preprocess_ops):
# process image by preprocess_ops
im_info = {
'scale_factor': np.array(
[1., 1.], dtype=np.float32),
'im_shape': None,
}
im, im_info = decode_image(im, im_info)
for operator in preprocess_ops:
im, im_info = operator(im, im_info)
return im, im_info
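# A minimal usage sketch (illustrative, not part of the original module): the operator
# settings and the image path below are placeholders; real values come from the exported
# model's infer_cfg.yml.
if __name__ == '__main__':
    example_ops = [
        Resize(target_size=[640, 640], keep_ratio=True),
        NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
            is_scale=True),
        Permute(),
        PadStride(stride=32),
    ]
    example_im, example_info = preprocess('demo.jpg', example_ops)
    print(example_im.shape, example_info['im_shape'], example_info['scale_factor'])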
| PaddleDetection/deploy/pptracking/python/preprocess.py/0 | {
"file_path": "PaddleDetection/deploy/pptracking/python/preprocess.py",
"repo_id": "PaddleDetection",
"token_count": 4710
} | 70 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import imgaug.augmenters as iaa
from keypoint_preprocess import get_affine_transform
from PIL import Image
def decode_image(im_file, im_info):
"""read rgb image
Args:
im_file (str|np.ndarray): input can be image path or np.ndarray
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
if isinstance(im_file, str):
with open(im_file, 'rb') as f:
im_read = f.read()
data = np.frombuffer(im_read, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
else:
im = im_file
im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return im, im_info
class Resize_Mult32(object):
"""resize image by target_size and max_size
Args:
target_size (int): the target size of image
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): method of resize
"""
def __init__(self, limit_side_len, limit_type, interp=cv2.INTER_LINEAR):
self.limit_side_len = limit_side_len
self.limit_type = limit_type
self.interp = interp
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, img):
"""
Args:
img (np.ndarray): image (np.ndarray)
Returns:
im_scale_x: the resize ratio of X
im_scale_y: the resize ratio of Y
"""
limit_side_len = self.limit_side_len
h, w, c = img.shape
# limit the max side
if self.limit_type == 'max':
if h > w:
ratio = float(limit_side_len) / h
else:
ratio = float(limit_side_len) / w
elif self.limit_type == 'min':
if h < w:
ratio = float(limit_side_len) / h
else:
ratio = float(limit_side_len) / w
elif self.limit_type == 'resize_long':
ratio = float(limit_side_len) / max(h, w)
else:
            raise Exception('unsupported limit_type: {}, expected max/min/resize_long'.
                            format(self.limit_type))
resize_h = int(h * ratio)
resize_w = int(w * ratio)
resize_h = max(int(round(resize_h / 32) * 32), 32)
resize_w = max(int(round(resize_w / 32) * 32), 32)
im_scale_y = resize_h / float(h)
im_scale_x = resize_w / float(w)
return im_scale_y, im_scale_x
class Resize(object):
"""resize image by target_size and max_size
Args:
target_size (int): the target size of image
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): method of resize
"""
def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
self.keep_ratio = keep_ratio
self.interp = interp
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, im):
"""
Args:
im (np.ndarray): image (np.ndarray)
Returns:
im_scale_x: the resize ratio of X
im_scale_y: the resize ratio of Y
"""
origin_shape = im.shape[:2]
im_c = im.shape[2]
if self.keep_ratio:
im_size_min = np.min(origin_shape)
im_size_max = np.max(origin_shape)
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = float(target_size_min) / float(im_size_min)
if np.round(im_scale * im_size_max) > target_size_max:
im_scale = float(target_size_max) / float(im_size_max)
im_scale_x = im_scale
im_scale_y = im_scale
else:
resize_h, resize_w = self.target_size
im_scale_y = resize_h / float(origin_shape[0])
im_scale_x = resize_w / float(origin_shape[1])
return im_scale_y, im_scale_x
class ShortSizeScale(object):
"""
Scale images by short size.
Args:
short_size(float | int): Short size of an image will be scaled to the short_size.
fixed_ratio(bool): Set whether to zoom according to a fixed ratio. default: True
do_round(bool): Whether to round up when calculating the zoom ratio. default: False
backend(str): Choose pillow or cv2 as the graphics processing backend. default: 'pillow'
"""
def __init__(self,
short_size,
fixed_ratio=True,
keep_ratio=None,
do_round=False,
backend='pillow'):
self.short_size = short_size
assert (fixed_ratio and not keep_ratio) or (
not fixed_ratio
), "fixed_ratio and keep_ratio cannot be true at the same time"
self.fixed_ratio = fixed_ratio
self.keep_ratio = keep_ratio
self.do_round = do_round
assert backend in [
'pillow', 'cv2'
], "Scale's backend must be pillow or cv2, but get {backend}"
self.backend = backend
def __call__(self, img):
"""
Performs resize operations.
Args:
img (PIL.Image): a PIL.Image.
return:
resized_img: a PIL.Image after scaling.
"""
result_img = None
if isinstance(img, np.ndarray):
h, w, _ = img.shape
elif isinstance(img, Image.Image):
w, h = img.size
else:
raise NotImplementedError
        if w <= h:
            ow = self.short_size
            if self.fixed_ratio:  # default is True
                oh = int(self.short_size * 4.0 / 3.0)
            elif not self.keep_ratio:  # no
                oh = self.short_size
            else:
                # keep aspect ratio: the short side (w) is scaled to short_size
                scale_factor = self.short_size / w
                oh = int(h * float(scale_factor) +
                         0.5) if self.do_round else int(h * self.short_size / w)
                ow = int(w * float(scale_factor) +
                         0.5) if self.do_round else self.short_size
        else:
            oh = self.short_size
            if self.fixed_ratio:
                ow = int(self.short_size * 4.0 / 3.0)
            elif not self.keep_ratio:  # no
                ow = self.short_size
            else:
                # keep aspect ratio: the short side (h) is scaled to short_size
                scale_factor = self.short_size / h
                oh = int(h * float(scale_factor) +
                         0.5) if self.do_round else self.short_size
                ow = int(w * float(scale_factor) +
                         0.5) if self.do_round else int(w * self.short_size / h)
if type(img) == np.ndarray:
img = Image.fromarray(img, mode='RGB')
if self.backend == 'pillow':
result_img = img.resize((ow, oh), Image.BILINEAR)
elif self.backend == 'cv2' and (self.keep_ratio is not None):
result_img = cv2.resize(
img, (ow, oh), interpolation=cv2.INTER_LINEAR)
else:
result_img = Image.fromarray(
cv2.resize(
np.asarray(img), (ow, oh), interpolation=cv2.INTER_LINEAR))
return result_img
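# Illustrative output sizes for ShortSizeScale(short_size=224) on a 1280x720 (WxH) frame,
# assuming do_round=False (for explanation only, not part of the original module):
#   fixed_ratio=True                    -> 298 x 224 (long side fixed to 4/3 * short_size)
#   fixed_ratio=False, keep_ratio=True  -> 398 x 224 (aspect ratio preserved)
#   fixed_ratio=False, keep_ratio=False -> 224 x 224 (both sides set to short_size)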
class NormalizeImage(object):
"""normalize image
Args:
mean (list): im - mean
std (list): im / std
is_scale (bool): whether need im / 255
norm_type (str): type in ['mean_std', 'none']
"""
def __init__(self, mean, std, is_scale=True, norm_type='mean_std'):
self.mean = mean
self.std = std
self.is_scale = is_scale
self.norm_type = norm_type
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.astype(np.float32, copy=False)
if self.is_scale:
scale = 1.0 / 255.0
im *= scale
if self.norm_type == 'mean_std':
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
im -= mean
im /= std
return im, im_info
class Permute(object):
"""permute image
Args:
to_bgr (bool): whether convert RGB to BGR
channel_first (bool): whether convert HWC to CHW
"""
def __init__(self, ):
super(Permute, self).__init__()
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.transpose((2, 0, 1)).copy()
return im, im_info
class PadStride(object):
""" padding image for model with FPN, instead PadBatch(pad_to_stride) in original config
Args:
        stride (int): model with FPN need image shape % stride == 0
"""
def __init__(self, stride=0):
self.coarsest_stride = stride
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
coarsest_stride = self.coarsest_stride
if coarsest_stride <= 0:
return im, im_info
im_c, im_h, im_w = im.shape
pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
padding_im[:, :im_h, :im_w] = im
return padding_im, im_info
class LetterBoxResize(object):
def __init__(self, target_size):
"""
Resize image to target size, convert normalized xywh to pixel xyxy
format ([x_center, y_center, width, height] -> [x0, y0, x1, y1]).
Args:
target_size (int|list): image target size.
"""
super(LetterBoxResize, self).__init__()
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
def letterbox(self, img, height, width, color=(127.5, 127.5, 127.5)):
# letterbox: resize a rectangular image to a padded rectangular
shape = img.shape[:2] # [height, width]
ratio_h = float(height) / shape[0]
ratio_w = float(width) / shape[1]
ratio = min(ratio_h, ratio_w)
new_shape = (round(shape[1] * ratio),
round(shape[0] * ratio)) # [width, height]
padw = (width - new_shape[0]) / 2
padh = (height - new_shape[1]) / 2
top, bottom = round(padh - 0.1), round(padh + 0.1)
left, right = round(padw - 0.1), round(padw + 0.1)
img = cv2.resize(
img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color) # padded rectangular
return img, ratio, padw, padh
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
height, width = self.target_size
h, w = im.shape[:2]
im, ratio, padw, padh = self.letterbox(im, height=height, width=width)
new_shape = [round(h * ratio), round(w * ratio)]
im_info['im_shape'] = np.array(new_shape, dtype=np.float32)
im_info['scale_factor'] = np.array([ratio, ratio], dtype=np.float32)
return im, im_info
class Pad(object):
def __init__(self, size, fill_value=[114.0, 114.0, 114.0]):
"""
Pad image to a specified size.
Args:
size (list[int]): image target size
fill_value (list[float]): rgb value of pad area, default (114.0, 114.0, 114.0)
"""
super(Pad, self).__init__()
if isinstance(size, int):
size = [size, size]
self.size = size
self.fill_value = fill_value
def __call__(self, im, im_info):
im_h, im_w = im.shape[:2]
h, w = self.size
if h == im_h and w == im_w:
im = im.astype(np.float32)
return im, im_info
canvas = np.ones((h, w, 3), dtype=np.float32)
canvas *= np.array(self.fill_value, dtype=np.float32)
canvas[0:im_h, 0:im_w, :] = im.astype(np.float32)
im = canvas
return im, im_info
class WarpAffine(object):
"""Warp affine the image
"""
def __init__(self,
keep_res=False,
pad=31,
input_h=512,
input_w=512,
scale=0.4,
shift=0.1,
down_ratio=4):
self.keep_res = keep_res
self.pad = pad
self.input_h = input_h
self.input_w = input_w
self.scale = scale
self.shift = shift
self.down_ratio = down_ratio
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
h, w = img.shape[:2]
if self.keep_res:
# True in detection eval/infer
input_h = (h | self.pad) + 1
input_w = (w | self.pad) + 1
s = np.array([input_w, input_h], dtype=np.float32)
c = np.array([w // 2, h // 2], dtype=np.float32)
else:
            # False in CenterTrack eval_mot/infer_mot
s = max(h, w) * 1.0
input_h, input_w = self.input_h, self.input_w
c = np.array([w / 2., h / 2.], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
img = cv2.resize(img, (w, h))
inp = cv2.warpAffine(
img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
if not self.keep_res:
out_h = input_h // self.down_ratio
out_w = input_w // self.down_ratio
trans_output = get_affine_transform(c, s, 0, [out_w, out_h])
im_info.update({
'center': c,
'scale': s,
'out_height': out_h,
'out_width': out_w,
'inp_height': input_h,
'inp_width': input_w,
'trans_input': trans_input,
'trans_output': trans_output,
})
return inp, im_info
class CULaneResize(object):
def __init__(self, img_h, img_w, cut_height, prob=0.5):
super(CULaneResize, self).__init__()
self.img_h = img_h
self.img_w = img_w
self.cut_height = cut_height
self.prob = prob
def __call__(self, im, im_info):
# cut
im = im[self.cut_height:, :, :]
# resize
transform = iaa.Sometimes(self.prob,
iaa.Resize({
"height": self.img_h,
"width": self.img_w
}))
im = transform(image=im.copy().astype(np.uint8))
im = im.astype(np.float32) / 255.
        # HWC -> CHW. Note: decode_image returns an RGB image while CULaneDataSet uses
        # cv2.imread (BGR); verify the channel order is consistent if results differ.
im = im.transpose(2, 0, 1)
return im, im_info
def preprocess(im, preprocess_ops):
# process image by preprocess_ops
im_info = {
'scale_factor': np.array(
[1., 1.], dtype=np.float32),
'im_shape': None,
}
im, im_info = decode_image(im, im_info)
for operator in preprocess_ops:
im, im_info = operator(im, im_info)
return im, im_info
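# A minimal usage sketch (illustrative, not part of the original module): the operator
# settings and the image path are placeholders; real values should be read from the
# exported model's infer_cfg.yml.
if __name__ == '__main__':
    example_ops = [
        LetterBoxResize(target_size=608),
        NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
            is_scale=True, norm_type='mean_std'),
        Permute(),
    ]
    example_im, example_info = preprocess('demo.jpg', example_ops)
    print(example_im.shape, example_info['im_shape'], example_info['scale_factor'])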
| PaddleDetection/deploy/python/preprocess.py/0 | {
"file_path": "PaddleDetection/deploy/python/preprocess.py",
"repo_id": "PaddleDetection",
"token_count": 9466
} | 71 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import onnx
import onnx_graphsurgeon
import numpy as np
from collections import OrderedDict
from paddle2onnx.command import program2onnx
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--onnx_file', required=True, type=str, help='onnx model path')
parser.add_argument(
'--model_dir',
type=str,
default=None,
help=("Directory include:'model.pdiparams', 'model.pdmodel', "
"'infer_cfg.yml', created by tools/export_model.py."))
parser.add_argument(
"--opset_version",
type=int,
default=11,
help="set onnx opset version to export")
parser.add_argument(
    '--topk_all', type=int, default=300, help='top-k objects kept for every image')
parser.add_argument(
'--iou_thres', type=float, default=0.7, help='iou threshold for NMS')
parser.add_argument(
'--conf_thres', type=float, default=0.01, help='conf threshold for NMS')
def main(FLAGS):
assert os.path.exists(FLAGS.onnx_file)
onnx_model = onnx.load(FLAGS.onnx_file)
graph = onnx_graphsurgeon.import_onnx(onnx_model)
graph.toposort()
graph.fold_constants()
graph.cleanup()
num_anchors = graph.outputs[1].shape[2]
num_classes = graph.outputs[1].shape[1]
scores = onnx_graphsurgeon.Variable(
name='scores', shape=[-1, num_anchors, num_classes], dtype=np.float32)
graph.layer(
op='Transpose',
name='lastTranspose',
inputs=[graph.outputs[1]],
outputs=[scores],
attrs=OrderedDict(perm=[0, 2, 1]))
attrs = OrderedDict(
plugin_version="1",
background_class=-1,
max_output_boxes=FLAGS.topk_all,
score_threshold=FLAGS.conf_thres,
iou_threshold=FLAGS.iou_thres,
score_activation=False,
box_coding=0, )
outputs = [
onnx_graphsurgeon.Variable("num_dets", np.int32, [-1, 1]),
onnx_graphsurgeon.Variable("det_boxes", np.float32,
[-1, FLAGS.topk_all, 4]),
onnx_graphsurgeon.Variable("det_scores", np.float32,
[-1, FLAGS.topk_all]),
onnx_graphsurgeon.Variable("det_classes", np.int32,
[-1, FLAGS.topk_all])
]
graph.layer(
op='EfficientNMS_TRT',
name="batched_nms",
inputs=[graph.outputs[0], scores],
outputs=outputs,
attrs=attrs)
graph.outputs = outputs
graph.cleanup().toposort()
onnx.save(onnx_graphsurgeon.export_onnx(graph), FLAGS.onnx_file)
print(f"The modified onnx model is saved in {FLAGS.onnx_file}")
if __name__ == '__main__':
FLAGS = parser.parse_args()
if FLAGS.model_dir is not None:
assert os.path.exists(FLAGS.model_dir)
program2onnx(
model_dir=FLAGS.model_dir,
save_file=FLAGS.onnx_file,
model_filename="model.pdmodel",
params_filename="model.pdiparams",
opset_version=FLAGS.opset_version,
enable_onnx_checker=True)
main(FLAGS)
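# Example invocation (illustrative; the model directory, ONNX file name and thresholds are
# placeholders rather than values required by this script):
#   python onnx_custom.py --onnx_file ppyoloe_crn_s.onnx \
#       --model_dir output_inference/ppyoloe_crn_s_300e_coco \
#       --opset_version 11 --topk_all 300 --iou_thres 0.7 --conf_thres 0.01
# Because the rewritten graph ends in the TensorRT plugin node EfficientNMS_TRT, the
# resulting ONNX file is meant to be built into a TensorRT engine (e.g. with trtexec)
# rather than run with a generic ONNX runtime.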
| PaddleDetection/deploy/third_engine/demo_onnx_trt/onnx_custom.py/0 | {
"file_path": "PaddleDetection/deploy/third_engine/demo_onnx_trt/onnx_custom.py",
"repo_id": "PaddleDetection",
"token_count": 1608
} | 72 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import argparse
from scipy.special import softmax
from openvino.runtime import Core
def image_preprocess(img_path, re_shape):
img = cv2.imread(img_path)
img = cv2.resize(
img, (re_shape, re_shape), interpolation=cv2.INTER_LANCZOS4)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img, [2, 0, 1]) / 255
img = np.expand_dims(img, 0)
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
img -= img_mean
img /= img_std
return img.astype(np.float32)
def get_color_map_list(num_classes):
color_map = num_classes * [0, 0, 0]
for i in range(0, num_classes):
j = 0
lab = i
while lab:
color_map[i * 3] |= (((lab >> 0) & 1) << (7 - j))
color_map[i * 3 + 1] |= (((lab >> 1) & 1) << (7 - j))
color_map[i * 3 + 2] |= (((lab >> 2) & 1) << (7 - j))
j += 1
lab >>= 3
color_map = [color_map[i:i + 3] for i in range(0, len(color_map), 3)]
return color_map
def draw_box(srcimg, results, class_label):
    with open(class_label, 'r') as f:
        label_list = [line.strip() for line in f.readlines()]
    color_list = get_color_map_list(len(label_list))
    clsid2color = {}
    for i in range(len(results)):
classid, conf = int(results[i, 0]), results[i, 1]
xmin, ymin, xmax, ymax = int(results[i, 2]), int(results[i, 3]), int(
results[i, 4]), int(results[i, 5])
if classid not in clsid2color:
clsid2color[classid] = color_list[classid]
color = tuple(clsid2color[classid])
cv2.rectangle(srcimg, (xmin, ymin), (xmax, ymax), color, thickness=2)
print(label_list[classid] + ': ' + str(round(conf, 3)))
cv2.putText(
srcimg,
label_list[classid] + ':' + str(round(conf, 3)), (xmin, ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.8, (0, 255, 0),
thickness=2)
return srcimg
def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=200):
"""
Args:
box_scores (N, 5): boxes in corner-form and probabilities.
iou_threshold: intersection over union threshold.
top_k: keep top_k results. If k <= 0, keep all the results.
candidate_size: only consider the candidates with the highest scores.
Returns:
picked: a list of indexes of the kept boxes
"""
scores = box_scores[:, -1]
boxes = box_scores[:, :-1]
picked = []
indexes = np.argsort(scores)
indexes = indexes[-candidate_size:]
while len(indexes) > 0:
current = indexes[-1]
picked.append(current)
if 0 < top_k == len(picked) or len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[:-1]
rest_boxes = boxes[indexes, :]
iou = iou_of(
rest_boxes,
np.expand_dims(
current_box, axis=0), )
indexes = indexes[iou <= iou_threshold]
return box_scores[picked, :]
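# Illustrative example (made-up boxes): with iou_threshold=0.5 the second box below
# overlaps the first with IoU ~= 0.68 and is suppressed, so rows 0 and 2 are kept:
#   hard_nms(np.array([[0., 0., 10., 10., 0.9],
#                      [1., 1., 11., 11., 0.8],
#                      [20., 20., 30., 30., 0.7]]), iou_threshold=0.5)
#   -> array([[ 0.,  0., 10., 10.,  0.9],
#             [20., 20., 30., 30.,  0.7]])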
def iou_of(boxes0, boxes1, eps=1e-5):
"""Return intersection-over-union (Jaccard index) of boxes.
Args:
boxes0 (N, 4): ground truth boxes.
boxes1 (N or 1, 4): predicted boxes.
eps: a small number to avoid 0 as denominator.
Returns:
iou (N): IoU values.
"""
overlap_left_top = np.maximum(boxes0[..., :2], boxes1[..., :2])
overlap_right_bottom = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
overlap_area = area_of(overlap_left_top, overlap_right_bottom)
area0 = area_of(boxes0[..., :2], boxes0[..., 2:])
area1 = area_of(boxes1[..., :2], boxes1[..., 2:])
return overlap_area / (area0 + area1 - overlap_area + eps)
def area_of(left_top, right_bottom):
"""Compute the areas of rectangles given two corners.
Args:
left_top (N, 2): left top corner.
right_bottom (N, 2): right bottom corner.
Returns:
area (N): return the area.
"""
hw = np.clip(right_bottom - left_top, 0.0, None)
return hw[..., 0] * hw[..., 1]
class PicoDetNMS(object):
"""
Args:
        input_shape (tuple): network input image size, (height, width)
        scale_x (float): width scale factor from the network input to the original image
        scale_y (float): height scale factor from the network input to the original image
"""
def __init__(self,
input_shape,
scale_x,
scale_y,
strides=[8, 16, 32, 64],
score_threshold=0.4,
nms_threshold=0.5,
nms_top_k=1000,
keep_top_k=100):
self.input_shape = input_shape
self.scale_x = scale_x
self.scale_y = scale_y
self.strides = strides
self.score_threshold = score_threshold
self.nms_threshold = nms_threshold
self.nms_top_k = nms_top_k
self.keep_top_k = keep_top_k
def __call__(self, decode_boxes, select_scores):
batch_size = 1
out_boxes_list = []
for batch_id in range(batch_size):
# nms
bboxes = np.concatenate(decode_boxes, axis=0)
confidences = np.concatenate(select_scores, axis=0)
picked_box_probs = []
picked_labels = []
for class_index in range(0, confidences.shape[1]):
probs = confidences[:, class_index]
mask = probs > self.score_threshold
probs = probs[mask]
if probs.shape[0] == 0:
continue
subset_boxes = bboxes[mask, :]
box_probs = np.concatenate(
[subset_boxes, probs.reshape(-1, 1)], axis=1)
box_probs = hard_nms(
box_probs,
iou_threshold=self.nms_threshold,
top_k=self.keep_top_k, )
picked_box_probs.append(box_probs)
picked_labels.extend([class_index] * box_probs.shape[0])
if len(picked_box_probs) == 0:
out_boxes_list.append(np.empty((0, 4)))
else:
picked_box_probs = np.concatenate(picked_box_probs)
# resize output boxes
picked_box_probs[:, 0] *= self.scale_x
picked_box_probs[:, 2] *= self.scale_x
picked_box_probs[:, 1] *= self.scale_y
picked_box_probs[:, 3] *= self.scale_y
                # each row: [class_id, score, x1, y1, x2, y2]
out_boxes_list.append(
np.concatenate(
[
np.expand_dims(
np.array(picked_labels),
axis=-1), np.expand_dims(
picked_box_probs[:, 4], axis=-1),
picked_box_probs[:, :4]
],
axis=1))
out_boxes_list = np.concatenate(out_boxes_list, axis=0)
return out_boxes_list
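# Note: the ONNX model used below is exported without the final NMS step, so the network
# returns `num_outs` decoded box tensors followed by `num_outs` score tensors (one pair per
# feature-map level). PicoDetNMS above merges them and returns rows shaped
# [class_id, score, x1, y1, x2, y2], which is exactly what draw_box expects.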
def detect(img_file, compiled_model, class_label):
output = compiled_model.infer_new_request({0: test_image})
result_ie = list(output.values())
decode_boxes = []
select_scores = []
num_outs = int(len(result_ie) / 2)
for out_idx in range(num_outs):
decode_boxes.append(result_ie[out_idx])
select_scores.append(result_ie[out_idx + num_outs])
image = cv2.imread(img_file, 1)
scale_x = image.shape[1] / test_image.shape[3]
scale_y = image.shape[0] / test_image.shape[2]
nms = PicoDetNMS(test_image.shape[2:], scale_x, scale_y)
np_boxes = nms(decode_boxes, select_scores)
res_image = draw_box(image, np_boxes, class_label)
cv2.imwrite('res.jpg', res_image)
cv2.imshow("res", res_image)
cv2.waitKey()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--img_path',
type=str,
default='../../demo_onnxruntime/imgs/bus.jpg',
help="image path")
parser.add_argument(
'--onnx_path',
type=str,
default='out_onnxsim_infer/picodet_s_320_postproccesed_woNMS.onnx',
help="onnx filepath")
parser.add_argument('--in_shape', type=int, default=320, help="input_size")
parser.add_argument(
'--class_label',
type=str,
default='coco_label.txt',
help="class label file")
args = parser.parse_args()
ie = Core()
net = ie.read_model(args.onnx_path)
test_image = image_preprocess(args.img_path, args.in_shape)
compiled_model = ie.compile_model(net, 'CPU')
detect(args.img_path, compiled_model, args.class_label)
| PaddleDetection/deploy/third_engine/demo_openvino/python/openvino_infer.py/0 | {
"file_path": "PaddleDetection/deploy/third_engine/demo_openvino/python/openvino_infer.py",
"repo_id": "PaddleDetection",
"token_count": 4564
} | 73 |
# 模型库和基线
# 内容
- [基础设置](#基础设置)
- [测试环境](#测试环境)
- [通用设置](#通用设置)
- [训练策略](#训练策略)
- [ImageNet预训练模型](#ImageNet预训练模型)
- [基线](#基线)
- [目标检测](#目标检测)
- [实例分割](#实例分割)
- [PaddleYOLO](#PaddleYOLO)
- [人脸检测](#人脸检测)
- [旋转框检测](#旋转框检测)
- [关键点检测](#关键点检测)
- [多目标跟踪](#多目标跟踪)
# 基础设置
## 测试环境
- Python 3.7
- PaddlePaddle 每日版本
- CUDA 10.1
- cuDNN 7.5
- NCCL 2.4.8
## 通用设置
- 所有模型均在COCO17数据集中训练和测试。
- [YOLOv5](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/yolov5)、[YOLOv6](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/yolov6)、[YOLOv7](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/yolov7)和[YOLOv8](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/yolov8)这几类模型的代码在[PaddleYOLO](https://github.com/PaddlePaddle/PaddleYOLO)中,**PaddleYOLO库开源协议为GPL 3.0**。
- 除非特殊说明,所有ResNet骨干网络采用[ResNet-B](https://arxiv.org/pdf/1812.01187)结构。
- **推理时间(fps)**: 推理时间是在一张Tesla V100的GPU上通过'tools/eval.py'测试所有验证集得到,单位是fps(图片数/秒), cuDNN版本是7.5,包括数据加载、网络前向执行和后处理, batch size是1。
## 训练策略
- 我们采用和[Detectron](https://github.com/facebookresearch/Detectron/blob/master/MODEL_ZOO.md#training-schedules)相同的训练策略。
- 1x 策略表示:在总batch size为8时,初始学习率为0.01,在8 epoch和11 epoch后学习率分别下降10倍,最终训练12 epoch。
- 2x 策略为1x策略的两倍,同时学习率调整的epoch数位置也为1x的两倍。
## ImageNet预训练模型
Paddle提供基于ImageNet的骨架网络预训练模型。所有预训练模型均通过标准的Imagenet-1k数据集训练得到,ResNet和MobileNet等是采用余弦学习率调整策略或SSLD知识蒸馏训练得到的高精度预训练模型,可在[PaddleClas](https://github.com/PaddlePaddle/PaddleClas)查看模型细节。
# 基线
## 目标检测
### Faster R-CNN
请参考[Faster R-CNN](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/faster_rcnn/)
### YOLOv3
请参考[YOLOv3](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/yolov3/)
### PP-YOLOE/PP-YOLOE+
请参考[PP-YOLOE](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyoloe/)
### PP-YOLO/PP-YOLOv2
请参考[PP-YOLO](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ppyolo/)
### PicoDet
请参考[PicoDet](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/picodet)
### RetinaNet
请参考[RetinaNet](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/retinanet/)
### Cascade R-CNN
请参考[Cascade R-CNN](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/cascade_rcnn)
### SSD/SSDLite
请参考[SSD](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ssd/)
### FCOS
请参考[FCOS](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/fcos/)
### CenterNet
请参考[CenterNet](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/centernet/)
### TTFNet/PAFNet
请参考[TTFNet](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/ttfnet/)
### Group Normalization
请参考[Group Normalization](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/gn/)
### Deformable ConvNets v2
请参考[Deformable ConvNets v2](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/dcn/)
### HRNets
请参考[HRNets](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/hrnet/)
### Res2Net
请参考[Res2Net](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/res2net/)
### ConvNeXt
请参考[ConvNeXt](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/convnext/)
### GFL
请参考[GFL](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/gfl)
### TOOD
请参考[TOOD](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/tood)
### PSS-DET(RCNN-Enhance)
请参考[PSS-DET](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rcnn_enhance)
### DETR
请参考[DETR](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/detr)
### Deformable DETR
请参考[Deformable DETR](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/deformable_detr)
### Sparse R-CNN
请参考[Sparse R-CNN](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/sparse_rcnn)
### Vision Transformer
请参考[Vision Transformer](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/vitdet)
### DINO
请参考[DINO](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/dino)
### YOLOX
请参考[YOLOX](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/yolox)
### YOLOF
请参考[YOLOF](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/yolof)
## 实例分割
### Mask R-CNN
请参考[Mask R-CNN](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mask_rcnn/)
### Cascade R-CNN
请参考[Cascade R-CNN](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/cascade_rcnn)
### SOLOv2
请参考[SOLOv2](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/solov2/)
### QueryInst
请参考[QueryInst](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/queryinst)
## [PaddleYOLO](https://github.com/PaddlePaddle/PaddleYOLO)
请参考[PaddleYOLO模型库](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/docs/MODEL_ZOO_cn.md)
### YOLOv5
请参考[YOLOv5](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/yolov5)
### YOLOv6(v3.0)
请参考[YOLOv6](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/yolov6)
### YOLOv7
请参考[YOLOv7](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/yolov7)
### YOLOv8
请参考[YOLOv8](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/yolov8)
### RTMDet
请参考[RTMDet](https://github.com/PaddlePaddle/PaddleYOLO/tree/develop/configs/rtmdet)
## 人脸检测
请参考[人脸检测模型库](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/face_detection)
### BlazeFace
请参考[BlazeFace](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/face_detection/)
## 旋转框检测
请参考[旋转框检测模型库](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate)
### PP-YOLOE-R
请参考[PP-YOLOE-R](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/ppyoloe_r)
### FCOSR
请参考[FCOSR](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/fcosr)
### S2ANet
请参考[S2ANet](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/rotate/s2anet)
## 关键点检测
请参考[关键点检测模型库](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint)
### PP-TinyPose
请参考[PP-TinyPose](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/tiny_pose)
### HRNet
请参考[HRNet](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/hrnet)
### Lite-HRNet
请参考[Lite-HRNet](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/lite_hrnet)
### HigherHRNet
请参考[HigherHRNet](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/keypoint/higherhrnet)
## 多目标跟踪
请参考[多目标跟踪模型库](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot)
### DeepSORT
请参考[DeepSORT](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/deepsort)
### ByteTrack
请参考[ByteTrack](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/bytetrack)
### OC-SORT
请参考[OC-SORT](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/ocsort)
### BoT-SORT
请参考[BoT-SORT](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/botsort)
### CenterTrack
请参考[CenterTrack](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/centertrack)
### FairMOT/MC-FairMOT
请参考[FairMOT](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/fairmot)
### JDE
请参考[JDE](https://github.com/PaddlePaddle/PaddleDetection/tree/develop/configs/mot/jde)
| PaddleDetection/docs/MODEL_ZOO_cn.md/0 | {
"file_path": "PaddleDetection/docs/MODEL_ZOO_cn.md",
"repo_id": "PaddleDetection",
"token_count": 4345
} | 74 |
[简体中文](./detection.md) | English
# Customize Object Detection task
When an object detection algorithm is applied in a specific industry, additional training is usually required before it can be used in practice, and project iteration often also requires modifying the categories. This document details how to customize an object detection model with PaddleDetection, covering data preparation, a model optimization roadmap, and the workflow for modifying categories.
## Data Preparation
Customization starts with the preparation of the dataset. We need to collect data that matches the target scenario in order to improve the model accuracy and generalization. Then labeling tools such as Labelme and LabelImg are used to annotate the object detection bounding boxes, and the labeling results are converted into COCO or VOC format. For details please refer to [Data Preparation](../../tutorials/data/PrepareDetDataSet_en.md)
## Model Optimization
### 1. Use customized dataset for training
Modify the corresponding paths in the data configuration file based on the prepared data, for example in
`configs/datasets/coco_detection.yml`:
```
metric: COCO
num_classes: 80
TrainDataset:
!COCODataSet
image_dir: train2017 # Path to the images of the training set relative to the dataset_dir
anno_path: annotations/instances_train2017.json # Path to the annotation file of the training set relative to the dataset_dir
dataset_dir: dataset/coco # Path to the dataset relative to the PaddleDetection path
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
!COCODataSet
    image_dir: val2017 # Path to the images of the evaluation set relative to the dataset_dir
    anno_path: annotations/instances_val2017.json # Path to the annotation file of the evaluation set relative to the dataset_dir
dataset_dir: dataset/coco # Path to the dataset relative to the PaddleDetection path
TestDataset:
!ImageFolder
    anno_path: annotations/instances_val2017.json # also support txt (like VOC's label_list.txt) # Path to the annotation files relative to dataset_dir.
dataset_dir: dataset/coco # if set, anno_path will be 'dataset_dir/anno_path' # Path to the dataset relative to the PaddleDetection path
```
Once the configuration changes are completed, training and evaluation can be started with the following command:
```
export CUDA_VISIBLE_DEVICES=0
python tools/train.py -c configs/yolov3/yolov3_mobilenet_v1_270e_coco.yml --eval
```
More details please refer to [Getting Started for PaddleDetection](../../tutorials/GETTING_STARTED_cn.md)
### 2. Load the COCO model as pre-training
The pre-trained models currently specified in PaddleDetection's configuration files are ImageNet weights, which are only loaded into the backbone network of the detection algorithm. For practical use, it is recommended to load weights trained on the COCO dataset instead, which usually brings a large improvement in model accuracy. The method is as follows.
#### 1) Set pre-training weight path
The COCO-trained model weights are listed in the configuration folder of each algorithm. For example, the PP-YOLOE-l COCO weights are provided under `configs/ppyoloe`: [Link](https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams). The configuration file then sets `pretrain_weights: https://paddledet.bj.bcebos.com/models/ppyoloe_crn_l_300e_coco.pdparams`
#### 2) Modify hyperparameters
After loading the COCO pre-training weights, the learning rate hyperparameters need to be modified, for example
In `configs/ppyoloe/_base_/optimizer_300e.yml`:
```
epoch: 120 # The original configuration is 300 epochs; after loading COCO weights, the number of epochs can be reduced appropriately
LearningRate:
  base_lr: 0.005 # The original configuration is 0.025; after loading COCO weights, the learning rate should be reduced.
schedulers:
- !CosineDecay
max_epochs: 144 # Modify based on the number of epochs
- LinearWarmup
start_factor: 0.
epochs: 5
```
## Modify categories
When the actual application scenario category changes, the data configuration file needs to be modified, for example in `configs/datasets/coco_detection.yml`:
```
metric: COCO
num_classes: 10 # originally 80 classes
```
After the configuration changes are completed, the COCO pre-training weights can also be loaded. PaddleDetection supports automatic loading of shape-matching weights, and weights that do not match the shape are automatically ignored, so no other modifications are needed.
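If you are unsure how many categories the dataset contains, `num_classes` can be read directly from the COCO-format annotation file. The snippet below is a minimal sketch; the annotation path is a placeholder for your own dataset:
```
import json
with open('dataset/xxx/annotations/train.json', 'r') as f:  # placeholder path
    coco = json.load(f)
print('num_classes:', len(coco['categories']))
```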
| PaddleDetection/docs/advanced_tutorials/customization/detection_en.md/0 | {
"file_path": "PaddleDetection/docs/advanced_tutorials/customization/detection_en.md",
"repo_id": "PaddleDetection",
"token_count": 1262
} | 75 |
from collections import defaultdict
from pathlib import Path
import cv2
import numpy as np
import paddle.vision.transforms as T
from openvino.inference_engine import IECore
from ppdet.modeling.mot.tracker import JDETracker
from ppdet.modeling.mot.visualization import plot_tracking_dict
root_path = Path(__file__).parent
target_height = 320
target_width = 576
# -------------------------------
def get_net():
ie = IECore()
model_path = root_path / "fairmot_576_320_v3.onnx"
net = ie.read_network(model= str(model_path))
exec_net = ie.load_network(network=net, device_name="CPU")
return net, exec_net
def get_output_names(net):
output_names = [key for key in net.outputs]
return output_names
def prepare_input():
transforms = [
T.Resize(size=(target_height, target_width)),
T.Normalize(mean=(0,0,0), std=(1,1,1), data_format='HWC', to_rgb= True),
T.Transpose()
]
img_file = root_path / "street.jpeg"
img = cv2.imread(str(img_file))
normalized_img = T.Compose(transforms)(img)
normalized_img = normalized_img.astype(np.float32, copy=False) / 255.0
    # add a new axis in front
img_input = normalized_img[np.newaxis, :]
# scale_factor is calculated as: im_shape / original_im_shape
h_scale = target_height / img.shape[0]
w_scale = target_width / img.shape[1]
input = {"image": img_input, "im_shape": [target_height, target_width], "scale_factor": [h_scale, w_scale]}
return input, img
def predict(exec_net, input):
result = exec_net.infer(input)
return result
def postprocess(pred_dets, pred_embs, threshold = 0.5):
tracker = JDETracker()
online_targets_dict = tracker.update(pred_dets, pred_embs)
online_tlwhs = defaultdict(list)
online_scores = defaultdict(list)
online_ids = defaultdict(list)
for cls_id in range(1):
online_targets = online_targets_dict[cls_id]
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
tscore = t.score
            # make sure the tscore is no less than the threshold.
            if tscore < threshold:
                continue
# make sure the target area is not less than the min_box_area.
if tlwh[2] * tlwh[3] <= tracker.min_box_area:
continue
# make sure the vertical ratio of a found target is within the range (1.6 as default ratio).
if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[3] > tracker.vertical_ratio:
continue
online_tlwhs[cls_id].append(tlwh)
online_ids[cls_id].append(tid)
online_scores[cls_id].append(tscore)
online_im = plot_tracking_dict(
img,
1,
online_tlwhs,
online_ids,
online_scores,
frame_id=0)
return online_im
# -------------------------------
net, exec_net = get_net()
output_names = get_output_names(net)
del net
input, img = prepare_input()
result = predict(exec_net, input)
pred_dets = result[output_names[0]]
pred_embs = result[output_names[1]]
processed_img = postprocess(pred_dets, pred_embs)
tracked_img_file_path = root_path / "tracked.jpg"
cv2.imwrite(str(tracked_img_file_path), processed_img)
| PaddleDetection/docs/advanced_tutorials/openvino_inference/fairmot_onnx_openvino.py/0 | {
"file_path": "PaddleDetection/docs/advanced_tutorials/openvino_inference/fairmot_onnx_openvino.py",
"repo_id": "PaddleDetection",
"token_count": 1355
} | 76 |
[English](QUICK_STARTED.md) | 简体中文
# 快速开始
为了使得用户能够在很短时间内快速产出模型,掌握PaddleDetection的使用方式,这篇教程通过一个预训练检测模型对小数据集进行finetune。在较短时间内即可产出一个效果不错的模型。实际业务中,建议用户根据需要选择合适模型配置文件进行适配。
- **设置显卡**
```bash
export CUDA_VISIBLE_DEVICES=0
```
## 一、快速体验
```
# 用PP-YOLO算法在COCO数据集上预训练模型预测一张图片
python tools/infer.py -c configs/ppyolo/ppyolo_r50vd_dcn_1x_coco.yml -o use_gpu=true weights=https://paddledet.bj.bcebos.com/models/ppyolo_r50vd_dcn_1x_coco.pdparams --infer_img=demo/000000014439.jpg
```
结果如下图:

## 二、准备数据
数据集参考[Kaggle数据集](https://www.kaggle.com/andrewmvd/road-sign-detection) ,包含877张图像,数据类别4类:crosswalk,speedlimit,stop,trafficlight。
将数据划分为训练集701张图和测试集176张图,[下载链接](https://paddlemodels.bj.bcebos.com/object_detection/roadsign_voc.tar).
```
# 注意:可跳过这步下载,后面训练会自动下载
python dataset/roadsign_voc/download_roadsign_voc.py
```
## 三、训练、评估、预测
### 1、训练
```
# 边训练边测试 CPU需要约1小时(use_gpu=false),1080Ti GPU需要约10分钟
# -c 参数表示指定使用哪个配置文件
# -o 参数表示指定配置文件中的全局变量(覆盖配置文件中的设置),这里设置使用gpu
# --eval 参数表示边训练边评估,最后会自动保存一个名为model_final.pdparams的模型
python tools/train.py -c configs/yolov3/yolov3_mobilenet_v1_roadsign.yml --eval -o use_gpu=true
```
如果想通过VisualDL实时观察loss变化曲线,在训练命令中添加--use_vdl=true,以及通过--vdl_log_dir设置日志保存路径。
**但注意VisualDL需Python>=3.5**
首先安装[VisualDL](https://github.com/PaddlePaddle/VisualDL)
```
python -m pip install visualdl -i https://mirror.baidu.com/pypi/simple
```
```
python -u tools/train.py -c configs/yolov3/yolov3_mobilenet_v1_roadsign.yml \
--use_vdl=true \
--vdl_log_dir=vdl_dir/scalar \
--eval
```
通过visualdl命令实时查看变化曲线:
```
visualdl --logdir vdl_dir/scalar/ --host <host_IP> --port <port_num>
```
### 2、评估
```
# 评估 默认使用训练过程中保存的model_final.pdparams
# -c 参数表示指定使用哪个配置文件
# -o 参数表示指定配置文件中的全局变量(覆盖配置文件中的设置)
# 目前只支持单卡评估
python tools/eval.py -c configs/yolov3/yolov3_mobilenet_v1_roadsign.yml -o use_gpu=true
```
最终模型精度在mAP=0.85左右,由于数据集较小因此每次训练结束后精度会有一定波动
### 3、预测
```
# -c 参数表示指定使用哪个配置文件
# -o 参数表示指定配置文件中的全局变量(覆盖配置文件中的设置)
# --infer_img 参数指定预测图像路径
# 预测结束后会在output文件夹中生成一张画有预测结果的同名图像
python tools/infer.py -c configs/yolov3/yolov3_mobilenet_v1_roadsign.yml -o use_gpu=true --infer_img=demo/road554.png
```
结果如下图:

| PaddleDetection/docs/tutorials/QUICK_STARTED_cn.md/0 | {
"file_path": "PaddleDetection/docs/tutorials/QUICK_STARTED_cn.md",
"repo_id": "PaddleDetection",
"token_count": 2109
} | 77 |
简体中文 | [English](PrepareMOTDataSet_en.md)
# 多目标跟踪数据集准备
## 目录
- [简介和模型选型](#简介和模型选型)
- [MOT数据集准备](#MOT数据集准备)
- [SDE数据集](#SDE数据集)
- [JDE数据集](#JDE数据集)
- [用户自定义数据集准备](#用户自定义数据集准备)
- [SDE数据集](#SDE数据集)
- [JDE数据集](#JDE数据集)
- [引用](#引用)
## 简介和模型选型
PaddleDetection中提供了SDE和JDE两个系列的多种算法实现:
- SDE(Separate Detection and Embedding)
- [ByteTrack](../../../configs/mot/bytetrack)
- [DeepSORT](../../../configs/mot/deepsort)
- JDE(Joint Detection and Embedding)
- [JDE](../../../configs/mot/jde)
- [FairMOT](../../../configs/mot/fairmot)
- [MCFairMOT](../../../configs/mot/mcfairmot)
**注意:**
- 以上算法原论文均为单类别的多目标跟踪,PaddleDetection团队同时也支持了[ByteTrack](./bytetrack)和FairMOT([MCFairMOT](./mcfairmot))的多类别的多目标跟踪;
- [DeepSORT](../../../configs/mot/deepsort)和[JDE](../../../configs/mot/jde)均只支持单类别的多目标跟踪;
- [DeepSORT](../../../configs/mot/deepsort)需要额外添加ReID权重一起执行,[ByteTrack](../../../configs/mot/bytetrack)可加可不加ReID权重,默认不加;
关于模型选型,PaddleDetection团队提供的总结建议如下:
| MOT方式 | 经典算法 | 算法流程 | 数据集要求 | 其他特点 |
| :--------------| :--------------| :------- | :----: | :----: |
| SDE系列 | DeepSORT,ByteTrack | 分离式,两个独立模型权重先检测后ReID,也可不加ReID | 检测和ReID数据相对独立,不加ReID时即纯检测数据集 |检测和ReID可分别调优,鲁棒性较高,AI竞赛常用|
| JDE系列 | FairMOT | 联合式,一个模型权重端到端同时检测和ReID | 必须同时具有检测和ReID标注 | 检测和ReID联合训练,不易调优,泛化性不强|
**注意:**
- 由于数据标注的成本较大,建议选型前优先考虑**数据集要求**,如果数据集只有检测框标注而没有ReID标注,是无法使用JDE系列算法训练的,更推荐使用SDE系列;
- SDE系列算法在检测器精度足够高时,也可以不使用ReID权重进行物体间的长时序关联,可以参照[ByteTrack](bytetrack);
- 耗时速度和模型权重参数量计算量有一定关系,耗时从理论上看`不使用ReID的SDE系列 < JDE系列 < 使用ReID的SDE系列`;
## MOT数据集准备
PaddleDetection团队提供了众多公开数据集或整理后数据集的下载链接,参考[数据集下载汇总](../../../configs/mot/DataDownload.md),用户可以自行下载使用。
根据模型选型总结,MOT数据集可以分为两类:一类纯检测框标注的数据集,仅SDE系列可以使用;另一类是同时有检测和ReID标注的数据集,SDE系列和JDE系列都可以使用。
### SDE数据集
SDE数据集是纯检测标注的数据集,用户自定义数据集可以参照[DET数据准备文档](./PrepareDetDataSet.md)准备。
以MOT17数据集为例,下载并解压放在`PaddleDetection/dataset/mot`目录下:
```
wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT17.zip
```
并修改数据集部分的配置文件如下:
```
num_classes: 1
TrainDataset:
!COCODataSet
dataset_dir: dataset/mot/MOT17
anno_path: annotations/train_half.json
image_dir: images/train
data_fields: ['image', 'gt_bbox', 'gt_class', 'is_crowd']
EvalDataset:
!COCODataSet
dataset_dir: dataset/mot/MOT17
anno_path: annotations/val_half.json
image_dir: images/train
TestDataset:
!ImageFolder
dataset_dir: dataset/mot/MOT17
anno_path: annotations/val_half.json
```
数据集目录为:
```
dataset/mot
|——————MOT17
|——————annotations
|——————images
```
### JDE数据集
JDE数据集是同时有检测和ReID标注的数据集,首先按照以下命令下载`image_lists.zip`并解压放在`PaddleDetection/dataset/mot`目录下:
```
wget https://bj.bcebos.com/v1/paddledet/data/mot/image_lists.zip
```
然后按照以下命令可以快速下载各个公开数据集,也解压放在`PaddleDetection/dataset/mot`目录下:
```
# MIX数据,同JDE,FairMOT论文使用的数据集
wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT17.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/Caltech.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/CUHKSYSU.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/PRW.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/Cityscapes.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/ETHZ.zip
wget https://bj.bcebos.com/v1/paddledet/data/mot/MOT16.zip
```
数据集目录为:
```
dataset/mot
|——————image_lists
|——————caltech.all
|——————citypersons.train
|——————cuhksysu.train
|——————eth.train
|——————mot16.train
|——————mot17.train
|——————prw.train
|——————Caltech
|——————Cityscapes
|——————CUHKSYSU
|——————ETHZ
|——————MOT16
|——————MOT17
|——————PRW
```
#### JDE数据集的格式
这几个相关数据集都遵循以下结构:
```
MOT17
|——————images
| └——————train
| └——————test
└——————labels_with_ids
└——————train
```
所有数据集的标注是以统一数据格式提供的。各个数据集中每张图片都有相应的标注文本。给定一个图像路径,可以通过将字符串`images`替换为`labels_with_ids`并将`.jpg`替换为`.txt`来生成标注文本路径。在标注文本中,每行都描述一个边界框,格式如下:
```
[class] [identity] [x_center] [y_center] [width] [height]
```
- `class`为类别id,支持单类别和多类别,从`0`开始计,单类别即为`0`。
- `identity`是从`1`到`num_identities`的整数(`num_identities`是数据集中所有视频或图片序列的不同物体实例的总数),如果此框没有`identity`标注,则为`-1`。
- `[x_center] [y_center] [width] [height]`是中心点坐标和宽高,注意他们的值是由图片的宽度/高度标准化的,因此它们是从0到1的浮点数。
**注意:**
- MIX数据集是[JDE](https://github.com/Zhongdao/Towards-Realtime-MOT)和[FairMOT](https://github.com/ifzhang/FairMOT)原论文使用的数据集,包括**Caltech Pedestrian, CityPersons, CUHK-SYSU, PRW, ETHZ, MOT17和MOT16**。使用前6者作为联合数据集参与训练,MOT16作为评测数据集。如果您想使用这些数据集,请**遵循他们的License**。
- MIX数据集以及其子数据集都是单类别的行人跟踪数据集,可认为相比于行人检测数据集多了id号的标注。
- 更多场景的垂类模型例如车辆行人人头跟踪等,垂类数据集也需要处理成与MIX数据集相同的格式,参照[数据集下载汇总](DataDownload.md)、[车辆跟踪](vehicle/README_cn.md)、[人头跟踪](headtracking21/README_cn.md)以及更通用的[行人跟踪](pedestrian/README_cn.md)。
- 用户自定义数据集可参照[MOT数据集准备教程](../../docs/tutorials/PrepareMOTDataSet_cn.md)去准备。
## 用户自定义数据集准备
### SDE数据集
如果用户选择SDE系列方案,即准备纯检测标注的自定义数据集,则可以参照[DET数据准备文档](./PrepareDetDataSet.md)准备。
### JDE数据集
如果用户选择JDE系列方案,则需要同时具有检测和ReID标注,且符合MOT-17数据集的格式。
为了规范地进行训练和评测,用户数据需要转成和MOT-17数据集相同的目录和格式:
```
custom_data
|——————images
| └——————test
| └——————train
| └——————seq1
| | └——————gt
| | | └——————gt.txt
| | └——————img1
| | | └——————000001.jpg
| | | |——————000002.jpg
| | | └—————— ...
| | └——————seqinfo.ini
| └——————seq2
| └——————...
└——————labels_with_ids
└——————train
└——————seq1
| └——————000001.txt
| |——————000002.txt
| └—————— ...
└——————seq2
└—————— ...
```
##### images文件夹
- `gt.txt`是原始标注文件,而训练所用标注是`labels_with_ids`文件夹。
- `gt.txt`里是当前视频中所有图片的原始标注文件,每行都描述一个边界框,格式如下:
```
[frame_id],[identity],[bb_left],[bb_top],[width],[height],[score],[label],[vis_ratio]
```
- `img1`文件夹里是按照一定帧率抽好的图片。
- `seqinfo.ini`文件是视频信息描述文件,需要如下格式的信息:
```
[Sequence]
name=MOT17-02
imDir=img1
frameRate=30
seqLength=600
imWidth=1920
imHeight=1080
imExt=.jpg
```
其中`gt.txt`里是当前视频中所有图片的原始标注文件,每行都描述一个边界框,格式如下:
```
[frame_id],[identity],[bb_left],[bb_top],[width],[height],[score],[label],[vis_ratio]
```
**注意**:
- `frame_id`为当前图片帧序号
- `identity`是从`1`到`num_identities`的整数(`num_identities`是**当前视频或图片序列**的不同物体实例的总数),如果此框没有`identity`标注,则为`-1`。
- `bb_left`是目标框的左边界的x坐标
- `bb_top`是目标框的上边界的y坐标
- `width,height`是真实的像素宽高
- `score`是当前目标是否进入考虑范围内的标志(值为0表示此目标在计算中被忽略,而值为1则用于将其标记为活动实例),默认为`1`
- `label`是当前目标的种类标签,由于目前仅支持单类别跟踪,默认为`1`,MOT-16数据集中会有其他类别标签,但都是当作ignore类别计算
- `vis_ratio`是当前目标被其他目标包含或覆挡后的可见率,是从0到1的浮点数,默认为`1`
##### labels_with_ids文件夹
所有数据集的标注是以统一数据格式提供的。各个数据集中每张图片都有相应的标注文本。给定一个图像路径,可以通过将字符串`images`替换为`labels_with_ids`并将`.jpg`替换为`.txt`来生成标注文本路径。在标注文本中,每行都描述一个边界框,格式如下:
```
[class] [identity] [x_center] [y_center] [width] [height]
```
**注意**:
- `class`为类别id,支持单类别和多类别,从`0`开始计,单类别即为`0`。
- `identity`是从`1`到`num_identities`的整数(`num_identities`是数据集中所有视频或图片序列的不同物体实例的总数),如果此框没有`identity`标注,则为`-1`。
- `[x_center] [y_center] [width] [height]`是中心点坐标和宽高,注意是由图片的宽度/高度标准化的,因此它们是从0到1的浮点数。
可采用如下脚本生成相应的`labels_with_ids`:
```
cd dataset/mot
python gen_labels_MOT.py
```
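如果想了解转换逻辑,也可以参考下面的示意代码,把单个序列的`gt.txt`转换为`labels_with_ids`格式(仅为说明用的简化示例:图片宽高`im_w`、`im_h`与路径均为假设值,且未处理score为0的忽略目标、跨序列的id偏移等细节,实际请以`gen_labels_MOT.py`为准):
```
import os
from collections import defaultdict
def gt_to_labels_with_ids(gt_path, label_dir, im_w, im_h):
    # gt.txt 每行: frame_id,identity,bb_left,bb_top,width,height,score,label,vis_ratio
    per_frame = defaultdict(list)
    with open(gt_path) as f:
        for line in f:
            frame_id, tid, x, y, w, h = [float(v) for v in line.split(',')[:6]]
            # 转成归一化的中心点坐标和宽高,单类别时类别恒为0
            cx, cy = (x + w / 2) / im_w, (y + h / 2) / im_h
            per_frame[int(frame_id)].append('0 {:d} {:.6f} {:.6f} {:.6f} {:.6f}'.format(
                int(tid), cx, cy, w / im_w, h / im_h))
    os.makedirs(label_dir, exist_ok=True)
    for frame_id, lines in per_frame.items():
        with open(os.path.join(label_dir, '{:06d}.txt'.format(frame_id)), 'w') as f:
            f.write('\n'.join(lines) + '\n')
```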
### 引用
Caltech:
```
@inproceedings{ dollarCVPR09peds,
author = "P. Doll\'ar and C. Wojek and B. Schiele and P. Perona",
title = "Pedestrian Detection: A Benchmark",
booktitle = "CVPR",
month = "June",
year = "2009",
city = "Miami",
}
```
Citypersons:
```
@INPROCEEDINGS{Shanshan2017CVPR,
Author = {Shanshan Zhang and Rodrigo Benenson and Bernt Schiele},
Title = {CityPersons: A Diverse Dataset for Pedestrian Detection},
Booktitle = {CVPR},
Year = {2017}
}
@INPROCEEDINGS{Cordts2016Cityscapes,
title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year={2016}
}
```
CUHK-SYSU:
```
@inproceedings{xiaoli2017joint,
title={Joint Detection and Identification Feature Learning for Person Search},
author={Xiao, Tong and Li, Shuang and Wang, Bochao and Lin, Liang and Wang, Xiaogang},
booktitle={CVPR},
year={2017}
}
```
PRW:
```
@inproceedings{zheng2017person,
title={Person re-identification in the wild},
author={Zheng, Liang and Zhang, Hengheng and Sun, Shaoyan and Chandraker, Manmohan and Yang, Yi and Tian, Qi},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages={1367--1376},
year={2017}
}
```
ETHZ:
```
@InProceedings{eth_biwi_00534,
author = {A. Ess and B. Leibe and K. Schindler and and L. van Gool},
title = {A Mobile Vision System for Robust Multi-Person Tracking},
booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR'08)},
year = {2008},
month = {June},
publisher = {IEEE Press},
keywords = {}
}
```
MOT-16&17:
```
@article{milan2016mot16,
title={MOT16: A benchmark for multi-object tracking},
author={Milan, Anton and Leal-Taix{\'e}, Laura and Reid, Ian and Roth, Stefan and Schindler, Konrad},
journal={arXiv preprint arXiv:1603.00831},
year={2016}
}
```
| PaddleDetection/docs/tutorials/data/PrepareMOTDataSet.md/0 | {
"file_path": "PaddleDetection/docs/tutorials/data/PrepareMOTDataSet.md",
"repo_id": "PaddleDetection",
"token_count": 7868
} | 78 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def bbox_area(boxes):
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def intersection_over_box(chips, boxes):
"""
intersection area over box area
:param chips: C
:param boxes: B
:return: iob, CxB
"""
M = chips.shape[0]
N = boxes.shape[0]
if M * N == 0:
return np.zeros([M, N], dtype='float32')
box_area = bbox_area(boxes) # B
inter_x2y2 = np.minimum(np.expand_dims(chips, 1)[:, :, 2:],
boxes[:, 2:]) # CxBX2
inter_x1y1 = np.maximum(np.expand_dims(chips, 1)[:, :, :2],
boxes[:, :2]) # CxBx2
inter_wh = inter_x2y2 - inter_x1y1
inter_wh = np.clip(inter_wh, a_min=0, a_max=None)
inter_area = inter_wh[:, :, 0] * inter_wh[:, :, 1] # CxB
iob = inter_area / np.expand_dims(box_area, 0)
return iob
def clip_boxes(boxes, im_shape):
"""
Clip boxes to image boundaries.
:param boxes: [N, 4]
:param im_shape: tuple of 2, [h, w]
:return: [N, 4]
"""
# x1 >= 0
boxes[:, 0] = np.clip(boxes[:, 0], 0, im_shape[1] - 1)
# y1 >= 0
boxes[:, 1] = np.clip(boxes[:, 1], 0, im_shape[0] - 1)
# x2 < im_shape[1]
boxes[:, 2] = np.clip(boxes[:, 2], 1, im_shape[1])
# y2 < im_shape[0]
boxes[:, 3] = np.clip(boxes[:, 3], 1, im_shape[0])
return boxes
def transform_chip_box(gt_bbox: 'Gx4', boxes_idx: 'B', chip: '4'):
boxes_idx = np.array(boxes_idx)
cur_gt_bbox = gt_bbox[boxes_idx].copy() # Bx4
x1, y1, x2, y2 = chip
cur_gt_bbox[:, 0] -= x1
cur_gt_bbox[:, 1] -= y1
cur_gt_bbox[:, 2] -= x1
cur_gt_bbox[:, 3] -= y1
h = y2 - y1
w = x2 - x1
cur_gt_bbox = clip_boxes(cur_gt_bbox, (h, w))
ws = (cur_gt_bbox[:, 2] - cur_gt_bbox[:, 0]).astype(np.int32)
hs = (cur_gt_bbox[:, 3] - cur_gt_bbox[:, 1]).astype(np.int32)
valid_idx = (ws >= 2) & (hs >= 2)
return cur_gt_bbox[valid_idx], boxes_idx[valid_idx]
def find_chips_to_cover_overlaped_boxes(iob, overlap_threshold):
chip_ids, box_ids = np.nonzero(iob >= overlap_threshold)
chip_id2overlap_box_num = np.bincount(chip_ids) # 1d array
chip_id2overlap_box_num = np.pad(
chip_id2overlap_box_num, (0, len(iob) - len(chip_id2overlap_box_num)),
constant_values=0)
chosen_chip_ids = []
while len(box_ids) > 0:
value_counts = np.bincount(chip_ids) # 1d array
max_count_chip_id = np.argmax(value_counts)
assert max_count_chip_id not in chosen_chip_ids
chosen_chip_ids.append(max_count_chip_id)
box_ids_in_cur_chip = box_ids[chip_ids == max_count_chip_id]
ids_not_in_cur_boxes_mask = np.logical_not(
np.isin(box_ids, box_ids_in_cur_chip))
chip_ids = chip_ids[ids_not_in_cur_boxes_mask]
box_ids = box_ids[ids_not_in_cur_boxes_mask]
return chosen_chip_ids, chip_id2overlap_box_num
def transform_chip_boxes2image_boxes(chip_boxes, chip, img_h, img_w):
chip_boxes = np.array(sorted(chip_boxes, key=lambda item: -item[1]))
xmin, ymin, _, _ = chip
# Transform to origin image loc
chip_boxes[:, 2] += xmin
chip_boxes[:, 4] += xmin
chip_boxes[:, 3] += ymin
chip_boxes[:, 5] += ymin
chip_boxes = clip_boxes(chip_boxes, (img_h, img_w))
return chip_boxes
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 1]
x1 = dets[:, 2]
y1 = dets[:, 3]
x2 = dets[:, 4]
y2 = dets[:, 5]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
suppressed = np.zeros((ndets), dtype=np.int32)
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
| PaddleDetection/ppdet/data/crop_utils/chip_box_utils.py/0 | {
"file_path": "PaddleDetection/ppdet/data/crop_utils/chip_box_utils.py",
"repo_id": "PaddleDetection",
"token_count": 2636
} | 79 |
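A minimal usage sketch for the chip/box helpers in `chip_box_utils.py` above. All arrays are invented, and the import path is taken from the file metadata, so treat both as assumptions rather than a documented API.
```python
import numpy as np
from ppdet.data.crop_utils.chip_box_utils import (
    intersection_over_box, find_chips_to_cover_overlaped_boxes, nms)

chips = np.array([[0, 0, 100, 100], [50, 50, 200, 200]], dtype='float32')    # [C, 4] xyxy
boxes = np.array([[10, 10, 40, 40], [120, 120, 180, 180]], dtype='float32')  # [B, 4] xyxy

iob = intersection_over_box(chips, boxes)  # [C, B], fraction of each box covered by each chip
chosen_chip_ids, chip_id2box_num = find_chips_to_cover_overlaped_boxes(
    iob, overlap_threshold=0.5)

# nms() expects rows of [class_id, score, x1, y1, x2, y2]
dets = np.array([[0, 0.9, 10, 10, 40, 40],
                 [0, 0.8, 12, 12, 42, 42]], dtype='float32')
kept = nms(dets, thresh=0.5)  # the lower-scoring, heavily overlapping row is dropped
```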
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The code is based on:
# https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/atss_assigner.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
"""Calculate overlap between two set of bboxes.
If ``is_aligned `` is ``False``, then calculate the overlaps between each
bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union) or "iof" (intersection over
foreground).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
assert mode in ['iou', 'iof', 'giou', 'diou'], 'Unsupported mode {}'.format(
mode)
    # Either the boxes are empty or the length of the boxes' last dimension is 4
assert (bboxes1.shape[-1] == 4 or bboxes1.shape[0] == 0)
assert (bboxes2.shape[-1] == 4 or bboxes2.shape[0] == 0)
# Batch dim must be the same
# Batch dim: (B1, B2, ... Bn)
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.shape[-2] if bboxes1.shape[0] > 0 else 0
cols = bboxes2.shape[-2] if bboxes2.shape[0] > 0 else 0
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return np.random.random(batch_shape + (rows, ))
else:
return np.random.random(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
bboxes1[..., 3] - bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
bboxes2[..., 3] - bboxes2[..., 1])
if is_aligned:
lt = np.maximum(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
rb = np.minimum(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
wh = (rb - lt).clip(min=0) # [B, rows, 2]
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = np.minimum(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = np.maximum(bboxes1[..., 2:], bboxes2[..., 2:])
if mode == 'diou':
enclosed_lt = np.minimum(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = np.maximum(bboxes1[..., 2:], bboxes2[..., 2:])
b1_x1, b1_y1 = bboxes1[..., 0], bboxes1[..., 1]
b1_x2, b1_y2 = bboxes1[..., 2], bboxes1[..., 3]
b2_x1, b2_y1 = bboxes2[..., 0], bboxes2[..., 1]
b2_x2, b2_y2 = bboxes2[..., 2], bboxes2[..., 3]
else:
lt = np.maximum(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
rb = np.minimum(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
wh = (rb - lt).clip(min=0) # [B, rows, cols, 2]
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = np.minimum(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2])
enclosed_rb = np.maximum(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:])
if mode == 'diou':
enclosed_lt = np.minimum(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2])
enclosed_rb = np.maximum(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:])
b1_x1, b1_y1 = bboxes1[..., :, None, 0], bboxes1[..., :, None, 1]
b1_x2, b1_y2 = bboxes1[..., :, None, 2], bboxes1[..., :, None, 3]
b2_x1, b2_y1 = bboxes2[..., None, :, 0], bboxes2[..., None, :, 1]
b2_x2, b2_y2 = bboxes2[..., None, :, 2], bboxes2[..., None, :, 3]
eps = np.array([eps])
union = np.maximum(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
# calculate gious
if mode in ['giou']:
enclose_wh = (enclosed_rb - enclosed_lt).clip(min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = np.maximum(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
if mode in ['diou']:
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
enclose_wh = (enclosed_rb - enclosed_lt).clip(min=0)
enclose_c = enclose_wh[..., 0]**2 + enclose_wh[..., 1]**2
enclose_c = np.maximum(enclose_c, eps)
dious = ious - rho2 / enclose_c
return dious
def topk_(input, k, axis=1, largest=True):
x = -input if largest else input
if axis == 0:
row_index = np.arange(input.shape[1 - axis])
if k == x.shape[0]: # argpartition requires index < len(input)
topk_index = np.argpartition(x, k - 1, axis=axis)[0:k, :]
else:
topk_index = np.argpartition(x, k, axis=axis)[0:k, :]
topk_data = x[topk_index, row_index]
topk_index_sort = np.argsort(topk_data, axis=axis)
topk_data_sort = topk_data[topk_index_sort, row_index]
topk_index_sort = topk_index[0:k, :][topk_index_sort, row_index]
else:
column_index = np.arange(x.shape[1 - axis])[:, None]
topk_index = np.argpartition(x, k, axis=axis)[:, 0:k]
topk_data = x[column_index, topk_index]
topk_data = -topk_data if largest else topk_data
topk_index_sort = np.argsort(topk_data, axis=axis)
topk_data_sort = topk_data[column_index, topk_index_sort]
topk_index_sort = topk_index[:, 0:k][column_index, topk_index_sort]
return topk_data_sort, topk_index_sort
class ATSSAssigner(object):
"""Assign a corresponding gt bbox or background to each bbox.
    Each proposal will be assigned `0` or a positive integer
indicating the ground truth index.
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
        topk (int): number of bboxes selected in each level
"""
def __init__(self, topk=9):
self.topk = topk
def __call__(self,
bboxes,
num_level_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
"""Assign gt to bboxes.
The assignment is done in following steps
1. compute iou between all bbox (bbox of all pyramid levels) and gt
2. compute center distance between all bbox and gt
3. on each pyramid level, for each gt, select k bbox whose center
are closest to the gt center, so we total select k*l bbox as
candidates for each gt
4. get corresponding iou for the these candidates, and compute the
mean and std, set mean + std as the iou threshold
5. select these candidates whose iou are greater than or equal to
the threshold as postive
6. limit the positive sample's center in gt
Args:
bboxes (np.array): Bounding boxes to be assigned, shape(n, 4).
num_level_bboxes (List): num of bboxes in each level
gt_bboxes (np.array): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (np.array, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (np.array, optional): Label of gt_bboxes, shape (k, ).
"""
bboxes = bboxes[:, :4]
num_gt, num_bboxes = gt_bboxes.shape[0], bboxes.shape[0]
# assign 0 by default
assigned_gt_inds = np.zeros((num_bboxes, ), dtype=np.int64)
if num_gt == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = np.zeros((num_bboxes, ))
if num_gt == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if not np.any(gt_labels):
assigned_labels = None
else:
assigned_labels = -np.ones((num_bboxes, ), dtype=np.int64)
return assigned_gt_inds, max_overlaps
# compute iou between all bbox and gt
overlaps = bbox_overlaps(bboxes, gt_bboxes)
# compute center distance between all bbox and gt
gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
gt_points = np.stack((gt_cx, gt_cy), axis=1)
bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
bboxes_points = np.stack((bboxes_cx, bboxes_cy), axis=1)
distances = np.sqrt(
np.power((bboxes_points[:, None, :] - gt_points[None, :, :]), 2)
.sum(-1))
# Selecting candidates based on the center distance
candidate_idxs = []
start_idx = 0
for bboxes_per_level in num_level_bboxes:
# on each pyramid level, for each gt,
# select k bbox whose center are closest to the gt center
end_idx = start_idx + bboxes_per_level
distances_per_level = distances[start_idx:end_idx, :]
selectable_k = min(self.topk, bboxes_per_level)
_, topk_idxs_per_level = topk_(
distances_per_level, selectable_k, axis=0, largest=False)
candidate_idxs.append(topk_idxs_per_level + start_idx)
start_idx = end_idx
candidate_idxs = np.concatenate(candidate_idxs, axis=0)
        # get the corresponding iou for these candidates, and compute the
# mean and std, set mean + std as the iou threshold
candidate_overlaps = overlaps[candidate_idxs, np.arange(num_gt)]
overlaps_mean_per_gt = candidate_overlaps.mean(0)
overlaps_std_per_gt = candidate_overlaps.std(0)
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
# limit the positive sample's center in gt
for gt_idx in range(num_gt):
candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
ep_bboxes_cx = np.broadcast_to(
bboxes_cx.reshape(1, -1), [num_gt, num_bboxes]).reshape(-1)
ep_bboxes_cy = np.broadcast_to(
bboxes_cy.reshape(1, -1), [num_gt, num_bboxes]).reshape(-1)
candidate_idxs = candidate_idxs.reshape(-1)
# calculate the left, top, right, bottom distance between positive
# bbox center and gt side
l_ = ep_bboxes_cx[candidate_idxs].reshape(-1, num_gt) - gt_bboxes[:, 0]
t_ = ep_bboxes_cy[candidate_idxs].reshape(-1, num_gt) - gt_bboxes[:, 1]
r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].reshape(-1, num_gt)
b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].reshape(-1, num_gt)
is_in_gts = np.stack([l_, t_, r_, b_], axis=1).min(axis=1) > 0.01
is_pos = is_pos & is_in_gts
# if an anchor box is assigned to multiple gts,
# the one with the highest IoU will be selected.
overlaps_inf = -np.inf * np.ones_like(overlaps).T.reshape(-1)
index = candidate_idxs.reshape(-1)[is_pos.reshape(-1)]
overlaps_inf[index] = overlaps.T.reshape(-1)[index]
overlaps_inf = overlaps_inf.reshape(num_gt, -1).T
max_overlaps = overlaps_inf.max(axis=1)
argmax_overlaps = overlaps_inf.argmax(axis=1)
assigned_gt_inds[max_overlaps !=
-np.inf] = argmax_overlaps[max_overlaps != -np.inf] + 1
return assigned_gt_inds, max_overlaps
def get_vlr_region(self,
bboxes,
num_level_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
gt_labels=None):
"""get vlr region for ld distillation.
Args:
bboxes (np.array): Bounding boxes to be assigned, shape(n, 4).
num_level_bboxes (List): num of bboxes in each level
gt_bboxes (np.array): Groundtruth boxes, shape (k, 4).
gt_bboxes_ignore (np.array, optional): Ground truth bboxes that are
labelled as `ignored`, e.g., crowd boxes in COCO.
gt_labels (np.array, optional): Label of gt_bboxes, shape (k, ).
"""
bboxes = bboxes[:, :4]
num_gt, num_bboxes = gt_bboxes.shape[0], bboxes.shape[0]
# compute iou between all bbox and gt
overlaps = bbox_overlaps(bboxes, gt_bboxes)
# compute diou between all bbox and gt
diou = bbox_overlaps(bboxes, gt_bboxes, mode='diou')
# assign 0 by default
assigned_gt_inds = np.zeros((num_bboxes, ), dtype=np.int64)
vlr_region_iou = (assigned_gt_inds + 0).astype(np.float32)
if num_gt == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = np.zeros((num_bboxes, ))
if num_gt == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if not np.any(gt_labels):
assigned_labels = None
else:
assigned_labels = -np.ones((num_bboxes, ), dtype=np.int64)
return assigned_gt_inds, max_overlaps
# compute center distance between all bbox and gt
gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
gt_points = np.stack((gt_cx, gt_cy), axis=1)
bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
bboxes_points = np.stack((bboxes_cx, bboxes_cy), axis=1)
distances = np.sqrt(
np.power((bboxes_points[:, None, :] - gt_points[None, :, :]), 2)
.sum(-1))
# Selecting candidates based on the center distance
candidate_idxs = []
candidate_idxs_t = []
start_idx = 0
for bboxes_per_level in num_level_bboxes:
# on each pyramid level, for each gt,
# select k bbox whose center are closest to the gt center
end_idx = start_idx + bboxes_per_level
distances_per_level = distances[start_idx:end_idx, :]
selectable_t = min(self.topk, bboxes_per_level)
selectable_k = bboxes_per_level #k for all
_, topt_idxs_per_level = topk_(
distances_per_level, selectable_t, axis=0, largest=False)
_, topk_idxs_per_level = topk_(
distances_per_level, selectable_k, axis=0, largest=False)
candidate_idxs_t.append(topt_idxs_per_level + start_idx)
candidate_idxs.append(topk_idxs_per_level + start_idx)
start_idx = end_idx
candidate_idxs_t = np.concatenate(candidate_idxs_t, axis=0)
candidate_idxs = np.concatenate(candidate_idxs, axis=0)
        # get the corresponding iou for these candidates, and compute the
# mean and std, set mean + std as the iou threshold
candidate_overlaps_t = overlaps[candidate_idxs_t, np.arange(num_gt)]
# compute tdiou
t_diou = diou[candidate_idxs, np.arange(num_gt)]
overlaps_mean_per_gt = candidate_overlaps_t.mean(0)
overlaps_std_per_gt = candidate_overlaps_t.std(
0, ddof=1) # NOTE: use Bessel correction
overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
# compute region
is_pos = (t_diou < overlaps_thr_per_gt[None, :]) & (
t_diou >= 0.25 * overlaps_thr_per_gt[None, :])
# limit the positive sample's center in gt
for gt_idx in range(num_gt):
candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
candidate_idxs = candidate_idxs.reshape(-1)
# if an anchor box is assigned to multiple gts,
# the one with the highest IoU will be selected.
overlaps_inf = -np.inf * np.ones_like(overlaps).T.reshape(-1)
index = candidate_idxs.reshape(-1)[is_pos.reshape(-1)]
overlaps_inf[index] = overlaps.T.reshape(-1)[index]
overlaps_inf = overlaps_inf.reshape(num_gt, -1).T
max_overlaps = overlaps_inf.max(axis=1)
argmax_overlaps = overlaps_inf.argmax(axis=1)
overlaps_inf = -np.inf * np.ones_like(overlaps).T.reshape(-1)
overlaps_inf = overlaps_inf.reshape(num_gt, -1).T
assigned_gt_inds[max_overlaps !=
-np.inf] = argmax_overlaps[max_overlaps != -np.inf] + 1
vlr_region_iou[max_overlaps !=
-np.inf] = max_overlaps[max_overlaps != -np.inf] + 0
return vlr_region_iou
| PaddleDetection/ppdet/data/transform/atss_assigner.py/0 | {
"file_path": "PaddleDetection/ppdet/data/transform/atss_assigner.py",
"repo_id": "PaddleDetection",
"token_count": 8947
} | 80 |
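A hedged sketch of calling `ATSSAssigner` above. The anchors and ground truths are random placeholders, and the import path simply mirrors the file metadata.
```python
import numpy as np
from ppdet.data.transform.atss_assigner import ATSSAssigner

assigner = ATSSAssigner(topk=9)

# 12 anchors spread over two pyramid levels (8 + 4), xyxy format
anchors = np.random.rand(12, 4).astype('float32') * 100
anchors[:, 2:] += anchors[:, :2]           # ensure x2 > x1 and y2 > y1
num_level_bboxes = [8, 4]

gt_bboxes = np.array([[10., 10., 60., 60.]], dtype='float32')
gt_labels = np.array([3], dtype='int64')

assigned_gt_inds, max_overlaps = assigner(
    anchors, num_level_bboxes, gt_bboxes, gt_labels=gt_labels)
# assigned_gt_inds[i] == 0 means background, k > 0 means anchor i matched gt k-1
```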
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import re
import paddle
import paddle.nn as nn
import numpy as np
from tqdm import tqdm
from collections import defaultdict
from ppdet.core.workspace import create
from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
from ppdet.modeling.mot.utils import MOTTimer, load_det_results, write_mot_results, save_vis_results
from ppdet.modeling.mot.tracker import JDETracker, CenterTracker
from ppdet.modeling.mot.tracker import DeepSORTTracker, OCSORTTracker, BOTSORTTracker
from ppdet.modeling.architectures import YOLOX
from ppdet.metrics import Metric, MOTMetric, KITTIMOTMetric, MCMOTMetric
from ppdet.data.source.category import get_categories
import ppdet.utils.stats as stats
from .callbacks import Callback, ComposeCallback
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
MOT_ARCH = ['JDE', 'FairMOT', 'DeepSORT', 'ByteTrack', 'CenterTrack']
MOT_ARCH_JDE = MOT_ARCH[:2]
MOT_ARCH_SDE = MOT_ARCH[2:4]
MOT_DATA_TYPE = ['mot', 'mcmot', 'kitti']
__all__ = ['Tracker']
class Tracker(object):
def __init__(self, cfg, mode='eval'):
self.cfg = cfg
assert mode.lower() in ['test', 'eval'], \
"mode should be 'test' or 'eval'"
self.mode = mode.lower()
self.optimizer = None
# build MOT data loader
self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]
# build model
self.model = create(cfg.architecture)
if isinstance(self.model.detector, YOLOX):
for k, m in self.model.named_sublayers():
if isinstance(m, nn.BatchNorm2D):
m._epsilon = 1e-3 # for amp(fp16)
m._momentum = 0.97 # 0.03 in pytorch
anno_file = self.dataset.get_anno()
clsid2catid, catid2name = get_categories(
self.cfg.metric, anno_file=anno_file)
self.ids2names = []
for k, v in catid2name.items():
self.ids2names.append(v)
self.status = {}
self.start_epoch = 0
# initial default callbacks
self._init_callbacks()
# initial default metrics
self._init_metrics()
self._reset_metrics()
def _init_callbacks(self):
self._callbacks = []
self._compose_callback = None
def _init_metrics(self):
if self.mode in ['test']:
self._metrics = []
return
if self.cfg.metric == 'MOT':
self._metrics = [MOTMetric(), ]
elif self.cfg.metric == 'MCMOT':
self._metrics = [MCMOTMetric(self.cfg.num_classes), ]
elif self.cfg.metric == 'KITTI':
self._metrics = [KITTIMOTMetric(), ]
else:
logger.warning("Metric not support for metric type {}".format(
self.cfg.metric))
self._metrics = []
def _reset_metrics(self):
for metric in self._metrics:
metric.reset()
def register_callbacks(self, callbacks):
callbacks = [h for h in list(callbacks) if h is not None]
for c in callbacks:
assert isinstance(c, Callback), \
"metrics shoule be instances of subclass of Metric"
self._callbacks.extend(callbacks)
self._compose_callback = ComposeCallback(self._callbacks)
def register_metrics(self, metrics):
metrics = [m for m in list(metrics) if m is not None]
for m in metrics:
assert isinstance(m, Metric), \
"metrics shoule be instances of subclass of Metric"
self._metrics.extend(metrics)
def load_weights_jde(self, weights):
load_weight(self.model, weights, self.optimizer)
def load_weights_sde(self, det_weights, reid_weights):
with_detector = self.model.detector is not None
with_reid = self.model.reid is not None
if with_detector:
load_weight(self.model.detector, det_weights)
if with_reid:
load_weight(self.model.reid, reid_weights)
else:
load_weight(self.model.reid, reid_weights)
def _eval_seq_centertrack(self,
dataloader,
save_dir=None,
show_image=False,
frame_rate=30,
draw_threshold=0):
assert isinstance(self.model.tracker, CenterTracker)
if save_dir:
if not os.path.exists(save_dir): os.makedirs(save_dir)
tracker = self.model.tracker
timer = MOTTimer()
frame_id = 0
self.status['mode'] = 'track'
self.model.eval()
results = defaultdict(list) # only support single class now
for step_id, data in enumerate(tqdm(dataloader)):
self.status['step_id'] = step_id
if step_id == 0:
self.model.reset_tracking()
# forward
timer.tic()
pred_ret = self.model(data)
online_targets = tracker.update(pred_ret)
online_tlwhs, online_scores, online_ids = [], [], []
for t in online_targets:
bbox = t['bbox']
tlwh = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
tscore = float(t['score'])
tid = int(t['tracking_id'])
if tlwh[2] * tlwh[3] > 0:
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_scores.append(tscore)
timer.toc()
# save results
results[0].append(
(frame_id + 1, online_tlwhs, online_scores, online_ids))
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes, self.ids2names)
frame_id += 1
return results, frame_id, timer.average_time, timer.calls
def _eval_seq_jde(self,
dataloader,
save_dir=None,
show_image=False,
frame_rate=30,
draw_threshold=0):
if save_dir:
if not os.path.exists(save_dir): os.makedirs(save_dir)
tracker = self.model.tracker
tracker.max_time_lost = int(frame_rate / 30.0 * tracker.track_buffer)
timer = MOTTimer()
frame_id = 0
self.status['mode'] = 'track'
self.model.eval()
results = defaultdict(list) # support single class and multi classes
for step_id, data in enumerate(tqdm(dataloader)):
self.status['step_id'] = step_id
# forward
timer.tic()
pred_dets, pred_embs = self.model(data)
pred_dets, pred_embs = pred_dets.numpy(), pred_embs.numpy()
online_targets_dict = self.model.tracker.update(pred_dets,
pred_embs)
online_tlwhs = defaultdict(list)
online_scores = defaultdict(list)
online_ids = defaultdict(list)
for cls_id in range(self.cfg.num_classes):
online_targets = online_targets_dict[cls_id]
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
tscore = t.score
if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
3] > tracker.vertical_ratio:
continue
online_tlwhs[cls_id].append(tlwh)
online_ids[cls_id].append(tid)
online_scores[cls_id].append(tscore)
# save results
results[cls_id].append(
(frame_id + 1, online_tlwhs[cls_id], online_scores[cls_id],
online_ids[cls_id]))
timer.toc()
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes, self.ids2names)
frame_id += 1
return results, frame_id, timer.average_time, timer.calls
def _eval_seq_sde(self,
dataloader,
save_dir=None,
show_image=False,
frame_rate=30,
seq_name='',
scaled=False,
det_file='',
draw_threshold=0):
if save_dir:
if not os.path.exists(save_dir): os.makedirs(save_dir)
        use_detector = self.model.detector is not None
        use_reid = hasattr(self.model, 'reid') and self.model.reid is not None
timer = MOTTimer()
results = defaultdict(list)
frame_id = 0
self.status['mode'] = 'track'
self.model.eval()
if use_reid:
self.model.reid.eval()
if not use_detector:
dets_list = load_det_results(det_file, len(dataloader))
logger.info('Finish loading detection results file {}.'.format(
det_file))
tracker = self.model.tracker
for step_id, data in enumerate(tqdm(dataloader)):
self.status['step_id'] = step_id
ori_image = data['ori_image'] # [bs, H, W, 3]
ori_image_shape = data['ori_image'].shape[1:3]
# ori_image_shape: [H, W]
input_shape = data['image'].shape[2:]
# input_shape: [h, w], before data transforms, set in model config
im_shape = data['im_shape'][0].numpy()
# im_shape: [new_h, new_w], after data transforms
scale_factor = data['scale_factor'][0].numpy()
empty_detections = False
# when it has no detected bboxes, will not inference reid model
# and if visualize, use original image instead
# forward
timer.tic()
if not use_detector:
dets = dets_list[frame_id]
bbox_tlwh = np.array(dets['bbox'], dtype='float32')
if bbox_tlwh.shape[0] > 0:
# detector outputs: pred_cls_ids, pred_scores, pred_bboxes
pred_cls_ids = np.array(dets['cls_id'], dtype='float32')
pred_scores = np.array(dets['score'], dtype='float32')
pred_bboxes = np.concatenate(
(bbox_tlwh[:, 0:2],
bbox_tlwh[:, 2:4] + bbox_tlwh[:, 0:2]),
axis=1)
else:
logger.warning(
                    'Frame {} has no detected object, try to modify the score threshold.'.
format(frame_id))
empty_detections = True
else:
outs = self.model.detector(data)
outs['bbox'] = outs['bbox'].numpy()
outs['bbox_num'] = outs['bbox_num'].numpy()
if len(outs['bbox']) > 0 and empty_detections == False:
# detector outputs: pred_cls_ids, pred_scores, pred_bboxes
pred_cls_ids = outs['bbox'][:, 0:1]
pred_scores = outs['bbox'][:, 1:2]
if not scaled:
# Note: scaled=False only in JDE YOLOv3 or other detectors
# with LetterBoxResize and JDEBBoxPostProcess.
#
# 'scaled' means whether the coords after detector outputs
# have been scaled back to the original image, set True
# in general detector, set False in JDE YOLOv3.
pred_bboxes = scale_coords(outs['bbox'][:, 2:],
input_shape, im_shape,
scale_factor)
else:
pred_bboxes = outs['bbox'][:, 2:]
pred_dets_old = np.concatenate(
(pred_cls_ids, pred_scores, pred_bboxes), axis=1)
else:
logger.warning(
                    'Frame {} has no detected object, try to modify the score threshold.'.
format(frame_id))
empty_detections = True
if not empty_detections:
pred_xyxys, keep_idx = clip_box(pred_bboxes, ori_image_shape)
if len(keep_idx[0]) == 0:
logger.warning(
                        'Frame {} has no detected object left after clip_box.'.
format(frame_id))
empty_detections = True
if empty_detections:
timer.toc()
# if visualize, use original image instead
online_ids, online_tlwhs, online_scores = None, None, None
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes, self.ids2names)
frame_id += 1
# thus will not inference reid model
continue
pred_cls_ids = pred_cls_ids[keep_idx[0]]
pred_scores = pred_scores[keep_idx[0]]
pred_dets = np.concatenate(
(pred_cls_ids, pred_scores, pred_xyxys), axis=1)
if use_reid:
crops = get_crops(
pred_xyxys,
ori_image,
w=tracker.input_size[0],
h=tracker.input_size[1])
crops = paddle.to_tensor(crops)
data.update({'crops': crops})
pred_embs = self.model(data)['embeddings'].numpy()
else:
pred_embs = None
if isinstance(tracker, DeepSORTTracker):
online_tlwhs, online_scores, online_ids = [], [], []
tracker.predict()
online_targets = tracker.update(pred_dets, pred_embs)
for t in online_targets:
if not t.is_confirmed() or t.time_since_update > 1:
continue
tlwh = t.to_tlwh()
tscore = t.score
tid = t.track_id
if tscore < draw_threshold: continue
if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
3] > tracker.vertical_ratio:
continue
online_tlwhs.append(tlwh)
online_scores.append(tscore)
online_ids.append(tid)
timer.toc()
# save results
results[0].append(
(frame_id + 1, online_tlwhs, online_scores, online_ids))
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes, self.ids2names)
elif isinstance(tracker, JDETracker):
# trick hyperparams only used for MOTChallenge (MOT17, MOT20) Test-set
tracker.track_buffer, tracker.conf_thres = get_trick_hyperparams(
seq_name, tracker.track_buffer, tracker.conf_thres)
online_targets_dict = tracker.update(pred_dets_old, pred_embs)
online_tlwhs = defaultdict(list)
online_scores = defaultdict(list)
online_ids = defaultdict(list)
for cls_id in range(self.cfg.num_classes):
online_targets = online_targets_dict[cls_id]
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
tscore = t.score
if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
3] > tracker.vertical_ratio:
continue
online_tlwhs[cls_id].append(tlwh)
online_ids[cls_id].append(tid)
online_scores[cls_id].append(tscore)
# save results
results[cls_id].append(
(frame_id + 1, online_tlwhs[cls_id],
online_scores[cls_id], online_ids[cls_id]))
timer.toc()
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes, self.ids2names)
elif isinstance(tracker, OCSORTTracker):
# OC_SORT Tracker
online_targets = tracker.update(pred_dets_old, pred_embs)
online_tlwhs = []
online_ids = []
online_scores = []
for t in online_targets:
tlwh = [t[0], t[1], t[2] - t[0], t[3] - t[1]]
tscore = float(t[4])
tid = int(t[5])
if tlwh[2] * tlwh[3] > 0:
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_scores.append(tscore)
timer.toc()
# save results
results[0].append(
(frame_id + 1, online_tlwhs, online_scores, online_ids))
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes, self.ids2names)
elif isinstance(tracker, BOTSORTTracker):
# BOTSORT Tracker
online_targets = tracker.update(
pred_dets_old, img=ori_image.numpy())
online_tlwhs = []
online_ids = []
online_scores = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
tscore = t.score
if tlwh[2] * tlwh[3] > 0:
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_scores.append(tscore)
timer.toc()
# save results
results[0].append(
(frame_id + 1, online_tlwhs, online_scores, online_ids))
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes, self.ids2names)
else:
raise ValueError(tracker)
frame_id += 1
return results, frame_id, timer.average_time, timer.calls
def mot_evaluate(self,
data_root,
seqs,
output_dir,
data_type='mot',
model_type='JDE',
save_images=False,
save_videos=False,
show_image=False,
scaled=False,
det_results_dir=''):
if not os.path.exists(output_dir): os.makedirs(output_dir)
result_root = os.path.join(output_dir, 'mot_results')
if not os.path.exists(result_root): os.makedirs(result_root)
assert data_type in MOT_DATA_TYPE, \
"data_type should be 'mot', 'mcmot' or 'kitti'"
assert model_type in MOT_ARCH, \
"model_type should be 'JDE', 'DeepSORT', 'FairMOT' or 'ByteTrack'"
# run tracking
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
infer_dir = os.path.join(data_root, seq)
if not os.path.exists(infer_dir) or not os.path.isdir(infer_dir):
logger.warning("Seq {} error, {} has no images.".format(
seq, infer_dir))
continue
if os.path.exists(os.path.join(infer_dir, 'img1')):
infer_dir = os.path.join(infer_dir, 'img1')
frame_rate = 30
seqinfo = os.path.join(data_root, seq, 'seqinfo.ini')
if os.path.exists(seqinfo):
meta_info = open(seqinfo).read()
frame_rate = int(meta_info[meta_info.find('frameRate') + 10:
meta_info.find('\nseqLength')])
save_dir = os.path.join(output_dir, 'mot_outputs',
seq) if save_images or save_videos else None
logger.info('Evaluate seq: {}'.format(seq))
self.dataset.set_images(self.get_infer_images(infer_dir))
dataloader = create('EvalMOTReader')(self.dataset, 0)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
with paddle.no_grad():
if model_type in MOT_ARCH_JDE:
results, nf, ta, tc = self._eval_seq_jde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate)
elif model_type in MOT_ARCH_SDE:
results, nf, ta, tc = self._eval_seq_sde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
seq_name=seq,
scaled=scaled,
det_file=os.path.join(det_results_dir,
'{}.txt'.format(seq)))
elif model_type == 'CenterTrack':
results, nf, ta, tc = self._eval_seq_centertrack(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate)
else:
raise ValueError(model_type)
write_mot_results(result_filename, results, data_type,
self.cfg.num_classes)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
if save_videos:
output_video_path = os.path.join(save_dir, '..',
'{}_vis.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
save_dir, output_video_path)
os.system(cmd_str)
logger.info('Save video in {}.'.format(output_video_path))
# update metrics
for metric in self._metrics:
metric.update(data_root, seq, data_type, result_root,
result_filename)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
all_time, 1.0 / avg_time))
# accumulate metric to log out
for metric in self._metrics:
metric.accumulate()
metric.log()
# reset metric states for metric may performed multiple times
self._reset_metrics()
def get_infer_images(self, infer_dir):
assert infer_dir is None or os.path.isdir(infer_dir), \
"{} is not a directory".format(infer_dir)
images = set()
assert os.path.isdir(infer_dir), \
"infer_dir {} is not a directory".format(infer_dir)
exts = ['jpg', 'jpeg', 'png', 'bmp']
exts += [ext.upper() for ext in exts]
for ext in exts:
images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
images = list(images)
images.sort()
assert len(images) > 0, "no image found in {}".format(infer_dir)
logger.info("Found {} inference images in total.".format(len(images)))
return images
def mot_predict_seq(self,
video_file,
frame_rate,
image_dir,
output_dir,
data_type='mot',
model_type='JDE',
save_images=False,
save_videos=True,
show_image=False,
scaled=False,
det_results_dir='',
draw_threshold=0.5):
assert video_file is not None or image_dir is not None, \
"--video_file or --image_dir should be set."
assert video_file is None or os.path.isfile(video_file), \
"{} is not a file".format(video_file)
assert image_dir is None or os.path.isdir(image_dir), \
"{} is not a directory".format(image_dir)
if not os.path.exists(output_dir): os.makedirs(output_dir)
result_root = os.path.join(output_dir, 'mot_results')
if not os.path.exists(result_root): os.makedirs(result_root)
assert data_type in MOT_DATA_TYPE, \
"data_type should be 'mot', 'mcmot' or 'kitti'"
assert model_type in MOT_ARCH, \
"model_type should be 'JDE', 'DeepSORT', 'FairMOT' or 'ByteTrack'"
# run tracking
if video_file:
seq = video_file.split('/')[-1].split('.')[0]
self.dataset.set_video(video_file, frame_rate)
logger.info('Starting tracking video {}'.format(video_file))
elif image_dir:
seq = image_dir.split('/')[-1].split('.')[0]
if os.path.exists(os.path.join(image_dir, 'img1')):
image_dir = os.path.join(image_dir, 'img1')
images = [
'{}/{}'.format(image_dir, x) for x in os.listdir(image_dir)
]
images.sort()
self.dataset.set_images(images)
logger.info('Starting tracking folder {}, found {} images'.format(
image_dir, len(images)))
else:
raise ValueError('--video_file or --image_dir should be set.')
save_dir = os.path.join(output_dir, 'mot_outputs',
seq) if save_images or save_videos else None
dataloader = create('TestMOTReader')(self.dataset, 0)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
if frame_rate == -1:
frame_rate = self.dataset.frame_rate
with paddle.no_grad():
if model_type in MOT_ARCH_JDE:
results, nf, ta, tc = self._eval_seq_jde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
draw_threshold=draw_threshold)
elif model_type in MOT_ARCH_SDE:
results, nf, ta, tc = self._eval_seq_sde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
seq_name=seq,
scaled=scaled,
det_file=os.path.join(det_results_dir,
'{}.txt'.format(seq)),
draw_threshold=draw_threshold)
elif model_type == 'CenterTrack':
results, nf, ta, tc = self._eval_seq_centertrack(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate)
else:
raise ValueError(model_type)
if save_videos:
output_video_path = os.path.join(save_dir, '..',
'{}_vis.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
save_dir, output_video_path)
os.system(cmd_str)
logger.info('Save video in {}'.format(output_video_path))
write_mot_results(result_filename, results, data_type,
self.cfg.num_classes)
def get_trick_hyperparams(video_name, ori_buffer, ori_thresh):
if video_name[:3] != 'MOT':
# only used for MOTChallenge (MOT17, MOT20) Test-set
return ori_buffer, ori_thresh
video_name = video_name[:8]
if 'MOT17-05' in video_name:
track_buffer = 14
elif 'MOT17-13' in video_name:
track_buffer = 25
else:
track_buffer = ori_buffer
if 'MOT17-01' in video_name:
track_thresh = 0.65
elif 'MOT17-06' in video_name:
track_thresh = 0.65
elif 'MOT17-12' in video_name:
track_thresh = 0.7
elif 'MOT17-14' in video_name:
track_thresh = 0.67
else:
track_thresh = ori_thresh
    if 'MOT20-06' in video_name or 'MOT20-08' in video_name:
        track_thresh = 0.3
    return track_buffer, track_thresh
| PaddleDetection/ppdet/engine/tracker.py/0 | {
"file_path": "PaddleDetection/ppdet/engine/tracker.py",
"repo_id": "PaddleDetection",
"token_count": 16883
} | 81 |
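The frame-rate handling used by the tracker engine above can be summarised in a short, self-contained sketch; the sequence directory below is a placeholder.
```python
import os

def read_frame_rate(seq_dir, default=30):
    """Parse frameRate from an MOTChallenge-style seqinfo.ini, as done in mot_evaluate."""
    seqinfo = os.path.join(seq_dir, 'seqinfo.ini')
    if not os.path.exists(seqinfo):
        return default
    meta_info = open(seqinfo).read()
    return int(meta_info[meta_info.find('frameRate') + 10:
                         meta_info.find('\nseqLength')])

# JDE-style trackers keep lost tracks alive for track_buffer frames at 30 FPS,
# so the buffer is rescaled when a sequence was recorded at another frame rate.
track_buffer = 30
frame_rate = read_frame_rate('./MOT17-02', default=30)
max_time_lost = int(frame_rate / 30.0 * track_buffer)
```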
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import itertools
from ppdet.metrics.json_results import get_det_res, get_det_poly_res, get_seg_res, get_solov2_segm_res, get_keypoint_res, get_pose3d_res
from ppdet.metrics.map_utils import draw_pr_curve
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
def get_infer_results(outs, catid, bias=0):
"""
Get result at the stage of inference.
The output format is dictionary containing bbox or mask result.
For example, bbox result is a list and each element contains
image_id, category_id, bbox and score.
"""
if outs is None or len(outs) == 0:
raise ValueError(
            'The number of valid detection results is zero. Please use a reasonable model and check the input data.'
)
im_id = outs['im_id']
infer_res = {}
if 'bbox' in outs:
if len(outs['bbox']) > 0 and len(outs['bbox'][0]) > 6:
infer_res['bbox'] = get_det_poly_res(
outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
else:
infer_res['bbox'] = get_det_res(
outs['bbox'], outs['bbox_num'], im_id, catid, bias=bias)
if 'mask' in outs:
# mask post process
infer_res['mask'] = get_seg_res(outs['mask'], outs['bbox'],
outs['bbox_num'], im_id, catid)
if 'segm' in outs:
infer_res['segm'] = get_solov2_segm_res(outs, im_id, catid)
if 'keypoint' in outs:
infer_res['keypoint'] = get_keypoint_res(outs, im_id)
outs['bbox_num'] = [len(infer_res['keypoint'])]
if 'pose3d' in outs:
infer_res['pose3d'] = get_pose3d_res(outs, im_id)
outs['bbox_num'] = [len(infer_res['pose3d'])]
return infer_res
def cocoapi_eval(jsonfile,
style,
coco_gt=None,
anno_file=None,
max_dets=(100, 300, 1000),
classwise=False,
sigmas=None,
use_area=True):
"""
Args:
jsonfile (str): Evaluation json file, eg: bbox.json, mask.json.
        style (str): COCOeval style, can be `bbox`, `segm`, `proposal`, `keypoints` or `keypoints_crowd`.
coco_gt (str): Whether to load COCOAPI through anno_file,
eg: coco_gt = COCO(anno_file)
anno_file (str): COCO annotations file.
max_dets (tuple): COCO evaluation maxDets.
classwise (bool): Whether per-category AP and draw P-R Curve or not.
sigmas (nparray): keypoint labelling sigmas.
use_area (bool): If gt annotations (eg. CrowdPose, AIC)
do not have 'area', please set use_area=False.
"""
    assert coco_gt is not None or anno_file is not None
if style == 'keypoints_crowd':
#please install xtcocotools==1.6
from xtcocotools.coco import COCO
from xtcocotools.cocoeval import COCOeval
else:
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
    if coco_gt is None:
coco_gt = COCO(anno_file)
logger.info("Start evaluate...")
coco_dt = coco_gt.loadRes(jsonfile)
if style == 'proposal':
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.params.useCats = 0
coco_eval.params.maxDets = list(max_dets)
elif style == 'keypoints_crowd':
coco_eval = COCOeval(coco_gt, coco_dt, style, sigmas, use_area)
else:
coco_eval = COCOeval(coco_gt, coco_dt, style)
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
if classwise:
# Compute per-category AP and PR curve
try:
from terminaltables import AsciiTable
except Exception as e:
logger.error(
                'terminaltables not found, please install terminaltables, '
                'for example: `pip install terminaltables`.')
raise e
precisions = coco_eval.eval['precision']
cat_ids = coco_gt.getCatIds()
# precision: (iou, recall, cls, area range, max dets)
assert len(cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = coco_gt.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(str(nm["name"]), '{:0.3f}'.format(float(ap))))
pr_array = precisions[0, :, idx, 0, 2]
recall_array = np.arange(0.0, 1.01, 0.01)
draw_pr_curve(
pr_array,
recall_array,
out_dir=style + '_pr_curve',
file_name='{}_precision_recall_curve.jpg'.format(nm["name"]))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(
* [results_flatten[i::num_columns] for i in range(num_columns)])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
logger.info('Per-category of {} AP: \n{}'.format(style, table.table))
logger.info("per-category PR curve has output to {} folder.".format(
style + '_pr_curve'))
# flush coco evaluation result
sys.stdout.flush()
return coco_eval.stats
def json_eval_results(metric, json_directory, dataset):
"""
cocoapi eval with already exists proposal.json, bbox.json or mask.json
"""
assert metric == 'COCO'
anno_file = dataset.get_anno()
json_file_list = ['proposal.json', 'bbox.json', 'mask.json']
if json_directory:
assert os.path.exists(
json_directory), "The json directory:{} does not exist".format(
json_directory)
for k, v in enumerate(json_file_list):
json_file_list[k] = os.path.join(str(json_directory), v)
coco_eval_style = ['proposal', 'bbox', 'segm']
for i, v_json in enumerate(json_file_list):
if os.path.exists(v_json):
cocoapi_eval(v_json, coco_eval_style[i], anno_file=anno_file)
else:
logger.info("{} not exists!".format(v_json))
| PaddleDetection/ppdet/metrics/coco_utils.py/0 | {
"file_path": "PaddleDetection/ppdet/metrics/coco_utils.py",
"repo_id": "PaddleDetection",
"token_count": 3369
} | 82 |
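A hedged example of driving `cocoapi_eval` above with an existing result file; both file names are placeholders.
```python
from ppdet.metrics.coco_utils import cocoapi_eval

stats = cocoapi_eval(
    'output/bbox.json',                                # detections in COCO result format
    'bbox',                                            # evaluation style
    anno_file='annotations/instances_val2017.json',    # ground-truth annotations
    classwise=True)                                    # also print per-category AP / PR curves
# stats holds the 12-element COCOeval summary (mAP, AP50, AP75, small/medium/large, ...)
```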
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import ppdet
class TestListModel(unittest.TestCase):
def setUp(self):
self._filter = []
def test_main(self):
try:
ppdet.model_zoo.list_model(self._filter)
self.assertTrue(True)
except:
self.assertTrue(False)
class TestListModelYOLO(TestListModel):
def setUp(self):
self._filter = ['yolo']
class TestListModelRCNN(TestListModel):
def setUp(self):
self._filter = ['rcnn']
class TestListModelSSD(TestListModel):
def setUp(self):
self._filter = ['ssd']
class TestListModelMultiFilter(TestListModel):
def setUp(self):
self._filter = ['yolo', 'darknet']
class TestListModelError(unittest.TestCase):
def setUp(self):
self._filter = ['xxx']
def test_main(self):
try:
ppdet.model_zoo.list_model(self._filter)
self.assertTrue(False)
except ValueError:
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| PaddleDetection/ppdet/model_zoo/tests/test_list_model.py/0 | {
"file_path": "PaddleDetection/ppdet/model_zoo/tests/test_list_model.py",
"repo_id": "PaddleDetection",
"token_count": 653
} | 83 |
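The tests above exercise `ppdet.model_zoo.list_model`. A hedged interactive sketch of the same API follows; the exact matching semantics of the filter strings is an assumption, but the `ValueError` on an unmatched filter is exactly what `TestListModelError` relies on.
```python
import ppdet

ppdet.model_zoo.list_model(['yolo'])             # list configs matching the 'yolo' filter
ppdet.model_zoo.list_model(['yolo', 'darknet'])  # combine several filters
try:
    ppdet.model_zoo.list_model(['xxx'])          # no matching model -> ValueError
except ValueError:
    pass
```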
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ppdet.core.workspace import register, create
from .meta_arch import BaseArch
__all__ = ['JDE']
@register
class JDE(BaseArch):
__category__ = 'architecture'
__shared__ = ['metric']
"""
JDE network, see https://arxiv.org/abs/1909.12605v1
Args:
detector (object): detector model instance
reid (object): reid model instance
tracker (object): tracker instance
metric (str): 'MOTDet' for training and detection evaluation, 'ReID'
for ReID embedding evaluation, or 'MOT' for multi object tracking
evaluation.
"""
def __init__(self,
detector='YOLOv3',
reid='JDEEmbeddingHead',
tracker='JDETracker',
metric='MOT'):
super(JDE, self).__init__()
self.detector = detector
self.reid = reid
self.tracker = tracker
self.metric = metric
@classmethod
def from_config(cls, cfg, *args, **kwargs):
detector = create(cfg['detector'])
kwargs = {'input_shape': detector.neck.out_shape}
reid = create(cfg['reid'], **kwargs)
tracker = create(cfg['tracker'])
return {
"detector": detector,
"reid": reid,
"tracker": tracker,
}
def _forward(self):
det_outs = self.detector(self.inputs)
if self.training:
emb_feats = det_outs['emb_feats']
loss_confs = det_outs['det_losses']['loss_confs']
loss_boxes = det_outs['det_losses']['loss_boxes']
jde_losses = self.reid(
emb_feats,
self.inputs,
loss_confs=loss_confs,
loss_boxes=loss_boxes)
return jde_losses
else:
if self.metric == 'MOTDet':
det_results = {
'bbox': det_outs['bbox'],
'bbox_num': det_outs['bbox_num'],
}
return det_results
elif self.metric == 'MOT':
emb_feats = det_outs['emb_feats']
bboxes = det_outs['bbox']
boxes_idx = det_outs['boxes_idx']
nms_keep_idx = det_outs['nms_keep_idx']
pred_dets, pred_embs = self.reid(
emb_feats,
self.inputs,
bboxes=bboxes,
boxes_idx=boxes_idx,
nms_keep_idx=nms_keep_idx)
return pred_dets, pred_embs
else:
raise ValueError("Unknown metric {} for multi object tracking.".
format(self.metric))
def get_loss(self):
return self._forward()
def get_pred(self):
return self._forward()
| PaddleDetection/ppdet/modeling/architectures/jde.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/architectures/jde.py",
"repo_id": "PaddleDetection",
"token_count": 1718
} | 84 |
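A sketch of how the `JDE` architecture above is normally built from a config via the workspace, mirroring the `create(cfg.architecture)` call used by the tracker engine; the config path is illustrative.
```python
from ppdet.core.workspace import load_config, create

cfg = load_config('configs/mot/jde/jde_darknet53_30e_1088x608.yml')  # illustrative path
model = create(cfg.architecture)   # from_config wires the detector, reid head and tracker
model.eval()
# with metric == 'MOT', get_pred() returns (pred_dets, pred_embs) for JDETracker.update
```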
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ppdet.core.workspace import register, create
from .meta_arch import BaseArch
import paddle
import paddle.nn.functional as F
__all__ = ['SSD']
@register
class SSD(BaseArch):
"""
Single Shot MultiBox Detector, see https://arxiv.org/abs/1512.02325
Args:
backbone (nn.Layer): backbone instance
ssd_head (nn.Layer): `SSDHead` instance
post_process (object): `BBoxPostProcess` instance
"""
__category__ = 'architecture'
__inject__ = ['post_process']
def __init__(self, backbone, ssd_head, post_process, r34_backbone=False):
super(SSD, self).__init__()
self.backbone = backbone
self.ssd_head = ssd_head
self.post_process = post_process
self.r34_backbone = r34_backbone
if self.r34_backbone:
from ppdet.modeling.backbones.resnet import ResNet
assert isinstance(self.backbone, ResNet) and \
self.backbone.depth == 34, \
"If you set r34_backbone=True, please use ResNet-34 as backbone."
self.backbone.res_layers[2].blocks[0].branch2a.conv._stride = [1, 1]
self.backbone.res_layers[2].blocks[0].short.conv._stride = [1, 1]
@classmethod
def from_config(cls, cfg, *args, **kwargs):
# backbone
backbone = create(cfg['backbone'])
# head
kwargs = {'input_shape': backbone.out_shape}
ssd_head = create(cfg['ssd_head'], **kwargs)
return {
'backbone': backbone,
"ssd_head": ssd_head,
}
def _forward(self):
# Backbone
body_feats = self.backbone(self.inputs)
# SSD Head
if self.training:
return self.ssd_head(body_feats, self.inputs['image'],
self.inputs['gt_bbox'],
self.inputs['gt_class'])
else:
preds, anchors = self.ssd_head(body_feats, self.inputs['image'])
bbox, bbox_num, nms_keep_idx = self.post_process(
preds, anchors, self.inputs['im_shape'],
self.inputs['scale_factor'])
if self.use_extra_data:
                extra_data = {}  # record the bbox output before nms, such as scores and nms_keep_idx
"""extra_data:{
'scores': predict scores,
'nms_keep_idx': bbox index before nms,
}
"""
preds_logits = preds[1] # [[1xNumBBoxNumClass]]
extra_data['scores'] = F.softmax(paddle.concat(
preds_logits, axis=1)).transpose([0, 2, 1])
extra_data['logits'] = paddle.concat(
preds_logits, axis=1).transpose([0, 2, 1])
extra_data['nms_keep_idx'] = nms_keep_idx # bbox index before nms
return bbox, bbox_num, extra_data
else:
return bbox, bbox_num
def get_loss(self, ):
return {"loss": self._forward()}
def get_pred(self):
if self.use_extra_data:
bbox_pred, bbox_num, extra_data = self._forward()
output = {
"bbox": bbox_pred,
"bbox_num": bbox_num,
"extra_data": extra_data
}
else:
bbox_pred, bbox_num = self._forward()
output = {
"bbox": bbox_pred,
"bbox_num": bbox_num,
}
return output
| PaddleDetection/ppdet/modeling/architectures/ssd.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/architectures/ssd.py",
"repo_id": "PaddleDetection",
"token_count": 2042
} | 85 |
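The `use_extra_data` branch above concatenates per-level logits before the softmax. A small shape sketch to illustrate; the shapes are made up, assuming the usual `[N, num_priors_i, num_classes]` layout of the SSD head outputs.
```python
import paddle
import paddle.nn.functional as F

preds_logits = [paddle.rand([1, 100, 81]), paddle.rand([1, 60, 81])]  # two feature maps
scores = F.softmax(paddle.concat(preds_logits, axis=1)).transpose([0, 2, 1])
print(scores.shape)  # [1, 81, 160] -> [batch, num_classes, total_priors]
```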
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import AdaptiveAvgPool2D, Linear
from paddle.regularizer import L2Decay
from paddle import ParamAttr
from paddle.nn.initializer import Normal, Uniform
from numbers import Integral
import math
from ppdet.core.workspace import register
from ..shape_spec import ShapeSpec
__all__ = ['HRNet']
class ConvNormLayer(nn.Layer):
def __init__(self,
ch_in,
ch_out,
filter_size,
stride=1,
norm_type='bn',
norm_groups=32,
use_dcn=False,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=False,
act=None,
name=None):
super(ConvNormLayer, self).__init__()
assert norm_type in ['bn', 'sync_bn', 'gn']
self.act = act
self.conv = nn.Conv2D(
in_channels=ch_in,
out_channels=ch_out,
kernel_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=1,
weight_attr=ParamAttr(initializer=Normal(
mean=0., std=0.01)),
bias_attr=False)
norm_lr = 0. if freeze_norm else 1.
param_attr = ParamAttr(
learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
bias_attr = ParamAttr(
learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
global_stats = True if freeze_norm else None
if norm_type in ['bn', 'sync_bn']:
self.norm = nn.BatchNorm2D(
ch_out,
momentum=norm_momentum,
weight_attr=param_attr,
bias_attr=bias_attr,
use_global_stats=global_stats)
elif norm_type == 'gn':
self.norm = nn.GroupNorm(
num_groups=norm_groups,
num_channels=ch_out,
weight_attr=param_attr,
bias_attr=bias_attr)
norm_params = self.norm.parameters()
if freeze_norm:
for param in norm_params:
param.stop_gradient = True
def forward(self, inputs):
out = self.conv(inputs)
out = self.norm(out)
if self.act == 'relu':
out = F.relu(out)
return out
class Layer1(nn.Layer):
def __init__(self,
num_channels,
has_se=False,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=True,
name=None):
super(Layer1, self).__init__()
self.bottleneck_block_list = []
for i in range(4):
bottleneck_block = self.add_sublayer(
"block_{}_{}".format(name, i + 1),
BottleneckBlock(
num_channels=num_channels if i == 0 else 256,
num_filters=64,
has_se=has_se,
stride=1,
downsample=True if i == 0 else False,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + '_' + str(i + 1)))
self.bottleneck_block_list.append(bottleneck_block)
def forward(self, input):
conv = input
for block_func in self.bottleneck_block_list:
conv = block_func(conv)
return conv
class TransitionLayer(nn.Layer):
def __init__(self,
in_channels,
out_channels,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=True,
name=None):
super(TransitionLayer, self).__init__()
num_in = len(in_channels)
num_out = len(out_channels)
out = []
self.conv_bn_func_list = []
for i in range(num_out):
residual = None
if i < num_in:
if in_channels[i] != out_channels[i]:
residual = self.add_sublayer(
"transition_{}_layer_{}".format(name, i + 1),
ConvNormLayer(
ch_in=in_channels[i],
ch_out=out_channels[i],
filter_size=3,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act='relu',
name=name + '_layer_' + str(i + 1)))
else:
residual = self.add_sublayer(
"transition_{}_layer_{}".format(name, i + 1),
ConvNormLayer(
ch_in=in_channels[-1],
ch_out=out_channels[i],
filter_size=3,
stride=2,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act='relu',
name=name + '_layer_' + str(i + 1)))
self.conv_bn_func_list.append(residual)
def forward(self, input):
outs = []
for idx, conv_bn_func in enumerate(self.conv_bn_func_list):
if conv_bn_func is None:
outs.append(input[idx])
else:
if idx < len(input):
outs.append(conv_bn_func(input[idx]))
else:
outs.append(conv_bn_func(input[-1]))
return outs
class Branches(nn.Layer):
def __init__(self,
block_num,
in_channels,
out_channels,
has_se=False,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=True,
name=None):
super(Branches, self).__init__()
self.basic_block_list = []
for i in range(len(out_channels)):
self.basic_block_list.append([])
for j in range(block_num):
in_ch = in_channels[i] if j == 0 else out_channels[i]
basic_block_func = self.add_sublayer(
"bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1),
BasicBlock(
num_channels=in_ch,
num_filters=out_channels[i],
has_se=has_se,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + '_branch_layer_' + str(i + 1) + '_' +
str(j + 1)))
self.basic_block_list[i].append(basic_block_func)
def forward(self, inputs):
outs = []
for idx, input in enumerate(inputs):
conv = input
basic_block_list = self.basic_block_list[idx]
for basic_block_func in basic_block_list:
conv = basic_block_func(conv)
outs.append(conv)
return outs
class BottleneckBlock(nn.Layer):
def __init__(self,
num_channels,
num_filters,
has_se,
stride=1,
downsample=False,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=True,
name=None):
super(BottleneckBlock, self).__init__()
self.has_se = has_se
self.downsample = downsample
self.conv1 = ConvNormLayer(
ch_in=num_channels,
ch_out=num_filters,
filter_size=1,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act="relu",
name=name + "_conv1")
self.conv2 = ConvNormLayer(
ch_in=num_filters,
ch_out=num_filters,
filter_size=3,
stride=stride,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act="relu",
name=name + "_conv2")
self.conv3 = ConvNormLayer(
ch_in=num_filters,
ch_out=num_filters * 4,
filter_size=1,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act=None,
name=name + "_conv3")
if self.downsample:
self.conv_down = ConvNormLayer(
ch_in=num_channels,
ch_out=num_filters * 4,
filter_size=1,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act=None,
name=name + "_downsample")
if self.has_se:
self.se = SELayer(
num_channels=num_filters * 4,
num_filters=num_filters * 4,
reduction_ratio=16,
name='fc' + name)
def forward(self, input):
residual = input
conv1 = self.conv1(input)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
if self.downsample:
residual = self.conv_down(input)
if self.has_se:
conv3 = self.se(conv3)
y = paddle.add(x=residual, y=conv3)
y = F.relu(y)
return y
class BasicBlock(nn.Layer):
def __init__(self,
num_channels,
num_filters,
stride=1,
has_se=False,
downsample=False,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=True,
name=None):
super(BasicBlock, self).__init__()
self.has_se = has_se
self.downsample = downsample
self.conv1 = ConvNormLayer(
ch_in=num_channels,
ch_out=num_filters,
filter_size=3,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
stride=stride,
act="relu",
name=name + "_conv1")
self.conv2 = ConvNormLayer(
ch_in=num_filters,
ch_out=num_filters,
filter_size=3,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
stride=1,
act=None,
name=name + "_conv2")
if self.downsample:
self.conv_down = ConvNormLayer(
ch_in=num_channels,
ch_out=num_filters * 4,
filter_size=1,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act=None,
name=name + "_downsample")
if self.has_se:
self.se = SELayer(
num_channels=num_filters,
num_filters=num_filters,
reduction_ratio=16,
name='fc' + name)
def forward(self, input):
residual = input
conv1 = self.conv1(input)
conv2 = self.conv2(conv1)
if self.downsample:
residual = self.conv_down(input)
if self.has_se:
conv2 = self.se(conv2)
y = paddle.add(x=residual, y=conv2)
y = F.relu(y)
return y
class SELayer(nn.Layer):
def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
super(SELayer, self).__init__()
self.pool2d_gap = AdaptiveAvgPool2D(1)
self._num_channels = num_channels
med_ch = int(num_channels / reduction_ratio)
stdv = 1.0 / math.sqrt(num_channels * 1.0)
self.squeeze = Linear(
num_channels,
med_ch,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
stdv = 1.0 / math.sqrt(med_ch * 1.0)
self.excitation = Linear(
med_ch,
num_filters,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
def forward(self, input):
pool = self.pool2d_gap(input)
pool = paddle.squeeze(pool, axis=[2, 3])
squeeze = self.squeeze(pool)
squeeze = F.relu(squeeze)
excitation = self.excitation(squeeze)
excitation = F.sigmoid(excitation)
excitation = paddle.unsqueeze(excitation, axis=[2, 3])
out = input * excitation
return out
class Stage(nn.Layer):
def __init__(self,
num_channels,
num_modules,
num_filters,
has_se=False,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=True,
multi_scale_output=True,
name=None):
super(Stage, self).__init__()
self._num_modules = num_modules
self.stage_func_list = []
for i in range(num_modules):
if i == num_modules - 1 and not multi_scale_output:
stage_func = self.add_sublayer(
"stage_{}_{}".format(name, i + 1),
HighResolutionModule(
num_channels=num_channels,
num_filters=num_filters,
has_se=has_se,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
multi_scale_output=False,
name=name + '_' + str(i + 1)))
else:
stage_func = self.add_sublayer(
"stage_{}_{}".format(name, i + 1),
HighResolutionModule(
num_channels=num_channels,
num_filters=num_filters,
has_se=has_se,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + '_' + str(i + 1)))
self.stage_func_list.append(stage_func)
def forward(self, input):
out = input
for idx in range(self._num_modules):
out = self.stage_func_list[idx](out)
return out
class HighResolutionModule(nn.Layer):
def __init__(self,
num_channels,
num_filters,
has_se=False,
multi_scale_output=True,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=True,
name=None):
super(HighResolutionModule, self).__init__()
self.branches_func = Branches(
block_num=4,
in_channels=num_channels,
out_channels=num_filters,
has_se=has_se,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name)
self.fuse_func = FuseLayers(
in_channels=num_filters,
out_channels=num_filters,
multi_scale_output=multi_scale_output,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name)
def forward(self, input):
out = self.branches_func(input)
out = self.fuse_func(out)
return out
class FuseLayers(nn.Layer):
def __init__(self,
in_channels,
out_channels,
multi_scale_output=True,
norm_momentum=0.9,
norm_decay=0.,
freeze_norm=True,
name=None):
super(FuseLayers, self).__init__()
self._actual_ch = len(in_channels) if multi_scale_output else 1
self._in_channels = in_channels
self.residual_func_list = []
for i in range(self._actual_ch):
for j in range(len(in_channels)):
residual_func = None
if j > i:
residual_func = self.add_sublayer(
"residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
ConvNormLayer(
ch_in=in_channels[j],
ch_out=out_channels[i],
filter_size=1,
stride=1,
act=None,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name=name + '_layer_' + str(i + 1) + '_' +
str(j + 1)))
self.residual_func_list.append(residual_func)
elif j < i:
pre_num_filters = in_channels[j]
for k in range(i - j):
if k == i - j - 1:
residual_func = self.add_sublayer(
"residual_{}_layer_{}_{}_{}".format(
name, i + 1, j + 1, k + 1),
ConvNormLayer(
ch_in=pre_num_filters,
ch_out=out_channels[i],
filter_size=3,
stride=2,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act=None,
name=name + '_layer_' + str(i + 1) + '_' +
str(j + 1) + '_' + str(k + 1)))
pre_num_filters = out_channels[i]
else:
residual_func = self.add_sublayer(
"residual_{}_layer_{}_{}_{}".format(
name, i + 1, j + 1, k + 1),
ConvNormLayer(
ch_in=pre_num_filters,
ch_out=out_channels[j],
filter_size=3,
stride=2,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act="relu",
name=name + '_layer_' + str(i + 1) + '_' +
str(j + 1) + '_' + str(k + 1)))
pre_num_filters = out_channels[j]
self.residual_func_list.append(residual_func)
def forward(self, input):
outs = []
residual_func_idx = 0
for i in range(self._actual_ch):
residual = input[i]
for j in range(len(self._in_channels)):
if j > i:
y = self.residual_func_list[residual_func_idx](input[j])
residual_func_idx += 1
y = F.interpolate(y, scale_factor=2**(j - i))
residual = paddle.add(x=residual, y=y)
elif j < i:
y = input[j]
for k in range(i - j):
y = self.residual_func_list[residual_func_idx](y)
residual_func_idx += 1
residual = paddle.add(x=residual, y=y)
residual = F.relu(residual)
outs.append(residual)
return outs
@register
class HRNet(nn.Layer):
"""
HRNet, see https://arxiv.org/abs/1908.07919
Args:
width (int): the width of HRNet
has_se (bool): whether to add SE block for each stage
freeze_at (int): the stage to freeze
freeze_norm (bool): whether to freeze norm in HRNet
norm_momentum (float): momentum of BatchNorm
norm_decay (float): weight decay for normalization layer weights
return_idx (List): the stage to return
        upsample (bool): whether to upsample and concat the backbone feats
        downsample (bool): whether to build the downsampling head that fuses all
            branches into a single classification-style feature
"""
def __init__(self,
width=18,
has_se=False,
freeze_at=0,
freeze_norm=True,
norm_momentum=0.9,
norm_decay=0.,
return_idx=[0, 1, 2, 3],
upsample=False,
downsample=False):
super(HRNet, self).__init__()
self.width = width
self.has_se = has_se
if isinstance(return_idx, Integral):
return_idx = [return_idx]
assert len(return_idx) > 0, "need one or more return index"
self.freeze_at = freeze_at
self.return_idx = return_idx
self.upsample = upsample
self.downsample = downsample
self.channels = {
18: [[18, 36], [18, 36, 72], [18, 36, 72, 144]],
30: [[30, 60], [30, 60, 120], [30, 60, 120, 240]],
32: [[32, 64], [32, 64, 128], [32, 64, 128, 256]],
40: [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
44: [[44, 88], [44, 88, 176], [44, 88, 176, 352]],
48: [[48, 96], [48, 96, 192], [48, 96, 192, 384]],
60: [[60, 120], [60, 120, 240], [60, 120, 240, 480]],
64: [[64, 128], [64, 128, 256], [64, 128, 256, 512]]
}
channels_2, channels_3, channels_4 = self.channels[width]
num_modules_2, num_modules_3, num_modules_4 = 1, 4, 3
self._out_channels = [sum(channels_4)] if self.upsample else channels_4
self._out_strides = [4] if self.upsample else [4, 8, 16, 32]
self.conv_layer1_1 = ConvNormLayer(
ch_in=3,
ch_out=64,
filter_size=3,
stride=2,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act='relu',
name="layer1_1")
self.conv_layer1_2 = ConvNormLayer(
ch_in=64,
ch_out=64,
filter_size=3,
stride=2,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
act='relu',
name="layer1_2")
self.la1 = Layer1(
num_channels=64,
has_se=has_se,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name="layer2")
self.tr1 = TransitionLayer(
in_channels=[256],
out_channels=channels_2,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name="tr1")
self.st2 = Stage(
num_channels=channels_2,
num_modules=num_modules_2,
num_filters=channels_2,
has_se=self.has_se,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name="st2")
self.tr2 = TransitionLayer(
in_channels=channels_2,
out_channels=channels_3,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name="tr2")
self.st3 = Stage(
num_channels=channels_3,
num_modules=num_modules_3,
num_filters=channels_3,
has_se=self.has_se,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name="st3")
self.tr3 = TransitionLayer(
in_channels=channels_3,
out_channels=channels_4,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
name="tr3")
self.st4 = Stage(
num_channels=channels_4,
num_modules=num_modules_4,
num_filters=channels_4,
has_se=self.has_se,
norm_momentum=norm_momentum,
norm_decay=norm_decay,
freeze_norm=freeze_norm,
multi_scale_output=len(return_idx) > 1,
name="st4")
if self.downsample:
self.incre_modules, self.downsamp_modules, \
self.final_layer = self._make_head(channels_4, norm_momentum=norm_momentum, has_se=self.has_se)
def _make_layer(self,
block,
inplanes,
planes,
blocks,
stride=1,
norm_momentum=0.9,
has_se=False,
name=None):
downsample = None
if stride != 1 or inplanes != planes * 4:
downsample = True
layers = []
layers.append(
block(
inplanes,
planes,
has_se,
stride,
downsample,
norm_momentum=norm_momentum,
freeze_norm=False,
name=name + "_s0"))
inplanes = planes * 4
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
has_se,
norm_momentum=norm_momentum,
freeze_norm=False,
name=name + "_s" + str(i)))
return nn.Sequential(*layers)
def _make_head(self, pre_stage_channels, norm_momentum=0.9, has_se=False):
head_block = BottleneckBlock
head_channels = [32, 64, 128, 256]
# Increasing the #channels on each resolution
# from C, 2C, 4C, 8C to 128, 256, 512, 1024
incre_modules = []
for i, channels in enumerate(pre_stage_channels):
incre_module = self._make_layer(
head_block,
channels,
head_channels[i],
1,
stride=1,
norm_momentum=norm_momentum,
has_se=has_se,
name='incre' + str(i))
incre_modules.append(incre_module)
incre_modules = nn.LayerList(incre_modules)
# downsampling modules
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = head_channels[i] * 4
out_channels = head_channels[i + 1] * 4
downsamp_module = nn.Sequential(
nn.Conv2D(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=2,
padding=1),
nn.BatchNorm2D(
out_channels, momentum=norm_momentum),
nn.ReLU())
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.LayerList(downsamp_modules)
final_layer = nn.Sequential(
nn.Conv2D(
in_channels=head_channels[3] * 4,
out_channels=2048,
kernel_size=1,
stride=1,
padding=0),
nn.BatchNorm2D(
2048, momentum=norm_momentum),
nn.ReLU())
return incre_modules, downsamp_modules, final_layer
def forward(self, inputs):
x = inputs['image']
conv1 = self.conv_layer1_1(x)
conv2 = self.conv_layer1_2(conv1)
la1 = self.la1(conv2)
tr1 = self.tr1([la1])
st2 = self.st2(tr1)
tr2 = self.tr2(st2)
st3 = self.st3(tr2)
tr3 = self.tr3(st3)
st4 = self.st4(tr3)
if self.upsample:
# Upsampling
x0_h, x0_w = st4[0].shape[2:4]
x1 = F.upsample(st4[1], size=(x0_h, x0_w), mode='bilinear')
x2 = F.upsample(st4[2], size=(x0_h, x0_w), mode='bilinear')
x3 = F.upsample(st4[3], size=(x0_h, x0_w), mode='bilinear')
x = paddle.concat([st4[0], x1, x2, x3], 1)
return x
if self.downsample:
y = self.incre_modules[0](st4[0])
for i in range(len(self.downsamp_modules)):
y = self.incre_modules[i+1](st4[i+1]) + \
self.downsamp_modules[i](y)
y = self.final_layer(y)
return y
res = []
for i, layer in enumerate(st4):
if i == self.freeze_at:
layer.stop_gradient = True
if i in self.return_idx:
res.append(layer)
return res
@property
def out_shape(self):
if self.upsample:
self.return_idx = [0]
return [
ShapeSpec(
channels=self._out_channels[i], stride=self._out_strides[i])
for i in self.return_idx
]
| PaddleDetection/ppdet/modeling/backbones/hrnet.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/backbones/hrnet.py",
"repo_id": "PaddleDetection",
"token_count": 17651
} | 86 |
# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
import math
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from paddle.nn.initializer import Constant, TruncatedNormal
from ppdet.modeling.shape_spec import ShapeSpec
from ppdet.core.workspace import register, serializable
from .transformer_utils import (zeros_, DropPath, Identity, window_partition,
window_unpartition)
from ..initializer import linear_init_
__all__ = ['VisionTransformer2D', 'SimpleFeaturePyramid']
class Mlp(nn.Layer):
def __init__(self,
in_features,
hidden_features=None,
out_features=None,
act_layer='nn.GELU',
drop=0.,
lr_factor=1.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(
in_features,
hidden_features,
weight_attr=ParamAttr(learning_rate=lr_factor),
bias_attr=ParamAttr(learning_rate=lr_factor))
self.act = eval(act_layer)()
self.fc2 = nn.Linear(
hidden_features,
out_features,
weight_attr=ParamAttr(learning_rate=lr_factor),
bias_attr=ParamAttr(learning_rate=lr_factor))
self.drop = nn.Dropout(drop)
self._init_weights()
def _init_weights(self):
linear_init_(self.fc1)
linear_init_(self.fc2)
def forward(self, x):
x = self.drop(self.act(self.fc1(x)))
x = self.drop(self.fc2(x))
return x
class Attention(nn.Layer):
def __init__(self,
dim,
num_heads=8,
qkv_bias=False,
attn_bias=False,
attn_drop=0.,
proj_drop=0.,
use_rel_pos=False,
rel_pos_zero_init=True,
window_size=None,
input_size=None,
qk_scale=None,
lr_factor=1.0):
super().__init__()
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = qk_scale or self.head_dim**-0.5
self.use_rel_pos = use_rel_pos
self.input_size = input_size
self.rel_pos_zero_init = rel_pos_zero_init
self.window_size = window_size
self.lr_factor = lr_factor
self.qkv = nn.Linear(
dim,
dim * 3,
weight_attr=ParamAttr(learning_rate=lr_factor),
bias_attr=ParamAttr(learning_rate=lr_factor)
if attn_bias else False)
if qkv_bias:
self.q_bias = self.create_parameter(
shape=([dim]), default_initializer=zeros_)
self.v_bias = self.create_parameter(
shape=([dim]), default_initializer=zeros_)
else:
self.q_bias = None
self.v_bias = None
self.proj = nn.Linear(
dim,
dim,
weight_attr=ParamAttr(learning_rate=lr_factor),
bias_attr=ParamAttr(learning_rate=lr_factor))
self.attn_drop = nn.Dropout(attn_drop)
if window_size is None:
self.window_size = self.input_size[0]
self._init_weights()
def _init_weights(self):
linear_init_(self.qkv)
linear_init_(self.proj)
if self.use_rel_pos:
self.rel_pos_h = self.create_parameter(
[2 * self.window_size - 1, self.head_dim],
attr=ParamAttr(learning_rate=self.lr_factor),
default_initializer=Constant(value=0.))
self.rel_pos_w = self.create_parameter(
[2 * self.window_size - 1, self.head_dim],
attr=ParamAttr(learning_rate=self.lr_factor),
default_initializer=Constant(value=0.))
if not self.rel_pos_zero_init:
                TruncatedNormal(std=0.02)(self.rel_pos_h)
                TruncatedNormal(std=0.02)(self.rel_pos_w)
def get_rel_pos(self, seq_size, rel_pos):
max_rel_dist = int(2 * seq_size - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos = rel_pos.reshape([1, rel_pos.shape[0], -1])
rel_pos = rel_pos.transpose([0, 2, 1])
rel_pos_resized = F.interpolate(
rel_pos,
size=(max_rel_dist, ),
mode="linear",
data_format='NCW')
rel_pos_resized = rel_pos_resized.reshape([-1, max_rel_dist])
rel_pos_resized = rel_pos_resized.transpose([1, 0])
else:
rel_pos_resized = rel_pos
coords = paddle.arange(seq_size, dtype='float32')
relative_coords = coords.unsqueeze(-1) - coords.unsqueeze(0)
relative_coords += (seq_size - 1)
relative_coords = relative_coords.astype('int64').flatten()
return paddle.index_select(rel_pos_resized, relative_coords).reshape(
[seq_size, seq_size, self.head_dim])
def add_decomposed_rel_pos(self, attn, q, h, w):
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
Args:
attn (Tensor): attention map.
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
Returns:
attn (Tensor): attention map with added relative positional embeddings.
"""
Rh = self.get_rel_pos(h, self.rel_pos_h)
Rw = self.get_rel_pos(w, self.rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape([B, h, w, dim])
        # rel_h: einsum "bhwc,hkc->bhwk" then unsqueeze(-1) -> [B, h, w, h, 1]
        # rel_w: einsum "bhwc,wkc->bhwk" then unsqueeze(-2) -> [B, h, w, 1, w]
rel_h = paddle.einsum("bhwc,hkc->bhwk", r_q, Rh).unsqueeze(-1)
rel_w = paddle.einsum("bhwc,wkc->bhwk", r_q, Rw).unsqueeze(-2)
attn = attn.reshape([B, h, w, h, w]) + rel_h + rel_w
return attn.reshape([B, h * w, h * w])
def forward(self, x):
B, H, W, C = paddle.shape(x)
        if self.q_bias is not None:
            qkv_bias = paddle.concat(
                (self.q_bias, paddle.zeros_like(self.v_bias), self.v_bias))
            qkv = F.linear(x, weight=self.qkv.weight, bias=qkv_bias)
        else:
            qkv = self.qkv(x)
        # reshape both branches to [3, B * num_heads, H * W, head_dim]
        qkv = qkv.reshape(
            [B, H * W, 3, self.num_heads, self.head_dim]).transpose(
                [2, 0, 3, 1, 4]).reshape(
                    [3, B * self.num_heads, H * W, self.head_dim])
q, k, v = qkv[0], qkv[1], qkv[2]
attn = q.matmul(k.transpose([0, 2, 1])) * self.scale
if self.use_rel_pos:
attn = self.add_decomposed_rel_pos(attn, q, H, W)
attn = F.softmax(attn, axis=-1)
attn = self.attn_drop(attn)
x = attn.matmul(v).reshape(
[B, self.num_heads, H * W, self.head_dim]).transpose(
[0, 2, 1, 3]).reshape([B, H, W, C])
x = self.proj(x)
return x
class Block(nn.Layer):
def __init__(self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
attn_bias=False,
qk_scale=None,
init_values=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
use_rel_pos=True,
rel_pos_zero_init=True,
window_size=None,
input_size=None,
act_layer='nn.GELU',
norm_layer='nn.LayerNorm',
lr_factor=1.0,
epsilon=1e-5):
super().__init__()
self.window_size = window_size
self.norm1 = eval(norm_layer)(dim,
weight_attr=ParamAttr(
learning_rate=lr_factor,
regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(
learning_rate=lr_factor,
regularizer=L2Decay(0.0)),
epsilon=epsilon)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_bias=attn_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size,
input_size=input_size,
lr_factor=lr_factor)
self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()
self.norm2 = eval(norm_layer)(dim,
weight_attr=ParamAttr(
learning_rate=lr_factor,
regularizer=L2Decay(0.0)),
bias_attr=ParamAttr(
learning_rate=lr_factor,
regularizer=L2Decay(0.0)),
epsilon=epsilon)
self.mlp = Mlp(in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=drop,
lr_factor=lr_factor)
if init_values is not None:
self.gamma_1 = self.create_parameter(
shape=([dim]), default_initializer=Constant(value=init_values))
self.gamma_2 = self.create_parameter(
shape=([dim]), default_initializer=Constant(value=init_values))
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x):
y = self.norm1(x)
if self.window_size is not None:
y, pad_hw, num_hw = window_partition(y, self.window_size)
y = self.attn(y)
if self.gamma_1 is not None:
y = self.gamma_1 * y
if self.window_size is not None:
y = window_unpartition(y, pad_hw, num_hw, (x.shape[1], x.shape[2]))
x = x + self.drop_path(y)
if self.gamma_2 is None:
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Layer):
""" Image to Patch Embedding
"""
def __init__(self,
img_size=(224, 224),
patch_size=16,
in_chans=3,
embed_dim=768,
lr_factor=0.01):
super().__init__()
self.img_size = img_size
self.patch_size = patch_size
self.proj = nn.Conv2D(
in_chans,
embed_dim,
kernel_size=patch_size,
stride=patch_size,
weight_attr=ParamAttr(learning_rate=lr_factor),
bias_attr=ParamAttr(learning_rate=lr_factor))
@property
def num_patches_in_h(self):
return self.img_size[1] // self.patch_size
@property
def num_patches_in_w(self):
return self.img_size[0] // self.patch_size
def forward(self, x):
out = self.proj(x)
return out
@register
@serializable
class VisionTransformer2D(nn.Layer):
""" Vision Transformer with support for patch input
"""
def __init__(self,
img_size=(1024, 1024),
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=False,
attn_bias=False,
qk_scale=None,
init_values=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
act_layer='nn.GELU',
norm_layer='nn.LayerNorm',
lr_decay_rate=1.0,
global_attn_indexes=(2, 5, 8, 11),
use_abs_pos=False,
use_rel_pos=False,
use_abs_pos_emb=False,
use_sincos_pos_emb=False,
rel_pos_zero_init=True,
epsilon=1e-5,
final_norm=False,
pretrained=None,
window_size=None,
out_indices=(11, ),
with_fpn=False,
use_checkpoint=False,
*args,
**kwargs):
super().__init__()
self.img_size = img_size
self.patch_size = patch_size
self.embed_dim = embed_dim
self.num_heads = num_heads
self.depth = depth
self.global_attn_indexes = global_attn_indexes
self.epsilon = epsilon
self.with_fpn = with_fpn
self.use_checkpoint = use_checkpoint
self.patch_h = img_size[0] // patch_size
self.patch_w = img_size[1] // patch_size
self.num_patches = self.patch_h * self.patch_w
self.use_abs_pos = use_abs_pos
self.use_abs_pos_emb = use_abs_pos_emb
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim)
dpr = np.linspace(0, drop_path_rate, depth)
if use_checkpoint:
paddle.seed(0)
if use_abs_pos_emb:
self.pos_w = self.patch_embed.num_patches_in_w
self.pos_h = self.patch_embed.num_patches_in_h
self.pos_embed = self.create_parameter(
shape=(1, self.pos_w * self.pos_h + 1, embed_dim),
default_initializer=paddle.nn.initializer.TruncatedNormal(
std=.02))
elif use_sincos_pos_emb:
pos_embed = self.get_2d_sincos_position_embedding(self.patch_h,
self.patch_w)
self.pos_embed = self.create_parameter(shape=pos_embed.shape)
self.pos_embed.set_value(pos_embed.numpy())
self.pos_embed.stop_gradient = True
else:
self.pos_embed = None
self.blocks = nn.LayerList([
Block(
embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
attn_bias=attn_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=None
if i in self.global_attn_indexes else window_size,
input_size=[self.patch_h, self.patch_w],
act_layer=act_layer,
lr_factor=self.get_vit_lr_decay_rate(i, lr_decay_rate),
norm_layer=norm_layer,
init_values=init_values,
epsilon=epsilon) for i in range(depth)
])
assert len(out_indices) <= 4, 'out_indices out of bound'
self.out_indices = out_indices
self.pretrained = pretrained
self.init_weight()
self.out_channels = [embed_dim for _ in range(len(out_indices))]
self.out_strides = [4, 8, 16, 32][-len(out_indices):] if with_fpn else [
patch_size for _ in range(len(out_indices))
]
self.norm = Identity()
if self.with_fpn:
self.init_fpn(
embed_dim=embed_dim,
patch_size=patch_size,
out_with_norm=final_norm)
def get_vit_lr_decay_rate(self, layer_id, lr_decay_rate):
return lr_decay_rate**(self.depth - layer_id)
def init_weight(self):
pretrained = self.pretrained
if pretrained:
if 'http' in pretrained:
path = paddle.utils.download.get_weights_path_from_url(
pretrained)
else:
path = pretrained
load_state_dict = paddle.load(path)
model_state_dict = self.state_dict()
pos_embed_name = "pos_embed"
if pos_embed_name in load_state_dict.keys(
) and self.use_abs_pos_emb:
load_pos_embed = paddle.to_tensor(
load_state_dict[pos_embed_name], dtype="float32")
if self.pos_embed.shape != load_pos_embed.shape:
pos_size = int(math.sqrt(load_pos_embed.shape[1] - 1))
model_state_dict[pos_embed_name] = self.resize_pos_embed(
load_pos_embed, (pos_size, pos_size),
(self.pos_h, self.pos_w))
# self.set_state_dict(model_state_dict)
load_state_dict[pos_embed_name] = model_state_dict[
pos_embed_name]
print("Load pos_embed and resize it from {} to {} .".format(
load_pos_embed.shape, self.pos_embed.shape))
self.set_state_dict(load_state_dict)
print("Load load_state_dict....")
def init_fpn(self, embed_dim=768, patch_size=16, out_with_norm=False):
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.Conv2DTranspose(
embed_dim, embed_dim, kernel_size=2, stride=2),
nn.BatchNorm2D(embed_dim),
nn.GELU(),
nn.Conv2DTranspose(
embed_dim, embed_dim, kernel_size=2, stride=2), )
self.fpn2 = nn.Sequential(
nn.Conv2DTranspose(
embed_dim, embed_dim, kernel_size=2, stride=2), )
self.fpn3 = Identity()
self.fpn4 = nn.MaxPool2D(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
nn.Conv2DTranspose(
embed_dim, embed_dim, kernel_size=2, stride=2), )
self.fpn2 = Identity()
self.fpn3 = nn.Sequential(nn.MaxPool2D(kernel_size=2, stride=2), )
self.fpn4 = nn.Sequential(nn.MaxPool2D(kernel_size=4, stride=4), )
if not out_with_norm:
self.norm = Identity()
else:
self.norm = nn.LayerNorm(embed_dim, epsilon=self.epsilon)
def resize_pos_embed(self, pos_embed, old_hw, new_hw):
"""
Resize pos_embed weight.
Args:
pos_embed (Tensor): the pos_embed weight
old_hw (list[int]): the height and width of old pos_embed
new_hw (list[int]): the height and width of new pos_embed
Returns:
Tensor: the resized pos_embed weight
"""
cls_pos_embed = pos_embed[:, :1, :]
pos_embed = pos_embed[:, 1:, :]
pos_embed = pos_embed.transpose([0, 2, 1])
pos_embed = pos_embed.reshape([1, -1, old_hw[0], old_hw[1]])
pos_embed = F.interpolate(
pos_embed, new_hw, mode='bicubic', align_corners=False)
pos_embed = pos_embed.flatten(2).transpose([0, 2, 1])
pos_embed = paddle.concat([cls_pos_embed, pos_embed], axis=1)
return pos_embed
def get_2d_sincos_position_embedding(self, h, w, temperature=10000.):
grid_y, grid_x = paddle.meshgrid(
paddle.arange(
h, dtype=paddle.float32),
paddle.arange(
w, dtype=paddle.float32))
assert self.embed_dim % 4 == 0, 'Embed dimension must be divisible by 4 for 2D sin-cos position embedding'
pos_dim = self.embed_dim // 4
omega = paddle.arange(pos_dim, dtype=paddle.float32) / pos_dim
omega = (1. / (temperature**omega)).unsqueeze(0)
out_x = grid_x.reshape([-1, 1]).matmul(omega)
out_y = grid_y.reshape([-1, 1]).matmul(omega)
pos_emb = paddle.concat(
[
paddle.sin(out_y), paddle.cos(out_y), paddle.sin(out_x),
paddle.cos(out_x)
],
axis=1)
return pos_emb.reshape([1, h, w, self.embed_dim])
def forward(self, inputs):
x = self.patch_embed(inputs['image']).transpose([0, 2, 3, 1])
B, Hp, Wp, _ = paddle.shape(x)
if self.use_abs_pos:
x = x + self.get_2d_sincos_position_embedding(Hp, Wp)
if self.use_abs_pos_emb:
x = x + self.resize_pos_embed(self.pos_embed,
(self.pos_h, self.pos_w), (Hp, Wp))
feats = []
for idx, blk in enumerate(self.blocks):
if self.use_checkpoint and self.training:
x = paddle.distributed.fleet.utils.recompute(
blk, x, **{"preserve_rng_state": True})
else:
x = blk(x)
if idx in self.out_indices:
feats.append(self.norm(x.transpose([0, 3, 1, 2])))
if self.with_fpn:
fpns = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(feats)):
feats[i] = fpns[i](feats[i])
return feats
@property
def num_layers(self):
return len(self.blocks)
@property
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@property
def out_shape(self):
return [
ShapeSpec(
channels=c, stride=s)
for c, s in zip(self.out_channels, self.out_strides)
]
class LayerNorm(nn.Layer):
"""
A LayerNorm variant, popularized by Transformers, that performs point-wise mean and
variance normalization over the channel dimension for inputs that have shape
(batch_size, channels, height, width).
    Note that this modified LayerNorm is only used in ResBlock and SimpleFeaturePyramid;
    the ViT blocks use nn.LayerNorm.
"""
def __init__(self, normalized_shape, eps=1e-6):
super().__init__()
self.weight = self.create_parameter([normalized_shape])
self.bias = self.create_parameter([normalized_shape])
self.eps = eps
self.normalized_shape = (normalized_shape, )
def forward(self, x):
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / paddle.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
@register
@serializable
class SimpleFeaturePyramid(nn.Layer):
def __init__(self,
in_channels,
out_channels,
spatial_scales,
num_levels=4,
use_bias=False):
"""
Args:
in_channels (list[int]): input channels of each level which can be
derived from the output shape of backbone by from_config
            out_channels (int): output channel number of each level.
spatial_scales (list[float]): list of scaling factors to upsample or downsample
the input features for creating pyramid features which can be derived from
the output shape of backbone by from_config
num_levels (int): number of levels of output features.
use_bias (bool): whether use bias or not.
"""
super(SimpleFeaturePyramid, self).__init__()
self.in_channels = in_channels[0]
self.out_channels = out_channels
self.num_levels = num_levels
self.stages = []
dim = self.in_channels
if num_levels == 4:
scale_factors = [2.0, 1.0, 0.5]
elif num_levels == 5:
scale_factors = [4.0, 2.0, 1.0, 0.5]
else:
raise NotImplementedError(
f"num_levels={num_levels} is not supported yet.")
dim = in_channels[0]
for idx, scale in enumerate(scale_factors):
out_dim = dim
if scale == 4.0:
layers = [
nn.Conv2DTranspose(
dim, dim // 2, kernel_size=2, stride=2),
nn.LayerNorm(dim // 2),
nn.GELU(),
nn.Conv2DTranspose(
dim // 2, dim // 4, kernel_size=2, stride=2),
]
out_dim = dim // 4
elif scale == 2.0:
layers = [
nn.Conv2DTranspose(
dim, dim // 2, kernel_size=2, stride=2)
]
out_dim = dim // 2
elif scale == 1.0:
layers = []
elif scale == 0.5:
layers = [nn.MaxPool2D(kernel_size=2, stride=2)]
layers.extend([
nn.Conv2D(
out_dim,
out_channels,
kernel_size=1,
bias_attr=use_bias, ), LayerNorm(out_channels), nn.Conv2D(
out_channels,
out_channels,
kernel_size=3,
padding=1,
bias_attr=use_bias, ), LayerNorm(out_channels)
])
layers = nn.Sequential(*layers)
stage = -int(math.log2(spatial_scales[0] * scale_factors[idx]))
self.add_sublayer(f"simfp_{stage}", layers)
self.stages.append(layers)
# top block output feature maps.
self.top_block = nn.Sequential(
nn.MaxPool2D(
kernel_size=1, stride=2, padding=0))
@classmethod
def from_config(cls, cfg, input_shape):
return {
'in_channels': [i.channels for i in input_shape],
'spatial_scales': [1.0 / i.stride for i in input_shape],
}
@property
def out_shape(self):
return [
ShapeSpec(channels=self.out_channels)
for _ in range(self.num_levels)
]
def forward(self, feats):
"""
Args:
            feats (list[Tensor]): backbone features; only feats[0], of shape
                (N, C, H, W), is used to build the pyramid.
"""
features = feats[0]
results = []
for stage in self.stages:
results.append(stage(features))
top_block_in_feature = results[-1]
results.append(self.top_block(top_block_in_feature))
assert self.num_levels == len(results)
return results
| PaddleDetection/ppdet/modeling/backbones/vit_mae.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/backbones/vit_mae.py",
"repo_id": "PaddleDetection",
"token_count": 14902
} | 87 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
from ppdet.core.workspace import register
from .. import layers as L
from ..backbones.hrnet import BasicBlock
@register
class HrHRNetHead(nn.Layer):
__inject__ = ['loss']
def __init__(self, num_joints, loss='HrHRNetLoss', swahr=False, width=32):
"""
Head for HigherHRNet network
Args:
num_joints (int): number of keypoints
            loss (object): HrHRNetLoss instance
swahr (bool): whether to use swahr
width (int): hrnet channel width
"""
super(HrHRNetHead, self).__init__()
self.loss = loss
self.num_joints = num_joints
num_featout1 = num_joints * 2
num_featout2 = num_joints
self.swahr = swahr
self.conv1 = L.Conv2d(width, num_featout1, 1, 1, 0, bias=True)
self.conv2 = L.Conv2d(width, num_featout2, 1, 1, 0, bias=True)
self.deconv = nn.Sequential(
L.ConvTranspose2d(
num_featout1 + width, width, 4, 2, 1, 0, bias=False),
L.BatchNorm2d(width),
L.ReLU())
self.blocks = nn.Sequential(*(BasicBlock(
num_channels=width,
num_filters=width,
has_se=False,
freeze_norm=False,
name='HrHRNetHead_{}'.format(i)) for i in range(4)))
self.interpolate = L.Upsample(2, mode='bilinear')
self.concat = L.Concat(dim=1)
if swahr:
self.scalelayer0 = nn.Sequential(
L.Conv2d(
width, num_joints, 1, 1, 0, bias=True),
L.BatchNorm2d(num_joints),
L.ReLU(),
L.Conv2d(
num_joints,
num_joints,
9,
1,
4,
groups=num_joints,
bias=True))
self.scalelayer1 = nn.Sequential(
L.Conv2d(
width, num_joints, 1, 1, 0, bias=True),
L.BatchNorm2d(num_joints),
L.ReLU(),
L.Conv2d(
num_joints,
num_joints,
9,
1,
4,
groups=num_joints,
bias=True))
def forward(self, feats, targets=None):
x1 = feats[0]
xo1 = self.conv1(x1)
x2 = self.blocks(self.deconv(self.concat((x1, xo1))))
xo2 = self.conv2(x2)
num_joints = self.num_joints
if self.training:
heatmap1, tagmap = paddle.split(xo1, 2, axis=1)
if self.swahr:
so1 = self.scalelayer0(x1)
so2 = self.scalelayer1(x2)
hrhrnet_outputs = ([heatmap1, so1], [xo2, so2], tagmap)
return self.loss(hrhrnet_outputs, targets)
else:
hrhrnet_outputs = (heatmap1, xo2, tagmap)
return self.loss(hrhrnet_outputs, targets)
# averaged heatmap, upsampled tagmap
upsampled = self.interpolate(xo1)
avg = (upsampled[:, :num_joints] + xo2[:, :num_joints]) / 2
return avg, upsampled[:, num_joints:]
| PaddleDetection/ppdet/modeling/heads/keypoint_hrhrnet_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/heads/keypoint_hrhrnet_head.py",
"repo_id": "PaddleDetection",
"token_count": 2092
} | 88 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Normal
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register
from ppdet.modeling.layers import DeformableConvV2, LiteConv
import numpy as np
@register
class HMHead(nn.Layer):
"""
Args:
ch_in (int): The channel number of input Tensor.
ch_out (int): The channel number of output Tensor.
num_classes (int): Number of classes.
conv_num (int): The convolution number of hm_feat.
dcn_head(bool): whether use dcn in head. False by default.
lite_head(bool): whether use lite version. False by default.
norm_type (string): norm type, 'sync_bn', 'bn', 'gn' are optional.
bn by default
Return:
Heatmap head output
"""
__shared__ = ['num_classes', 'norm_type']
def __init__(
self,
ch_in,
ch_out=128,
num_classes=80,
conv_num=2,
dcn_head=False,
lite_head=False,
norm_type='bn', ):
super(HMHead, self).__init__()
head_conv = nn.Sequential()
for i in range(conv_num):
name = 'conv.{}'.format(i)
if lite_head:
lite_name = 'hm.' + name
head_conv.add_sublayer(
lite_name,
LiteConv(
in_channels=ch_in if i == 0 else ch_out,
out_channels=ch_out,
norm_type=norm_type))
else:
if dcn_head:
head_conv.add_sublayer(
name,
DeformableConvV2(
in_channels=ch_in if i == 0 else ch_out,
out_channels=ch_out,
kernel_size=3,
weight_attr=ParamAttr(initializer=Normal(0, 0.01))))
else:
head_conv.add_sublayer(
name,
nn.Conv2D(
in_channels=ch_in if i == 0 else ch_out,
out_channels=ch_out,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.))))
head_conv.add_sublayer(name + '.act', nn.ReLU())
self.feat = head_conv
bias_init = float(-np.log((1 - 0.01) / 0.01))
weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,
0.01))
self.head = nn.Conv2D(
in_channels=ch_out,
out_channels=num_classes,
kernel_size=1,
weight_attr=weight_attr,
bias_attr=ParamAttr(
learning_rate=2.,
regularizer=L2Decay(0.),
initializer=Constant(bias_init)))
def forward(self, feat):
out = self.feat(feat)
out = self.head(out)
return out
@register
class WHHead(nn.Layer):
"""
Args:
ch_in (int): The channel number of input Tensor.
ch_out (int): The channel number of output Tensor.
conv_num (int): The convolution number of wh_feat.
dcn_head(bool): whether use dcn in head. False by default.
lite_head(bool): whether use lite version. False by default.
norm_type (string): norm type, 'sync_bn', 'bn', 'gn' are optional.
bn by default
Return:
Width & Height head output
"""
__shared__ = ['norm_type']
def __init__(self,
ch_in,
ch_out=64,
conv_num=2,
dcn_head=False,
lite_head=False,
norm_type='bn'):
super(WHHead, self).__init__()
head_conv = nn.Sequential()
for i in range(conv_num):
name = 'conv.{}'.format(i)
if lite_head:
lite_name = 'wh.' + name
head_conv.add_sublayer(
lite_name,
LiteConv(
in_channels=ch_in if i == 0 else ch_out,
out_channels=ch_out,
norm_type=norm_type))
else:
if dcn_head:
head_conv.add_sublayer(
name,
DeformableConvV2(
in_channels=ch_in if i == 0 else ch_out,
out_channels=ch_out,
kernel_size=3,
weight_attr=ParamAttr(initializer=Normal(0, 0.01))))
else:
head_conv.add_sublayer(
name,
nn.Conv2D(
in_channels=ch_in if i == 0 else ch_out,
out_channels=ch_out,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0, 0.01)),
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.))))
head_conv.add_sublayer(name + '.act', nn.ReLU())
weight_attr = None if lite_head else ParamAttr(initializer=Normal(0,
0.01))
self.feat = head_conv
self.head = nn.Conv2D(
in_channels=ch_out,
out_channels=4,
kernel_size=1,
weight_attr=weight_attr,
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.)))
def forward(self, feat):
out = self.feat(feat)
out = self.head(out)
out = F.relu(out)
return out
@register
class TTFHead(nn.Layer):
"""
TTFHead
Args:
in_channels (int): the channel number of input to TTFHead.
num_classes (int): the number of classes, 80 by default.
hm_head_planes (int): the channel number in heatmap head,
128 by default.
wh_head_planes (int): the channel number in width & height head,
64 by default.
hm_head_conv_num (int): the number of convolution in heatmap head,
2 by default.
wh_head_conv_num (int): the number of convolution in width & height
head, 2 by default.
hm_loss (object): Instance of 'CTFocalLoss'.
wh_loss (object): Instance of 'GIoULoss'.
wh_offset_base (float): the base offset of width and height,
16.0 by default.
down_ratio (int): the actual down_ratio is calculated by base_down_ratio
(default 16) and the number of upsample layers.
lite_head(bool): whether use lite version. False by default.
norm_type (string): norm type, 'sync_bn', 'bn', 'gn' are optional.
bn by default
ags_module(bool): whether use AGS module to reweight location feature.
false by default.
"""
__shared__ = ['num_classes', 'down_ratio', 'norm_type']
__inject__ = ['hm_loss', 'wh_loss']
def __init__(self,
in_channels,
num_classes=80,
hm_head_planes=128,
wh_head_planes=64,
hm_head_conv_num=2,
wh_head_conv_num=2,
hm_loss='CTFocalLoss',
wh_loss='GIoULoss',
wh_offset_base=16.,
down_ratio=4,
dcn_head=False,
lite_head=False,
norm_type='bn',
ags_module=False):
super(TTFHead, self).__init__()
self.in_channels = in_channels
self.hm_head = HMHead(in_channels, hm_head_planes, num_classes,
hm_head_conv_num, dcn_head, lite_head, norm_type)
self.wh_head = WHHead(in_channels, wh_head_planes, wh_head_conv_num,
dcn_head, lite_head, norm_type)
self.hm_loss = hm_loss
self.wh_loss = wh_loss
self.wh_offset_base = wh_offset_base
self.down_ratio = down_ratio
self.ags_module = ags_module
@classmethod
def from_config(cls, cfg, input_shape):
if isinstance(input_shape, (list, tuple)):
input_shape = input_shape[0]
return {'in_channels': input_shape.channels, }
def forward(self, feats):
hm = self.hm_head(feats)
wh = self.wh_head(feats) * self.wh_offset_base
return hm, wh
def filter_box_by_weight(self, pred, target, weight):
"""
Filter out boxes where ttf_reg_weight is 0, only keep positive samples.
"""
index = paddle.nonzero(weight > 0)
index.stop_gradient = True
weight = paddle.gather_nd(weight, index)
pred = paddle.gather_nd(pred, index)
target = paddle.gather_nd(target, index)
return pred, target, weight
def filter_loc_by_weight(self, score, weight):
index = paddle.nonzero(weight > 0)
index.stop_gradient = True
score = paddle.gather_nd(score, index)
return score
def get_loss(self, pred_hm, pred_wh, target_hm, box_target, target_weight):
pred_hm = paddle.clip(F.sigmoid(pred_hm), 1e-4, 1 - 1e-4)
hm_loss = self.hm_loss(pred_hm, target_hm)
H, W = target_hm.shape[2:]
mask = paddle.reshape(target_weight, [-1, H, W])
avg_factor = paddle.sum(mask) + 1e-4
base_step = self.down_ratio
shifts_x = paddle.arange(0, W * base_step, base_step, dtype='int32')
shifts_y = paddle.arange(0, H * base_step, base_step, dtype='int32')
shift_y, shift_x = paddle.tensor.meshgrid([shifts_y, shifts_x])
base_loc = paddle.stack([shift_x, shift_y], axis=0)
base_loc.stop_gradient = True
pred_boxes = paddle.concat(
[0 - pred_wh[:, 0:2, :, :] + base_loc, pred_wh[:, 2:4] + base_loc],
axis=1)
pred_boxes = paddle.transpose(pred_boxes, [0, 2, 3, 1])
boxes = paddle.transpose(box_target, [0, 2, 3, 1])
boxes.stop_gradient = True
if self.ags_module:
pred_hm_max = paddle.max(pred_hm, axis=1, keepdim=True)
pred_hm_max_softmax = F.softmax(pred_hm_max, axis=1)
pred_hm_max_softmax = paddle.transpose(pred_hm_max_softmax,
[0, 2, 3, 1])
pred_hm_max_softmax = self.filter_loc_by_weight(pred_hm_max_softmax,
mask)
else:
pred_hm_max_softmax = None
pred_boxes, boxes, mask = self.filter_box_by_weight(pred_boxes, boxes,
mask)
mask.stop_gradient = True
wh_loss = self.wh_loss(
pred_boxes,
boxes,
iou_weight=mask.unsqueeze(1),
loc_reweight=pred_hm_max_softmax)
wh_loss = wh_loss / avg_factor
ttf_loss = {'hm_loss': hm_loss, 'wh_loss': wh_loss}
return ttf_loss
| PaddleDetection/ppdet/modeling/heads/ttf_head.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/heads/ttf_head.py",
"repo_id": "PaddleDetection",
"token_count": 6505
} | 89 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn.functional as F
import paddle.nn as nn
from ppdet.core.workspace import register
__all__ = ['FocalLoss', 'Weighted_FocalLoss']
@register
class FocalLoss(nn.Layer):
"""A wrapper around paddle.nn.functional.sigmoid_focal_loss.
Args:
use_sigmoid (bool): currently only support use_sigmoid=True
alpha (float): parameter alpha in Focal Loss
gamma (float): parameter gamma in Focal Loss
loss_weight (float): final loss will be multiplied by this
"""
def __init__(self,
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
loss_weight=1.0):
super(FocalLoss, self).__init__()
assert use_sigmoid == True, \
'Focal Loss only supports sigmoid at the moment'
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.loss_weight = loss_weight
def forward(self, pred, target, reduction='none'):
"""forward function.
Args:
pred (Tensor): logits of class prediction, of shape (N, num_classes)
target (Tensor): target class label, of shape (N, )
reduction (str): the way to reduce loss, one of (none, sum, mean)
"""
num_classes = pred.shape[1]
target = F.one_hot(target, num_classes+1).cast(pred.dtype)
target = target[:, :-1].detach()
loss = F.sigmoid_focal_loss(
pred, target, alpha=self.alpha, gamma=self.gamma,
reduction=reduction)
return loss * self.loss_weight
@register
class Weighted_FocalLoss(FocalLoss):
"""A wrapper around paddle.nn.functional.sigmoid_focal_loss.
Args:
use_sigmoid (bool): currently only support use_sigmoid=True
alpha (float): parameter alpha in Focal Loss
gamma (float): parameter gamma in Focal Loss
loss_weight (float): final loss will be multiplied by this
"""
def __init__(self,
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
loss_weight=1.0,
reduction="mean"):
super(FocalLoss, self).__init__()
assert use_sigmoid == True, \
'Focal Loss only supports sigmoid at the moment'
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None):
"""forward function.
Args:
pred (Tensor): logits of class prediction, of shape (N, num_classes)
target (Tensor): target class label, of shape (N, )
reduction (str): the way to reduce loss, one of (none, sum, mean)
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
num_classes = pred.shape[1]
target = F.one_hot(target, num_classes + 1).astype(pred.dtype)
target = target[:, :-1].detach()
loss = F.sigmoid_focal_loss(
pred, target, alpha=self.alpha, gamma=self.gamma,
reduction='none')
if weight is not None:
if weight.shape != loss.shape:
if weight.shape[0] == loss.shape[0]:
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.reshape((-1, 1))
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.reshape((loss.shape[0], -1))
assert weight.ndim == loss.ndim
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
if reduction == 'mean':
loss = loss.mean()
elif reduction == 'sum':
loss = loss.sum()
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = 1e-10
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss * self.loss_weight
| PaddleDetection/ppdet/modeling/losses/focal_loss.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/losses/focal_loss.py",
"repo_id": "PaddleDetection",
"token_count": 2477
} | 90 |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on https://github.com/noahcao/OC_SORT/blob/master/trackers/ocsort_tracker/ocsort.py
"""
import numpy as np
from ..matching.ocsort_matching import associate, linear_assignment, iou_batch, associate_only_iou
from ..motion.ocsort_kalman_filter import OCSORTKalmanFilter
from ppdet.core.workspace import register, serializable
def k_previous_obs(observations, cur_age, k):
if len(observations) == 0:
return [-1, -1, -1, -1, -1]
for i in range(k):
dt = k - i
if cur_age - dt in observations:
return observations[cur_age - dt]
max_age = max(observations.keys())
return observations[max_age]
def convert_bbox_to_z(bbox):
"""
Takes a bounding box in the form [x1,y1,x2,y2] and returns z in the form
[x,y,s,r] where x,y is the centre of the box and s is the scale/area and r is
the aspect ratio
"""
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
x = bbox[0] + w / 2.
y = bbox[1] + h / 2.
s = w * h # scale is just area
r = w / float(h + 1e-6)
return np.array([x, y, s, r]).reshape((4, 1))
def convert_x_to_bbox(x, score=None):
"""
Takes a bounding box in the centre form [x,y,s,r] and returns it in the form
[x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right
"""
w = np.sqrt(x[2] * x[3])
h = x[2] / w
    if score is None:
return np.array(
[x[0] - w / 2., x[1] - h / 2., x[0] + w / 2.,
x[1] + h / 2.]).reshape((1, 4))
else:
score = np.array([score])
return np.array([
x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score
]).reshape((1, 5))
def speed_direction(bbox1, bbox2):
cx1, cy1 = (bbox1[0] + bbox1[2]) / 2.0, (bbox1[1] + bbox1[3]) / 2.0
cx2, cy2 = (bbox2[0] + bbox2[2]) / 2.0, (bbox2[1] + bbox2[3]) / 2.0
speed = np.array([cy2 - cy1, cx2 - cx1])
norm = np.sqrt((cy2 - cy1)**2 + (cx2 - cx1)**2) + 1e-6
return speed / norm
class KalmanBoxTracker(object):
"""
This class represents the internal state of individual tracked objects observed as bbox.
Args:
bbox (np.array): bbox in [x1,y1,x2,y2,score] format.
delta_t (int): delta_t of previous observation
"""
count = 0
def __init__(self, bbox, delta_t=3):
self.kf = OCSORTKalmanFilter(dim_x=7, dim_z=4)
self.kf.F = np.array([[1., 0, 0, 0, 1., 0, 0], [0, 1., 0, 0, 0, 1., 0],
[0, 0, 1., 0, 0, 0, 1], [0, 0, 0, 1., 0, 0, 0],
[0, 0, 0, 0, 1., 0, 0], [0, 0, 0, 0, 0, 1., 0],
[0, 0, 0, 0, 0, 0, 1.]])
self.kf.H = np.array([[1., 0, 0, 0, 0, 0, 0], [0, 1., 0, 0, 0, 0, 0],
[0, 0, 1., 0, 0, 0, 0], [0, 0, 0, 1., 0, 0, 0]])
self.kf.R[2:, 2:] *= 10.
self.kf.P[4:, 4:] *= 1000.
# give high uncertainty to the unobservable initial velocities
self.kf.P *= 10.
self.kf.Q[-1, -1] *= 0.01
self.kf.Q[4:, 4:] *= 0.01
self.score = bbox[4]
self.kf.x[:4] = convert_bbox_to_z(bbox)
self.time_since_update = 0
self.id = KalmanBoxTracker.count
KalmanBoxTracker.count += 1
self.history = []
self.hits = 0
self.hit_streak = 0
self.age = 0
"""
NOTE: [-1,-1,-1,-1,-1] is a compromising placeholder for non-observation status, the same for the return of
        function k_previous_obs. It is ugly and I do not like it. But to support generating the
        observation array in a fast and unified way (see k_observations = np.array([k_previous_obs(...)]) below),
        let's bear it for now.
"""
self.last_observation = np.array([-1, -1, -1, -1, -1]) # placeholder
self.observations = dict()
self.history_observations = []
self.velocity = None
self.delta_t = delta_t
def update(self, bbox, angle_cost=False):
"""
Updates the state vector with observed bbox.
"""
if bbox is not None:
            if angle_cost and self.last_observation.sum(
            ) >= 0:  # a previous observation exists (the placeholder sums to -5)
previous_box = None
for i in range(self.delta_t):
dt = self.delta_t - i
if self.age - dt in self.observations:
previous_box = self.observations[self.age - dt]
break
if previous_box is None:
previous_box = self.last_observation
"""
Estimate the track speed direction with observations \Delta t steps away
"""
self.velocity = speed_direction(previous_box, bbox)
"""
        Insert new observations. This is an ugly way to maintain both self.observations
and self.history_observations. Bear it for the moment.
"""
self.last_observation = bbox
self.observations[self.age] = bbox
self.history_observations.append(bbox)
self.time_since_update = 0
self.history = []
self.hits += 1
self.hit_streak += 1
self.kf.update(convert_bbox_to_z(bbox))
else:
self.kf.update(bbox)
def predict(self):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
if ((self.kf.x[6] + self.kf.x[2]) <= 0):
self.kf.x[6] *= 0.0
self.kf.predict()
self.age += 1
if (self.time_since_update > 0):
self.hit_streak = 0
self.time_since_update += 1
self.history.append(convert_x_to_bbox(self.kf.x, score=self.score))
return self.history[-1]
def get_state(self):
return convert_x_to_bbox(self.kf.x, score=self.score)
@register
@serializable
class OCSORTTracker(object):
"""
    OCSORT tracker, supports single class only.
    Args:
        det_thresh (float): threshold of detection score
        max_age (int): maximum number of consecutive missed frames before a track is deleted
        min_hits (int): minimum hits for association
        iou_threshold (float): iou threshold for association
        delta_t (int): delta_t of previous observation
        inertia (float): vdc_weight of angle_diff_cost for association
        vertical_ratio (float): w/h, the vertical ratio of the bbox used to filter
            bad results. If set <= 0, no bboxes are filtered; usually set to
            1.6 for pedestrian tracking.
        min_box_area (int): min box area to filter out low quality boxes
        use_byte (bool): Whether to use ByteTracker association, default False
"""
def __init__(self,
det_thresh=0.6,
max_age=30,
min_hits=3,
iou_threshold=0.3,
delta_t=3,
inertia=0.2,
vertical_ratio=-1,
min_box_area=0,
use_byte=False,
use_angle_cost=False):
self.det_thresh = det_thresh
self.max_age = max_age
self.min_hits = min_hits
self.iou_threshold = iou_threshold
self.delta_t = delta_t
self.inertia = inertia
self.vertical_ratio = vertical_ratio
self.min_box_area = min_box_area
self.use_byte = use_byte
self.use_angle_cost = use_angle_cost
self.trackers = []
self.frame_count = 0
KalmanBoxTracker.count = 0
def update(self, pred_dets, pred_embs=None):
"""
Args:
pred_dets (np.array): Detection results of the image, the shape is
[N, 6], means 'cls_id, score, x0, y0, x1, y1'.
pred_embs (np.array): Embedding results of the image, the shape is
[N, 128] or [N, 512], default as None.
Return:
tracking boxes (np.array): [M, 6], means 'x0, y0, x1, y1, score, id'.
"""
if pred_dets is None:
return np.empty((0, 6))
self.frame_count += 1
bboxes = pred_dets[:, 2:]
scores = pred_dets[:, 1:2]
dets = np.concatenate((bboxes, scores), axis=1)
scores = scores.squeeze(-1)
inds_low = scores > 0.1
inds_high = scores < self.det_thresh
inds_second = np.logical_and(inds_low, inds_high)
# self.det_thresh > score > 0.1, for second matching
dets_second = dets[inds_second] # detections for second matching
remain_inds = scores > self.det_thresh
dets = dets[remain_inds]
# get predicted locations from existing trackers.
trks = np.zeros((len(self.trackers), 5))
to_del = []
ret = []
for t, trk in enumerate(trks):
pos = self.trackers[t].predict()[0]
trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
if np.any(np.isnan(pos)):
to_del.append(t)
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
for t in reversed(to_del):
self.trackers.pop(t)
if self.use_angle_cost:
velocities = np.array([
trk.velocity if trk.velocity is not None else np.array((0, 0))
for trk in self.trackers
])
k_observations = np.array([
k_previous_obs(trk.observations, trk.age, self.delta_t)
for trk in self.trackers
])
last_boxes = np.array([trk.last_observation for trk in self.trackers])
"""
First round of association
"""
if self.use_angle_cost:
matched, unmatched_dets, unmatched_trks = associate(
dets, trks, self.iou_threshold, velocities, k_observations,
self.inertia)
else:
matched, unmatched_dets, unmatched_trks = associate_only_iou(
dets, trks, self.iou_threshold)
for m in matched:
self.trackers[m[1]].update(
dets[m[0], :], angle_cost=self.use_angle_cost)
"""
        Second round of association by OCR (Observation-Centric Recovery)
"""
# BYTE association
if self.use_byte and len(dets_second) > 0 and unmatched_trks.shape[
0] > 0:
u_trks = trks[unmatched_trks]
iou_left = iou_batch(
dets_second,
u_trks) # iou between low score detections and unmatched tracks
iou_left = np.array(iou_left)
if iou_left.max() > self.iou_threshold:
"""
NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may
get a higher performance especially on MOT17/MOT20 datasets. But we keep it
uniform here for simplicity
"""
matched_indices = linear_assignment(-iou_left)
to_remove_trk_indices = []
for m in matched_indices:
det_ind, trk_ind = m[0], unmatched_trks[m[1]]
if iou_left[m[0], m[1]] < self.iou_threshold:
continue
self.trackers[trk_ind].update(
dets_second[det_ind, :], angle_cost=self.use_angle_cost)
to_remove_trk_indices.append(trk_ind)
unmatched_trks = np.setdiff1d(unmatched_trks,
np.array(to_remove_trk_indices))
if unmatched_dets.shape[0] > 0 and unmatched_trks.shape[0] > 0:
left_dets = dets[unmatched_dets]
left_trks = last_boxes[unmatched_trks]
iou_left = iou_batch(left_dets, left_trks)
iou_left = np.array(iou_left)
if iou_left.max() > self.iou_threshold:
"""
NOTE: by using a lower threshold, e.g., self.iou_threshold - 0.1, you may
get a higher performance especially on MOT17/MOT20 datasets. But we keep it
uniform here for simplicity
"""
rematched_indices = linear_assignment(-iou_left)
to_remove_det_indices = []
to_remove_trk_indices = []
for m in rematched_indices:
det_ind, trk_ind = unmatched_dets[m[0]], unmatched_trks[m[
1]]
if iou_left[m[0], m[1]] < self.iou_threshold:
continue
self.trackers[trk_ind].update(
dets[det_ind, :], angle_cost=self.use_angle_cost)
to_remove_det_indices.append(det_ind)
to_remove_trk_indices.append(trk_ind)
unmatched_dets = np.setdiff1d(unmatched_dets,
np.array(to_remove_det_indices))
unmatched_trks = np.setdiff1d(unmatched_trks,
np.array(to_remove_trk_indices))
for m in unmatched_trks:
self.trackers[m].update(None)
# create and initialise new trackers for unmatched detections
for i in unmatched_dets:
trk = KalmanBoxTracker(dets[i, :], delta_t=self.delta_t)
self.trackers.append(trk)
i = len(self.trackers)
for trk in reversed(self.trackers):
if trk.last_observation.sum() < 0:
d = trk.get_state()[0]
else:
d = trk.last_observation # tlbr + score
if (trk.time_since_update < 1) and (
trk.hit_streak >= self.min_hits or
self.frame_count <= self.min_hits):
# +1 as MOT benchmark requires positive
ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))
i -= 1
# remove dead tracklet
if (trk.time_since_update > self.max_age):
self.trackers.pop(i)
if (len(ret) > 0):
return np.concatenate(ret)
return np.empty((0, 6))
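# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# a minimal frame-by-frame driving loop for OCSORTTracker. The detection
# values below are made-up assumptions; only the array layouts follow the
# update() docstring above ([N, 6] in, [M, 6] out).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    tracker = OCSORTTracker(det_thresh=0.4, max_age=30, min_hits=3)
    for frame_id in range(3):
        # pred_dets: cls_id, score, x0, y0, x1, y1 (single class assumed)
        pred_dets = np.array([[0., 0.9, 10., 20., 50., 80.],
                              [0., 0.8, 200., 40., 260., 120.]])
        online_targets = tracker.update(pred_dets)  # [M, 6]: x0, y0, x1, y1, score, id
        print(frame_id, online_targets.shape)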
| PaddleDetection/ppdet/modeling/mot/tracker/ocsort_tracker.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/mot/tracker/ocsort_tracker.py",
"repo_id": "PaddleDetection",
"token_count": 7597
} | 91 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn.initializer import Constant, Uniform, Normal, XavierUniform
from ppdet.core.workspace import register, serializable
from paddle.regularizer import L2Decay
from ppdet.modeling.layers import DeformableConvV2, ConvNormLayer, LiteConv
import math
from ppdet.modeling.ops import batch_norm
from ..shape_spec import ShapeSpec
__all__ = ['TTFFPN']
class Upsample(nn.Layer):
def __init__(self, ch_in, ch_out, norm_type='bn'):
super(Upsample, self).__init__()
fan_in = ch_in * 3 * 3
stdv = 1. / math.sqrt(fan_in)
self.dcn = DeformableConvV2(
ch_in,
ch_out,
kernel_size=3,
weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
bias_attr=ParamAttr(
initializer=Constant(0),
regularizer=L2Decay(0.),
learning_rate=2.),
lr_scale=2.,
regularizer=L2Decay(0.))
self.bn = batch_norm(
ch_out, norm_type=norm_type, initializer=Constant(1.))
def forward(self, feat):
dcn = self.dcn(feat)
bn = self.bn(dcn)
relu = F.relu(bn)
out = F.interpolate(relu, scale_factor=2., mode='bilinear')
return out
class DeConv(nn.Layer):
def __init__(self, ch_in, ch_out, norm_type='bn'):
super(DeConv, self).__init__()
self.deconv = nn.Sequential()
conv1 = ConvNormLayer(
ch_in=ch_in,
ch_out=ch_out,
stride=1,
filter_size=1,
norm_type=norm_type,
initializer=XavierUniform())
conv2 = nn.Conv2DTranspose(
in_channels=ch_out,
out_channels=ch_out,
kernel_size=4,
padding=1,
stride=2,
groups=ch_out,
weight_attr=ParamAttr(initializer=XavierUniform()),
bias_attr=False)
bn = batch_norm(ch_out, norm_type=norm_type, norm_decay=0.)
conv3 = ConvNormLayer(
ch_in=ch_out,
ch_out=ch_out,
stride=1,
filter_size=1,
norm_type=norm_type,
initializer=XavierUniform())
self.deconv.add_sublayer('conv1', conv1)
self.deconv.add_sublayer('relu6_1', nn.ReLU6())
self.deconv.add_sublayer('conv2', conv2)
self.deconv.add_sublayer('bn', bn)
self.deconv.add_sublayer('relu6_2', nn.ReLU6())
self.deconv.add_sublayer('conv3', conv3)
self.deconv.add_sublayer('relu6_3', nn.ReLU6())
def forward(self, inputs):
return self.deconv(inputs)
class LiteUpsample(nn.Layer):
def __init__(self, ch_in, ch_out, norm_type='bn'):
super(LiteUpsample, self).__init__()
self.deconv = DeConv(ch_in, ch_out, norm_type=norm_type)
self.conv = LiteConv(ch_in, ch_out, norm_type=norm_type)
def forward(self, inputs):
deconv_up = self.deconv(inputs)
conv = self.conv(inputs)
interp_up = F.interpolate(conv, scale_factor=2., mode='bilinear')
return deconv_up + interp_up
class ShortCut(nn.Layer):
def __init__(self,
layer_num,
ch_in,
ch_out,
norm_type='bn',
lite_neck=False,
name=None):
super(ShortCut, self).__init__()
shortcut_conv = nn.Sequential()
for i in range(layer_num):
fan_out = 3 * 3 * ch_out
std = math.sqrt(2. / fan_out)
in_channels = ch_in if i == 0 else ch_out
shortcut_name = name + '.conv.{}'.format(i)
if lite_neck:
shortcut_conv.add_sublayer(
shortcut_name,
LiteConv(
in_channels=in_channels,
out_channels=ch_out,
with_act=i < layer_num - 1,
norm_type=norm_type))
else:
shortcut_conv.add_sublayer(
shortcut_name,
nn.Conv2D(
in_channels=in_channels,
out_channels=ch_out,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(initializer=Normal(0, std)),
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.))))
if i < layer_num - 1:
shortcut_conv.add_sublayer(shortcut_name + '.act',
nn.ReLU())
self.shortcut = self.add_sublayer('shortcut', shortcut_conv)
def forward(self, feat):
out = self.shortcut(feat)
return out
@register
@serializable
class TTFFPN(nn.Layer):
"""
Args:
        in_channels (list): number of input feature channels from backbone,
            e.g. [128, 256, 512, 1024], which are the channels of DarkNet53
            backbone with return_idx [1, 2, 3, 4].
planes (list): the number of output feature channels of FPN.
[256, 128, 64] by default
shortcut_num (list): the number of convolution layers in each shortcut.
[3,2,1] by default, means DarkNet53 backbone return_idx_1 has 3 convs
in its shortcut, return_idx_2 has 2 convs and return_idx_3 has 1 conv.
norm_type (string): norm type, 'sync_bn', 'bn', 'gn' are optional.
bn by default
lite_neck (bool): whether to use lite conv in TTFNet FPN,
False by default
fusion_method (string): the method to fusion upsample and lateral layer.
'add' and 'concat' are optional, add by default
"""
__shared__ = ['norm_type']
def __init__(self,
in_channels,
planes=[256, 128, 64],
shortcut_num=[3, 2, 1],
norm_type='bn',
lite_neck=False,
fusion_method='add'):
super(TTFFPN, self).__init__()
self.planes = planes
self.shortcut_num = shortcut_num[::-1]
self.shortcut_len = len(shortcut_num)
self.ch_in = in_channels[::-1]
self.fusion_method = fusion_method
self.upsample_list = []
self.shortcut_list = []
self.upper_list = []
for i, out_c in enumerate(self.planes):
in_c = self.ch_in[i] if i == 0 else self.upper_list[-1]
upsample_module = LiteUpsample if lite_neck else Upsample
upsample = self.add_sublayer(
'upsample.' + str(i),
upsample_module(
in_c, out_c, norm_type=norm_type))
self.upsample_list.append(upsample)
if i < self.shortcut_len:
shortcut = self.add_sublayer(
'shortcut.' + str(i),
ShortCut(
self.shortcut_num[i],
self.ch_in[i + 1],
out_c,
norm_type=norm_type,
lite_neck=lite_neck,
name='shortcut.' + str(i)))
self.shortcut_list.append(shortcut)
if self.fusion_method == 'add':
upper_c = out_c
elif self.fusion_method == 'concat':
upper_c = out_c * 2
else:
                raise ValueError(
                    'Illegal fusion method. Expected add or concat, '
                    'but received {}'.format(self.fusion_method))
self.upper_list.append(upper_c)
def forward(self, inputs):
feat = inputs[-1]
for i, out_c in enumerate(self.planes):
feat = self.upsample_list[i](feat)
if i < self.shortcut_len:
shortcut = self.shortcut_list[i](inputs[-i - 2])
if self.fusion_method == 'add':
feat = feat + shortcut
else:
feat = paddle.concat([feat, shortcut], axis=1)
return feat
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_channels': [i.channels for i in input_shape], }
@property
def out_shape(self):
return [ShapeSpec(channels=self.upper_list[-1], )]
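# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# wiring TTFFPN by hand with the DarkNet53-style channel list mentioned in the
# class docstring. The feature-map sizes are made-up assumptions; each plane
# upsamples the previous output 2x and fuses it with a lateral shortcut.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    fpn = TTFFPN(in_channels=[128, 256, 512, 1024])
    feats = [
        paddle.rand([1, 128, 80, 80]),
        paddle.rand([1, 256, 40, 40]),
        paddle.rand([1, 512, 20, 20]),
        paddle.rand([1, 1024, 10, 10]),
    ]
    out = fpn(feats)
    print(out.shape)  # expected [1, 64, 80, 80] with the default planes [256, 128, 64]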
| PaddleDetection/ppdet/modeling/necks/ttf_fpn.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/necks/ttf_fpn.py",
"repo_id": "PaddleDetection",
"token_count": 4677
} | 92 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn.initializer import Normal, Constant
from paddle import ParamAttr
from .resnet import ResNet50, ResNet101
from ppdet.core.workspace import register
__all__ = ['PCBPyramid']
@register
class PCBPyramid(nn.Layer):
"""
PCB (Part-based Convolutional Baseline), see https://arxiv.org/abs/1711.09349,
Pyramidal Person Re-IDentification, see https://arxiv.org/abs/1810.12193
Args:
input_ch (int): Number of channels of the input feature.
num_stripes (int): Number of sub-parts.
used_levels (tuple): Whether the level is used, 1 means used.
num_classes (int): Number of classes for identities, default 751 in
Market-1501 dataset.
last_conv_stride (int): Stride of the last conv.
last_conv_dilation (int): Dilation of the last conv.
num_conv_out_channels (int): Number of channels of conv feature.
"""
def __init__(self,
input_ch=2048,
model_name='ResNet101',
num_stripes=6,
used_levels=(1, 1, 1, 1, 1, 1),
num_classes=751,
last_conv_stride=1,
last_conv_dilation=1,
num_conv_out_channels=128):
super(PCBPyramid, self).__init__()
self.num_stripes = num_stripes
self.used_levels = used_levels
self.num_classes = num_classes
self.num_in_each_level = [i for i in range(self.num_stripes, 0, -1)]
self.num_branches = sum(self.num_in_each_level)
assert model_name in ['ResNet50', 'ResNet101'], "Unsupported ReID arch: {}".format(model_name)
self.base = eval(model_name)(
lr_mult=0.1,
last_conv_stride=last_conv_stride,
last_conv_dilation=last_conv_dilation)
self.dropout_layer = nn.Dropout(p=0.2)
self.pyramid_conv_list0, self.pyramid_fc_list0 = self.basic_branch(
num_conv_out_channels, input_ch)
def basic_branch(self, num_conv_out_channels, input_ch):
# the level indexes are defined from fine to coarse,
# the branch will contain one more part than that of its previous level
# the sliding step is set to 1
pyramid_conv_list = nn.LayerList()
pyramid_fc_list = nn.LayerList()
idx_levels = 0
for idx_branches in range(self.num_branches):
if idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):
idx_levels += 1
pyramid_conv_list.append(
nn.Sequential(
nn.Conv2D(input_ch, num_conv_out_channels, 1),
nn.BatchNorm2D(num_conv_out_channels), nn.ReLU()))
idx_levels = 0
for idx_branches in range(self.num_branches):
if idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):
idx_levels += 1
fc = nn.Linear(
in_features=num_conv_out_channels,
out_features=self.num_classes,
weight_attr=ParamAttr(initializer=Normal(
mean=0., std=0.001)),
bias_attr=ParamAttr(initializer=Constant(value=0.)))
pyramid_fc_list.append(fc)
return pyramid_conv_list, pyramid_fc_list
def pyramid_forward(self, feat):
each_stripe_size = int(feat.shape[2] / self.num_stripes)
feat_list, logits_list = [], []
idx_levels = 0
used_branches = 0
for idx_branches in range(self.num_branches):
if idx_branches >= sum(self.num_in_each_level[0:idx_levels + 1]):
idx_levels += 1
idx_in_each_level = idx_branches - sum(self.num_in_each_level[
0:idx_levels])
stripe_size_in_each_level = each_stripe_size * (idx_levels + 1)
start = idx_in_each_level * each_stripe_size
end = start + stripe_size_in_each_level
k = feat.shape[-1]
local_feat_avgpool = F.avg_pool2d(
feat[:, :, start:end, :],
kernel_size=(stripe_size_in_each_level, k))
local_feat_maxpool = F.max_pool2d(
feat[:, :, start:end, :],
kernel_size=(stripe_size_in_each_level, k))
local_feat = local_feat_avgpool + local_feat_maxpool
local_feat = self.pyramid_conv_list0[used_branches](local_feat)
local_feat = paddle.reshape(
local_feat, shape=[local_feat.shape[0], -1])
feat_list.append(local_feat)
local_logits = self.pyramid_fc_list0[used_branches](
self.dropout_layer(local_feat))
logits_list.append(local_logits)
used_branches += 1
return feat_list, logits_list
def forward(self, x):
feat = self.base(x)
assert feat.shape[2] % self.num_stripes == 0
feat_list, logits_list = self.pyramid_forward(feat)
feat_out = paddle.concat(feat_list, axis=-1)
return feat_out
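# ---------------------------------------------------------------------------
# Hedged arithmetic sketch (added for illustration, not part of the original
# file): how the pyramid branch count and final embedding size follow from
# num_stripes and num_conv_out_channels. No network is instantiated here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    num_stripes, num_conv_out_channels = 6, 128
    num_in_each_level = [i for i in range(num_stripes, 0, -1)]  # [6, 5, 4, 3, 2, 1]
    num_branches = sum(num_in_each_level)  # 21 branches in total
    embed_dim = num_branches * num_conv_out_channels  # 21 * 128 = 2688-D output
    print(num_branches, embed_dim)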
| PaddleDetection/ppdet/modeling/reid/pyramidal_embedding.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/reid/pyramidal_embedding.py",
"repo_id": "PaddleDetection",
"token_count": 2712
} | 93 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Modified from Deformable-DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import ParamAttr
from ppdet.core.workspace import register
from ..layers import MultiHeadAttention
from .position_encoding import PositionEmbedding
from .utils import _get_clones, get_valid_ratio
from ..initializer import linear_init_, constant_, xavier_uniform_, normal_
__all__ = ['DeformableTransformer']
class MSDeformableAttention(nn.Layer):
def __init__(self,
embed_dim=256,
num_heads=8,
num_levels=4,
num_points=4,
lr_mult=0.1):
"""
Multi-Scale Deformable Attention Module
"""
super(MSDeformableAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.num_levels = num_levels
self.num_points = num_points
self.total_points = num_heads * num_levels * num_points
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.sampling_offsets = nn.Linear(
embed_dim,
self.total_points * 2,
weight_attr=ParamAttr(learning_rate=lr_mult),
bias_attr=ParamAttr(learning_rate=lr_mult))
self.attention_weights = nn.Linear(embed_dim, self.total_points)
self.value_proj = nn.Linear(embed_dim, embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
try:
# use cuda op
from deformable_detr_ops import ms_deformable_attn
except:
# use paddle func
from .utils import deformable_attention_core_func as ms_deformable_attn
self.ms_deformable_attn_core = ms_deformable_attn
self._reset_parameters()
def _reset_parameters(self):
# sampling_offsets
constant_(self.sampling_offsets.weight)
thetas = paddle.arange(
self.num_heads,
dtype=paddle.float32) * (2.0 * math.pi / self.num_heads)
grid_init = paddle.stack([thetas.cos(), thetas.sin()], -1)
grid_init = grid_init / grid_init.abs().max(-1, keepdim=True)
grid_init = grid_init.reshape([self.num_heads, 1, 1, 2]).tile(
[1, self.num_levels, self.num_points, 1])
scaling = paddle.arange(
1, self.num_points + 1,
dtype=paddle.float32).reshape([1, 1, -1, 1])
grid_init *= scaling
self.sampling_offsets.bias.set_value(grid_init.flatten())
# attention_weights
constant_(self.attention_weights.weight)
constant_(self.attention_weights.bias)
# proj
xavier_uniform_(self.value_proj.weight)
constant_(self.value_proj.bias)
xavier_uniform_(self.output_proj.weight)
constant_(self.output_proj.bias)
def forward(self,
query,
reference_points,
value,
value_spatial_shapes,
value_level_start_index,
value_mask=None):
"""
Args:
query (Tensor): [bs, query_length, C]
reference_points (Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
bottom-right (1, 1), including padding area
value (Tensor): [bs, value_length, C]
value_spatial_shapes (Tensor): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
value_level_start_index (Tensor(int64)): [n_levels], [0, H_0*W_0, H_0*W_0+H_1*W_1, ...]
value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements
Returns:
output (Tensor): [bs, Length_{query}, C]
"""
bs, Len_q = query.shape[:2]
Len_v = value.shape[1]
assert int(value_spatial_shapes.prod(1).sum()) == Len_v
value = self.value_proj(value)
if value_mask is not None:
value_mask = value_mask.astype(value.dtype).unsqueeze(-1)
value *= value_mask
value = value.reshape([bs, Len_v, self.num_heads, self.head_dim])
sampling_offsets = self.sampling_offsets(query).reshape(
[bs, Len_q, self.num_heads, self.num_levels, self.num_points, 2])
attention_weights = self.attention_weights(query).reshape(
[bs, Len_q, self.num_heads, self.num_levels * self.num_points])
attention_weights = F.softmax(attention_weights).reshape(
[bs, Len_q, self.num_heads, self.num_levels, self.num_points])
if reference_points.shape[-1] == 2:
offset_normalizer = value_spatial_shapes.flip([1]).reshape(
[1, 1, 1, self.num_levels, 1, 2])
sampling_locations = reference_points.reshape([
bs, Len_q, 1, self.num_levels, 1, 2
]) + sampling_offsets / offset_normalizer
elif reference_points.shape[-1] == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2] + sampling_offsets /
self.num_points * reference_points[:, :, None, :, None, 2:] *
0.5)
else:
raise ValueError(
"Last dim of reference_points must be 2 or 4, but get {} instead.".
format(reference_points.shape[-1]))
output = self.ms_deformable_attn_core(
value, value_spatial_shapes, value_level_start_index,
sampling_locations, attention_weights)
output = self.output_proj(output)
return output
class DeformableTransformerEncoderLayer(nn.Layer):
def __init__(self,
d_model=256,
n_head=8,
dim_feedforward=1024,
dropout=0.1,
activation="relu",
n_levels=4,
n_points=4,
lr_mult=0.1,
weight_attr=None,
bias_attr=None):
super(DeformableTransformerEncoderLayer, self).__init__()
# self attention
self.self_attn = MSDeformableAttention(d_model, n_head, n_levels,
n_points, lr_mult)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(
d_model, weight_attr=weight_attr, bias_attr=bias_attr)
# ffn
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.activation = getattr(F, activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(
d_model, weight_attr=weight_attr, bias_attr=bias_attr)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.linear1)
linear_init_(self.linear2)
xavier_uniform_(self.linear1.weight)
xavier_uniform_(self.linear2.weight)
def with_pos_embed(self, tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self,
src,
reference_points,
spatial_shapes,
level_start_index,
src_mask=None,
query_pos_embed=None):
# self attention
src2 = self.self_attn(
self.with_pos_embed(src, query_pos_embed), reference_points, src,
spatial_shapes, level_start_index, src_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class DeformableTransformerEncoder(nn.Layer):
def __init__(self, encoder_layer, num_layers):
super(DeformableTransformerEncoder, self).__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, offset=0.5):
valid_ratios = valid_ratios.unsqueeze(1)
reference_points = []
for i, (H, W) in enumerate(spatial_shapes):
ref_y, ref_x = paddle.meshgrid(
paddle.arange(end=H) + offset, paddle.arange(end=W) + offset)
ref_y = ref_y.flatten().unsqueeze(0) / (valid_ratios[:, :, i, 1] *
H)
ref_x = ref_x.flatten().unsqueeze(0) / (valid_ratios[:, :, i, 0] *
W)
reference_points.append(paddle.stack((ref_x, ref_y), axis=-1))
reference_points = paddle.concat(reference_points, 1).unsqueeze(2)
reference_points = reference_points * valid_ratios
return reference_points
def forward(self,
feat,
spatial_shapes,
level_start_index,
feat_mask=None,
query_pos_embed=None,
valid_ratios=None):
if valid_ratios is None:
valid_ratios = paddle.ones(
[feat.shape[0], spatial_shapes.shape[0], 2])
reference_points = self.get_reference_points(spatial_shapes,
valid_ratios)
for layer in self.layers:
feat = layer(feat, reference_points, spatial_shapes,
level_start_index, feat_mask, query_pos_embed)
return feat
class DeformableTransformerDecoderLayer(nn.Layer):
def __init__(self,
d_model=256,
n_head=8,
dim_feedforward=1024,
dropout=0.1,
activation="relu",
n_levels=4,
n_points=4,
lr_mult=0.1,
weight_attr=None,
bias_attr=None):
super(DeformableTransformerDecoderLayer, self).__init__()
# self attention
self.self_attn = MultiHeadAttention(d_model, n_head, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(
d_model, weight_attr=weight_attr, bias_attr=bias_attr)
# cross attention
self.cross_attn = MSDeformableAttention(d_model, n_head, n_levels,
n_points, lr_mult)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(
d_model, weight_attr=weight_attr, bias_attr=bias_attr)
# ffn
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.activation = getattr(F, activation)
self.dropout3 = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout4 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(
d_model, weight_attr=weight_attr, bias_attr=bias_attr)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.linear1)
linear_init_(self.linear2)
xavier_uniform_(self.linear1.weight)
xavier_uniform_(self.linear2.weight)
def with_pos_embed(self, tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(self,
tgt,
reference_points,
memory,
memory_spatial_shapes,
memory_level_start_index,
memory_mask=None,
query_pos_embed=None):
# self attention
q = k = self.with_pos_embed(tgt, query_pos_embed)
tgt2 = self.self_attn(q, k, value=tgt)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# cross attention
tgt2 = self.cross_attn(
self.with_pos_embed(tgt, query_pos_embed), reference_points, memory,
memory_spatial_shapes, memory_level_start_index, memory_mask)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# ffn
tgt = self.forward_ffn(tgt)
return tgt
class DeformableTransformerDecoder(nn.Layer):
def __init__(self, decoder_layer, num_layers, return_intermediate=False):
super(DeformableTransformerDecoder, self).__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.return_intermediate = return_intermediate
def forward(self,
tgt,
reference_points,
memory,
memory_spatial_shapes,
memory_level_start_index,
memory_mask=None,
query_pos_embed=None):
output = tgt
intermediate = []
for lid, layer in enumerate(self.layers):
output = layer(output, reference_points, memory,
memory_spatial_shapes, memory_level_start_index,
memory_mask, query_pos_embed)
if self.return_intermediate:
intermediate.append(output)
if self.return_intermediate:
return paddle.stack(intermediate)
return output.unsqueeze(0)
@register
class DeformableTransformer(nn.Layer):
__shared__ = ['hidden_dim']
def __init__(self,
num_queries=300,
position_embed_type='sine',
return_intermediate_dec=True,
in_feats_channel=[512, 1024, 2048],
num_feature_levels=4,
num_encoder_points=4,
num_decoder_points=4,
hidden_dim=256,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=1024,
dropout=0.1,
activation="relu",
lr_mult=0.1,
pe_temperature=10000,
pe_offset=-0.5):
super(DeformableTransformer, self).__init__()
assert position_embed_type in ['sine', 'learned'], \
f'ValueError: position_embed_type not supported {position_embed_type}!'
assert len(in_feats_channel) <= num_feature_levels
self.hidden_dim = hidden_dim
self.nhead = nhead
self.num_feature_levels = num_feature_levels
encoder_layer = DeformableTransformerEncoderLayer(
hidden_dim, nhead, dim_feedforward, dropout, activation,
num_feature_levels, num_encoder_points, lr_mult)
self.encoder = DeformableTransformerEncoder(encoder_layer,
num_encoder_layers)
decoder_layer = DeformableTransformerDecoderLayer(
hidden_dim, nhead, dim_feedforward, dropout, activation,
num_feature_levels, num_decoder_points)
self.decoder = DeformableTransformerDecoder(
decoder_layer, num_decoder_layers, return_intermediate_dec)
self.level_embed = nn.Embedding(num_feature_levels, hidden_dim)
self.tgt_embed = nn.Embedding(num_queries, hidden_dim)
self.query_pos_embed = nn.Embedding(num_queries, hidden_dim)
self.reference_points = nn.Linear(
hidden_dim,
2,
weight_attr=ParamAttr(learning_rate=lr_mult),
bias_attr=ParamAttr(learning_rate=lr_mult))
self.input_proj = nn.LayerList()
for in_channels in in_feats_channel:
self.input_proj.append(
nn.Sequential(
nn.Conv2D(
in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim)))
in_channels = in_feats_channel[-1]
for _ in range(num_feature_levels - len(in_feats_channel)):
self.input_proj.append(
nn.Sequential(
nn.Conv2D(
in_channels,
hidden_dim,
kernel_size=3,
stride=2,
padding=1),
nn.GroupNorm(32, hidden_dim)))
in_channels = hidden_dim
self.position_embedding = PositionEmbedding(
hidden_dim // 2,
temperature=pe_temperature,
normalize=True if position_embed_type == 'sine' else False,
embed_type=position_embed_type,
offset=pe_offset,
eps=1e-4)
self._reset_parameters()
def _reset_parameters(self):
normal_(self.level_embed.weight)
normal_(self.tgt_embed.weight)
normal_(self.query_pos_embed.weight)
xavier_uniform_(self.reference_points.weight)
constant_(self.reference_points.bias)
for l in self.input_proj:
xavier_uniform_(l[0].weight)
constant_(l[0].bias)
@classmethod
def from_config(cls, cfg, input_shape):
return {'in_feats_channel': [i.channels for i in input_shape], }
def forward(self, src_feats, src_mask=None, *args, **kwargs):
srcs = []
for i in range(len(src_feats)):
srcs.append(self.input_proj[i](src_feats[i]))
if self.num_feature_levels > len(srcs):
len_srcs = len(srcs)
for i in range(len_srcs, self.num_feature_levels):
if i == len_srcs:
srcs.append(self.input_proj[i](src_feats[-1]))
else:
srcs.append(self.input_proj[i](srcs[-1]))
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
valid_ratios = []
for level, src in enumerate(srcs):
src_shape = paddle.shape(src)
bs = src_shape[0:1]
h = src_shape[2:3]
w = src_shape[3:4]
spatial_shapes.append(paddle.concat([h, w]))
src = src.flatten(2).transpose([0, 2, 1])
src_flatten.append(src)
if src_mask is not None:
mask = F.interpolate(src_mask.unsqueeze(0), size=(h, w))[0]
else:
mask = paddle.ones([bs, h, w])
valid_ratios.append(get_valid_ratio(mask))
pos_embed = self.position_embedding(mask).flatten(1, 2)
lvl_pos_embed = pos_embed + self.level_embed.weight[level]
lvl_pos_embed_flatten.append(lvl_pos_embed)
mask = mask.flatten(1)
mask_flatten.append(mask)
src_flatten = paddle.concat(src_flatten, 1)
mask_flatten = None if src_mask is None else paddle.concat(mask_flatten,
1)
lvl_pos_embed_flatten = paddle.concat(lvl_pos_embed_flatten, 1)
# [l, 2]
spatial_shapes = paddle.to_tensor(
paddle.stack(spatial_shapes).astype('int64'))
        # [l], start index of each level
level_start_index = paddle.concat([
paddle.zeros(
[1], dtype='int64'), spatial_shapes.prod(1).cumsum(0)[:-1]
])
# [b, l, 2]
valid_ratios = paddle.stack(valid_ratios, 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index,
mask_flatten, lvl_pos_embed_flatten, valid_ratios)
# prepare input for decoder
bs, _, c = memory.shape
query_embed = self.query_pos_embed.weight.unsqueeze(0).tile([bs, 1, 1])
tgt = self.tgt_embed.weight.unsqueeze(0).tile([bs, 1, 1])
reference_points = F.sigmoid(self.reference_points(query_embed))
reference_points_input = reference_points.unsqueeze(
2) * valid_ratios.unsqueeze(1)
# decoder
hs = self.decoder(tgt, reference_points_input, memory, spatial_shapes,
level_start_index, mask_flatten, query_embed)
return (hs, memory, reference_points)
class QRDeformableTransformerDecoder(DeformableTransformerDecoder):
def __init__(self, decoder_layer, num_layers,
start_q=None, end_q=None, return_intermediate=False):
super(QRDeformableTransformerDecoder, self).__init__(
decoder_layer, num_layers, return_intermediate=return_intermediate)
self.start_q = start_q
self.end_q = end_q
def forward(self,
tgt,
reference_points,
memory,
memory_spatial_shapes,
memory_level_start_index,
memory_mask=None,
query_pos_embed=None):
if not self.training:
return super(QRDeformableTransformerDecoder, self).forward(
tgt, reference_points,
memory, memory_spatial_shapes,
memory_level_start_index,
memory_mask=memory_mask,
query_pos_embed=query_pos_embed)
batchsize = tgt.shape[0]
query_list_reserve = [tgt]
intermediate = []
for lid, layer in enumerate(self.layers):
start_q = self.start_q[lid]
end_q = self.end_q[lid]
query_list = query_list_reserve.copy()[start_q:end_q]
# prepare for parallel process
output = paddle.concat(query_list, axis=0)
fakesetsize = int(output.shape[0] / batchsize)
reference_points_tiled = reference_points.tile([fakesetsize, 1, 1, 1])
memory_tiled = memory.tile([fakesetsize, 1, 1])
query_pos_embed_tiled = query_pos_embed.tile([fakesetsize, 1, 1])
memory_mask_tiled = memory_mask.tile([fakesetsize, 1])
output = layer(output, reference_points_tiled, memory_tiled,
memory_spatial_shapes, memory_level_start_index,
memory_mask_tiled, query_pos_embed_tiled)
for i in range(fakesetsize):
query_list_reserve.append(output[batchsize*i:batchsize*(i+1)])
if self.return_intermediate:
for i in range(fakesetsize):
intermediate.append(output[batchsize*i:batchsize*(i+1)])
if self.return_intermediate:
return paddle.stack(intermediate)
return output.unsqueeze(0)
@register
class QRDeformableTransformer(DeformableTransformer):
def __init__(self,
num_queries=300,
position_embed_type='sine',
return_intermediate_dec=True,
in_feats_channel=[512, 1024, 2048],
num_feature_levels=4,
num_encoder_points=4,
num_decoder_points=4,
hidden_dim=256,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=1024,
dropout=0.1,
activation="relu",
lr_mult=0.1,
pe_temperature=10000,
pe_offset=-0.5,
start_q=None,
end_q=None):
super(QRDeformableTransformer, self).__init__(
num_queries=num_queries,
position_embed_type=position_embed_type,
return_intermediate_dec=return_intermediate_dec,
in_feats_channel=in_feats_channel,
num_feature_levels=num_feature_levels,
num_encoder_points=num_encoder_points,
num_decoder_points=num_decoder_points,
hidden_dim=hidden_dim,
nhead=nhead,
num_encoder_layers=num_encoder_layers,
num_decoder_layers=num_decoder_layers,
dim_feedforward=dim_feedforward,
dropout=dropout,
activation=activation,
lr_mult=lr_mult,
pe_temperature=pe_temperature,
pe_offset=pe_offset)
decoder_layer = DeformableTransformerDecoderLayer(
hidden_dim, nhead, dim_feedforward, dropout, activation,
num_feature_levels, num_decoder_points)
self.decoder = QRDeformableTransformerDecoder(
decoder_layer, num_decoder_layers, start_q, end_q, return_intermediate_dec)
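# ---------------------------------------------------------------------------
# Hedged shape-check sketch (added for illustration, not part of the original
# file) for MSDeformableAttention. Every size below is a made-up assumption;
# it only mirrors the tensor layouts documented in MSDeformableAttention.forward.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    attn = MSDeformableAttention(
        embed_dim=256, num_heads=8, num_levels=2, num_points=4)
    bs, num_queries = 2, 100
    spatial_shapes = paddle.to_tensor([[32, 32], [16, 16]], dtype='int64')
    level_start_index = paddle.concat([
        paddle.zeros(
            [1], dtype='int64'), spatial_shapes.prod(1).cumsum(0)[:-1]
    ])
    len_v = int(spatial_shapes.prod(1).sum())  # 32*32 + 16*16 = 1280
    query = paddle.rand([bs, num_queries, 256])
    value = paddle.rand([bs, len_v, 256])
    reference_points = paddle.rand([bs, num_queries, 2, 2])  # normalized [0, 1]
    out = attn(query, reference_points, value, spatial_shapes,
               level_start_index)
    print(out.shape)  # expected [2, 100, 256]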
| PaddleDetection/ppdet/modeling/transformers/deformable_transformer.py/0 | {
"file_path": "PaddleDetection/ppdet/modeling/transformers/deformable_transformer.py",
"repo_id": "PaddleDetection",
"token_count": 12986
} | 94 |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import yaml
import re
from ppdet.core.workspace import get_registered_modules, dump_value
__all__ = ['ColorTTY', 'ArgsParser']
class ColorTTY(object):
def __init__(self):
super(ColorTTY, self).__init__()
self.colors = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan']
def __getattr__(self, attr):
if attr in self.colors:
color = self.colors.index(attr) + 31
def color_message(message):
return "[{}m{}[0m".format(color, message)
setattr(self, attr, color_message)
return color_message
def bold(self, message):
return self.with_code('01', message)
def with_code(self, code, message):
return "[{}m{}[0m".format(code, message)
class ArgsParser(ArgumentParser):
def __init__(self):
super(ArgsParser, self).__init__(
formatter_class=RawDescriptionHelpFormatter)
self.add_argument("-c", "--config", help="configuration file to use")
self.add_argument(
"-o", "--opt", nargs='*', help="set configuration options")
def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
assert args.config is not None, \
"Please specify --config=configure_file_path."
args.opt = self._parse_opt(args.opt)
return args
def _parse_opt(self, opts):
config = {}
if not opts:
return config
for s in opts:
s = s.strip()
k, v = s.split('=', 1)
if '.' not in k:
config[k] = yaml.load(v, Loader=yaml.Loader)
else:
keys = k.split('.')
if keys[0] not in config:
config[keys[0]] = {}
cur = config[keys[0]]
for idx, key in enumerate(keys[1:]):
if idx == len(keys) - 2:
cur[key] = yaml.load(v, Loader=yaml.Loader)
else:
cur[key] = {}
cur = cur[key]
return config
def merge_args(config, args, exclude_args=['config', 'opt', 'slim_config']):
for k, v in vars(args).items():
if k not in exclude_args:
config[k] = v
return config
def print_total_cfg(config):
modules = get_registered_modules()
color_tty = ColorTTY()
green = '___{}___'.format(color_tty.colors.index('green') + 31)
styled = {}
for key in config.keys():
if not config[key]: # empty schema
continue
if key not in modules and not hasattr(config[key], '__dict__'):
styled[key] = config[key]
continue
elif key in modules:
module = modules[key]
else:
type_name = type(config[key]).__name__
if type_name in modules:
module = modules[type_name].copy()
module.update({
k: v
for k, v in config[key].__dict__.items()
if k in module.schema
})
key += " ({})".format(type_name)
default = module.find_default_keys()
missing = module.find_missing_keys()
mismatch = module.find_mismatch_keys()
extra = module.find_extra_keys()
dep_missing = []
for dep in module.inject:
if isinstance(module[dep], str) and module[dep] != '<value>':
if module[dep] not in modules: # not a valid module
dep_missing.append(dep)
else:
dep_mod = modules[module[dep]]
# empty dict but mandatory
if not dep_mod and dep_mod.mandatory():
dep_missing.append(dep)
override = list(
set(module.keys()) - set(default) - set(extra) - set(dep_missing))
replacement = {}
for name in set(override + default + extra + mismatch + missing):
new_name = name
if name in missing:
value = "<missing>"
else:
value = module[name]
if name in extra:
value = dump_value(value) + " <extraneous>"
elif name in mismatch:
value = dump_value(value) + " <type mismatch>"
elif name in dep_missing:
value = dump_value(value) + " <module config missing>"
elif name in override and value != '<missing>':
mark = green
new_name = mark + name
replacement[new_name] = value
styled[key] = replacement
buffer = yaml.dump(styled, default_flow_style=False, default_style='')
buffer = (re.sub(r"<missing>", r"[31m<missing>[0m", buffer))
buffer = (re.sub(r"<extraneous>", r"[33m<extraneous>[0m", buffer))
buffer = (re.sub(r"<type mismatch>", r"[31m<type mismatch>[0m", buffer))
buffer = (re.sub(r"<module config missing>",
r"[31m<module config missing>[0m", buffer))
buffer = re.sub(r"___(\d+)___(.*?):", r"[\1m\2[0m:", buffer)
print(buffer)
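# ---------------------------------------------------------------------------
# Hedged example (added for illustration, not part of the original file):
# how dotted "-o" options are parsed into nested dicts by ArgsParser. The
# config path and option names below are made-up assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    demo_args = ArgsParser().parse_args(
        ['-c', 'demo_config.yml', '-o', 'TrainReader.batch_size=2',
         'use_gpu=false'])
    # demo_args.opt == {'TrainReader': {'batch_size': 2}, 'use_gpu': False}
    print(demo_args.opt)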
| PaddleDetection/ppdet/utils/cli.py/0 | {
"file_path": "PaddleDetection/ppdet/utils/cli.py",
"repo_id": "PaddleDetection",
"token_count": 2733
} | 95 |
import numpy as np
import os
import subprocess
import json
import argparse
import glob
def init_args():
parser = argparse.ArgumentParser()
# params for testing assert allclose
parser.add_argument("--atol", type=float, default=1e-3)
parser.add_argument("--rtol", type=float, default=1e-3)
parser.add_argument("--gt_file", type=str, default="")
parser.add_argument("--log_file", type=str, default="")
parser.add_argument("--precision", type=str, default="fp32")
return parser
def parse_args():
parser = init_args()
return parser.parse_args()
def run_shell_command(cmd):
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
if p.returncode == 0:
return out.decode('utf-8')
else:
return None
def parser_results_from_log_by_name(log_path, names_list):
if not os.path.exists(log_path):
raise ValueError("The log file {} does not exists!".format(log_path))
if names_list is None or len(names_list) < 1:
return []
parser_results = {}
for name in names_list:
cmd = "grep {} {}".format(name, log_path)
outs = run_shell_command(cmd)
outs = outs.split("\n")[0]
result = outs.split("{}".format(name))[-1]
try:
result = json.loads(result)
except:
result = np.array([int(r) for r in result.split()]).reshape(-1, 4)
parser_results[name] = result
return parser_results
def load_gt_from_file(gt_file):
if not os.path.exists(gt_file):
raise ValueError("The log file {} does not exists!".format(gt_file))
with open(gt_file, 'r') as f:
data = f.readlines()
f.close()
parser_gt = {}
for line in data:
image_name, result = line.strip("\n").split("\t")
image_name = image_name.split('/')[-1]
try:
result = json.loads(result)
except:
result = np.array([int(r) for r in result.split()]).reshape(-1, 4)
parser_gt[image_name] = result
return parser_gt
def load_gt_from_txts(gt_file):
gt_list = glob.glob(gt_file)
gt_collection = {}
for gt_f in gt_list:
gt_dict = load_gt_from_file(gt_f)
basename = os.path.basename(gt_f)
if "fp32" in basename:
gt_collection["fp32"] = [gt_dict, gt_f]
elif "fp16" in basename:
gt_collection["fp16"] = [gt_dict, gt_f]
elif "int8" in basename:
gt_collection["int8"] = [gt_dict, gt_f]
else:
continue
return gt_collection
def collect_predict_from_logs(log_path, key_list):
log_list = glob.glob(log_path)
pred_collection = {}
for log_f in log_list:
pred_dict = parser_results_from_log_by_name(log_f, key_list)
key = os.path.basename(log_f)
pred_collection[key] = pred_dict
return pred_collection
def testing_assert_allclose(dict_x, dict_y, atol=1e-7, rtol=1e-7):
for k in dict_x:
np.testing.assert_allclose(
np.array(dict_x[k]), np.array(dict_y[k]), atol=atol, rtol=rtol)
if __name__ == "__main__":
# Usage:
# python3.7 tests/compare_results.py --gt_file=./tests/results/*.txt --log_file=./tests/output/infer_*.log
args = parse_args()
gt_collection = load_gt_from_txts(args.gt_file)
key_list = gt_collection["fp32"][0].keys()
pred_collection = collect_predict_from_logs(args.log_file, key_list)
for filename in pred_collection.keys():
if "fp32" in filename:
gt_dict, gt_filename = gt_collection["fp32"]
elif "fp16" in filename:
gt_dict, gt_filename = gt_collection["fp16"]
elif "int8" in filename:
gt_dict, gt_filename = gt_collection["int8"]
else:
continue
pred_dict = pred_collection[filename]
try:
testing_assert_allclose(
gt_dict, pred_dict, atol=args.atol, rtol=args.rtol)
print(
"Assert allclose passed! The results of {} and {} are consistent!".
format(filename, gt_filename))
except Exception as E:
print(E)
raise ValueError(
"The results of {} and the results of {} are inconsistent!".
format(filename, gt_filename))
| PaddleDetection/test_tipc/compare_results.py/0 | {
"file_path": "PaddleDetection/test_tipc/compare_results.py",
"repo_id": "PaddleDetection",
"token_count": 2012
} | 96 |
#!/bin/bash
source test_tipc/utils_func.sh
FILENAME=$1
# MODE be one of ['lite_train_lite_infer' 'lite_train_whole_infer'
# 'whole_train_whole_infer', 'whole_infer', 'klquant_whole_infer',
# 'cpp_infer', 'serving_infer', 'lite_infer', 'paddle2onnx_infer']
MODE=$2
# parse params
dataline=$(cat ${FILENAME})
IFS=$'\n'
lines=(${dataline})
# The training params
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
if [ ${MODE} = "whole_train_whole_infer" ];then
mv ./dataset/coco/download_coco.py . && rm -rf ./dataset/coco/* && mv ./download_coco.py ./dataset/coco/
# prepare whole training data
eval "${python} ./dataset/coco/download_coco.py"
elif [ ${MODE} = "cpp_infer" ];then
# download coco lite data
wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar --no-check-certificate
cd ./dataset/coco/ && tar -xvf coco_tipc.tar && mv -n coco_tipc/* .
rm -rf coco_tipc/ && cd ../../
# download wider_face lite data
wget -nc -P ./dataset/wider_face/ https://paddledet.bj.bcebos.com/data/tipc/wider_tipc.tar --no-check-certificate
cd ./dataset/wider_face/ && tar -xvf wider_tipc.tar && mv -n wider_tipc/* .
rm -rf wider_tipc/ && cd ../../
# download spine lite data
wget -nc -P ./dataset/spine_coco/ https://paddledet.bj.bcebos.com/data/tipc/spine_tipc.tar --no-check-certificate
cd ./dataset/spine_coco/ && tar -xvf spine_tipc.tar && mv -n spine_tipc/* .
rm -rf spine_tipc/ && cd ../../
if [[ ${model_name} =~ "s2anet" ]]; then
cd ./ppdet/ext_op && eval "${python} setup.py install"
cd ../../
elif [[ ${model_name} =~ "tinypose" ]]; then
wget -nc -P ./output_inference/ https://bj.bcebos.com/v1/paddledet/models/keypoint/picodet_s_320_pedestrian.tar --no-check-certificate
cd ./output_inference/ && tar -xvf picodet_s_320_pedestrian.tar
cd ../
fi
# download KL model
if [[ ${model_name} = "picodet_lcnet_1_5x_416_coco_KL" ]]; then
wget -nc -P ./output_inference/picodet_lcnet_1_5x_416_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/picodet_lcnet_1_5x_416_coco_ptq.tar --no-check-certificate
cd ./output_inference/picodet_lcnet_1_5x_416_coco_KL/ && tar -xvf picodet_lcnet_1_5x_416_coco_ptq.tar && mv -n picodet_lcnet_1_5x_416_coco_ptq/* .
cd ../../
elif [[ ${model_name} = "ppyoloe_crn_s_300e_coco_KL" ]]; then
wget -nc -P ./output_inference/ppyoloe_crn_s_300e_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/ppyoloe_crn_s_300e_coco_ptq.tar --no-check-certificate
cd ./output_inference/ppyoloe_crn_s_300e_coco_KL/ && tar -xvf ppyoloe_crn_s_300e_coco_ptq.tar && mv -n ppyoloe_crn_s_300e_coco_ptq/* .
cd ../../
elif [[ ${model_name} = "ppyolo_mbv3_large_coco_KL" ]]; then
wget -nc -P ./output_inference/ppyolo_mbv3_large_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/ppyolo_mbv3_large_ptq.tar --no-check-certificate
cd ./output_inference/ppyolo_mbv3_large_coco_KL/ && tar -xvf ppyolo_mbv3_large_ptq.tar && mv -n ppyolo_mbv3_large_ptq/* .
cd ../../
elif [[ ${model_name} = "mask_rcnn_r50_fpn_1x_coco_KL" ]]; then
wget -nc -P ./output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/mask_rcnn_r50_fpn_1x_coco_ptq.tar --no-check-certificate
cd ./output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ && tar -xvf mask_rcnn_r50_fpn_1x_coco_ptq.tar && mv -n mask_rcnn_r50_fpn_1x_coco_ptq/* .
cd ../../
elif [[ ${model_name} = "tinypose_128x96_KL" ]]; then
wget -nc -P ./output_inference/tinypose_128x96_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/tinypose_128x96_ptq.tar --no-check-certificate
cd ./output_inference/tinypose_128x96_KL/ && tar -xvf tinypose_128x96_ptq.tar && mv -n tinypose_128x96_ptq/* .
cd ../../
fi
# download mot lite data
wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar --no-check-certificate
cd ./dataset/mot/ && tar -xvf mot_tipc.tar && mv -n mot_tipc/* .
rm -rf mot_tipc/ && cd ../../
opencv_dir=$(func_parser_value "${lines[15]}")
# prepare opencv
cd ./deploy/cpp
if [ ${opencv_dir} = "default" ] || [ ${opencv_dir} = "null" ]; then
if [ -d "deps/opencv-3.4.16_gcc8.2_ffmpeg/" ]; then
echo "################### Opencv already exists, skip downloading. ###################"
else
mkdir -p $(pwd)/deps && cd $(pwd)/deps
wget -c https://paddledet.bj.bcebos.com/data/opencv-3.4.16_gcc8.2_ffmpeg.tar.gz --no-check-certificate
tar -xvf opencv-3.4.16_gcc8.2_ffmpeg.tar.gz && cd ../
echo "################### Finish downloading opencv. ###################"
fi
fi
cd ../../
elif [ ${MODE} = "benchmark_train" ];then
pip install -U pip
pip install Cython
pip install -r requirements.txt
if [[ ${model_name} =~ "higherhrnet" ]] || [[ ${model_name} =~ "hrnet" ]] || [[ ${model_name} =~ "tinypose" ]];then
wget -nc -P ./dataset/ https://bj.bcebos.com/v1/paddledet/data/coco.tar --no-check-certificate
cd ./dataset/ && tar -xf coco.tar
ls ./coco/
cd ../
pip3 install opencv-python==4.5.2.54
elif [[ ${model_name} =~ "ppyoloe_r_crn_s_3x_spine_coco" ]];then
wget -nc -P ./dataset/spine_coco/ https://paddledet.bj.bcebos.com/data/tipc/spine_coco_tipc.tar --no-check-certificate
cd ./dataset/spine_coco/ && tar -xvf spine_coco_tipc.tar && mv -n spine_coco_tipc/* .
rm -rf spine_coco_tipc/ && cd ../../
cd ./ppdet/ext_op && eval "${python} setup.py install"
cd ../../
else
# prepare lite benchmark coco data
wget -nc -P ./dataset/coco/ https://bj.bcebos.com/v1/paddledet/data/cocomini.zip --no-check-certificate
cd ./dataset/coco/ && unzip cocomini.zip
mv -u cocomini/* ./
ls ./
cd ../../
# prepare lite benchmark mot data
wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/mot_benchmark.tar --no-check-certificate
cd ./dataset/mot/ && tar -xf mot_benchmark.tar
mv -u mot_benchmark/* ./
ls ./
cd ../../
fi
elif [ ${MODE} = "paddle2onnx_infer" ];then
# install paddle2onnx
${python} -m pip install paddle2onnx
${python} -m pip install onnx onnxruntime
elif [ ${MODE} = "serving_infer" ];then
unset https_proxy http_proxy
# download coco lite data
wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar --no-check-certificate
cd ./dataset/coco/ && tar -xvf coco_tipc.tar && mv -n coco_tipc/* .
rm -rf coco_tipc/ && cd ../../
# download KL model
if [[ ${model_name} = "picodet_lcnet_1_5x_416_coco_KL" ]]; then
wget -nc -P ./output_inference/picodet_lcnet_1_5x_416_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/picodet_lcnet_1_5x_416_coco_ptq.tar --no-check-certificate
cd ./output_inference/picodet_lcnet_1_5x_416_coco_KL/ && tar -xvf picodet_lcnet_1_5x_416_coco_ptq.tar && mv -n picodet_lcnet_1_5x_416_coco_ptq/* .
cd ../../
eval "${python} -m paddle_serving_client.convert --dirname output_inference/picodet_lcnet_1_5x_416_coco_KL/ --model_filename model.pdmodel --params_filename model.pdiparams --serving_server output_inference/picodet_lcnet_1_5x_416_coco_KL/serving_server --serving_client output_inference/picodet_lcnet_1_5x_416_coco_KL/serving_client"
elif [[ ${model_name} = "ppyoloe_crn_s_300e_coco_KL" ]]; then
wget -nc -P ./output_inference/ppyoloe_crn_s_300e_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/ppyoloe_crn_s_300e_coco_ptq.tar --no-check-certificate
cd ./output_inference/ppyoloe_crn_s_300e_coco_KL/ && tar -xvf ppyoloe_crn_s_300e_coco_ptq.tar && mv -n ppyoloe_crn_s_300e_coco_ptq/* .
cd ../../
eval "${python} -m paddle_serving_client.convert --dirname output_inference/ppyoloe_crn_s_300e_coco_KL/ --model_filename model.pdmodel --params_filename model.pdiparams --serving_server output_inference/ppyoloe_crn_s_300e_coco_KL/serving_server --serving_client output_inference/ppyoloe_crn_s_300e_coco_KL/serving_client"
elif [[ ${model_name} = "ppyolo_mbv3_large_coco_KL" ]]; then
wget -nc -P ./output_inference/ppyolo_mbv3_large_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/ppyolo_mbv3_large_ptq.tar --no-check-certificate
cd ./output_inference/ppyolo_mbv3_large_coco_KL/ && tar -xvf ppyolo_mbv3_large_ptq.tar && mv -n ppyolo_mbv3_large_ptq/* .
cd ../../
eval "${python} -m paddle_serving_client.convert --dirname output_inference/ppyolo_mbv3_large_coco_KL/ --model_filename model.pdmodel --params_filename model.pdiparams --serving_server output_inference/ppyolo_mbv3_large_coco_KL/serving_server --serving_client output_inference/ppyolo_mbv3_large_coco_KL/serving_client"
elif [[ ${model_name} = "mask_rcnn_r50_fpn_1x_coco_KL" ]]; then
wget -nc -P ./output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/mask_rcnn_r50_fpn_1x_coco_ptq.tar --no-check-certificate
cd ./output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ && tar -xvf mask_rcnn_r50_fpn_1x_coco_ptq.tar && mv -n mask_rcnn_r50_fpn_1x_coco_ptq/* .
cd ../../
eval "${python} -m paddle_serving_client.convert --dirname output_inference/mask_rcnn_r50_fpn_1x_coco_KL/ --model_filename model.pdmodel --params_filename model.pdiparams --serving_server output_inference/mask_rcnn_r50_fpn_1x_coco_KL/serving_server --serving_client output_inference/mask_rcnn_r50_fpn_1x_coco_KL/serving_client"
elif [[ ${model_name} = "tinypose_128x96_KL" ]]; then
wget -nc -P ./output_inference/tinypose_128x96_KL/ https://bj.bcebos.com/v1/paddledet/data/tipc/models/tinypose_128x96_ptq.tar --no-check-certificate
cd ./output_inference/tinypose_128x96_KL/ && tar -xvf tinypose_128x96_ptq.tar && mv -n tinypose_128x96_ptq/* .
cd ../../
eval "${python} -m paddle_serving_client.convert --dirname output_inference/tinypose_128x96_KL/ --model_filename model.pdmodel --params_filename model.pdiparams --serving_server output_inference/tinypose_128x96_KL/serving_server --serving_client output_inference/tinypose_128x96_KL/serving_client"
fi
else
# download coco lite data
wget -nc -P ./dataset/coco/ https://paddledet.bj.bcebos.com/data/tipc/coco_tipc.tar --no-check-certificate
cd ./dataset/coco/ && tar -xvf coco_tipc.tar && mv -n coco_tipc/* .
rm -rf coco_tipc/ && cd ../../
# download wider_face lite data
wget -nc -P ./dataset/wider_face/ https://paddledet.bj.bcebos.com/data/tipc/wider_tipc.tar --no-check-certificate
cd ./dataset/wider_face/ && tar -xvf wider_tipc.tar && mv -n wider_tipc/* .
rm -rf wider_tipc/ && cd ../../
# download spine_coco lite data
wget -nc -P ./dataset/spine_coco/ https://paddledet.bj.bcebos.com/data/tipc/spine_coco_tipc.tar --no-check-certificate
cd ./dataset/spine_coco/ && tar -xvf spine_coco_tipc.tar && mv -n spine_coco_tipc/* .
rm -rf spine_coco_tipc/ && cd ../../
if [[ ${model_name} =~ "s2anet" ]]; then
cd ./ppdet/ext_op && eval "${python} setup.py install"
cd ../../
elif [[ ${model_name} =~ "ppyoloe_r_crn_s_3x_spine_coco" ]]; then
cd ./ppdet/ext_op && eval "${python} setup.py install"
cd ../../
elif [[ ${model_name} =~ "fcosr_x50_3x_spine_coco" ]]; then
cd ./ppdet/ext_op && eval "${python} setup.py install"
cd ../../
fi
# download mot lite data
wget -nc -P ./dataset/mot/ https://paddledet.bj.bcebos.com/data/tipc/mot_tipc.tar --no-check-certificate
cd ./dataset/mot/ && tar -xvf mot_tipc.tar && mv -n mot_tipc/* .
rm -rf mot_tipc/ && cd ../../
fi
| PaddleDetection/test_tipc/prepare.sh/0 | {
"file_path": "PaddleDetection/test_tipc/prepare.sh",
"repo_id": "PaddleDetection",
"token_count": 5691
} | 97 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# add python path of PaddleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))
sys.path.insert(0, parent_path)
# ignore warning log
import warnings
warnings.filterwarnings('ignore')
import paddle
from ppdet.core.workspace import create, load_config, merge_config
from ppdet.utils.check import check_gpu, check_npu, check_xpu, check_mlu, check_version, check_config
from ppdet.utils.cli import ArgsParser, merge_args
from ppdet.engine import Trainer, Trainer_ARSL, init_parallel_env
from ppdet.metrics.coco_utils import json_eval_results
from ppdet.slim import build_slim_model
from ppdet.utils.logger import setup_logger
logger = setup_logger('eval')
def parse_args():
parser = ArgsParser()
parser.add_argument(
"--output_eval",
default=None,
type=str,
help="Evaluation directory, default is current directory.")
parser.add_argument(
'--json_eval',
action='store_true',
default=False,
help='Whether to re eval with already exists bbox.json or mask.json')
parser.add_argument(
"--slim_config",
default=None,
type=str,
help="Configuration file of slim method.")
# TODO: bias should be unified
parser.add_argument(
"--bias",
action="store_true",
help="whether add bias or not while getting w and h")
parser.add_argument(
"--classwise",
action="store_true",
help="whether per-category AP and draw P-R Curve or not.")
parser.add_argument(
'--save_prediction_only',
action='store_true',
default=False,
help='Whether to save the evaluation results only')
parser.add_argument(
"--amp",
action='store_true',
default=False,
help="Enable auto mixed precision eval.")
# for smalldet slice_infer
parser.add_argument(
"--slice_infer",
action='store_true',
help="Whether to slice the image and merge the inference results for small object detection."
)
parser.add_argument(
'--slice_size',
nargs='+',
type=int,
default=[640, 640],
help="Height of the sliced image.")
parser.add_argument(
"--overlap_ratio",
nargs='+',
type=float,
default=[0.25, 0.25],
help="Overlap height ratio of the sliced image.")
parser.add_argument(
"--combine_method",
type=str,
default='nms',
help="Combine method of the sliced images' detection results, choose in ['nms', 'nmm', 'concat']."
)
parser.add_argument(
"--match_threshold",
type=float,
default=0.6,
help="Combine method matching threshold.")
parser.add_argument(
"--match_metric",
type=str,
default='ios',
help="Combine method matching metric, choose in ['iou', 'ios'].")
args = parser.parse_args()
return args
def run(FLAGS, cfg):
if FLAGS.json_eval:
logger.info(
"In json_eval mode, PaddleDetection will evaluate json files in "
"output_eval directly. And proposal.json, bbox.json and mask.json "
"will be detected by default.")
json_eval_results(
cfg.metric,
json_directory=FLAGS.output_eval,
dataset=create('EvalDataset')())
return
# init parallel environment if nranks > 1
init_parallel_env()
ssod_method = cfg.get('ssod_method', None)
if ssod_method == 'ARSL':
# build ARSL_trainer
trainer = Trainer_ARSL(cfg, mode='eval')
# load ARSL_weights
trainer.load_weights(cfg.weights, ARSL_eval=True)
else:
# build trainer
trainer = Trainer(cfg, mode='eval')
        # load weights
trainer.load_weights(cfg.weights)
    # evaluation
if FLAGS.slice_infer:
trainer.evaluate_slice(
slice_size=FLAGS.slice_size,
overlap_ratio=FLAGS.overlap_ratio,
combine_method=FLAGS.combine_method,
match_threshold=FLAGS.match_threshold,
match_metric=FLAGS.match_metric)
else:
trainer.evaluate()
def main():
FLAGS = parse_args()
cfg = load_config(FLAGS.config)
merge_args(cfg, FLAGS)
merge_config(FLAGS.opt)
# disable npu in config by default
if 'use_npu' not in cfg:
cfg.use_npu = False
# disable xpu in config by default
if 'use_xpu' not in cfg:
cfg.use_xpu = False
if 'use_gpu' not in cfg:
cfg.use_gpu = False
# disable mlu in config by default
if 'use_mlu' not in cfg:
cfg.use_mlu = False
if cfg.use_gpu:
place = paddle.set_device('gpu')
elif cfg.use_npu:
place = paddle.set_device('npu')
elif cfg.use_xpu:
place = paddle.set_device('xpu')
elif cfg.use_mlu:
place = paddle.set_device('mlu')
else:
place = paddle.set_device('cpu')
if FLAGS.slim_config:
cfg = build_slim_model(cfg, FLAGS.slim_config, mode='eval')
check_config(cfg)
check_gpu(cfg.use_gpu)
check_npu(cfg.use_npu)
check_xpu(cfg.use_xpu)
check_mlu(cfg.use_mlu)
check_version()
run(FLAGS, cfg)
if __name__ == '__main__':
main()
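# Typical command-line usage (config and weights paths are illustrative; any
# PaddleDetection config/weights pair follows the same pattern):
#   python tools/eval.py -c configs/yolov3/yolov3_darknet53_270e_coco.yml \
#       -o weights=output/yolov3_darknet53_270e_coco/model_final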
| PaddleDetection/tools/eval.py/0 | {
"file_path": "PaddleDetection/tools/eval.py",
"repo_id": "PaddleDetection",
"token_count": 2529
} | 98 |
{
"add_prefix_space": false,
"bos_token": "<|endoftext|>",
"eos_token": "<|endoftext|>",
"model_max_length": 1024,
"name_or_path": "Salesforce/codegen-350M-mono",
"special_tokens_map_file": null,
"tokenizer_class": "CodeGenTokenizer",
"unk_token": "<|endoftext|>"
}
| fauxpilot/copilot_proxy/cgtok/tokenizer_config.json/0 | {
"file_path": "fauxpilot/copilot_proxy/cgtok/tokenizer_config.json",
"repo_id": "fauxpilot",
"token_count": 108
} | 99 |
"""
Tests setup script (currently for Python backend)
"""
import os
import shutil
import signal
import subprocess
from pathlib import Path
from typing import Dict, Union
import pexpect
import pytest
import requests
curdir = Path(__file__).parent
root = curdir.parent.parent
test_models_dir = curdir.joinpath("models")
def setup_module():
"""
Setup steps for tests in this module
"""
assert root.joinpath("setup.sh").exists(), "setup.sh not found"
if root.joinpath(".env").exists():
shutil.move(str(root.joinpath(".env")), str(root.joinpath(".env.bak")))
def teardown_module():
"""
Teardown steps for tests in this module
"""
if root.joinpath(".env.bak").exists():
shutil.move(str(root.joinpath(".env.bak")), str(root.joinpath(".env")))
try:
if test_models_dir:
shutil.rmtree(test_models_dir)
except Exception as exc:
print(
f"WARNING: Couldn't delete `{test_models_dir}` most likely due to permission issues."
f"Run the tests with sudo to ensure this gets deleted automatically, or else delete manually. "
f"Exception: {exc}"
)
def enter_input(proc: pexpect.spawn, expect: str, input_s: str, timeout: int = 5) -> str:
"""
Helper function to enter input for a given prompt. Returns consumed output.
"""
try:
proc.expect(expect, timeout=timeout)
except pexpect.exceptions.TIMEOUT as exc:
raise AssertionError(
f"Timeout waiting for prompt: `{expect}`.\n"
f"Output-before: `{proc.before}`\nOutput-after: `{proc.after}`"
) from exc
after = str(proc.after)
print(after)
proc.sendline(input_s)
return after
def run_common_setup_steps(n_gpus: int = 0) -> pexpect.spawn:
"""
Helper function to run common setup steps.
"""
proc = pexpect.pty_spawn.spawn(
"./setup.sh 2>&1", encoding="utf-8", cwd=str(root),
)
proc.ignorecase = True
enter_input(proc, r".*Enter number of GPUs[^:]+: ?", str(n_gpus))
enter_input(proc, r".*port for the API[^:]+: ?", "5000")
enter_input(proc, r".*Address for Triton[^:]+: ?", "triton")
enter_input(proc, r".*Port of Triton[^:]+: ?", "8001")
enter_input(proc, r".*save your models[^\?]+\? ?", str(test_models_dir.absolute()))
return proc
def load_test_env():
"""
Load test env vars
"""
# Without loading default env vars, PATH won't be set correctly
env = os.environ.copy()
with open(curdir.joinpath("test.env"), "r", encoding="utf8") as test_env:
for line in test_env:
key, val = line.strip().split("=")
env[key] = val
return env
def run_inference(
prompt: str, model: str = "py-model", port: int = 5000, return_all: bool = False,
**kwargs
) -> Union[str, Dict]:
"""
Invokes the copilot proxy with the given prompt and returns the completion
"""
endpoint = f"http://localhost:{port}/v1/engines/codegen/completions"
data = {
"model": model,
"prompt": prompt,
"suffix": kwargs.get("suffix", ""),
"max_tokens": kwargs.get("max_tokens", 16),
"temperature": kwargs.get("temperature", 0.0),
"top_p": kwargs.get("top_p", 1.0),
"n": kwargs.get("n", 1),
"stream": kwargs.get("stream", None), # it's not true/false. It's None or not None :[
"logprobs": kwargs.get("logprobs", 0),
"stop": kwargs.get("stop", ""),
"echo": kwargs.get("echo", True),
"presence_penalty": kwargs.get("presence_penalty", 0.0),
"frequency_penalty": kwargs.get("frequency_penalty", 0.0),
"best_of": kwargs.get("best_of", 1),
"logit_bias": kwargs.get("logit_bias", {}),
"user": kwargs.get("user", "test"),
}
response = requests.post(endpoint, json=data)
response.raise_for_status()
if return_all:
return response.json()
return response.json()["choices"][0]["text"]
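# Example (illustrative): run_inference("def hello_world():\n", max_tokens=16)
# returns the completion text produced by the locally served codegen model.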
@pytest.mark.parametrize("n_gpus", [0]) # we don't have a GPU on CI
def test_python_backend(n_gpus: int):
"""
Step 1: run $root/setup.sh while passing appropriate options via stdin
Step 2: run docker-compose up with test.env sourced
Step 3: call :5000 with appropriate request
"""
proc = run_common_setup_steps(n_gpus)
choices = enter_input(proc, r".*Choose your backend.*Enter your choice[^:]+: ?", "2")
assert "[2] Python backend" in choices, "Option 2 should be Python backend"
choices = enter_input(proc, r".*Models available:.*Enter your choice[^:]+: ?", "1")
assert "[1] codegen-350M-mono" in choices, "Option 1 should be codegen-350M-mono"
enter_input(proc, r".*share (your )?huggingface cache[^:]+: ?", "y")
enter_input(proc, r".*cache directory[^:]+: ?", "") # default
enter_input(proc, r".*use int8[^:]+: ?", "n")
enter_input(proc, r".*run FauxPilot\? \[y/n\] ", "n", timeout=120)
# copy $root/.env to $curdir/test.env
shutil.copy(str(root.joinpath(".env")), str(curdir.joinpath("test.env")))
# run docker-compose up -f docker-compose-{without|with}-gpus.yml
compose_file = f"docker-compose-with{'' if n_gpus > 0 else 'out'}-gpus.yaml"
docker_proc = None
try:
docker_proc = pexpect.pty_spawn.spawn(
f"docker compose -f {compose_file} up",
encoding="utf-8",
cwd=curdir,
env=load_test_env(),
)
print("Waiting for API to be ready...")
docker_proc.expect(r".*Started GRPCInferenceService at 0.0.0.0:8001", timeout=120)
print("API ready, sending request...")
# Simple test 1: hello world prompt without bells and whistles
response = run_inference("def hello_world():\n", max_tokens=16, return_all=True)
assert response["choices"][0]["text"].rstrip() == ' print("Hello World")\n\nhello_world()\n\n#'
assert response["choices"][0]["finish_reason"] == "length"
finally:
if docker_proc is not None and docker_proc.isalive():
docker_proc.kill(signal.SIGINT)
# killing docker-compose process doesn't bring down the containers.
# explicitly stop the containers:
subprocess.run(["docker-compose", "-f", compose_file, "down"], cwd=curdir, check=True, env=load_test_env())
| fauxpilot/tests/python_backend/test_setup.py/0 | {
"file_path": "fauxpilot/tests/python_backend/test_setup.py",
"repo_id": "fauxpilot",
"token_count": 2665
} | 100 |
[
{
"name": "\ud83d\udcd6 - READER: \ud83d\udc3f Jsonl",
"time_stats": {
"total": 2.726222380704712,
"n": 5199,
"mean": 0.0005243743759770558,
"variance": 1.3217056156114112e-05,
"std_dev": 0.003635526943389928,
"min": 9.210023563355207e-06,
"max": 0.03093169000931084,
"total_human": "2 seconds",
"mean_human": "0.52 milliseconds",
"std_dev_human": "3.64 milliseconds",
"min_human": "0.01 milliseconds",
"max_human": "30.93 milliseconds",
"global_mean": 0.1703888987940445,
"global_mean_human": "0 seconds",
"global_min": 0.014167226327117532,
"global_min_human": "0 seconds",
"global_max": 0.3217224628897384,
"global_max_human": "0 seconds",
"global_std_dev": 0.14844102588250072,
"global_std_dev_human": "0 seconds"
},
"stats": {
"input_files": 16,
"doc_len": {
"total": 666096707,
"n": 5199,
"mean": 128120.15906905179,
"variance": 786538251733.3838,
"std_dev": 886869.9181578907,
"min": 3,
"max": 11190333
},
"documents": {
"total": 5183,
"n": 16,
"mean": 323.9375,
"variance": 59.92916666666681,
"std_dev": 7.741393070156483,
"min": 312,
"max": 340,
"unit": "input_file"
}
}
},
{
"name": "\ud83e\udec2 - DEDUP: \ud83c\udfaf MinHash stage 1",
"time_stats": {
"total": 402.93095610296587,
"n": 16,
"mean": 25.183184756435367,
"variance": 343.50627465273016,
"std_dev": 18.533922268444154,
"min": 2.4529826429788955,
"max": 46.265308337053284,
"total_human": "6 minutes and 42 seconds",
"mean_human": "25 seconds and 183.19 milliseconds",
"std_dev_human": "18 seconds and 533.92 milliseconds",
"min_human": "2 seconds and 452.98 milliseconds",
"max_human": "46 seconds and 265.31 milliseconds",
"global_mean": 25.183184756435367,
"global_mean_human": "25 seconds",
"global_min": 2.4529826429788955,
"global_min_human": "2 seconds",
"global_max": 46.265308337053284,
"global_max_human": "46 seconds",
"global_std_dev": 18.533922268444158,
"global_std_dev_human": "18 seconds"
},
"stats": {
"total": 5199
}
}
] | get-data/logs/2024-07-05_01-48-57_flgve/stats.json/0 | {
"file_path": "get-data/logs/2024-07-05_01-48-57_flgve/stats.json",
"repo_id": "get-data",
"token_count": 1649
} | 101 |
# InsightFace: 2D and 3D Face Analysis Project
<div align="left">
<img src="https://insightface.ai/assets/img/custom/logo3.jpg" width="240"/>
</div>
The InsightFace project is mainly maintained by [Jia Guo](mailto:guojia@gmail.com?subject=[GitHub]%20InsightFace%20Project) and [Jiankang Deng](https://jiankangdeng.github.io/).
For all main contributors, please check [contributing](#contributing).
## License
The code of InsightFace is released under the MIT License. There is no limitation for both academic and commercial usage.
The training data containing the annotation (and the models trained with these data) are available for non-commercial research purposes only.
Both manually downloaded models from our GitHub repo and auto-downloaded models from our [python-library](python-package) follow the above license policy (which is for non-commercial research purposes only).
## Top News
**`2024-04-17`**: [Monocular Identity-Conditioned Facial Reflectance Reconstruction](https://arxiv.org/abs/2404.00301) accepted by [CVPR-2024](https://cvpr.thecvf.com/Conferences/2024).
**`2023-04-01`**: We move the swapping demo to a Discord bot, which supports editing on Midjourney-generated images; see details at [web-demos/swapping_discord](web-demos/swapping_discord).
**`2022-08-12`**: We achieved Rank-1st of
[Perspective Projection Based Monocular 3D Face Reconstruction Challenge](https://tianchi.aliyun.com/competition/entrance/531961/introduction)
of [ECCV-2022 WCPA Workshop](https://sites.google.com/view/wcpa2022), [paper](https://arxiv.org/abs/2208.07142) and [code](reconstruction/jmlr).
**`2021-11-30`**: [MFR-Ongoing](challenges/mfr) challenge launched (same as IFRT), which is an extended version of [iccv21-mfr](challenges/iccv21-mfr).
**`2021-10-29`**: We achieved 1st place on the [VISA track](https://pages.nist.gov/frvt/plots/11/visa.html) of [NIST-FRVT 1:1](https://pages.nist.gov/frvt/html/frvt11.html) by using Partial FC (Xiang An, Jiankang Deng, Jia Guo).
## ChangeLogs
**`2023-08-08`**: We released the implementation of [Generalizing Gaze Estimation with Weak-Supervision from Synthetic Views](https://arxiv.org/abs/2212.02997) at [reconstruction/gaze](reconstruction/gaze).
**`2023-05-03`**: We have launched the ongoing version of wild face anti-spoofing challenge. See details [here](https://github.com/deepinsight/insightface/tree/master/challenges/cvpr23-fas-wild#updates).
**`2023-04-01`**: We move the swapping demo to a Discord bot, which supports editing on Midjourney-generated images; see details at [web-demos/swapping_discord](web-demos/swapping_discord).
**`2023-02-13`**: We launch a large scale in the wild face anti-spoofing challenge on CVPR23 Workshop, see details at [challenges/cvpr23-fas-wild](challenges/cvpr23-fas-wild).
**`2022-11-28`**: Single line code for facial identity swapping in our python packge ver 0.7, please check the example [here](examples/in_swapper).
**`2022-10-28`**: [MFR-Ongoing](http://iccv21-mfr.com) website is refactored, please create issues if there's any bug.
**`2022-09-22`**: Now we have [web-demos](web-demos): [face-localization](http://demo.insightface.ai:7007/), [face-recognition](http://demo.insightface.ai:7008/), and [face-swapping](http://demo.insightface.ai:7009/).
**`2022-08-12`**: We achieved Rank-1st of
[Perspective Projection Based Monocular 3D Face Reconstruction Challenge](https://tianchi.aliyun.com/competition/entrance/531961/introduction)
of [ECCV-2022 WCPA Workshop](https://sites.google.com/view/wcpa2022), [paper](https://arxiv.org/abs/2208.07142) and [code](reconstruction/jmlr).
**`2022-03-30`**: [Partial FC](https://arxiv.org/abs/2203.15565) accepted by CVPR-2022.
**`2022-02-23`**: [SCRFD](detection/scrfd) accepted by [ICLR-2022](https://iclr.cc/Conferences/2022).
**`2021-11-30`**: [MFR-Ongoing](challenges/mfr) challenge launched (same as IFRT), which is an extended version of [iccv21-mfr](challenges/iccv21-mfr).
**`2021-10-29`**: We achieved 1st place on the [VISA track](https://pages.nist.gov/frvt/plots/11/visa.html) of [NIST-FRVT 1:1](https://pages.nist.gov/frvt/html/frvt11.html) by using Partial FC (Xiang An, Jiankang Deng, Jia Guo).
**`2021-10-11`**: [Leaderboard](https://insightface.ai/mfr21) of [ICCV21 - Masked Face Recognition Challenge](challenges/iccv21-mfr) released. Video: [Youtube](https://www.youtube.com/watch?v=lL-7l5t6x2w), [Bilibili](https://www.bilibili.com/video/BV15b4y1h79N/).
**`2021-06-05`**: We launch a [Masked Face Recognition Challenge & Workshop](challenges/iccv21-mfr) on ICCV 2021.
## Introduction
[InsightFace](https://insightface.ai) is an open source 2D&3D deep face analysis toolbox, mainly based on PyTorch and MXNet.
Please check our [website](https://insightface.ai) for details.
The master branch works with **PyTorch 1.6+** and/or **MXNet=1.6-1.8**, with **Python 3.x**.
InsightFace efficiently implements a rich variety of state-of-the-art algorithms for face recognition, face detection and face alignment, optimized for both training and deployment.
## Quick Start
Please start with our [python-package](python-package/) for testing detection, recognition and alignment models on input images.
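A minimal sketch of the typical flow (the `buffalo_l` model-pack name, the image path and the attribute names follow recent versions of the python-package and may differ in older releases):

```python
import cv2
from insightface.app import FaceAnalysis

app = FaceAnalysis(name='buffalo_l')        # downloads the model pack on first use
app.prepare(ctx_id=0, det_size=(640, 640))  # ctx_id=0: first GPU, ctx_id=-1: CPU
faces = app.get(cv2.imread('t1.jpg'))
for face in faces:
    print(face.bbox, face.det_score, face.normed_embedding.shape)
```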
### ArcFace Video Demo
[<img src=https://insightface.ai/assets/img/github/facerecognitionfromvideo.PNG width="760" />](https://www.youtube.com/watch?v=y-D1tReryGA&t=81s)
Please click the image to watch the Youtube video. For Bilibili users, click [here](https://www.bilibili.com/video/av38041494?from=search&seid=11501833604850032313).
## Projects
The [page](https://insightface.ai/projects) on the InsightFace website also describes all supported projects in InsightFace.
You may also be interested in some [challenges](https://insightface.ai/challenges) held by InsightFace.
## Face Recognition
### Introduction
In this module, we provide training data, network settings and loss designs for deep face recognition.
The supported methods are as follows:
- [x] [ArcFace_mxnet (CVPR'2019)](recognition/arcface_mxnet)
- [x] [ArcFace_torch (CVPR'2019)](recognition/arcface_torch)
- [x] [SubCenter ArcFace (ECCV'2020)](recognition/subcenter_arcface)
- [x] [PartialFC_mxnet (CVPR'2022)](recognition/partial_fc)
- [x] [PartialFC_torch (CVPR'2022)](recognition/arcface_torch)
- [x] [VPL (CVPR'2021)](recognition/vpl)
- [x] [Arcface_oneflow](recognition/arcface_oneflow)
- [x] [ArcFace_Paddle (CVPR'2019)](recognition/arcface_paddle)
Commonly used network backbones are included in most of the methods, such as IResNet, MobilefaceNet, MobileNet, InceptionResNet_v2, DenseNet, etc.
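As a quick illustration of the loss design behind ArcFace (a simplified sketch of the additive angular margin idea, not the exact code in `recognition/arcface_torch`; `s` and `m` are typical default values):

```python
import torch
import torch.nn.functional as F

def arcface_logits(embeddings, weight, labels, s=64.0, m=0.5):
    # cosine similarity between L2-normalized embeddings and class centers
    cosine = F.linear(F.normalize(embeddings), F.normalize(weight))
    theta = torch.acos(cosine.clamp(-1.0 + 1e-7, 1.0 - 1e-7))
    one_hot = torch.zeros_like(cosine).scatter_(1, labels.view(-1, 1), 1.0)
    # add the angular margin m only to the target-class angle, then rescale
    return s * torch.cos(theta + m * one_hot)  # pass to F.cross_entropy with labels
```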
### Datasets
The training data includes, but is not limited to, the cleaned MS1M, VGG2 and CASIA-Webface datasets, which are already packed in MXNet binary format. Please check the [dataset](recognition/_datasets_) page for details.
### Evaluation
We provide standard IJB and Megaface evaluation pipelines in [evaluation](recognition/_evaluation_).
### Pretrained Models
**Please check [Model-Zoo](https://github.com/deepinsight/insightface/wiki/Model-Zoo) for more pretrained models.**
### Third-party Re-implementation of ArcFace
- TensorFlow: [InsightFace_TF](https://github.com/auroua/InsightFace_TF)
- TensorFlow: [tf-insightface](https://github.com/AIInAi/tf-insightface)
- TensorFlow:[insightface](https://github.com/Fei-Wang/insightface)
- PyTorch: [InsightFace_Pytorch](https://github.com/TreB1eN/InsightFace_Pytorch)
- PyTorch: [arcface-pytorch](https://github.com/ronghuaiyang/arcface-pytorch)
- Caffe: [arcface-caffe](https://github.com/xialuxi/arcface-caffe)
- Caffe: [CombinedMargin-caffe](https://github.com/gehaocool/CombinedMargin-caffe)
- Tensorflow: [InsightFace-tensorflow](https://github.com/luckycallor/InsightFace-tensorflow)
- TensorRT: [wang-xinyu/tensorrtx](https://github.com/wang-xinyu/tensorrtx)
- TensorRT: [InsightFace-REST](https://github.com/SthPhoenix/InsightFace-REST)
- ONNXRuntime C++: [ArcFace-ONNXRuntime](https://github.com/DefTruth/lite.ai.toolkit/blob/main/lite/ort/cv/glint_arcface.cpp)
- ONNXRuntime Go: [arcface-go](https://github.com/jack139/arcface-go)
- MNN: [ArcFace-MNN](https://github.com/DefTruth/lite.ai.toolkit/blob/main/lite/mnn/cv/mnn_glint_arcface.cpp)
- TNN: [ArcFace-TNN](https://github.com/DefTruth/lite.ai.toolkit/blob/main/lite/tnn/cv/tnn_glint_arcface.cpp)
- NCNN: [ArcFace-NCNN](https://github.com/DefTruth/lite.ai.toolkit/blob/main/lite/ncnn/cv/ncnn_glint_arcface.cpp)
## Face Detection
### Introduction
<div align="left">
<img src="https://insightface.ai/assets/img/github/11513D05.jpg" width="640"/>
</div>
In this module, we provide training data with annotation, network settings and loss designs for face detection training, evaluation and inference.
The supported methods are as follows:
- [x] [RetinaFace (CVPR'2020)](detection/retinaface)
- [x] [SCRFD (Arxiv'2021)](detection/scrfd)
- [x] [blazeface_paddle](detection/blazeface_paddle)
[RetinaFace](detection/retinaface) is a practical single-stage face detector which was accepted by [CVPR 2020](https://openaccess.thecvf.com/content_CVPR_2020/html/Deng_RetinaFace_Single-Shot_Multi-Level_Face_Localisation_in_the_Wild_CVPR_2020_paper.html). We provide training code, training dataset, pretrained models and evaluation scripts.
[SCRFD](detection/scrfd) is an efficient high-accuracy face detection approach which was initially described on [Arxiv](https://arxiv.org/abs/2105.04714). We provide an easy-to-use pipeline to train high-efficiency face detectors with NAS support.
## Face Alignment
### Introduction
<div align="left">
<img src="https://insightface.ai/assets/img/custom/thumb_sdunet.png" width="600"/>
</div>
In this module, we provide datasets and training/inference pipelines for face alignment.
Supported methods:
- [x] [SDUNets (BMVC'2018)](alignment/heatmap)
- [x] [SimpleRegression](alignment/coordinate_reg)
[SDUNets](alignment/heatmap) is a heatmap-based method which was accepted at [BMVC](http://bmvc2018.org/contents/papers/0051.pdf).
[SimpleRegression](alignment/coordinate_reg) provides very lightweight facial landmark models with fast coordinate regression. The input of these models is a loosely cropped face image, while the output is the direct landmark coordinates.
## Citation
If you find *InsightFace* useful in your research, please consider to cite the following related papers:
```
@inproceedings{ren2023pbidr,
title={Facial Geometric Detail Recovery via Implicit Representation},
author={Ren, Xingyu and Lattas, Alexandros and Gecer, Baris and Deng, Jiankang and Ma, Chao and Yang, Xiaokang},
booktitle={2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG)},
year={2023}
}
@article{guo2021sample,
title={Sample and Computation Redistribution for Efficient Face Detection},
author={Guo, Jia and Deng, Jiankang and Lattas, Alexandros and Zafeiriou, Stefanos},
journal={arXiv preprint arXiv:2105.04714},
year={2021}
}
@inproceedings{gecer2021ostec,
title={OSTeC: One-Shot Texture Completion},
author={Gecer, Baris and Deng, Jiankang and Zafeiriou, Stefanos},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
year={2021}
}
@inproceedings{an2020partical_fc,
title={Partial FC: Training 10 Million Identities on a Single Machine},
author={An, Xiang and Zhu, Xuhan and Xiao, Yang and Wu, Lan and Zhang, Ming and Gao, Yuan and Qin, Bin and
Zhang, Debing and Fu Ying},
booktitle={Arxiv 2010.05222},
year={2020}
}
@inproceedings{deng2020subcenter,
title={Sub-center ArcFace: Boosting Face Recognition by Large-scale Noisy Web Faces},
author={Deng, Jiankang and Guo, Jia and Liu, Tongliang and Gong, Mingming and Zafeiriou, Stefanos},
booktitle={Proceedings of the IEEE Conference on European Conference on Computer Vision},
year={2020}
}
@inproceedings{Deng2020CVPR,
title = {RetinaFace: Single-Shot Multi-Level Face Localisation in the Wild},
author = {Deng, Jiankang and Guo, Jia and Ververas, Evangelos and Kotsia, Irene and Zafeiriou, Stefanos},
booktitle = {CVPR},
year = {2020}
}
@inproceedings{guo2018stacked,
title={Stacked Dense U-Nets with Dual Transformers for Robust Face Alignment},
author={Guo, Jia and Deng, Jiankang and Xue, Niannan and Zafeiriou, Stefanos},
booktitle={BMVC},
year={2018}
}
@article{deng2018menpo,
title={The Menpo benchmark for multi-pose 2D and 3D facial landmark localisation and tracking},
author={Deng, Jiankang and Roussos, Anastasios and Chrysos, Grigorios and Ververas, Evangelos and Kotsia, Irene and Shen, Jie and Zafeiriou, Stefanos},
journal={IJCV},
year={2018}
}
@inproceedings{deng2018arcface,
title={ArcFace: Additive Angular Margin Loss for Deep Face Recognition},
author={Deng, Jiankang and Guo, Jia and Niannan, Xue and Zafeiriou, Stefanos},
booktitle={CVPR},
year={2019}
}
```
## Contributing
Main contributors:
- [Jia Guo](https://github.com/nttstar), ``guojia[at]gmail.com``
- [Jiankang Deng](https://github.com/jiankangdeng) ``jiankangdeng[at]gmail.com``
- [Xiang An](https://github.com/anxiangsir) ``anxiangsir[at]gmail.com``
- [Jack Yu](https://github.com/szad670401) ``jackyu961127[at]gmail.com``
- [Baris Gecer](https://barisgecer.github.io/) ``barisgecer[at]msn.com``
| insightface/README.md/0 | {
"file_path": "insightface/README.md",
"repo_id": "insightface",
"token_count": 4547
} | 102 |
import numpy as np
import albumentations as A
from albumentations.core.transforms_interface import ImageOnlyTransform
class RectangleBorderAugmentation(ImageOnlyTransform):
def __init__(
self,
fill_value = 0,
limit = 0.3,
always_apply=False,
p=1.0,
):
super(RectangleBorderAugmentation, self).__init__(always_apply, p)
        assert 0.0 < limit < 1.0
        self.fill_value = fill_value
        self.limit = limit
def apply(self, image, border_size_limit, **params):
assert len(border_size_limit)==4
border_size = border_size_limit.copy()
border_size[0] *= image.shape[1]
border_size[2] *= image.shape[1]
border_size[1] *= image.shape[0]
border_size[3] *= image.shape[0]
        border_size = border_size.astype(int)
image[:,:border_size[0],:] = self.fill_value
image[:border_size[1],:,:] = self.fill_value
image[:,-border_size[2]:,:] = self.fill_value
image[-border_size[3]:,:,:] = self.fill_value
return image
def get_params(self):
border_size_limit = np.random.uniform(0.0, self.limit, size=4)
return {'border_size_limit': border_size_limit}
def get_transform_init_args_names(self):
return ('fill_value', 'limit')
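if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): plug the transform into an
    # albumentations pipeline and run it on a dummy image.
    pipeline = A.Compose([RectangleBorderAugmentation(limit=0.3, p=1.0)])
    dummy = (np.random.rand(112, 112, 3) * 255).astype(np.uint8)  # stand-in for a face crop
    out = pipeline(image=dummy)['image']
    print(out.shape)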
| insightface/alignment/synthetics/datasets/augs.py/0 | {
"file_path": "insightface/alignment/synthetics/datasets/augs.py",
"repo_id": "insightface",
"token_count": 612
} | 103 |
BATCH_SIZE: 512
DATA:
NUM_FRAMES: 1
SCALE_MID_MEAN: 0.720643
SCALE_MID_STD: 0.058
USE_RANDOM_DIFF: true
NETWORK:
DIS_RES_BLOCKS: 2
DIS_TEMP_RES_BLOCKS: 2
DIS_USE_SPECTRAL_NORM: false
SCALER_INPUT_SIZE: 34
TRAIN:
BOUND_AZIM: 2.44346
BOUND_ELEV: 0.34906585
DIS_LR: 0.0001
LOSS_TYPE: ss_adv
LOSS_WEIGHTS:
- 0.5
- 5.0
- 1.0
- 1.0
MAINNET_CRITICS: 4
NUM_CRITICS: 3
NUM_CRITICS_TEMP: 3
POSE_LR: 0.00015
PRETRAIN_LIFTER: false
SCALE_LOSS_WEIGHTS:
- 0.001
- 1.0
SUBNET_CRITICS: 1
TEMP_LR: 0.0001
SCHEDULER_STEP_SIZE: 5
USE_CYCLE: true
USE_NEW_ROT: false
USE_NEW_TEMP: true
USE_SCALER: true
USE_GT: true
FIX:
FIX_TRAJ: true
FIX_TRAJ_BY_ROT: false
| insightface/body/human_pose/ambiguity_aware/cfg/h36m_gt_scale.yaml/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/cfg/h36m_gt_scale.yaml",
"repo_id": "insightface",
"token_count": 378
} | 104 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import torch.nn as nn
def weight_init(m):
if isinstance(m, nn.Linear):
        nn.init.kaiming_normal_(m.weight)
class Linear(nn.Module):
def __init__(self, linear_size, p_dropout=0.5, spectral_norm=False, use_bn=True):
super(Linear, self).__init__()
self.l_size = linear_size
self.use_bn = use_bn
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(p_dropout)
self.w1 = nn.Linear(self.l_size, self.l_size)
self.batch_norm1 = nn.BatchNorm1d(self.l_size)
self.w2 = nn.Linear(self.l_size, self.l_size)
self.batch_norm2 = nn.BatchNorm1d(self.l_size)
if spectral_norm:
self.w1 = nn.utils.spectral_norm(self.w1)
self.w2 = nn.utils.spectral_norm(self.w2)
def forward(self, x):
y = self.w1(x)
if self.use_bn:
y = self.batch_norm1(y)
y = self.relu(y)
y = self.dropout(y)
y = self.w2(y)
if self.use_bn:
y = self.batch_norm2(y)
y = self.relu(y)
y = self.dropout(y)
out = x + y
return out
class LinearModelBefore(nn.Module):
def __init__(self,
linear_size=1024,
num_stage=4,
p_dropout=0.5):
super(LinearModelBefore, self).__init__()
self.linear_size = linear_size
self.p_dropout = p_dropout
self.num_stage = num_stage
# 2d joints
self.input_size = 17 * 2
# 3d joints
self.output_size = 17 * 1
# process input to linear size
self.w1 = nn.Linear(self.input_size, self.linear_size)
self.batch_norm1 = nn.BatchNorm1d(self.linear_size)
self.linear_stages = []
for l in range(num_stage):
self.linear_stages.append(Linear(self.linear_size, self.p_dropout))
self.linear_stages = nn.ModuleList(self.linear_stages)
# post processing
# self.w2 = nn.Linear(self.linear_size, self.output_size)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout(self.p_dropout)
def forward(self, x):
x = x.view(x.size(0), -1)
# pre-processing
y = self.w1(x)
y = self.batch_norm1(y)
y = self.relu(y)
y = self.dropout(y)
# linear layers
for i in range(self.num_stage):
y = self.linear_stages[i](y)
# y = self.w2(y)
# y should be features, and x is reshaped inputs
# namely x, joints_sc for LinearModelAfter
return y, x
class LinearModelAfter(nn.Module):
def __init__(self):
super(LinearModelAfter, self).__init__()
self.main = nn.Linear(1024, 17 * 1)
def forward(self, x, y):
return self.main(x)
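if __name__ == '__main__':
    # Quick shape check (illustrative only): 17 input joints in 2D, one value per joint out.
    import torch
    before, after = LinearModelBefore(), LinearModelAfter()
    joints_2d = torch.randn(8, 17, 2)        # batch of 2D poses
    feats, flat = before(joints_2d)          # feats: (8, 1024), flat: (8, 34)
    out = after(feats, flat)                 # (8, 17)
    print(feats.shape, out.shape)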
| insightface/body/human_pose/ambiguity_aware/lib/models/simple_model.py/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/lib/models/simple_model.py",
"repo_id": "insightface",
"token_count": 1488
} | 105 |
#!/usr/bin/env python3
# coding=utf-8
import os
import cv2
import random
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
plt.switch_backend('agg')
plt.ioff()
import h5py
from tqdm import trange
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seq_num', type=int, default=1, help='Specify the number of sequences to render')
parser.add_argument('--save_dir', type=str, default="../vis/", help='Specify the directory the save the visualization')
parser.add_argument('--in_filename', type=str, default= "../data/h36m_valid_pred_3d.h5", help="Speicfy the dataset to load from")
args = parser.parse_args()
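# Example invocation (arguments mirror the defaults declared above):
#   python mpi_plot1.py --seq_num 1 --save_dir ../vis/ --in_filename ../data/mpi_valid_pred_3d.h5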
seq_num = args.seq_num
save_dir = args.save_dir
in_filename = args.in_filename
os.makedirs(save_dir, exist_ok=True)
v3d_to_ours = [3, 2, 1, 4, 5, 6, 16, 15, 14, 11, 12, 13, 8, 0, 7, 9, 10]
pairs = [(0, 1), (1, 2), (2, 13), (3, 13), (3, 4), (4, 5), (6, 7), (7, 8), (8, 12), (9, 10),(9, 12), (10, 11),(12, 14), (12, 15), (13, 14), (15, 16)]
pairs_left = [(3, 13), (3, 4), (4, 5), (9, 10), (9, 12), (10, 11)]
pairs_right = [(0, 1), (1, 2), (2, 13), (6, 7), (7, 8), (8, 12)]
colors = {
'pink': np.array([197, 27, 125]), # L lower leg
'light_pink': np.array([233, 163, 201]), # L upper leg
'light_green': np.array([161, 215, 106]), # L lower arm
'green': np.array([77, 146, 33]), # L upper arm
'red': np.array([215, 48, 39]), # head
'light_red': np.array([252, 146, 114]), # head
'light_orange': np.array([252, 141, 89]), # chest
'purple': np.array([118, 42, 131]), # R lower leg
'light_purple': np.array([175, 141, 195]), # R upper
'light_blue': np.array([145, 191, 219]), # R lower arm
'blue': np.array([69, 117, 180]), # R upper arm
'gray': np.array([130, 130, 130]), #
'white': np.array([255, 255, 255]), #
}
jcolors = [
'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink',
'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue',
'purple', 'purple', 'red', 'green', 'green', 'white', 'white'
]
ecolors = {
0: 'light_pink',
1: 'light_pink',
2: 'light_pink',
3: 'pink',
4: 'pink',
5: 'pink',
6: 'light_blue',
7: 'light_blue',
8: 'light_blue',
9: 'blue',
10: 'blue',
11: 'blue',
12: 'purple',
13: 'light_green',
14: 'light_green',
15: 'purple'
}
root = "/home/yuzhenbo/codebase/3D/multipose/data/mpi_inf/"
image_root = root
in_filename = "../data/mpi_valid_pred_3d.h5"
print("Read from", in_filename)
f = h5py.File(in_filename, "r")
imagenames = [name.decode() for name in f['imagename'][:]]
# 2d joints in the order of v3d convention
# poses2d = np.array(f['joint_2d_gt'])[:, v3d_to_ours]
poses2d = np.array(f['joint_2d_gt'])
poses3d = np.array(f['joint_3d_pre'])
poses3d_gt = np.array(f['joint_3d_gt'])
poses3d_gt = poses3d_gt - poses3d_gt[:, 13:14]
f.close()
t = trange(0, len(imagenames))
processed_video_names = []
def plot_skeleton_2d(all_frames, joints_2d):
out_frames = []
radius = max(4, (np.mean(all_frames[0].shape[:2]) * 0.01).astype(int))
for idx in range(len(all_frames)):
for pair in pairs:
i, j = pair
pt1, pt2 = joints_2d[idx, i], joints_2d[idx, j]
x11, y11 = pt1
x22, y22 = pt2
if pair in pairs_left:
color = (205, 0, 0)
elif pair in pairs_right:
color = (0, 205, 0)
else:
color = (0, 165, 255)
cv2.line(all_frames[idx], (int(x11), int(y11)), (int(x22), int(y22)), color, radius-2)
def get_xxyys(names):
xxyys = []
# should be subject, action, camera
splits = names[0].split('/')
video_name = '/'.join(splits[:-1])
part_label_path = osp.join(root, splits[0], 'MySegmentsMat', 'PartLabels',
splits[1] + ("cam" + splits[2]).replace('cam0', '.54138969').replace('cam2','.58860488').replace('cam1', '.55011271').replace('cam3', '.60457274') + ".mat")
f = h5py.File(part_label_path, "r")
for idx, name in enumerate(names):
partmask = f[f['Feat'][idx*30, 0]][()].T
yp, xp = np.where(partmask != 0)
xmin, xmax = np.min(xp), np.max(xp) + 1
ymin, ymax = np.min(yp), np.max(yp) + 1
xxyys.append((xmin, xmax, ymin, ymax))
f.close()
return xxyys
def crop_image(all_frames, xxyys, scale_factor=0.25):
out_frames = []
for frame, xxyy in zip(all_frames, xxyys):
h, w = frame.shape[:2]
xmin, xmax, ymin, ymax = xxyy
xc, yc = (xmin + xmax) / 2, (ymin + ymax) / 2
l = max(xmax - xmin, ymax - ymin)
xmin, xmax = max(0, xc - l/2), min(w, xc + l / 2)
ymin, ymax = max(0, yc - l/2), min(h, yc + l / 2)
xmin, xmax = int(xmin), int(xmax)
ymin, ymax = int(ymin), int(ymax)
frame = frame[ymin:ymax, xmin:xmax, :].copy()
frame = cv2.resize(frame, (int(scale_factor * w), int(scale_factor * h)))
frame = frame[::-1, :, ::-1] / 255
out_frames.append(frame)
return out_frames
for imageid in t:
name = imagenames[imageid]
splits = name.split('/')
video_name = '/'.join(splits[:2])
if len(processed_video_names) == seq_num:
print("Finished! Rendered {} sequences, saved to {}".format(seq_num, save_dir))
break
if video_name in processed_video_names:
continue
else:
processed_video_names.append(video_name)
print(video_name)
recs = [(idx, name) for idx, name in enumerate(imagenames) if video_name in name]
# downsample
recs = recs[::5]
# cand_list = [x*5 for x in [440, 565, 770]]
# cand_list = [200, 250, 300, 350, 400, 450, 500, 520, 550, 590, 620, 660, 700, 740, 770, 800, 830, 845]
# recs = list(filter(lambda x: x[0] in cand_list, recs))
# recs = list(filter(lambda x: x[0] in [65*5, 100*5, 905*5, 1160*5], recs))
recs = sorted(recs, key=lambda x: int(x[1].split('/')[-1].split('_')[1].split('.')[0]))
names_in_video = [rec[1] for rec in recs]
indices_in_video = [rec[0] for rec in recs]
# path_format = osp.join(image_root, splits[0], splits[1], "img_{:06d}.jpg")
poses3d_in_video = poses3d[indices_in_video]
poses2d_in_video = poses2d[indices_in_video]
poses3d_gt_in_video = poses3d_gt[indices_in_video]
all_frames = [cv2.imread(osp.join(image_root, name)) for name in names_in_video]
# all_frames = [cv2.imread(path_format.format(int(name.split('/')[-1])+1)) for name in names_in_video]
print("Ploting 2d skeleton...")
plot_skeleton_2d(all_frames, poses2d_in_video)
scale_factor = 0.2
all_frames = [cv2.resize(frame, (int(scale_factor * frame.shape[1]), int(scale_factor * frame.shape[0])))[::-1, :, ::-1] / 255 for frame in all_frames]
# print("Getting bounding boxes...")
# xxyys = get_xxyys(names_in_video)
# print("Cropping images...")
# all_frames = crop_image(all_frames, xxyys, scale_factor=0.2)
print("Generating gifs...")
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev=10., azim=45.)
lines_3d, lines_3d_gt = [], []
radius = 0.75
initialized = False
num_render = len(names_in_video)
print(num_render, " frames to plot")
def update_video(frame_idx):
global initialized, lines_3d, lines_3d_gt
print("{}/{} ".format(frame_idx, num_render), end='\r')
pose2d = poses2d_in_video[frame_idx]
pose3d = poses3d_in_video[frame_idx]
pose3d_gt = poses3d_gt_in_video[frame_idx]
if not initialized:
for idx, pair in enumerate(pairs):
i, j = pair
if pair in pairs_left:
color = "blue"
elif pair in pairs_right:
color = "green"
else:
color = "darkorange"
# pt1, pt2 = pose3d[i], pose3d[j]
# x11, y11, z11 = pt1
# x22, y22, z22 = pt2
# lines_3d.append(ax.plot([z11, z22], [x11, x22], [-y11, -y22], c='red', linewidth=3, label="pre"))
pt1, pt2 = pose3d_gt[i], pose3d_gt[j]
x11, y11, z11 = pt1
x22, y22, z22 = pt2
lines_3d_gt.append(ax.plot([z11, z22], [x11, x22], [-y11, -y22], c=color, linewidth=3, label="gt"))
# pt1, pt2 = pose3d_ssadv[i], pose3d_ssadv[j]
# x11, y11, z11 = pt1
# x22, y22, z22 = pt2
# lines_3d_ssadv.append(ax.plot([z11, z22], [x11, x22], [-y11, -y22], c="red", linewidth=3, label="ssadv"))
initialized = True
else:
for idx, pair in enumerate(pairs):
i, j = pair
# pt1, pt2 = pose3d[i], pose3d[j]
# x11, y11, z11 = pt1
# x22, y22, z22 = pt2
# lines_3d[idx][0].set_xdata([z11, z22])
# lines_3d[idx][0].set_ydata([x11, x22])
# lines_3d[idx][0].set_3d_properties([-y11, -y22])
pt1, pt2 = pose3d_gt[i], pose3d_gt[j]
x11, y11, z11 = pt1
x22, y22, z22 = pt2
lines_3d_gt[idx][0].set_xdata([z11, z22])
lines_3d_gt[idx][0].set_ydata([x11, x22])
lines_3d_gt[idx][0].set_3d_properties([-y11, -y22])
# pt1, pt2 = pose3d_ssadv[i], pose3d_ssadv[j]
# x11, y11, z11 = pt1
# x22, y22, z22 = pt2
# lines_3d_ssadv[idx][0].set_xdata([z11, z22])
# lines_3d_ssadv[idx][0].set_ydata([x11, x22])
# lines_3d_ssadv[idx][0].set_3d_properties([-y11, -y22])
xroot, yroot, zroot = pose3d_gt[13, 0], -pose3d_gt[13, 1], pose3d_gt[13, 2]
ax.set_ylim3d([-radius+xroot, radius+xroot])
ax.set_zlim3d([-radius+yroot, radius+yroot])
ax.set_xlim3d([-2.5 * radius+zroot, radius+zroot])
ax.get_xaxis().set_ticklabels([])
ax.get_yaxis().set_ticklabels([])
ax.set_zticklabels([])
white = (1.0, 1.0, 1.0, 0.0)
ax.w_xaxis.set_pane_color(white)
ax.w_yaxis.set_pane_color(white)
ax.w_xaxis.line.set_color(white)
ax.w_yaxis.line.set_color(white)
ax.w_zaxis.line.set_color(white)
r = 0.95
xx = np.linspace(-r * radius + xroot, r * radius + xroot, all_frames[frame_idx].shape[1])
yy = np.linspace(-r * radius + yroot, r * radius + yroot, all_frames[frame_idx].shape[0])
xx, yy = np.meshgrid(xx, yy)
zz = np.ones_like(xx) * (-3.2* radius + zroot)
ax.set_xlabel('Z', fontsize=13)
ax.set_ylabel("X", fontsize=13)
ax.set_zlabel("Y", fontsize=13)
ax.plot_surface(zz, xx, yy, rstride=1, cstride=1, facecolors=all_frames[frame_idx], shade=False)
plt.savefig(osp.join(save_dir, f"{video_name.replace('/', '_')}_{frame_idx}.png"))
for idx in range(len(names_in_video)):
update_video(idx)
ani = animation.FuncAnimation(fig, update_video, range(len(names_in_video)), interval=20)
save_name = name.replace('/', '_')
ani.save(osp.join(save_dir, f"{save_name}.gif"), writer='imagemagick', fps=20)
t.set_postfix(index=int(imageid))
| insightface/body/human_pose/ambiguity_aware/scripts/mpi_plot1.py/0 | {
"file_path": "insightface/body/human_pose/ambiguity_aware/scripts/mpi_plot1.py",
"repo_id": "insightface",
"token_count": 5777
} | 106 |
import sys
import os
import argparse
import onnx
import mxnet as mx
from onnx import helper
from onnx import TensorProto
from onnx import numpy_helper
print('mxnet version:', mx.__version__)
print('onnx version:', onnx.__version__)
assert mx.__version__ >= '1.8', 'mxnet version should >= 1.8'
assert onnx.__version__ >= '1.2.1', 'onnx version should >= 1.2.1'
import numpy as np
from mxnet.contrib import onnx as onnx_mxnet
def create_map(graph_member_list):
member_map={}
for n in graph_member_list:
member_map[n.name]=n
return member_map
parser = argparse.ArgumentParser(description='convert arcface models to onnx')
# general
parser.add_argument('params', default='./r100a/model-0000.params', help='mxnet params to load.')
parser.add_argument('output', default='./r100a.onnx', help='path to write onnx model.')
parser.add_argument('--eps', default=1.0e-8, type=float, help='eps for weights.')
parser.add_argument('--input-shape', default='3,112,112', help='input shape.')
args = parser.parse_args()
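# Example invocation (paths follow the argparse defaults above and are illustrative):
#   python mxnet_to_ort.py ./r100a/model-0000.params ./r100a.onnx --input-shape 3,112,112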
input_shape = (1,) + tuple( [int(x) for x in args.input_shape.split(',')] )
params_file = args.params
pos = params_file.rfind('-')
prefix = params_file[:pos]
epoch = int(params_file[pos+1:pos+5])
sym_file = prefix + "-symbol.json"
assert os.path.exists(sym_file)
assert os.path.exists(params_file)
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
eps = args.eps
arg = {}
aux = {}
invalid = 0
ac = 0
for k in arg_params:
v = arg_params[k]
nv = v.asnumpy()
#print(k, nv.dtype)
nv = nv.astype(np.float32)
ac += nv.size
invalid += np.count_nonzero(np.abs(nv)<eps)
nv[np.abs(nv) < eps] = 0.0
arg[k] = mx.nd.array(nv, dtype='float32')
print(invalid, ac)
arg_params = arg
invalid = 0
ac = 0
for k in aux_params:
v = aux_params[k]
nv = v.asnumpy().astype(np.float32)
ac += nv.size
invalid += np.count_nonzero(np.abs(nv)<eps)
nv[np.abs(nv) < eps] = 0.0
aux[k] = mx.nd.array(nv, dtype='float32')
print(invalid, ac)
aux_params = aux
all_args = {}
all_args.update(arg_params)
all_args.update(aux_params)
converted_model_path = onnx_mxnet.export_model(sym, all_args, [input_shape], np.float32, args.output, opset_version=11)
model = onnx.load(args.output)
graph = model.graph
input_map = create_map(graph.input)
node_map = create_map(graph.node)
init_map = create_map(graph.initializer)
#fix PRelu issue
for input_name in input_map.keys():
if input_name.endswith('_gamma'):
node_name = input_name[:-6]
if not node_name in node_map:
continue
node = node_map[node_name]
if node.op_type!='PRelu':
continue
input_shape = input_map[input_name].type.tensor_type.shape.dim
input_dim_val=input_shape[0].dim_value
graph.initializer.remove(init_map[input_name])
weight_array = numpy_helper.to_array(init_map[input_name])
b=[]
for w in weight_array:
b.append(w)
new_nv = helper.make_tensor(input_name, TensorProto.FLOAT, [input_dim_val,1,1], b)
graph.initializer.extend([new_nv])
for init_name in init_map.keys():
weight_array = numpy_helper.to_array(init_map[init_name])
assert weight_array.dtype==np.float32
if init_name in input_map:
graph.input.remove(input_map[init_name])
#support batch-inference
graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
onnx.save(model, args.output)
| insightface/challenges/iccv21-mfr/mxnet_to_ort.py/0 | {
"file_path": "insightface/challenges/iccv21-mfr/mxnet_to_ort.py",
"repo_id": "insightface",
"token_count": 1493
} | 107 |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
def find_in_path(name, path):
"Find a file in a search path"
# Adapted fom
# http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc',
os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError(
'The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME'
)
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {
'home': home,
'nvcc': nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')
}
for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError(
'The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
# Test if CUDA could be found
try:
CUDA = locate_cuda()
except EnvironmentError:
CUDA = None
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
    # tell the compiler it can process .cu files
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension("bbox", ["bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs=[numpy_include]),
Extension("anchors", ["anchors.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs=[numpy_include]),
Extension("cpu_nms", ["cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs=[numpy_include]),
]
if CUDA is not None:
ext_modules.append(
Extension(
'gpu_nms',
['nms_kernel.cu', 'gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with
# gcc the implementation of this trick is in customize_compiler() below
extra_compile_args={
'gcc': ["-Wno-unused-function"],
'nvcc': [
'-arch=sm_35', '--ptxas-options=-v', '-c',
'--compiler-options', "'-fPIC'"
]
},
include_dirs=[numpy_include, CUDA['include']]))
else:
print('Skipping GPU_NMS')
setup(
name='frcnn_cython',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
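# The extensions are typically built in place, e.g.:
#   python setup.py build_ext --inplace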
| insightface/detection/retinaface/rcnn/cython/setup.py/0 | {
"file_path": "insightface/detection/retinaface/rcnn/cython/setup.py",
"repo_id": "insightface",
"token_count": 2399
} | 108 |
https://github.com/pdollar/coco/commit/336d2a27c91e3c0663d2dcf0b13574674d30f88e
| insightface/detection/retinaface/rcnn/pycocotools/UPSTREAM_REV/0 | {
"file_path": "insightface/detection/retinaface/rcnn/pycocotools/UPSTREAM_REV",
"repo_id": "insightface",
"token_count": 42
} | 109 |
import mxnet as mx
import mxnet.ndarray as nd
import mxnet.gluon as gluon
import mxnet.gluon.nn as nn
import mxnet.autograd as ag
import numpy as np
from rcnn.config import config
from rcnn.PY_OP import rpn_fpn_ohem, rpn_fpn_ohem2, rpn_fpn_ohem3
USE_DCN = False
MM = 1.0
def ConvBlock(channels, kernel_size, strides, **kwargs):
out = nn.HybridSequential(**kwargs)
with out.name_scope():
out.add(
nn.Conv2D(channels, kernel_size, strides=strides, padding=1, use_bias=False),
nn.BatchNorm(scale=True),
nn.Activation('relu')
)
return out
def Conv1x1(channels, is_linear=False, **kwargs):
out = nn.HybridSequential(**kwargs)
with out.name_scope():
out.add(
nn.Conv2D(channels, 1, padding=0, use_bias=False),
nn.BatchNorm(scale=True)
)
if not is_linear:
out.add(nn.Activation('relu'))
return out
def DWise(channels, strides, kernel_size=3, **kwargs):
out = nn.HybridSequential(**kwargs)
with out.name_scope():
out.add(
nn.Conv2D(channels, kernel_size, strides=strides, padding=kernel_size // 2, groups=channels, use_bias=False),
nn.BatchNorm(scale=True),
nn.Activation('relu')
)
return out
class SepCONV(nn.HybridBlock):
def __init__(self, inp, output, kernel_size, depth_multiplier=1, with_bn=True, **kwargs):
super(SepCONV, self).__init__(**kwargs)
with self.name_scope():
self.net = nn.HybridSequential()
cn = int(inp*depth_multiplier)
if output is None:
self.net.add(
nn.Conv2D(in_channels=inp, channels=cn, groups=inp, kernel_size=kernel_size, strides=(1,1), padding=kernel_size // 2
, use_bias=not with_bn)
)
else:
self.net.add(
nn.Conv2D(in_channels=inp, channels=cn, groups=inp, kernel_size=kernel_size, strides=(1,1), padding=kernel_size // 2
, use_bias=False),
nn.BatchNorm(),
nn.Activation('relu'),
nn.Conv2D(in_channels=cn, channels=output, kernel_size=(1,1), strides=(1,1)
, use_bias=not with_bn)
)
self.with_bn = with_bn
self.act = nn.Activation('relu')
if with_bn:
self.bn = nn.BatchNorm()
def hybrid_forward(self, F ,x):
x = self.net(x)
if self.with_bn:
x = self.bn(x)
if self.act is not None:
x = self.act(x)
return x
class ExpandedConv(nn.HybridBlock):
def __init__(self, inp, oup, t, strides, kernel=3, same_shape=True, **kwargs):
super(ExpandedConv, self).__init__(**kwargs)
self.same_shape = same_shape
self.strides = strides
with self.name_scope():
self.bottleneck = nn.HybridSequential()
self.bottleneck.add(
Conv1x1(inp*t, prefix="expand_"),
DWise(inp*t, self.strides, kernel, prefix="dwise_"),
Conv1x1(oup, is_linear=True, prefix="linear_")
)
def hybrid_forward(self, F, x):
out = self.bottleneck(x)
if self.strides == 1 and self.same_shape:
out = F.elemwise_add(out, x)
return out
def ExpandedConvSequence(t, k, inp, oup, repeats, first_strides, **kwargs):
seq = nn.HybridSequential(**kwargs)
with seq.name_scope():
seq.add(ExpandedConv(inp, oup, t, first_strides, k, same_shape=False))
curr_inp = oup
for i in range(1, repeats):
seq.add(ExpandedConv(curr_inp, oup, t, 1))
curr_inp = oup
return seq
class Mnasnet(nn.HybridBlock):
def __init__(self, multiplier=1.0, **kwargs):
super(Mnasnet, self).__init__(**kwargs)
mm = multiplier
self.first_oup = 32
self.interverted_residual_setting = [
            # t, c, n, s, k, prefix
[3, int(24*mm), 3, 2, 3, "stage2_"], # -> 56x56
[3, int(40*mm), 3, 2, 5, "stage3_"], # -> 28x28
[6, int(80*mm), 3, 2, 5, "stage4_1_"], # -> 14x14
[6, int(96*mm), 2, 1, 3, "stage4_2_"], # -> 14x14
[6, int(192*mm), 4, 2, 5, "stage5_1_"], # -> 7x7
[6, int(320*mm), 1, 1, 3, "stage5_2_"], # -> 7x7
]
self.last_channels = 1280
with self.name_scope():
self.features = nn.HybridSequential()
self.features.add(ConvBlock(self.first_oup, 3, 2, prefix="stage1_conv0_"))
self.features.add(SepCONV(self.first_oup, 16, 3, prefix="stage1_sepconv0_"))
inp = 16
for i, (t, c, n, s, k, prefix) in enumerate(self.interverted_residual_setting):
oup = c
self.features.add(ExpandedConvSequence(t, k, inp, oup, n, s, prefix=prefix))
inp = oup
self.features.add(Conv1x1(self.last_channels, prefix="stage5_3_"))
def hybrid_forward(self, F, x):
x = self.features(x)
return x
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
stride=(1,1), act_type="relu", bias_wd_mult=0.0, dcn=False):
weight = mx.symbol.Variable(name="{}_weight".format(name),
init=mx.init.Normal(0.01), attr={'__lr_mult__': '1.0'})
bias = mx.symbol.Variable(name="{}_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
if not dcn:
conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
stride=stride, num_filter=num_filter, name="{}".format(name), weight = weight, bias=bias)
else:
assert kernel[0]==3 and kernel[1]==3
num_group = 1
f = num_group*18
offset_weight = mx.symbol.Variable(name="{}_offset_weight".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '1.0'})
offset_bias = mx.symbol.Variable(name="{}_offset_bias".format(name),
init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
conv_offset = mx.symbol.Convolution(name=name+'_offset', data = from_layer, weight=offset_weight, bias=offset_bias,
num_filter=f, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
conv = mx.contrib.symbol.DeformableConvolution(name=name, data=from_layer, offset=conv_offset, weight=weight, bias=bias,
num_filter=num_filter, pad=(1,1), kernel=(3, 3), num_deformable_group=num_group, stride=(1, 1), no_bias=False)
if len(act_type)>0:
relu = mx.symbol.Activation(data=conv, act_type=act_type, \
name="{}_{}".format(name, act_type))
else:
relu = conv
return relu
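# SSH-style context module: stacks 3x3 convolutions on a dimension-reduced map to
# emulate 5x5 and 7x7 receptive fields; ssh_detection_module below concatenates these
# two context branches with a plain 3x3 branch.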
def ssh_context_module(body, num_filters, name):
conv_dimred = conv_act_layer(body, name+'_conv1',
num_filters, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', dcn=False)
conv5x5 = conv_act_layer(conv_dimred, name+'_conv2',
num_filters, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='', dcn=USE_DCN)
conv7x7_1 = conv_act_layer(conv_dimred, name+'_conv3_1',
num_filters, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', dcn=False)
conv7x7 = conv_act_layer(conv7x7_1, name+'_conv3_2',
num_filters, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='', dcn=USE_DCN)
return (conv5x5, conv7x7)
def ssh_detection_module(body, num_filters, name):
conv3x3 = conv_act_layer(body, name+'_conv1',
num_filters, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='', dcn=USE_DCN)
conv5x5, conv7x7 = ssh_context_module(body, num_filters//2, name+'_context')
ret = mx.sym.concat(*[conv3x3, conv5x5, conv7x7], dim=1, name = name+'_concat')
ret = mx.symbol.Activation(data=ret, act_type='relu', name=name+'_concat_relu')
return ret
def conv_bn(input, filter, ksize, stride, padding, act_type='relu', name=''):
conv = mx.symbol.Convolution(data=input, kernel=(ksize,ksize), pad=(padding,padding), \
stride=(stride,stride), num_filter=filter, name=name+"_conv")
ret = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=2e-5, momentum=0.9, name=name + '_bn')
if act_type is not None:
ret = mx.symbol.Activation(data=ret, act_type=act_type, \
name="{}_{}".format(name, act_type))
return ret
def cpm(input, name):
# residual
branch1 = conv_bn(input, 1024, 1, 1, 0, act_type=None, name=name+"_branch1")
branch2a = conv_bn(input, 256, 1, 1, 0, act_type='relu', name=name+"_branch2a")
branch2b = conv_bn(branch2a, 256, 3, 1, 1, act_type='relu', name=name+"_branch2b")
branch2c = conv_bn(branch2b, 1024, 1, 1, 0, act_type=None, name=name+"_branch2c")
sum = branch1 + branch2c
rescomb = mx.symbol.Activation(data=sum, act_type='relu', name="%s_relu2"%(name))
ssh_out = ssh_detection_module(rescomb, 256, name=name+"_ssh")
return ssh_out
def get_mnet_conv(data):
mm = MM
net = Mnasnet(mm, prefix="")
body = net(data)
all_layers = body.get_internals()
#print(all_layers)
c1 = all_layers['stage3_expandedconv2_elemwise_add0_output']
c2 = all_layers['stage4_2_expandedconv1_elemwise_add0_output']
#c3 = all_layers['stage5_3_relu0_fwd_output']
c3 = all_layers['stage5_2_expandedconv0_linear_batchnorm0_fwd_output']
F1 = int(256*mm)
F2 = int(128*mm)
_bwm = 1.0
conv4_128 = conv_act_layer(c1, 'ssh_m1_red_conv',
F2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu', bias_wd_mult=_bwm)
conv5_128 = conv_act_layer(c2, 'ssh_m2_red_conv',
F2, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu', bias_wd_mult=_bwm)
conv5_128_up = mx.symbol.Deconvolution(data=conv5_128, num_filter=F2, kernel=(4,4), stride=(2, 2), pad=(1,1),
num_group = F2, no_bias = True, attr={'__lr_mult__': '0.0', '__wd_mult__': '0.0'},
name='ssh_m2_red_upsampling')
#conv5_128_up = mx.symbol.UpSampling(conv5_128, scale=2, sample_type='nearest', workspace=512, name='ssh_m2_red_up', num_args=1)
conv4_128 = mx.symbol.Crop(*[conv4_128, conv5_128_up])
#conv5_128_up = mx.symbol.Crop(*[conv5_128_up, conv4_128])
conv_sum = conv4_128+conv5_128_up
#conv_sum = conv_1x1
m1_conv = conv_act_layer(conv_sum, 'ssh_m1_conv',
F2, kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', bias_wd_mult=_bwm)
m1 = ssh_detection_module(m1_conv, F2, 'ssh_m1_det')
m2 = ssh_detection_module(c2, F1, 'ssh_m2_det')
m3 = ssh_detection_module(c3, F1, 'ssh_m3_det')
return {8: m1, 16:m2, 32: m3}
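# get_out builds the per-stride head on top of the features returned above: a
# 1x1 conv for 2*num_anchors class scores (optionally using the maxout trick
# over extra background/foreground channels when config.USE_MAXOUT is set), a
# 1x1 conv for 4*num_anchors bbox deltas, and, if landmark=True, a 1x1 conv for
# 10*num_anchors landmark offsets. With RPN_ENABLE_OHEM>=2 the custom
# rpn_fpn_ohem3 op re-weights anchors before the softmax and smooth-L1 losses
# are attached.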
def get_out(conv_fpn_feat, prefix, stride, landmark=False, lr_mult=1.0):
A = config.NUM_ANCHORS
ret_group = []
num_anchors = config.RPN_ANCHOR_CFG[str(stride)]['NUM_ANCHORS']
label = mx.symbol.Variable(name='%s_label_stride%d'%(prefix,stride))
bbox_target = mx.symbol.Variable(name='%s_bbox_target_stride%d'%(prefix,stride))
bbox_weight = mx.symbol.Variable(name='%s_bbox_weight_stride%d'%(prefix,stride))
if landmark:
landmark_target = mx.symbol.Variable(name='%s_landmark_target_stride%d'%(prefix,stride))
landmark_weight = mx.symbol.Variable(name='%s_landmark_weight_stride%d'%(prefix,stride))
rpn_relu = conv_fpn_feat[stride]
maxout_stat = 0
if config.USE_MAXOUT>=1 and stride==config.RPN_FEAT_STRIDE[-1]:
maxout_stat = 1
if config.USE_MAXOUT>=2 and stride!=config.RPN_FEAT_STRIDE[-1]:
maxout_stat = 2
if maxout_stat==0:
rpn_cls_score = conv_act_layer(rpn_relu, '%s_rpn_cls_score_stride%d'%(prefix, stride), 2*num_anchors,
kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
elif maxout_stat==1:
cls_list = []
for a in range(num_anchors):
rpn_cls_score_bg = conv_act_layer(rpn_relu, '%s_rpn_cls_score_stride%d_anchor%d_bg'%(prefix,stride,a), 3,
kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
rpn_cls_score_bg = mx.sym.max(rpn_cls_score_bg, axis=1, keepdims=True)
cls_list.append(rpn_cls_score_bg)
rpn_cls_score_fg = conv_act_layer(rpn_relu, '%s_rpn_cls_score_stride%d_anchor%d_fg'%(prefix,stride,a), 1,
kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
cls_list.append(rpn_cls_score_fg)
rpn_cls_score = mx.sym.concat(*cls_list, dim=1, name='%s_rpn_cls_score_stride%d'%(prefix,stride))
else:
cls_list = []
for a in range(num_anchors):
rpn_cls_score_bg = conv_act_layer(rpn_relu, '%s_rpn_cls_score_stride%d_anchor%d_bg'%(prefix,stride,a), 1,
kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
cls_list.append(rpn_cls_score_bg)
rpn_cls_score_fg = conv_act_layer(rpn_relu, '%s_rpn_cls_score_stride%d_anchor%d_fg'%(prefix,stride,a), 3,
kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
rpn_cls_score_fg = mx.sym.max(rpn_cls_score_fg, axis=1, keepdims=True)
cls_list.append(rpn_cls_score_fg)
rpn_cls_score = mx.sym.concat(*cls_list, dim=1, name='%s_rpn_cls_score_stride%d'%(prefix,stride))
rpn_bbox_pred = conv_act_layer(rpn_relu, '%s_rpn_bbox_pred_stride%d'%(prefix,stride), 4*num_anchors,
kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(data=rpn_cls_score,
shape=(0, 2, -1),
name="%s_rpn_cls_score_reshape_stride%s" % (prefix,stride))
rpn_bbox_pred_reshape = mx.symbol.Reshape(data=rpn_bbox_pred,
shape=(0, 0, -1),
name="%s_rpn_bbox_pred_reshape_stride%s" % (prefix,stride))
if landmark:
rpn_landmark_pred = conv_act_layer(rpn_relu, '%s_rpn_landmark_pred_stride%d'%(prefix,stride), 10*num_anchors,
kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
rpn_landmark_pred_reshape = mx.symbol.Reshape(data=rpn_landmark_pred,
shape=(0, 0, -1),
name="%s_rpn_landmark_pred_reshape_stride%s" % (prefix,stride))
if config.TRAIN.RPN_ENABLE_OHEM>=2:
label, anchor_weight = mx.sym.Custom(op_type='rpn_fpn_ohem3', stride=int(stride), network=config.network, dataset=config.dataset, prefix=prefix, cls_score=rpn_cls_score_reshape, labels = label)
_bbox_weight = mx.sym.tile(anchor_weight, (1,1,4))
_bbox_weight = _bbox_weight.reshape((0, -1, A * 4)).transpose((0,2,1))
bbox_weight = mx.sym.elemwise_mul(bbox_weight, _bbox_weight, name='%s_bbox_weight_mul_stride%s'%(prefix,stride))
if landmark:
_landmark_weight = mx.sym.tile(anchor_weight, (1,1,10))
_landmark_weight = _landmark_weight.reshape((0, -1, A * 10)).transpose((0,2,1))
landmark_weight = mx.sym.elemwise_mul(landmark_weight, _landmark_weight, name='%s_landmark_weight_mul_stride%s'%(prefix,stride))
#if not config.FACE_LANDMARK:
# label, bbox_weight = mx.sym.Custom(op_type='rpn_fpn_ohem', stride=int(stride), cls_score=rpn_cls_score_reshape, bbox_weight = bbox_weight , labels = label)
#else:
# label, bbox_weight, landmark_weight = mx.sym.Custom(op_type='rpn_fpn_ohem2', stride=int(stride), cls_score=rpn_cls_score_reshape, bbox_weight = bbox_weight, landmark_weight=landmark_weight, labels = label)
#cls loss
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape,
label=label,
multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1,
grad_scale = lr_mult,
name='%s_rpn_cls_prob_stride%d'%(prefix,stride))
ret_group.append(rpn_cls_prob)
ret_group.append(mx.sym.BlockGrad(label))
#bbox loss
bbox_diff = rpn_bbox_pred_reshape-bbox_target
bbox_diff = bbox_diff * bbox_weight
rpn_bbox_loss_ = mx.symbol.smooth_l1(name='%s_rpn_bbox_loss_stride%d_'%(prefix,stride), scalar=3.0, data=bbox_diff)
rpn_bbox_loss = mx.sym.MakeLoss(name='%s_rpn_bbox_loss_stride%d'%(prefix,stride), data=rpn_bbox_loss_, grad_scale=1.0*lr_mult / (config.TRAIN.RPN_BATCH_SIZE))
ret_group.append(rpn_bbox_loss)
ret_group.append(mx.sym.BlockGrad(bbox_weight))
#landmark loss
if landmark:
landmark_diff = rpn_landmark_pred_reshape-landmark_target
landmark_diff = landmark_diff * landmark_weight
rpn_landmark_loss_ = mx.symbol.smooth_l1(name='%s_rpn_landmark_loss_stride%d_'%(prefix,stride), scalar=3.0, data=landmark_diff)
rpn_landmark_loss = mx.sym.MakeLoss(name='%s_rpn_landmark_loss_stride%d'%(prefix,stride), data=rpn_landmark_loss_, grad_scale=0.5*lr_mult / (config.TRAIN.RPN_BATCH_SIZE))
ret_group.append(rpn_landmark_loss)
ret_group.append(mx.sym.BlockGrad(landmark_weight))
return ret_group
def get_mnet_train():
data = mx.symbol.Variable(name="data")
# shared convolutional layers
conv_fpn_feat = get_mnet_conv(data)
ret_group = []
for stride in config.RPN_FEAT_STRIDE:
ret = get_out(conv_fpn_feat, 'face', stride, config.FACE_LANDMARK, lr_mult=1.0)
ret_group += ret
if config.HEAD_BOX:
ret = get_out(conv_fpn_feat, 'head', stride, False, lr_mult=1.0)
ret_group += ret
return mx.sym.Group(ret_group)
| insightface/detection/retinaface/rcnn/symbol/symbol_mnet.py.bak/0 | {
"file_path": "insightface/detection/retinaface/rcnn/symbol/symbol_mnet.py.bak",
"repo_id": "insightface",
"token_count": 8977
} | 110 |
from __future__ import print_function
import argparse
import sys
import os
import time
import numpy as np
import mxnet as mx
from mxnet import ndarray as nd
import cv2
from rcnn.logger import logger
#from rcnn.config import config, default, generate_config
#from rcnn.tools.test_rcnn import test_rcnn
#from rcnn.tools.test_rpn import test_rpn
from rcnn.processing.bbox_transform import nonlinear_pred, clip_boxes, landmark_pred
from rcnn.processing.generate_anchor import generate_anchors_fpn, anchors_plane
from rcnn.processing.nms import gpu_nms_wrapper
from rcnn.processing.bbox_transform import bbox_overlaps
from rcnn.dataset import retinaface
from retinaface import RetinaFace
def parse_args():
parser = argparse.ArgumentParser(
description='Test widerface by retinaface detector')
# general
parser.add_argument('--network',
help='network name',
default='net3',
type=str)
parser.add_argument('--dataset',
help='dataset name',
default='retinaface',
type=str)
parser.add_argument('--image-set',
help='image_set name',
default='val',
type=str)
parser.add_argument('--root-path',
help='output data folder',
default='./data',
type=str)
parser.add_argument('--dataset-path',
help='dataset path',
default='./data/retinaface',
type=str)
parser.add_argument('--gpu',
help='GPU device to test with',
default=0,
type=int)
# testing
parser.add_argument('--prefix',
help='model to test with',
default='',
type=str)
parser.add_argument('--epoch',
help='model to test with',
default=0,
type=int)
parser.add_argument('--output',
help='output folder',
default='./wout',
type=str)
parser.add_argument('--nocrop', help='', action='store_true')
parser.add_argument('--thresh',
help='valid detection threshold',
default=0.02,
type=float)
parser.add_argument('--mode',
help='test mode: 0 for fast (single scale), 1 for accurate '
'(image pyramid + bbox vote), 2 for pyramid without bbox vote',
default=1,
type=int)
#parser.add_argument('--pyramid', help='enable pyramid test', action='store_true')
#parser.add_argument('--bbox-vote', help='', action='store_true')
parser.add_argument('--part', help='', default=0, type=int)
parser.add_argument('--parts', help='', default=1, type=int)
args = parser.parse_args()
return args
detector = None
args = None
imgid = -1
def get_boxes(roi, pyramid):
global imgid
im = cv2.imread(roi['image'])
do_flip = False
if not pyramid:
target_size = 1200
max_size = 1600
#do_flip = True
target_size = 1504
max_size = 2000
target_size = 1600
max_size = 2150
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
scales = [im_scale]
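# note: only the last target_size/max_size pair assigned above (1600/2150)
# takes effect; the earlier 1200/1600 and 1504/2000 assignments are overridden.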
else:
do_flip = True
#TEST_SCALES = [500, 800, 1200, 1600]
TEST_SCALES = [500, 800, 1100, 1400, 1700]
target_size = 800
max_size = 1200
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
scales = [
float(scale) / target_size * im_scale for scale in TEST_SCALES
]
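# Worked example (a sketch with a hypothetical input size): for an image whose
# shorter side is 1024 and longer side is 1365, im_scale = 800/1024 = 0.78125
# (the max_size cap of 1200 is not hit), so the pyramid scales become roughly
# [0.49, 0.78, 1.07, 1.37, 1.66], i.e. TEST_SCALES/800 * im_scale.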
boxes, landmarks = detector.detect(im,
threshold=args.thresh,
scales=scales,
do_flip=do_flip)
#print(boxes.shape, landmarks.shape)
if imgid >= 0 and imgid < 100:
font = cv2.FONT_HERSHEY_SIMPLEX
for i in range(boxes.shape[0]):
box = boxes[i]
ibox = box[0:4].copy().astype(np.int)
cv2.rectangle(im, (ibox[0], ibox[1]), (ibox[2], ibox[3]),
(255, 0, 0), 2)
#print('box', ibox)
#if len(ibox)>5:
# for l in range(5):
# pp = (ibox[5+l*2], ibox[6+l*2])
# cv2.circle(im, (pp[0], pp[1]), 1, (0, 0, 255), 1)
blur = box[5]
k = "%.3f" % blur
cv2.putText(im, k, (ibox[0] + 2, ibox[1] + 14), font, 0.6,
(0, 255, 0), 2)
#landmarks = box[6:21].reshape( (5,3) )
if landmarks is not None:
for l in range(5):
color = (0, 255, 0)
landmark = landmarks[i][l]
pp = (int(landmark[0]), int(landmark[1]))
if landmark[2] - 0.5 < 0.0:
color = (0, 0, 255)
cv2.circle(im, (pp[0], pp[1]), 1, color, 2)
filename = './testimages/%d.jpg' % imgid
cv2.imwrite(filename, im)
print(filename, 'wrote')
imgid += 1
return boxes
def test(args):
print('test with', args)
global detector
output_folder = args.output
if not os.path.exists(output_folder):
os.mkdir(output_folder)
detector = RetinaFace(args.prefix,
args.epoch,
args.gpu,
network=args.network,
nocrop=args.nocrop,
vote=args.bbox_vote)
imdb = eval(args.dataset)(args.image_set, args.root_path,
args.dataset_path)
roidb = imdb.gt_roidb()
gt_overlaps = np.zeros(0)
overall = [0.0, 0.0]
gt_max = np.array((0.0, 0.0))
num_pos = 0
print('roidb size', len(roidb))
for i in range(len(roidb)):
if i % args.parts != args.part:
continue
#if i%10==0:
# print('processing', i, file=sys.stderr)
roi = roidb[i]
boxes = get_boxes(roi, args.pyramid)
if 'boxes' in roi:
gt_boxes = roi['boxes'].copy()
gt_areas = (gt_boxes[:, 2] - gt_boxes[:, 0] +
1) * (gt_boxes[:, 3] - gt_boxes[:, 1] + 1)
num_pos += gt_boxes.shape[0]
overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
#print(im_info, gt_boxes.shape, boxes.shape, overlaps.shape, file=sys.stderr)
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
if boxes.shape[0] > 0:
_gt_overlaps = overlaps.max(axis=0)
#print('max_overlaps', _gt_overlaps, file=sys.stderr)
for j in range(len(_gt_overlaps)):
if _gt_overlaps[j] > 0.5:
continue
#print(j, 'failed', gt_boxes[j], 'max_overlap:', _gt_overlaps[j], file=sys.stderr)
# append recorded IoU coverage level
found = (_gt_overlaps > 0.5).sum()
recall = found / float(gt_boxes.shape[0])
#print('recall', _recall, gt_boxes.shape[0], boxes.shape[0], gt_areas, 'num:', i, file=sys.stderr)
overall[0] += found
overall[1] += gt_boxes.shape[0]
#gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
#_recall = (gt_overlaps >= threshold).sum() / float(num_pos)
recall_all = float(overall[0]) / overall[1]
#print('recall_all', _recall, file=sys.stderr)
print('[%d]' % i,
'recall',
recall, (gt_boxes.shape[0], boxes.shape[0]),
'all:',
recall_all,
file=sys.stderr)
else:
print('[%d]' % i, 'detect %d faces' % boxes.shape[0])
_vec = roidb[i]['image'].split('/')
out_dir = os.path.join(output_folder, _vec[-2])
if not os.path.exists(out_dir):
os.mkdir(out_dir)
out_file = os.path.join(out_dir, _vec[-1].replace('jpg', 'txt'))
with open(out_file, 'w') as f:
name = '/'.join(roidb[i]['image'].split('/')[-2:])
f.write("%s\n" % (name))
f.write("%d\n" % (boxes.shape[0]))
for b in range(boxes.shape[0]):
box = boxes[b]
f.write(
"%d %d %d %d %g \n" %
(box[0], box[1], box[2] - box[0], box[3] - box[1], box[4]))
def main():
global args
args = parse_args()
args.pyramid = False
args.bbox_vote = False
if args.mode == 1:
args.pyramid = True
args.bbox_vote = True
elif args.mode == 2:
args.pyramid = True
args.bbox_vote = False
logger.info('Called with argument: %s' % args)
test(args)
if __name__ == '__main__':
main()
| insightface/detection/retinaface/test_widerface.py/0 | {
"file_path": "insightface/detection/retinaface/test_widerface.py",
"repo_id": "insightface",
"token_count": 5389
} | 111 |
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
| insightface/detection/scrfd/configs/_base_/datasets/coco_detection.py/0 | {
"file_path": "insightface/detection/scrfd/configs/_base_/datasets/coco_detection.py",
"repo_id": "insightface",
"token_count": 795
} | 112 |
import warnings
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmdet.core import get_classes
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
"""Initialize a detector from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
cfg_options (dict): Options to override some settings in the used
config.
Returns:
nn.Module: The constructed detector.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
if cfg_options is not None:
config.merge_from_dict(cfg_options)
config.model.pretrained = None
model = build_detector(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
map_loc = 'cpu' if device == 'cpu' else None
checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
warnings.simplefilter('once')
warnings.warn('Class names are not saved in the checkpoint\'s '
'meta data, use COCO classes by default.')
model.CLASSES = get_classes('coco')
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
class LoadImage(object):
"""A simple pipeline to load image."""
def __call__(self, results):
"""Call function to load images into results.
Args:
results (dict): A result dict contains the file name
of the image to be read.
Returns:
dict: ``results`` will be returned containing loaded image.
"""
if isinstance(results['img'], str):
results['filename'] = results['img']
results['ori_filename'] = results['img']
else:
results['filename'] = None
results['ori_filename'] = None
img = mmcv.imread(results['img'])
results['img'] = img
results['img_fields'] = ['img']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
return results
def inference_detector(model, img):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
img (str or ndarray): Either an image file path or a loaded image.
Returns:
The detection result of the given image.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
test_pipeline = Compose(cfg.data.test.pipeline)
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
if next(model.parameters()).is_cuda:
# scatter to specified GPU
data = scatter(data, [device])[0]
else:
for m in model.modules():
assert not isinstance(
m, RoIPool
), 'CPU inference with RoIPool is not supported currently.'
# just get the actual data from DataContainer
data['img_metas'] = data['img_metas'][0].data
# forward the model
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)[0]
return result
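# Minimal usage sketch (the config and checkpoint paths below are hypothetical
# placeholders, not files shipped with this repo):
# >>> model = init_detector('configs/some_cfg.py', 'some_ckpt.pth',
# ... device='cuda:0')
# >>> result = inference_detector(model, 'demo/demo.jpg')
# >>> show_result_pyplot(model, 'demo/demo.jpg', result, score_thr=0.3)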
async def async_inference_detector(model, img):
"""Async inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
img (str | ndarray): Either image files or loaded images.
Returns:
Awaitable detection results.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# prepare data
if isinstance(img, np.ndarray):
# directly add img
data = dict(img=img)
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
test_pipeline = Compose(cfg.data.test.pipeline)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# We don't restore `torch.is_grad_enabled()` value during concurrent
# inference since execution can overlap
torch.set_grad_enabled(False)
result = await model.aforward_test(rescale=True, **data)
return result
def show_result_pyplot(model,
img,
result,
score_thr=0.3,
fig_size=(15, 10),
title='result',
block=True):
"""Visualize the detection results on the image.
Args:
model (nn.Module): The loaded detector.
img (str or np.ndarray): Image filename or loaded image.
result (tuple[list] or list): The detection result, can be either
(bbox, segm) or just bbox.
score_thr (float): The threshold to visualize the bboxes and masks.
fig_size (tuple): Figure size of the pyplot figure.
title (str): Title of the pyplot figure.
block (bool): Whether to block GUI.
"""
if hasattr(model, 'module'):
model = model.module
img = model.show_result(img, result, score_thr=score_thr, show=False)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
plt.title(title)
plt.tight_layout()
plt.show(block=block)
| insightface/detection/scrfd/mmdet/apis/inference.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/apis/inference.py",
"repo_id": "insightface",
"token_count": 2670
} | 113 |
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class GridAssigner(BaseAssigner):
"""Assign a corresponding gt bbox or background to each bbox.
Each proposals will be assigned with `-1`, `0`, or a positive integer
indicating the ground truth index.
- -1: don't care
- 0: negative sample, no assigned gt
- positive integer: positive sample, index (1-based) of assigned gt
Args:
pos_iou_thr (float): IoU threshold for positive bboxes.
neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
min_pos_iou (float): Minimum iou for a bbox to be considered as a
positive bbox. Positive samples can have smaller IoU than
pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
gt_max_assign_all (bool): Whether to assign all bboxes with the same
highest overlap with some gt to that gt.
"""
def __init__(self,
pos_iou_thr,
neg_iou_thr,
min_pos_iou=.0,
gt_max_assign_all=True,
iou_calculator=dict(type='BboxOverlaps2D')):
self.pos_iou_thr = pos_iou_thr
self.neg_iou_thr = neg_iou_thr
self.min_pos_iou = min_pos_iou
self.gt_max_assign_all = gt_max_assign_all
self.iou_calculator = build_iou_calculator(iou_calculator)
def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None):
"""Assign gt to bboxes. The process is very much like the max iou
assigner, except that positive samples are constrained within the cell
that the gt boxes fell in.
This method assign a gt bbox to every bbox (proposal/anchor), each bbox
will be assigned with -1, 0, or a positive number. -1 means don't care,
0 means negative sample, positive number is the index (1-based) of
assigned gt.
The assignment is done in following steps, the order matters.
1. assign every bbox to -1
2. assign proposals whose iou with all gts <= neg_iou_thr to 0
3. for each bbox within a cell, if the iou with its nearest gt >
pos_iou_thr and the center of that gt falls inside the cell,
assign it to that bbox
4. for each gt bbox, assign its nearest proposals within the cell the
gt bbox falls in to itself.
Args:
bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
box_responsible_flags (Tensor): flag to indicate whether box is
responsible for prediction, shape(n, )
gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
Returns:
:obj:`AssignResult`: The assign result.
"""
num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
# compute iou between all gt and bboxes
overlaps = self.iou_calculator(gt_bboxes, bboxes)
# 1. assign -1 by default
assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
if num_gts == 0 or num_bboxes == 0:
# No ground truth or boxes, return empty assignment
max_overlaps = overlaps.new_zeros((num_bboxes, ))
if num_gts == 0:
# No truth, assign everything to background
assigned_gt_inds[:] = 0
if gt_labels is None:
assigned_labels = None
else:
assigned_labels = overlaps.new_full((num_bboxes, ),
-1,
dtype=torch.long)
return AssignResult(
num_gts,
assigned_gt_inds,
max_overlaps,
labels=assigned_labels)
# 2. assign negative: below
# for each anchor, which gt best overlaps with it
# for each anchor, the max iou of all gts
# shape of max_overlaps == argmax_overlaps == num_bboxes
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
if isinstance(self.neg_iou_thr, float):
assigned_gt_inds[(max_overlaps >= 0)
& (max_overlaps <= self.neg_iou_thr)] = 0
elif isinstance(self.neg_iou_thr, (tuple, list)):
assert len(self.neg_iou_thr) == 2
assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])
& (max_overlaps <= self.neg_iou_thr[1])] = 0
# 3. assign positive: falls into responsible cell and above
# positive IOU threshold, the order matters.
# the prior condition of comparision is to filter out all
# unrelated anchors, i.e. not box_responsible_flags
overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.
# calculate max_overlaps again, but this time we only consider IOUs
# for anchors responsible for prediction
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
# for each gt, which anchor best overlaps with it
# for each gt, the max iou of all proposals
# shape of gt_max_overlaps == gt_argmax_overlaps == num_gts
gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
pos_inds = (max_overlaps >
self.pos_iou_thr) & box_responsible_flags.type(torch.bool)
assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
# 4. assign positive to max overlapped anchors within responsible cell
for i in range(num_gts):
if gt_max_overlaps[i] > self.min_pos_iou:
if self.gt_max_assign_all:
max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \
box_responsible_flags.type(torch.bool)
assigned_gt_inds[max_iou_inds] = i + 1
elif box_responsible_flags[gt_argmax_overlaps[i]]:
assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
# assign labels of positive anchors
if gt_labels is not None:
assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
pos_inds = torch.nonzero(
assigned_gt_inds > 0, as_tuple=False).squeeze()
if pos_inds.numel() > 0:
assigned_labels[pos_inds] = gt_labels[
assigned_gt_inds[pos_inds] - 1]
else:
assigned_labels = None
return AssignResult(
num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
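# Minimal usage sketch (hypothetical shapes): `anchors` is an (n, 4) tensor,
# `responsible` is an (n,) flag tensor marking anchors whose grid cell contains
# a gt center, and `gt_bboxes`/`gt_labels` describe k ground-truth boxes:
# >>> assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
# >>> assign_result = assigner.assign(anchors, responsible, gt_bboxes, gt_labels)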
| insightface/detection/scrfd/mmdet/core/bbox/assigners/grid_assigner.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/assigners/grid_assigner.py",
"repo_id": "insightface",
"token_count": 3306
} | 114 |
import torch
from .builder import IOU_CALCULATORS
@IOU_CALCULATORS.register_module()
class BboxOverlaps2D(object):
"""2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
"""Calculate IoU between 2D bboxes.
Args:
bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
bboxes2 (Tensor): bboxes have shape (n, 4) in <x1, y1, x2, y2>
format, shape (n, 5) in <x1, y1, x2, y2, score> format, or be
empty. If ``is_aligned`` is ``True``, then m and n must be
equal.
mode (str): "iou" (intersection over union), "iof" (intersection
over foreground), or "giou" (generalized intersection over
union).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
"""
assert bboxes1.size(-1) in [0, 4, 5]
assert bboxes2.size(-1) in [0, 4, 5]
if bboxes2.size(-1) == 5:
bboxes2 = bboxes2[..., :4]
if bboxes1.size(-1) == 5:
bboxes1 = bboxes1[..., :4]
return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
def __repr__(self):
"""str: a string describing the module"""
repr_str = self.__class__.__name__ + '()'
return repr_str
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
"""Calculate overlap between two set of bboxes.
If ``is_aligned`` is ``False``, then calculate the overlaps between each
bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union), "iof" (intersection over
foreground) or "giou" (generalized intersection over union).
Default "iou".
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
# Either the boxes are empty or the length of the boxes' last dimension is 4
assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
# Batch dim must be the same
# Batch dim: (B1, B2, ... Bn)
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows, ))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
bboxes1[..., 3] - bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
bboxes2[..., 3] - bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
wh = (rb - lt).clamp(min=0) # [B, rows, 2]
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
rb = torch.min(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
wh = (rb - lt).clamp(min=0) # [B, rows, cols, 2]
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2],
bboxes2[..., None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
bboxes2[..., None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
# calculate gious
enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
| insightface/detection/scrfd/mmdet/core/bbox/iou_calculators/iou2d_calculator.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/bbox/iou_calculators/iou2d_calculator.py",
"repo_id": "insightface",
"token_count": 3075
} | 115 |
from multiprocessing import Pool
import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from .bbox_overlaps import bbox_overlaps
from .class_names import get_classes
def average_precision(recalls, precisions, mode='area'):
"""Calculate average precision (for single or multiple scales).
Args:
recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
mode (str): 'area' or '11points', 'area' means calculating the area
under precision-recall curve, '11points' means calculating
the average precision of recalls at [0, 0.1, ..., 1]
Returns:
float or ndarray: calculated average precision
"""
no_scale = False
if recalls.ndim == 1:
no_scale = True
recalls = recalls[np.newaxis, :]
precisions = precisions[np.newaxis, :]
assert recalls.shape == precisions.shape and recalls.ndim == 2
num_scales = recalls.shape[0]
ap = np.zeros(num_scales, dtype=np.float32)
if mode == 'area':
zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
ones = np.ones((num_scales, 1), dtype=recalls.dtype)
mrec = np.hstack((zeros, recalls, ones))
mpre = np.hstack((zeros, precisions, zeros))
for i in range(mpre.shape[1] - 1, 0, -1):
mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
for i in range(num_scales):
ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
ap[i] = np.sum(
(mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
elif mode == '11points':
for i in range(num_scales):
for thr in np.arange(0, 1 + 1e-3, 0.1):
precs = precisions[i, recalls[i, :] >= thr]
prec = precs.max() if precs.size > 0 else 0
ap[i] += prec
ap /= 11
else:
raise ValueError(
'Unrecognized mode, only "area" and "11points" are supported')
if no_scale:
ap = ap[0]
return ap
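# Quick sanity check of the 'area' mode (values computed by hand): the
# precision envelope of [1.0, 0.8, 0.5] over recalls [0.2, 0.6, 1.0] gives
# 0.2*1.0 + 0.4*0.8 + 0.4*0.5 = 0.72.
# >>> average_precision(np.array([0.2, 0.6, 1.0]),
# ... np.array([1.0, 0.8, 0.5])) # ~0.72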
def tpfp_imagenet(det_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
default_iou_thr=0.5,
area_ranges=None):
"""Check if detected bboxes are true positive or false positive.
Args:
det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
of shape (k, 4). Default: None
default_iou_thr (float): IoU threshold to be considered as matched for
medium and large bboxes (small ones have special rules).
Default: 0.5.
area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
in the format [(min1, max1), (min2, max2), ...]. Default: None.
Returns:
tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
each array is (num_scales, m).
"""
# an indicator of ignored gts
gt_ignore_inds = np.concatenate(
(np.zeros(gt_bboxes.shape[0], dtype=np.bool),
np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))
# stack gt_bboxes and gt_bboxes_ignore for convenience
gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
# tp and fp are of shape (num_scales, num_gts), each row is tp or fp
# of a certain scale.
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
det_bboxes[:, 3] - det_bboxes[:, 1])
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp
ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
default_iou_thr)
# sort all detections by scores in descending order
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered = np.zeros(num_gts, dtype=bool)
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
else:
gt_areas = gt_w * gt_h
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
max_iou = -1
matched_gt = -1
# find best overlapped available gt
for j in range(num_gts):
# different from PASCAL VOC: allow finding other gts if the
# best overlapped ones are already matched by other det bboxes
if gt_covered[j]:
continue
elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
max_iou = ious[i, j]
matched_gt = j
# there are 4 cases for a det bbox:
# 1. it matches a gt, tp = 1, fp = 0
# 2. it matches an ignored gt, tp = 0, fp = 0
# 3. it matches no gt and within area range, tp = 0, fp = 1
# 4. it matches no gt but is beyond area range, tp = 0, fp = 0
if matched_gt >= 0:
gt_covered[matched_gt] = 1
if not (gt_ignore_inds[matched_gt]
or gt_area_ignore[matched_gt]):
tp[k, i] = 1
elif min_area is None:
fp[k, i] = 1
else:
bbox = det_bboxes[i, :4]
area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
if area >= min_area and area < max_area:
fp[k, i] = 1
return tp, fp
def tpfp_default(det_bboxes,
gt_bboxes,
gt_bboxes_ignore=None,
iou_thr=0.5,
area_ranges=None):
"""Check if detected bboxes are true positive or false positive.
Args:
det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
of shape (k, 4). Default: None
iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
in the format [(min1, max1), (min2, max2), ...]. Default: None.
Returns:
tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
each array is (num_scales, m).
"""
# an indicator of ignored gts
gt_ignore_inds = np.concatenate(
(np.zeros(gt_bboxes.shape[0], dtype=np.bool),
np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool)))
# stack gt_bboxes and gt_bboxes_ignore for convenience
gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
num_dets = det_bboxes.shape[0]
num_gts = gt_bboxes.shape[0]
if area_ranges is None:
area_ranges = [(None, None)]
num_scales = len(area_ranges)
# tp and fp are of shape (num_scales, num_gts), each row is tp or fp of
# a certain scale
tp = np.zeros((num_scales, num_dets), dtype=np.float32)
fp = np.zeros((num_scales, num_dets), dtype=np.float32)
# if there is no gt bboxes in this image, then all det bboxes
# within area range are false positives
if gt_bboxes.shape[0] == 0:
if area_ranges == [(None, None)]:
fp[...] = 1
else:
det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
det_bboxes[:, 3] - det_bboxes[:, 1])
for i, (min_area, max_area) in enumerate(area_ranges):
fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
return tp, fp
ious = bbox_overlaps(det_bboxes, gt_bboxes)
# for each det, the max iou with all gts
ious_max = ious.max(axis=1)
# for each det, which gt overlaps most with it
ious_argmax = ious.argmax(axis=1)
# sort all dets in descending order by scores
sort_inds = np.argsort(-det_bboxes[:, -1])
for k, (min_area, max_area) in enumerate(area_ranges):
gt_covered = np.zeros(num_gts, dtype=bool)
# if no area range is specified, gt_area_ignore is all False
if min_area is None:
gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
else:
gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1])
gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
for i in sort_inds:
if ious_max[i] >= iou_thr:
matched_gt = ious_argmax[i]
if not (gt_ignore_inds[matched_gt]
or gt_area_ignore[matched_gt]):
if not gt_covered[matched_gt]:
gt_covered[matched_gt] = True
tp[k, i] = 1
else:
fp[k, i] = 1
# otherwise ignore this detected bbox, tp = 0, fp = 0
elif min_area is None:
fp[k, i] = 1
else:
bbox = det_bboxes[i, :4]
area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
if area >= min_area and area < max_area:
fp[k, i] = 1
return tp, fp
def get_cls_results(det_results, annotations, class_id):
"""Get det results and gt information of a certain class.
Args:
det_results (list[list]): Same as `eval_map()`.
annotations (list[dict]): Same as `eval_map()`.
class_id (int): ID of a specific class.
Returns:
tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
"""
cls_dets = [img_res[class_id] for img_res in det_results]
cls_gts = []
cls_gts_ignore = []
for ann in annotations:
gt_inds = ann['labels'] == class_id
cls_gts.append(ann['bboxes'][gt_inds, :])
if ann.get('labels_ignore', None) is not None:
ignore_inds = ann['labels_ignore'] == class_id
cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
else:
cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
return cls_dets, cls_gts, cls_gts_ignore
def eval_map(det_results,
annotations,
scale_ranges=None,
iou_thr=0.5,
dataset=None,
logger=None,
tpfp_fn=None,
nproc=4):
"""Evaluate mAP of a dataset.
Args:
det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
The outer list indicates images, and the inner list indicates
per-class detected bboxes.
annotations (list[dict]): Ground truth annotations where each item of
the list indicates an image. Keys of annotations are:
- `bboxes`: numpy array of shape (n, 4)
- `labels`: numpy array of shape (n, )
- `bboxes_ignore` (optional): numpy array of shape (k, 4)
- `labels_ignore` (optional): numpy array of shape (k, )
scale_ranges (list[tuple] | None): Range of scales to be evaluated,
in the format [(min1, max1), (min2, max2), ...]. A range of
(32, 64) means the area range between (32**2, 64**2).
Default: None.
iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
dataset (list[str] | str | None): Dataset name or dataset classes,
there are minor differences in metrics for different datasets, e.g.
"voc07", "imagenet_det", etc. Default: None.
logger (logging.Logger | str | None): The way to print the mAP
summary. See `mmdet.utils.print_log()` for details. Default: None.
tpfp_fn (callable | None): The function used to determine true/
false positives. If None, :func:`tpfp_default` is used as default
unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this
case). If it is given as a function, then this function is used
to evaluate tp & fp. Default None.
nproc (int): Processes used for computing TP and FP.
Default: 4.
Returns:
tuple: (mAP, [dict, dict, ...])
"""
assert len(det_results) == len(annotations)
num_imgs = len(det_results)
num_scales = len(scale_ranges) if scale_ranges is not None else 1
num_classes = len(det_results[0]) # positive class num
area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
if scale_ranges is not None else None)
pool = Pool(nproc)
eval_results = []
for i in range(num_classes):
# get gt and det bboxes of this class
cls_dets, cls_gts, cls_gts_ignore = get_cls_results(
det_results, annotations, i)
# choose proper function according to datasets to compute tp and fp
if tpfp_fn is None:
if dataset in ['det', 'vid']:
tpfp_fn = tpfp_imagenet
else:
tpfp_fn = tpfp_default
if not callable(tpfp_fn):
raise ValueError(
f'tpfp_fn has to be a function or None, but got {tpfp_fn}')
# compute tp and fp for each image with multiple processes
tpfp = pool.starmap(
tpfp_fn,
zip(cls_dets, cls_gts, cls_gts_ignore,
[iou_thr for _ in range(num_imgs)],
[area_ranges for _ in range(num_imgs)]))
tp, fp = tuple(zip(*tpfp))
# calculate gt number of each scale
# ignored gts or gts beyond the specific scale are not counted
num_gts = np.zeros(num_scales, dtype=int)
for j, bbox in enumerate(cls_gts):
if area_ranges is None:
num_gts[0] += bbox.shape[0]
else:
gt_areas = (bbox[:, 2] - bbox[:, 0]) * (
bbox[:, 3] - bbox[:, 1])
for k, (min_area, max_area) in enumerate(area_ranges):
num_gts[k] += np.sum((gt_areas >= min_area)
& (gt_areas < max_area))
# sort all det bboxes by score, also sort tp and fp
cls_dets = np.vstack(cls_dets)
num_dets = cls_dets.shape[0]
sort_inds = np.argsort(-cls_dets[:, -1])
tp = np.hstack(tp)[:, sort_inds]
fp = np.hstack(fp)[:, sort_inds]
# calculate recall and precision with tp and fp
tp = np.cumsum(tp, axis=1)
fp = np.cumsum(fp, axis=1)
eps = np.finfo(np.float32).eps
recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
precisions = tp / np.maximum((tp + fp), eps)
# calculate AP
if scale_ranges is None:
recalls = recalls[0, :]
precisions = precisions[0, :]
num_gts = num_gts.item()
mode = 'area' if dataset != 'voc07' else '11points'
ap = average_precision(recalls, precisions, mode)
eval_results.append({
'num_gts': num_gts,
'num_dets': num_dets,
'recall': recalls,
'precision': precisions,
'ap': ap
})
pool.close()
if scale_ranges is not None:
# shape (num_classes, num_scales)
all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
all_num_gts = np.vstack(
[cls_result['num_gts'] for cls_result in eval_results])
mean_ap = []
for i in range(num_scales):
if np.any(all_num_gts[:, i] > 0):
mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
else:
mean_ap.append(0.0)
else:
aps = []
for cls_result in eval_results:
if cls_result['num_gts'] > 0:
aps.append(cls_result['ap'])
mean_ap = np.array(aps).mean().item() if aps else 0.0
print_map_summary(
mean_ap, eval_results, dataset, area_ranges, logger=logger)
return mean_ap, eval_results
def print_map_summary(mean_ap,
results,
dataset=None,
scale_ranges=None,
logger=None):
"""Print mAP and results of each class.
A table will be printed to show the gts/dets/recall/AP of each class and
the mAP.
Args:
mean_ap (float): Calculated from `eval_map()`.
results (list[dict]): Calculated from `eval_map()`.
dataset (list[str] | str | None): Dataset name or dataset classes.
scale_ranges (list[tuple] | None): Range of scales to be evaluated.
logger (logging.Logger | str | None): The way to print the mAP
summary. See `mmdet.utils.print_log()` for details. Default: None.
"""
if logger == 'silent':
return
if isinstance(results[0]['ap'], np.ndarray):
num_scales = len(results[0]['ap'])
else:
num_scales = 1
if scale_ranges is not None:
assert len(scale_ranges) == num_scales
num_classes = len(results)
recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
aps = np.zeros((num_scales, num_classes), dtype=np.float32)
num_gts = np.zeros((num_scales, num_classes), dtype=int)
for i, cls_result in enumerate(results):
if cls_result['recall'].size > 0:
recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
aps[:, i] = cls_result['ap']
num_gts[:, i] = cls_result['num_gts']
if dataset is None:
label_names = [str(i) for i in range(num_classes)]
elif mmcv.is_str(dataset):
label_names = get_classes(dataset)
else:
label_names = dataset
if not isinstance(mean_ap, list):
mean_ap = [mean_ap]
header = ['class', 'gts', 'dets', 'recall', 'ap']
for i in range(num_scales):
if scale_ranges is not None:
print_log(f'Scale range {scale_ranges[i]}', logger=logger)
table_data = [header]
for j in range(num_classes):
row_data = [
label_names[j], num_gts[i, j], results[j]['num_dets'],
f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
]
table_data.append(row_data)
table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
table = AsciiTable(table_data)
table.inner_footing_row_border = True
print_log('\n' + table.table, logger=logger)
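# Minimal usage sketch (hypothetical inputs): det_results[i][c] is an (n, 5)
# array of [x1, y1, x2, y2, score] rows for class c of image i, and
# annotations[i] is a dict with 'bboxes' (k, 4) and 'labels' (k,) arrays:
# >>> mean_ap, per_class = eval_map(det_results, annotations, iou_thr=0.5)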
| insightface/detection/scrfd/mmdet/core/evaluation/mean_ap.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/evaluation/mean_ap.py",
"repo_id": "insightface",
"token_count": 9597
} | 116 |
from functools import partial
import torch
from six.moves import map, zip
def multi_apply(func, *args, **kwargs):
"""Apply function to a list of arguments.
Note:
This function applies the ``func`` to multiple inputs and
map the multiple outputs of the ``func`` into different
list. Each list contains the same type of outputs corresponding
to different inputs.
Args:
func (Function): A function that will be applied to a list of
arguments
Returns:
tuple(list): A tuple containing multiple lists, where each list contains \
one kind of result returned by the function
"""
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
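# Example sketch: a function returning a pair per input is unpacked into two
# lists, one per output kind:
# >>> def square_and_cube(x):
# ... return x ** 2, x ** 3
# >>> multi_apply(square_and_cube, [1, 2, 3])
# ([1, 4, 9], [1, 8, 27])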
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of size
count)"""
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds.type(torch.bool)] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
#print(inds)
#print('CCC', ret.shape, inds.shape, data.shape)
ret[inds.type(torch.bool), :] = data
return ret
| insightface/detection/scrfd/mmdet/core/utils/misc.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/core/utils/misc.py",
"repo_id": "insightface",
"token_count": 499
} | 117 |
import inspect
import mmcv
import numpy as np
from numpy import random
import cv2
from mmdet.core import PolygonMasks
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from ..builder import PIPELINES
try:
from imagecorruptions import corrupt
except ImportError:
corrupt = None
try:
import albumentations
from albumentations import Compose
except ImportError:
albumentations = None
Compose = None
@PIPELINES.register_module()
class Resize(object):
"""Resize images & bbox & mask.
This transform resizes the input image to some scale. Bboxes and masks are
then resized with the same scale factor. If the input dict contains the key
"scale", then the scale in the input dict is used, otherwise the specified
scale in the init method is used. If the input dict contains the key
"scale_factor" (if MultiScaleFlipAug does not give img_scale but
scale_factor), the actual scale will be computed by image shape and
scale_factor.
`img_scale` can either be a tuple (single-scale) or a list of tuple
(multi-scale). There are 3 multiscale modes:
- ``ratio_range is not None``: randomly sample a ratio from the ratio \
range and multiply it with the image scale.
- ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
sample a scale from the multiscale range.
- ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
sample a scale from multiple scales.
Args:
img_scale (tuple or list[tuple]): Images scales for resizing.
multiscale_mode (str): Either "range" or "value".
ratio_range (tuple[float]): (min_ratio, max_ratio)
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image.
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
These two backends generates slightly different results. Defaults
to 'cv2'.
override (bool, optional): Whether to override `scale` and
`scale_factor` so as to call resize twice. Default False. If True,
after the first resizing, the existing `scale` and `scale_factor`
will be ignored so the second resizing can be allowed.
This option is a work-around for multiple times of resize in DETR.
Defaults to False.
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True,
bbox_clip_border=True,
backend='cv2',
override=False):
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
assert mmcv.is_list_of(self.img_scale, tuple)
if ratio_range is not None:
# mode 1: given a scale and a range of image ratio
assert len(self.img_scale) == 1
else:
# mode 2: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range']
self.backend = backend
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
# TODO: refactor the override option in Resize
self.override = override
self.bbox_clip_border = bbox_clip_border
@staticmethod
def random_select(img_scales):
"""Randomly select an img_scale from given candidates.
Args:
img_scales (list[tuple]): Images scales for selection.
Returns:
(tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \
where ``img_scale`` is the selected image scale and \
``scale_idx`` is the selected index in the given candidates.
"""
assert mmcv.is_list_of(img_scales, tuple)
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
"""Randomly sample an img_scale when ``multiscale_mode=='range'``.
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where \
``img_scale`` is sampled scale and None is just a placeholder \
to be consistent with :func:`random_select`.
"""
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
"""Randomly sample an img_scale when ``ratio_range`` is specified.
A ratio will be randomly sampled from the range specified by
``ratio_range``. Then it would be multiplied with ``img_scale`` to
generate sampled scale.
Args:
img_scale (tuple): Images scale base to multiply with ratio.
ratio_range (tuple[float]): The minimum and maximum ratio to scale
the ``img_scale``.
Returns:
(tuple, None): Returns a tuple ``(scale, None)``, where \
``scale`` is sampled ratio multiplied with ``img_scale`` and \
None is just a placeholder to be consistent with \
:func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
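# Example: with img_scale=(640, 640) and ratio_range=(0.5, 1.5), a sampled
# ratio of 1.2 yields scale (768, 768).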
def _random_scale(self, results):
"""Randomly sample an img_scale according to ``ratio_range`` and
``multiscale_mode``.
If ``ratio_range`` is specified, a ratio will be sampled and be
multiplied with ``img_scale``.
If multiple scales are specified by ``img_scale``, a scale will be
sampled according to ``multiscale_mode``.
Otherwise, single scale will be used.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
dict: Two new keys 'scale` and 'scale_idx` are added into \
``results``, which would be used by subsequent pipelines.
"""
if self.ratio_range is not None:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _resize_img(self, results):
"""Resize images with ``results['scale']``."""
for key in results.get('img_fields', ['img']):
if self.keep_ratio:
img, scale_factor = mmcv.imrescale(
results[key],
results['scale'],
return_scale=True,
backend=self.backend)
# the w_scale and h_scale have a minor difference
# a real fix should be done in mmcv.imrescale in the future
new_h, new_w = img.shape[:2]
h, w = results[key].shape[:2]
w_scale = new_w / w
h_scale = new_h / h
else:
img, w_scale, h_scale = mmcv.imresize(
results[key],
results['scale'],
return_scale=True,
backend=self.backend)
results[key] = img
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img_shape'] = img.shape
# in case that there is no padding
results['pad_shape'] = img.shape
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_bboxes(self, results):
"""Resize bounding boxes with ``results['scale_factor']``."""
for key in results.get('bbox_fields', []):
bboxes = results[key] * results['scale_factor']
if self.bbox_clip_border:
img_shape = results['img_shape']
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
results[key] = bboxes
def _resize_keypoints(self, results):
for key in results.get('keypoints_fields', []):
keypointss = results[key].copy()
factors = results['scale_factor']
assert factors[0]==factors[2]
assert factors[1]==factors[3]
#print('AAA', results['scale_factor'])
keypointss[:,:,0] *= factors[0]
keypointss[:,:,1] *= factors[1]
if self.bbox_clip_border:
img_shape = results['img_shape']
keypointss[:,:, 0] = np.clip(keypointss[:,:, 0], 0, img_shape[1])
keypointss[:,:, 1] = np.clip(keypointss[:,:, 1], 0, img_shape[0])
results[key] = keypointss
def _resize_masks(self, results):
"""Resize masks with ``results['scale']``"""
for key in results.get('mask_fields', []):
if results[key] is None:
continue
if self.keep_ratio:
results[key] = results[key].rescale(results['scale'])
else:
results[key] = results[key].resize(results['img_shape'][:2])
def _resize_seg(self, results):
"""Resize semantic segmentation map with ``results['scale']``."""
for key in results.get('seg_fields', []):
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results[key],
results['scale'],
interpolation='nearest',
backend=self.backend)
else:
gt_seg = mmcv.imresize(
results[key],
results['scale'],
interpolation='nearest',
backend=self.backend)
results['gt_semantic_seg'] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
'keep_ratio' keys are added into result dict.
"""
if 'scale' not in results:
if 'scale_factor' in results:
img_shape = results['img'].shape[:2]
scale_factor = results['scale_factor']
assert isinstance(scale_factor, float)
results['scale'] = tuple(
[int(x * scale_factor) for x in img_shape][::-1])
else:
self._random_scale(results)
else:
if not self.override:
assert 'scale_factor' not in results, (
'scale and scale_factor cannot be both set.')
else:
results.pop('scale')
if 'scale_factor' in results:
results.pop('scale_factor')
self._random_scale(results)
self._resize_img(results)
self._resize_bboxes(results)
self._resize_keypoints(results)
self._resize_masks(results)
self._resize_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(img_scale={self.img_scale}, '
repr_str += f'multiscale_mode={self.multiscale_mode}, '
repr_str += f'ratio_range={self.ratio_range}, '
        repr_str += f'keep_ratio={self.keep_ratio}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
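# Illustrative sketch (not part of the original pipeline): driving Resize from
# a results dict. It assumes the standard mmdet-style constructor arguments;
# the (1333, 800) scale and the dummy image/boxes are made-up values.
def _demo_resize_usage():
    import numpy as np
    resize = Resize(img_scale=(1333, 800), keep_ratio=True)
    results = {
        'img': np.zeros((480, 640, 3), dtype=np.uint8),
        'img_fields': ['img'],
        'bbox_fields': ['gt_bboxes'],
        'gt_bboxes': np.array([[10., 20., 100., 200.]], dtype=np.float32),
    }
    results = resize(results)
    # 'img_shape', 'pad_shape' and 'scale_factor' are now populated.
    return results['img_shape'], results['scale_factor']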
@PIPELINES.register_module()
class RandomFlip(object):
"""Flip the image & bbox & mask.
If the input dict contains the key "flip", then the flag will be used,
otherwise it will be randomly decided by a ratio specified in the init
method.
When random flip is enabled, ``flip_ratio``/``direction`` can either be a
float/string or tuple of float/string. There are 3 flip modes:
- ``flip_ratio`` is float, ``direction`` is string: the image will be
``direction``ly flipped with probability of ``flip_ratio`` .
E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
then image will be horizontally flipped with probability of 0.5.
    - ``flip_ratio`` is float, ``direction`` is list of string: the image will
be ``direction[i]``ly flipped with probability of
``flip_ratio/len(direction)``.
E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
then image will be horizontally flipped with probability of 0.25,
vertically with probability of 0.25.
- ``flip_ratio`` is list of float, ``direction`` is list of string:
      given ``len(flip_ratio) == len(direction)``, the image will
be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
'vertical']``, then image will be horizontally flipped with probability
of 0.3, vertically with probability of 0.5
Args:
flip_ratio (float | list[float], optional): The flipping probability.
Default: None.
direction(str | list[str], optional): The flipping direction. Options
are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
If input is a list, the length must equal ``flip_ratio``. Each
element in ``flip_ratio`` indicates the flip probability of
corresponding direction.
"""
def __init__(self, flip_ratio=None, direction='horizontal'):
if isinstance(flip_ratio, list):
assert mmcv.is_list_of(flip_ratio, float)
assert 0 <= sum(flip_ratio) <= 1
elif isinstance(flip_ratio, float):
assert 0 <= flip_ratio <= 1
elif flip_ratio is None:
pass
else:
raise ValueError('flip_ratios must be None, float, '
'or list of float')
self.flip_ratio = flip_ratio
valid_directions = ['horizontal', 'vertical', 'diagonal']
if isinstance(direction, str):
assert direction in valid_directions
elif isinstance(direction, list):
assert mmcv.is_list_of(direction, str)
assert set(direction).issubset(set(valid_directions))
else:
raise ValueError('direction must be either str or list of str')
self.direction = direction
if isinstance(flip_ratio, list):
assert len(self.flip_ratio) == len(self.direction)
def bbox_flip(self, bboxes, img_shape, direction):
"""Flip bboxes horizontally.
Args:
bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
img_shape (tuple[int]): Image shape (height, width)
direction (str): Flip direction. Options are 'horizontal',
'vertical'.
Returns:
numpy.ndarray: Flipped bounding boxes.
"""
assert bboxes.shape[-1] % 4 == 0
flipped = bboxes.copy()
if direction == 'horizontal':
w = img_shape[1]
flipped[..., 0::4] = w - bboxes[..., 2::4]
flipped[..., 2::4] = w - bboxes[..., 0::4]
elif direction == 'vertical':
h = img_shape[0]
flipped[..., 1::4] = h - bboxes[..., 3::4]
flipped[..., 3::4] = h - bboxes[..., 1::4]
elif direction == 'diagonal':
w = img_shape[1]
h = img_shape[0]
flipped[..., 0::4] = w - bboxes[..., 2::4]
flipped[..., 1::4] = h - bboxes[..., 3::4]
flipped[..., 2::4] = w - bboxes[..., 0::4]
flipped[..., 3::4] = h - bboxes[..., 1::4]
else:
raise ValueError(f"Invalid flipping direction '{direction}'")
return flipped
    def keypoints_flip(self, keypointss, img_shape, direction):
        """Flip 5-point face keypoints horizontally.
        The left/right eye and left/right mouth-corner indices are swapped so
        that the semantic order is preserved after mirroring.
        """
        assert direction == 'horizontal'
        assert keypointss.shape[-1] == 3
        assert keypointss.shape[1] == 5
        assert keypointss.ndim == 3
        flipped = keypointss.copy()
        flip_order = [1, 0, 2, 4, 3]
        for idx, a in enumerate(flip_order):
            flipped[:, idx, :] = keypointss[:, a, :]
        w = img_shape[1]
        flipped[..., 0] = w - flipped[..., 0]
        return flipped
def __call__(self, results):
"""Call function to flip bounding boxes, masks, semantic segmentation
maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Flipped results, 'flip', 'flip_direction' keys are added \
into result dict.
"""
if 'flip' not in results:
if isinstance(self.direction, list):
# None means non-flip
direction_list = self.direction + [None]
else:
# None means non-flip
direction_list = [self.direction, None]
if isinstance(self.flip_ratio, list):
non_flip_ratio = 1 - sum(self.flip_ratio)
flip_ratio_list = self.flip_ratio + [non_flip_ratio]
else:
non_flip_ratio = 1 - self.flip_ratio
# exclude non-flip
single_ratio = self.flip_ratio / (len(direction_list) - 1)
flip_ratio_list = [single_ratio] * (len(direction_list) -
1) + [non_flip_ratio]
cur_dir = np.random.choice(direction_list, p=flip_ratio_list)
results['flip'] = cur_dir is not None
if 'flip_direction' not in results:
results['flip_direction'] = cur_dir
if results['flip']:
# flip image
for key in results.get('img_fields', ['img']):
results[key] = mmcv.imflip(
results[key], direction=results['flip_direction'])
# flip bboxes
for key in results.get('bbox_fields', []):
results[key] = self.bbox_flip(results[key],
results['img_shape'],
results['flip_direction'])
for key in results.get('keypoints_fields', []):
results[key] = self.keypoints_flip(results[key],
results['img_shape'],
results['flip_direction'])
# flip masks
for key in results.get('mask_fields', []):
results[key] = results[key].flip(results['flip_direction'])
# flip segs
for key in results.get('seg_fields', []):
results[key] = mmcv.imflip(
results[key], direction=results['flip_direction'])
return results
def __repr__(self):
return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
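# Illustrative sketch: horizontal bbox flipping as implemented above. The
# image size and box coordinates are made-up values for demonstration.
def _demo_bbox_flip():
    import numpy as np
    flip = RandomFlip(flip_ratio=1.0, direction='horizontal')
    bboxes = np.array([[10., 20., 50., 80.]], dtype=np.float32)
    flipped = flip.bbox_flip(bboxes, img_shape=(100, 200), direction='horizontal')
    # x coordinates are mirrored around the image width:
    # x1' = W - x2 = 150, x2' = W - x1 = 190
    return flipped  # array([[150., 20., 190., 80.]], dtype=float32)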
@PIPELINES.register_module()
class Pad(object):
"""Pad the image & mask.
There are two padding modes: (1) pad to a fixed size and (2) pad to the
minimum size that is divisible by some number.
Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
Args:
size (tuple, optional): Fixed padding size.
size_divisor (int, optional): The divisor of padded size.
pad_val (float, optional): Padding value, 0 by default.
"""
def __init__(self, size=None, size_divisor=None, pad_val=0):
self.size = size
self.size_divisor = size_divisor
self.pad_val = pad_val
# only one of size and size_divisor should be valid
assert size is not None or size_divisor is not None
assert size is None or size_divisor is None
def _pad_img(self, results):
"""Pad images according to ``self.size``."""
for key in results.get('img_fields', ['img']):
if self.size is not None:
padded_img = mmcv.impad(
results[key], shape=self.size, pad_val=self.pad_val)
elif self.size_divisor is not None:
padded_img = mmcv.impad_to_multiple(
results[key], self.size_divisor, pad_val=self.pad_val)
results[key] = padded_img
results['pad_shape'] = padded_img.shape
results['pad_fixed_size'] = self.size
results['pad_size_divisor'] = self.size_divisor
def _pad_masks(self, results):
"""Pad masks according to ``results['pad_shape']``."""
pad_shape = results['pad_shape'][:2]
for key in results.get('mask_fields', []):
results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)
def _pad_seg(self, results):
"""Pad semantic segmentation map according to
``results['pad_shape']``."""
for key in results.get('seg_fields', []):
results[key] = mmcv.impad(
results[key], shape=results['pad_shape'][:2])
def __call__(self, results):
"""Call function to pad images, masks, semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Updated result dict.
"""
self._pad_img(results)
self._pad_masks(results)
self._pad_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(size={self.size}, '
repr_str += f'size_divisor={self.size_divisor}, '
repr_str += f'pad_val={self.pad_val})'
return repr_str
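# Illustrative sketch: padding so that both sides become divisible by 32, the
# usual configuration for FPN-style detectors. The input size is arbitrary.
def _demo_pad_usage():
    import numpy as np
    pad = Pad(size_divisor=32)
    results = {'img': np.zeros((100, 150, 3), dtype=np.uint8), 'img_fields': ['img']}
    results = pad(results)
    return results['pad_shape']  # (128, 160, 3)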
@PIPELINES.register_module()
class Normalize(object):
"""Normalize the image.
Added key is "img_norm_cfg".
Args:
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB,
default is true.
"""
def __init__(self, mean, std, to_rgb=True):
self.mean = np.array(mean, dtype=np.float32)
self.std = np.array(std, dtype=np.float32)
self.to_rgb = to_rgb
def __call__(self, results):
"""Call function to normalize images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Normalized results, 'img_norm_cfg' key is added into
result dict.
"""
for key in results.get('img_fields', ['img']):
results[key] = mmcv.imnormalize(results[key], self.mean, self.std,
self.to_rgb)
results['img_norm_cfg'] = dict(
mean=self.mean, std=self.std, to_rgb=self.to_rgb)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'
return repr_str
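# Illustrative sketch: Normalize with the ImageNet statistics that detection
# configs commonly use; the exact values here are an assumption.
def _demo_normalize_usage():
    import numpy as np
    norm = Normalize(mean=[123.675, 116.28, 103.53],
                     std=[58.395, 57.12, 57.375],
                     to_rgb=True)
    results = {'img': np.full((4, 4, 3), 128, dtype=np.float32),
               'img_fields': ['img']}
    results = norm(results)
    return results['img_norm_cfg']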
@PIPELINES.register_module()
class RandomCrop(object):
"""Random crop the image & bboxes & masks.
The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
then the cropped results are generated.
Args:
crop_size (tuple): The relative ratio or absolute pixels of
height and width.
crop_type (str, optional): one of "relative_range", "relative",
"absolute", "absolute_range". "relative" randomly crops
(h * crop_size[0], w * crop_size[1]) part from an input of size
(h, w). "relative_range" uniformly samples relative crop size from
range [crop_size[0], 1] and [crop_size[1], 1] for height and width
respectively. "absolute" crops from an input with absolute size
(crop_size[0], crop_size[1]). "absolute_range" uniformly samples
crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
allow_negative_crop (bool, optional): Whether to allow a crop that does
not contain any bbox area. Default False.
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
Note:
- If the image is smaller than the absolute crop size, return the
original image.
- The keys for bboxes, labels and masks must be aligned. That is,
`gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
`gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
`gt_masks_ignore`.
- If the crop does not contain any gt-bbox region and
`allow_negative_crop` is set to False, skip this image.
"""
def __init__(self,
crop_size,
crop_type='absolute',
allow_negative_crop=False,
bbox_clip_border=True):
if crop_type not in [
'relative_range', 'relative', 'absolute', 'absolute_range'
]:
raise ValueError(f'Invalid crop_type {crop_type}.')
if crop_type in ['absolute', 'absolute_range']:
assert crop_size[0] > 0 and crop_size[1] > 0
assert isinstance(crop_size[0], int) and isinstance(
crop_size[1], int)
else:
assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
self.crop_size = crop_size
self.crop_type = crop_type
self.allow_negative_crop = allow_negative_crop
self.bbox_clip_border = bbox_clip_border
# The key correspondence from bboxes to labels and masks.
self.bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
self.bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def _crop_data(self, results, crop_size, allow_negative_crop):
"""Function to randomly crop images, bounding boxes, masks, semantic
segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
crop_size (tuple): Expected absolute size after cropping, (h, w).
allow_negative_crop (bool): Whether to allow a crop that does not
contain any bbox area. Default to False.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
assert crop_size[0] > 0 and crop_size[1] > 0
for key in results.get('img_fields', ['img']):
img = results[key]
margin_h = max(img.shape[0] - crop_size[0], 0)
margin_w = max(img.shape[1] - crop_size[1], 0)
offset_h = np.random.randint(0, margin_h + 1)
offset_w = np.random.randint(0, margin_w + 1)
crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
# crop the image
img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
img_shape = img.shape
results[key] = img
results['img_shape'] = img_shape
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
# e.g. gt_bboxes and gt_bboxes_ignore
bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
dtype=np.float32)
bboxes = results[key] - bbox_offset
if self.bbox_clip_border:
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
bboxes[:, 3] > bboxes[:, 1])
# If the crop does not contain any gt-bbox area and
# allow_negative_crop is False, skip this image.
if (key == 'gt_bboxes' and not valid_inds.any()
and not allow_negative_crop):
return None
results[key] = bboxes[valid_inds, :]
# label fields. e.g. gt_labels and gt_labels_ignore
label_key = self.bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][valid_inds]
# mask fields, e.g. gt_masks and gt_masks_ignore
mask_key = self.bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][
valid_inds.nonzero()[0]].crop(
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
# crop semantic seg
for key in results.get('seg_fields', []):
results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
return results
def _get_crop_size(self, image_size):
"""Randomly generates the absolute crop size based on `crop_type` and
`image_size`.
Args:
image_size (tuple): (h, w).
Returns:
crop_size (tuple): (crop_h, crop_w) in absolute pixels.
"""
h, w = image_size
if self.crop_type == 'absolute':
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
elif self.crop_type == 'absolute_range':
assert self.crop_size[0] <= self.crop_size[1]
crop_h = np.random.randint(
min(h, self.crop_size[0]),
min(h, self.crop_size[1]) + 1)
crop_w = np.random.randint(
min(w, self.crop_size[0]),
min(w, self.crop_size[1]) + 1)
return crop_h, crop_w
elif self.crop_type == 'relative':
crop_h, crop_w = self.crop_size
return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
elif self.crop_type == 'relative_range':
crop_size = np.asarray(self.crop_size, dtype=np.float32)
crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
def __call__(self, results):
"""Call function to randomly crop images, bounding boxes, masks,
semantic segmentation maps.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Randomly cropped results, 'img_shape' key in result dict is
updated according to crop size.
"""
image_size = results['img'].shape[:2]
crop_size = self._get_crop_size(image_size)
results = self._crop_data(results, crop_size, self.allow_negative_crop)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(crop_size={self.crop_size}, '
repr_str += f'crop_type={self.crop_type}, '
repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
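# Illustrative sketch: how each crop_type maps an image size to an absolute
# crop size; the sizes and ratios below are arbitrary examples.
def _demo_random_crop_sizes():
    image_size = (480, 640)  # (h, w)
    examples = [
        ('absolute', (300, 400)),        # clipped to the image if larger
        ('absolute_range', (200, 400)),  # crop_h / crop_w sampled independently
        ('relative', (0.5, 0.5)),        # half of each side
        ('relative_range', (0.5, 0.5)),  # per-side ratio sampled from [0.5, 1]
    ]
    sizes = {}
    for crop_type, crop_size in examples:
        crop = RandomCrop(crop_size=crop_size, crop_type=crop_type)
        sizes[crop_type] = crop._get_crop_size(image_size)
    return sizes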
@PIPELINES.register_module()
class RandomSquareCrop(object):
"""Random crop the image & bboxes, the cropped patches have minimum IoU
requirement with original image & bboxes, the IoU threshold is randomly
selected from min_ious.
Args:
min_ious (tuple): minimum IoU threshold for all intersections with
bounding boxes
min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
where a >= min_crop_size).
Note:
The keys for bboxes, labels and masks should be paired. That is, \
`gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
`gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
"""
def __init__(self, crop_ratio_range=None, crop_choice=None, bbox_clip_border=True):
self.crop_ratio_range = crop_ratio_range
self.crop_choice = crop_choice
self.bbox_clip_border = bbox_clip_border
assert (self.crop_ratio_range is None) ^ (self.crop_choice is None)
if self.crop_ratio_range is not None:
self.crop_ratio_min, self.crop_ratio_max = self.crop_ratio_range
self.bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
self.bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def __call__(self, results):
"""Call function to crop images and bounding boxes with minimum IoU
constraint.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images and bounding boxes cropped, \
'img_shape' key is updated.
"""
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
img = results['img']
assert 'bbox_fields' in results
assert 'gt_bboxes' in results
boxes = results['gt_bboxes']
#boxes = [results[key] for key in results['bbox_fields']]
#boxes = np.concatenate(boxes, 0)
h, w, c = img.shape
scale_retry = 0
if self.crop_ratio_range is not None:
max_scale = self.crop_ratio_max
else:
max_scale = np.amax(self.crop_choice)
#max_scale = max(max_scale, float(max(w,h))/min(w,h))
while True:
scale_retry += 1
if scale_retry==1 or max_scale>1.0:
if self.crop_ratio_range is not None:
scale = np.random.uniform(self.crop_ratio_min,
self.crop_ratio_max)
elif self.crop_choice is not None:
scale = np.random.choice(self.crop_choice)
else:
#scale = min(scale*1.2, max_scale)
scale = scale*1.2
# print(scale, img.shape[:2], boxes)
# import cv2
# cv2.imwrite('aaa.png', img)
for i in range(250):
short_side = min(w, h)
cw = int(scale * short_side)
ch = cw
# TODO +1
if w==cw:
left = 0
elif w>cw:
#left = random.uniform(w - cw)
left = random.randint(0, w - cw)
else:
left = random.randint(w - cw, 0)
if h==ch:
top = 0
elif h>ch:
#top = random.uniform(h - ch)
top = random.randint(0, h - ch)
else:
top = random.randint(h - ch, 0)
patch = np.array(
                    (int(left), int(top), int(left + cw), int(top + ch)), dtype=np.int64)
# center of boxes should inside the crop img
# only adjust boxes and instance masks when the gt is not empty
# adjust boxes
def is_center_of_bboxes_in_patch(boxes, patch):
# TODO >=
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = ((center[:, 0] > patch[0]) *
(center[:, 1] > patch[1]) *
(center[:, 0] < patch[2]) *
(center[:, 1] < patch[3]))
return mask
mask = is_center_of_bboxes_in_patch(boxes, patch)
if not mask.any():
continue
for key in results.get('bbox_fields', []):
boxes = results[key].copy()
#print('BBB', key, boxes.shape)
mask = is_center_of_bboxes_in_patch(boxes, patch)
boxes = boxes[mask]
if self.bbox_clip_border:
boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
boxes -= np.tile(patch[:2], 2)
results[key] = boxes
# labels
label_key = self.bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][mask]
# keypoints field
if key=='gt_bboxes':
for kps_key in results.get('keypoints_fields', []):
keypointss = results[kps_key].copy()
#print('AAAA', kps_key, keypointss.shape, mask.shape)
keypointss = keypointss[mask,:,:]
if self.bbox_clip_border:
keypointss[:,:,:2] = keypointss[:,:,:2].clip(max=patch[2:])
keypointss[:,:,:2] = keypointss[:,:,:2].clip(min=patch[:2])
#keypointss[:,:,:2] -= np.tile(patch[:2], 2)
keypointss[:,:,0] -= patch[0]
keypointss[:,:,1] -= patch[1]
results[kps_key] = keypointss
# mask fields
mask_key = self.bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][mask.nonzero()
[0]].crop(patch)
# adjust the img no matter whether the gt is empty before crop
#img = img[patch[1]:patch[3], patch[0]:patch[2]]
rimg = np.ones( (ch, cw, 3), dtype=img.dtype) * 128
patch_from = patch.copy()
patch_from[0] = max(0, patch_from[0])
patch_from[1] = max(0, patch_from[1])
patch_from[2] = min(img.shape[1], patch_from[2])
patch_from[3] = min(img.shape[0], patch_from[3])
patch_to = patch.copy()
patch_to[0] = max(0, patch_to[0]*-1)
patch_to[1] = max(0, patch_to[1]*-1)
patch_to[2] = patch_to[0] + (patch_from[2] - patch_from[0])
patch_to[3] = patch_to[1] + (patch_from[3] - patch_from[1])
rimg[patch_to[1]:patch_to[3], patch_to[0]:patch_to[2],:] = img[patch_from[1]:patch_from[3], patch_from[0]:patch_from[2], :]
#print(img.shape, scale, patch, patch_from, patch_to, rimg.shape)
img = rimg
results['img'] = img
results['img_shape'] = img.shape
# seg fields
#for key in results.get('seg_fields', []):
# results[key] = results[key][patch[1]:patch[3],
# patch[0]:patch[2]]
return results
def __repr__(self):
repr_str = self.__class__.__name__
        repr_str += f'(crop_ratio_range={self.crop_ratio_range}, '
        repr_str += f'crop_choice={self.crop_choice}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
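# Illustrative sketch: the square-crop scale comes either from a continuous
# range or from a discrete choice list (exactly one must be given); the
# values below are arbitrary examples.
def _demo_random_square_crop_cfg():
    crop_a = RandomSquareCrop(crop_choice=[0.3, 0.45, 0.6, 0.8, 1.0])
    crop_b = RandomSquareCrop(crop_ratio_range=(0.3, 1.0))
    return crop_a, crop_b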
@PIPELINES.register_module()
class SegRescale(object):
"""Rescale semantic segmentation maps.
Args:
scale_factor (float): The scale factor of the final output.
backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
These two backends generates slightly different results. Defaults
to 'cv2'.
"""
def __init__(self, scale_factor=1, backend='cv2'):
self.scale_factor = scale_factor
self.backend = backend
def __call__(self, results):
"""Call function to scale the semantic segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with semantic segmentation map scaled.
"""
for key in results.get('seg_fields', []):
if self.scale_factor != 1:
results[key] = mmcv.imrescale(
results[key],
self.scale_factor,
interpolation='nearest',
backend=self.backend)
return results
def __repr__(self):
return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
@PIPELINES.register_module()
class PhotoMetricDistortion(object):
"""Apply photometric distortion to image sequentially, every transformation
is applied with a probability of 0.5. The position of random contrast is in
second or second to last.
1. random brightness
2. random contrast (mode 0)
3. convert color from BGR to HSV
4. random saturation
5. random hue
6. convert color from HSV to BGR
7. random contrast (mode 1)
8. randomly swap channels
9. random grayscale
Args:
brightness_delta (int): delta of brightness.
contrast_range (tuple): range of contrast.
saturation_range (tuple): range of saturation.
hue_delta (int): delta of hue.
gray_prob (float): prob of grayscale.
"""
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18,
gray_prob=0.0):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
self.gray_prob = gray_prob
def __call__(self, results):
"""Call function to perform photometric distortion on images.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images distorted.
"""
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
img = results['img']
assert img.dtype == np.float32, \
'PhotoMetricDistortion needs the input image of dtype np.float32,'\
' please set "to_float32=True" in "LoadImageFromFile" pipeline'
# random brightness
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
# mode == 0 --> do random contrast first
# mode == 1 --> do random contrast last
mode = random.randint(2)
if mode == 1:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
# random contrast
if mode == 0:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if random.randint(2):
img = img[..., random.permutation(3)]
if self.gray_prob>0.0:
if random.random()<self.gray_prob:
gray = mmcv.bgr2gray(img)
img = cv2.merge([gray, gray, gray])
results['img'] = img
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
repr_str += 'contrast_range='
repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
repr_str += 'saturation_range='
repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
repr_str += f'hue_delta={self.hue_delta},\n'
repr_str += f'gray_prob={self.gray_prob})'
return repr_str
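# Illustrative sketch: PhotoMetricDistortion requires a float32 image, so in a
# config it is normally preceded by LoadImageFromFile(to_float32=True). The
# parameter values below are common defaults, not requirements.
def _demo_photo_metric_distortion():
    import numpy as np
    distort = PhotoMetricDistortion(
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18)
    results = {'img': np.random.rand(32, 32, 3).astype(np.float32) * 255,
               'img_fields': ['img']}
    return distort(results)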
@PIPELINES.register_module()
class Expand(object):
"""Random expand the image & bboxes.
Randomly place the original image on a canvas of 'ratio' x original image
size filled with mean values. The ratio is in the range of ratio_range.
Args:
mean (tuple): mean value of dataset.
to_rgb (bool): if need to convert the order of mean to align with RGB.
ratio_range (tuple): range of expand ratio.
prob (float): probability of applying this transformation
"""
def __init__(self,
mean=(0, 0, 0),
to_rgb=True,
ratio_range=(1, 4),
seg_ignore_label=None,
prob=0.5):
self.to_rgb = to_rgb
self.ratio_range = ratio_range
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
self.seg_ignore_label = seg_ignore_label
self.prob = prob
def __call__(self, results):
"""Call function to expand images, bounding boxes.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images, bounding boxes expanded
"""
if random.uniform(0, 1) > self.prob:
return results
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
img = results['img']
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
# speedup expand when meets large image
if np.all(self.mean == self.mean[0]):
expand_img = np.empty((int(h * ratio), int(w * ratio), c),
img.dtype)
expand_img.fill(self.mean[0])
else:
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean,
dtype=img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
results['img'] = expand_img
# expand bboxes
for key in results.get('bbox_fields', []):
results[key] = results[key] + np.tile(
(left, top), 2).astype(results[key].dtype)
# expand masks
for key in results.get('mask_fields', []):
results[key] = results[key].expand(
int(h * ratio), int(w * ratio), top, left)
# expand segs
for key in results.get('seg_fields', []):
gt_seg = results[key]
expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
self.seg_ignore_label,
dtype=gt_seg.dtype)
expand_gt_seg[top:top + h, left:left + w] = gt_seg
results[key] = expand_gt_seg
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
repr_str += f'ratio_range={self.ratio_range}, '
repr_str += f'seg_ignore_label={self.seg_ignore_label})'
return repr_str
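# Illustrative sketch: Expand pastes the image onto a larger mean-valued
# canvas, so boxes only need a (left, top) offset. All values are arbitrary.
def _demo_expand_usage():
    import numpy as np
    expand = Expand(mean=(123.675, 116.28, 103.53), to_rgb=True,
                    ratio_range=(1, 2), prob=1.0)
    results = {
        'img': np.zeros((100, 100, 3), dtype=np.float32),
        'img_fields': ['img'],
        'bbox_fields': ['gt_bboxes'],
        'gt_bboxes': np.array([[10., 10., 50., 50.]], dtype=np.float32),
    }
    results = expand(results)
    return results['img'].shape, results['gt_bboxes']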
@PIPELINES.register_module()
class MinIoURandomCrop(object):
"""Random crop the image & bboxes, the cropped patches have minimum IoU
requirement with original image & bboxes, the IoU threshold is randomly
selected from min_ious.
Args:
min_ious (tuple): minimum IoU threshold for all intersections with
bounding boxes
min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
where a >= min_crop_size).
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
Note:
The keys for bboxes, labels and masks should be paired. That is, \
`gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
`gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
"""
def __init__(self,
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3,
bbox_clip_border=True):
# 1: return ori img
self.min_ious = min_ious
self.sample_mode = (1, *min_ious, 0)
self.min_crop_size = min_crop_size
self.bbox_clip_border = bbox_clip_border
self.bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
self.bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def __call__(self, results):
"""Call function to crop images and bounding boxes with minimum IoU
constraint.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images and bounding boxes cropped, \
'img_shape' key is updated.
"""
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
img = results['img']
assert 'bbox_fields' in results
boxes = [results[key] for key in results['bbox_fields']]
boxes = np.concatenate(boxes, 0)
h, w, c = img.shape
while True:
mode = random.choice(self.sample_mode)
self.mode = mode
if mode == 1:
return results
min_iou = mode
for i in range(50):
new_w = random.uniform(self.min_crop_size * w, w)
new_h = random.uniform(self.min_crop_size * h, h)
# h / w in [0.5, 2]
if new_h / new_w < 0.5 or new_h / new_w > 2:
continue
left = random.uniform(w - new_w)
top = random.uniform(h - new_h)
patch = np.array(
(int(left), int(top), int(left + new_w), int(top + new_h)))
# Line or point crop is not allowed
if patch[2] == patch[0] or patch[3] == patch[1]:
continue
overlaps = bbox_overlaps(
patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
if len(overlaps) > 0 and overlaps.min() < min_iou:
continue
# center of boxes should inside the crop img
# only adjust boxes and instance masks when the gt is not empty
if len(overlaps) > 0:
# adjust boxes
def is_center_of_bboxes_in_patch(boxes, patch):
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = ((center[:, 0] > patch[0]) *
(center[:, 1] > patch[1]) *
(center[:, 0] < patch[2]) *
(center[:, 1] < patch[3]))
return mask
mask = is_center_of_bboxes_in_patch(boxes, patch)
if not mask.any():
continue
for key in results.get('bbox_fields', []):
boxes = results[key].copy()
mask = is_center_of_bboxes_in_patch(boxes, patch)
boxes = boxes[mask]
if self.bbox_clip_border:
boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
boxes -= np.tile(patch[:2], 2)
results[key] = boxes
# labels
label_key = self.bbox2label.get(key)
if label_key in results:
results[label_key] = results[label_key][mask]
# mask fields
mask_key = self.bbox2mask.get(key)
if mask_key in results:
results[mask_key] = results[mask_key][
mask.nonzero()[0]].crop(patch)
# adjust the img no matter whether the gt is empty before crop
img = img[patch[1]:patch[3], patch[0]:patch[2]]
results['img'] = img
results['img_shape'] = img.shape
# seg fields
for key in results.get('seg_fields', []):
results[key] = results[key][patch[1]:patch[3],
patch[0]:patch[2]]
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(min_ious={self.min_ious}, '
repr_str += f'min_crop_size={self.min_crop_size}), '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
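# Illustrative sketch: SSD-style usage where a crop is kept only if every
# remaining box overlaps the patch by at least the sampled IoU threshold.
# The thresholds below are the usual defaults; the data is made up.
def _demo_min_iou_random_crop():
    import numpy as np
    crop = MinIoURandomCrop(min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
                            min_crop_size=0.3)
    results = {
        'img': np.zeros((300, 300, 3), dtype=np.uint8),
        'img_fields': ['img'],
        'bbox_fields': ['gt_bboxes'],
        'gt_bboxes': np.array([[30., 30., 200., 200.]], dtype=np.float32),
        'gt_labels': np.array([0]),
    }
    return crop(results)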
@PIPELINES.register_module()
class Corrupt(object):
"""Corruption augmentation.
Corruption transforms implemented based on
`imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.
Args:
corruption (str): Corruption name.
severity (int, optional): The severity of corruption. Default: 1.
"""
def __init__(self, corruption, severity=1):
self.corruption = corruption
self.severity = severity
def __call__(self, results):
"""Call function to corrupt image.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Result dict with images corrupted.
"""
if corrupt is None:
raise RuntimeError('imagecorruptions is not installed')
if 'img_fields' in results:
assert results['img_fields'] == ['img'], \
'Only single img_fields is allowed'
results['img'] = corrupt(
results['img'].astype(np.uint8),
corruption_name=self.corruption,
severity=self.severity)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(corruption={self.corruption}, '
repr_str += f'severity={self.severity})'
return repr_str
@PIPELINES.register_module()
class Albu(object):
"""Albumentation augmentation.
Adds custom transformations from Albumentations library.
    Please visit `https://albumentations.readthedocs.io`
    to get more information.
    An example of ``transforms`` is as follows:
.. code-block::
[
dict(
type='ShiftScaleRotate',
shift_limit=0.0625,
scale_limit=0.0,
rotate_limit=0,
interpolation=1,
p=0.5),
dict(
type='RandomBrightnessContrast',
brightness_limit=[0.1, 0.3],
contrast_limit=[0.1, 0.3],
p=0.2),
dict(type='ChannelShuffle', p=0.1),
dict(
type='OneOf',
transforms=[
dict(type='Blur', blur_limit=3, p=1.0),
dict(type='MedianBlur', blur_limit=3, p=1.0)
],
p=0.1),
]
Args:
transforms (list[dict]): A list of albu transformations
bbox_params (dict): Bbox_params for albumentation `Compose`
keymap (dict): Contains {'input key':'albumentation-style key'}
skip_img_without_anno (bool): Whether to skip the image if no ann left
after aug
"""
def __init__(self,
transforms,
bbox_params=None,
keymap=None,
update_pad_shape=False,
skip_img_without_anno=False):
if Compose is None:
raise RuntimeError('albumentations is not installed')
self.transforms = transforms
self.filter_lost_elements = False
self.update_pad_shape = update_pad_shape
self.skip_img_without_anno = skip_img_without_anno
# A simple workaround to remove masks without boxes
if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
and 'filter_lost_elements' in bbox_params):
self.filter_lost_elements = True
self.origin_label_fields = bbox_params['label_fields']
bbox_params['label_fields'] = ['idx_mapper']
del bbox_params['filter_lost_elements']
self.bbox_params = (
self.albu_builder(bbox_params) if bbox_params else None)
self.aug = Compose([self.albu_builder(t) for t in self.transforms],
bbox_params=self.bbox_params)
if not keymap:
self.keymap_to_albu = {
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
}
else:
self.keymap_to_albu = keymap
self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
def albu_builder(self, cfg):
"""Import a module from albumentations.
It inherits some of :func:`build_from_cfg` logic.
Args:
cfg (dict): Config dict. It should at least contain the key "type".
Returns:
obj: The constructed object.
"""
assert isinstance(cfg, dict) and 'type' in cfg
args = cfg.copy()
obj_type = args.pop('type')
if mmcv.is_str(obj_type):
if albumentations is None:
raise RuntimeError('albumentations is not installed')
obj_cls = getattr(albumentations, obj_type)
elif inspect.isclass(obj_type):
obj_cls = obj_type
else:
raise TypeError(
f'type must be a str or valid type, but got {type(obj_type)}')
if 'transforms' in args:
args['transforms'] = [
self.albu_builder(transform)
for transform in args['transforms']
]
return obj_cls(**args)
@staticmethod
def mapper(d, keymap):
"""Dictionary mapper. Renames keys according to keymap provided.
Args:
d (dict): old dict
keymap (dict): {'old_key':'new_key'}
Returns:
dict: new dict.
"""
        updated_dict = {}
        for k, v in d.items():
            new_k = keymap.get(k, k)
            updated_dict[new_k] = v
        return updated_dict
def __call__(self, results):
# dict to albumentations format
results = self.mapper(results, self.keymap_to_albu)
# TODO: add bbox_fields
if 'bboxes' in results:
# to list of boxes
if isinstance(results['bboxes'], np.ndarray):
results['bboxes'] = [x for x in results['bboxes']]
# add pseudo-field for filtration
if self.filter_lost_elements:
results['idx_mapper'] = np.arange(len(results['bboxes']))
# TODO: Support mask structure in albu
if 'masks' in results:
if isinstance(results['masks'], PolygonMasks):
raise NotImplementedError(
'Albu only supports BitMap masks now')
ori_masks = results['masks']
if albumentations.__version__ < '0.5':
results['masks'] = results['masks'].masks
else:
results['masks'] = [mask for mask in results['masks'].masks]
results = self.aug(**results)
if 'bboxes' in results:
if isinstance(results['bboxes'], list):
results['bboxes'] = np.array(
results['bboxes'], dtype=np.float32)
results['bboxes'] = results['bboxes'].reshape(-1, 4)
# filter label_fields
if self.filter_lost_elements:
for label in self.origin_label_fields:
results[label] = np.array(
[results[label][i] for i in results['idx_mapper']])
if 'masks' in results:
results['masks'] = np.array(
[results['masks'][i] for i in results['idx_mapper']])
results['masks'] = ori_masks.__class__(
results['masks'], results['image'].shape[0],
results['image'].shape[1])
if (not len(results['idx_mapper'])
and self.skip_img_without_anno):
return None
if 'gt_labels' in results:
if isinstance(results['gt_labels'], list):
results['gt_labels'] = np.array(results['gt_labels'])
results['gt_labels'] = results['gt_labels'].astype(np.int64)
# back to the original format
results = self.mapper(results, self.keymap_back)
# update final shape
if self.update_pad_shape:
results['pad_shape'] = results['img'].shape
return results
def __repr__(self):
repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
return repr_str
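# Illustrative sketch: a minimal Albu wrapper configuration mirroring the
# docstring above. It assumes the albumentations package is installed; the
# transform names and probabilities are arbitrary examples.
def _demo_albu_cfg():
    albu_transforms = [
        dict(type='RandomBrightnessContrast',
             brightness_limit=[0.1, 0.3],
             contrast_limit=[0.1, 0.3],
             p=0.2),
        dict(type='ChannelShuffle', p=0.1),
    ]
    return Albu(
        transforms=albu_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_labels'],
            min_visibility=0.0,
            filter_lost_elements=True),
        keymap={'img': 'image', 'gt_bboxes': 'bboxes'},
        update_pad_shape=False,
        skip_img_without_anno=True)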
@PIPELINES.register_module()
class RandomCenterCropPad(object):
"""Random center crop and random around padding for CornerNet.
This operation generates randomly cropped image from the original image and
pads it simultaneously. Different from :class:`RandomCrop`, the output
shape may not equal to ``crop_size`` strictly. We choose a random value
from ``ratios`` and the output shape could be larger or smaller than
``crop_size``. The padding operation is also different from :class:`Pad`,
here we use around padding instead of right-bottom padding.
The relation between output image (padding image) and original image:
.. code:: text
output image
+----------------------------+
| padded area |
+------|----------------------------|----------+
| | cropped area | |
| | +---------------+ | |
| | | . center | | | original image
| | | range | | |
| | +---------------+ | |
+------|----------------------------|----------+
| padded area |
+----------------------------+
There are 5 main areas in the figure:
- output image: output image of this operation, also called padding
image in following instruction.
- original image: input image of this operation.
- padded area: non-intersect area of output image and original image.
- cropped area: the overlap of output image and original image.
- center range: a smaller area where random center chosen from.
center range is computed by ``border`` and original image's shape
to avoid our random center is too close to original image's border.
Also this operation act differently in train and test mode, the summary
pipeline is listed below.
Train pipeline:
1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
will be ``random_ratio * crop_size``.
2. Choose a ``random_center`` in center range.
3. Generate padding image with center matches the ``random_center``.
4. Initialize the padding image with pixel value equals to ``mean``.
5. Copy the cropped area to padding image.
6. Refine annotations.
Test pipeline:
1. Compute output shape according to ``test_pad_mode``.
2. Generate padding image with center matches the original image
center.
3. Initialize the padding image with pixel value equals to ``mean``.
4. Copy the ``cropped area`` to padding image.
Args:
crop_size (tuple | None): expected size after crop, final size will
computed according to ratio. Requires (h, w) in train mode, and
None in test mode.
ratios (tuple): random select a ratio from tuple and crop image to
(crop_size[0] * ratio) * (crop_size[1] * ratio).
Only available in train mode.
border (int): max distance from center select area to image border.
Only available in train mode.
mean (sequence): Mean values of 3 channels.
std (sequence): Std values of 3 channels.
to_rgb (bool): Whether to convert the image from BGR to RGB.
test_mode (bool): whether involve random variables in transform.
In train mode, crop_size is fixed, center coords and ratio is
random selected from predefined lists. In test mode, crop_size
is image's original shape, center coords and ratio is fixed.
test_pad_mode (tuple): padding method and padding shape value, only
available in test mode. Default is using 'logical_or' with
127 as padding shape value.
- 'logical_or': final_shape = input_shape | padding_shape_value
- 'size_divisor': final_shape = int(
ceil(input_shape / padding_shape_value) * padding_shape_value)
bbox_clip_border (bool, optional): Whether clip the objects outside
the border of the image. Defaults to True.
"""
def __init__(self,
crop_size=None,
ratios=(0.9, 1.0, 1.1),
border=128,
mean=None,
std=None,
to_rgb=None,
test_mode=False,
test_pad_mode=('logical_or', 127),
bbox_clip_border=True):
if test_mode:
assert crop_size is None, 'crop_size must be None in test mode'
assert ratios is None, 'ratios must be None in test mode'
assert border is None, 'border must be None in test mode'
assert isinstance(test_pad_mode, (list, tuple))
assert test_pad_mode[0] in ['logical_or', 'size_divisor']
else:
assert isinstance(crop_size, (list, tuple))
assert crop_size[0] > 0 and crop_size[1] > 0, (
'crop_size must > 0 in train mode')
assert isinstance(ratios, (list, tuple))
assert test_pad_mode is None, (
'test_pad_mode must be None in train mode')
self.crop_size = crop_size
self.ratios = ratios
self.border = border
# We do not set default value to mean, std and to_rgb because these
# hyper-parameters are easy to forget but could affect the performance.
# Please use the same setting as Normalize for performance assurance.
assert mean is not None and std is not None and to_rgb is not None
self.to_rgb = to_rgb
self.input_mean = mean
self.input_std = std
if to_rgb:
self.mean = mean[::-1]
self.std = std[::-1]
else:
self.mean = mean
self.std = std
self.test_mode = test_mode
self.test_pad_mode = test_pad_mode
self.bbox_clip_border = bbox_clip_border
def _get_border(self, border, size):
"""Get final border for the target size.
This function generates a ``final_border`` according to image's shape.
The area between ``final_border`` and ``size - final_border`` is the
``center range``. We randomly choose center from the ``center range``
to avoid our random center is too close to original image's border.
Also ``center range`` should be larger than 0.
Args:
border (int): The initial border, default is 128.
size (int): The width or height of original image.
Returns:
int: The final border.
"""
k = 2 * border / size
i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
return border // i
def _filter_boxes(self, patch, boxes):
"""Check whether the center of each box is in the patch.
Args:
patch (list[int]): The cropped area, [left, top, right, bottom].
boxes (numpy array, (N x 4)): Ground truth boxes.
Returns:
mask (numpy array, (N,)): Each box is inside or outside the patch.
"""
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
center[:, 0] < patch[2]) * (
center[:, 1] < patch[3])
return mask
def _crop_image_and_paste(self, image, center, size):
"""Crop image with a given center and size, then paste the cropped
image to a blank image with two centers align.
This function is equivalent to generating a blank image with ``size``
as its shape. Then cover it on the original image with two centers (
the center of blank image and the random center of original image)
aligned. The overlap area is paste from the original image and the
outside area is filled with ``mean pixel``.
Args:
image (np array, H x W x C): Original image.
center (list[int]): Target crop center coord.
size (list[int]): Target crop size. [target_h, target_w]
Returns:
cropped_img (np array, target_h x target_w x C): Cropped image.
border (np array, 4): The distance of four border of
``cropped_img`` to the original image area, [top, bottom,
left, right]
patch (list[int]): The cropped area, [left, top, right, bottom].
"""
center_y, center_x = center
target_h, target_w = size
img_h, img_w, img_c = image.shape
x0 = max(0, center_x - target_w // 2)
x1 = min(center_x + target_w // 2, img_w)
y0 = max(0, center_y - target_h // 2)
y1 = min(center_y + target_h // 2, img_h)
patch = np.array((int(x0), int(y0), int(x1), int(y1)))
left, right = center_x - x0, x1 - center_x
top, bottom = center_y - y0, y1 - center_y
cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
for i in range(img_c):
cropped_img[:, :, i] += self.mean[i]
y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
x_slice = slice(cropped_center_x - left, cropped_center_x + right)
cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
border = np.array([
cropped_center_y - top, cropped_center_y + bottom,
cropped_center_x - left, cropped_center_x + right
],
dtype=np.float32)
return cropped_img, border, patch
def _train_aug(self, results):
"""Random crop and around padding the original image.
Args:
            results (dict): Image information in the augmentation pipeline.
Returns:
results (dict): The updated dict.
"""
img = results['img']
h, w, c = img.shape
boxes = results['gt_bboxes']
while True:
scale = random.choice(self.ratios)
new_h = int(self.crop_size[0] * scale)
new_w = int(self.crop_size[1] * scale)
h_border = self._get_border(self.border, h)
w_border = self._get_border(self.border, w)
for i in range(50):
center_x = random.randint(low=w_border, high=w - w_border)
center_y = random.randint(low=h_border, high=h - h_border)
cropped_img, border, patch = self._crop_image_and_paste(
img, [center_y, center_x], [new_h, new_w])
mask = self._filter_boxes(patch, boxes)
# if image do not have valid bbox, any crop patch is valid.
if not mask.any() and len(boxes) > 0:
continue
results['img'] = cropped_img
results['img_shape'] = cropped_img.shape
results['pad_shape'] = cropped_img.shape
x0, y0, x1, y1 = patch
left_w, top_h = center_x - x0, center_y - y0
cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
# crop bboxes accordingly and clip to the image boundary
for key in results.get('bbox_fields', []):
mask = self._filter_boxes(patch, results[key])
bboxes = results[key][mask]
bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
if self.bbox_clip_border:
bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
keep = (bboxes[:, 2] > bboxes[:, 0]) & (
bboxes[:, 3] > bboxes[:, 1])
bboxes = bboxes[keep]
results[key] = bboxes
if key in ['gt_bboxes']:
if 'gt_labels' in results:
labels = results['gt_labels'][mask]
labels = labels[keep]
results['gt_labels'] = labels
if 'gt_masks' in results:
raise NotImplementedError(
'RandomCenterCropPad only supports bbox.')
# crop semantic seg
for key in results.get('seg_fields', []):
raise NotImplementedError(
'RandomCenterCropPad only supports bbox.')
return results
def _test_aug(self, results):
"""Around padding the original image without cropping.
The padding mode and value are from ``test_pad_mode``.
Args:
            results (dict): Image information in the augmentation pipeline.
Returns:
results (dict): The updated dict.
"""
img = results['img']
h, w, c = img.shape
results['img_shape'] = img.shape
if self.test_pad_mode[0] in ['logical_or']:
target_h = h | self.test_pad_mode[1]
target_w = w | self.test_pad_mode[1]
elif self.test_pad_mode[0] in ['size_divisor']:
divisor = self.test_pad_mode[1]
target_h = int(np.ceil(h / divisor)) * divisor
target_w = int(np.ceil(w / divisor)) * divisor
else:
raise NotImplementedError(
'RandomCenterCropPad only support two testing pad mode:'
'logical-or and size_divisor.')
cropped_img, border, _ = self._crop_image_and_paste(
img, [h // 2, w // 2], [target_h, target_w])
results['img'] = cropped_img
results['pad_shape'] = cropped_img.shape
results['border'] = border
return results
def __call__(self, results):
img = results['img']
assert img.dtype == np.float32, (
'RandomCenterCropPad needs the input image of dtype np.float32,'
' please set "to_float32=True" in "LoadImageFromFile" pipeline')
h, w, c = img.shape
assert c == len(self.mean)
if self.test_mode:
return self._test_aug(results)
else:
return self._train_aug(results)
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(crop_size={self.crop_size}, '
repr_str += f'ratios={self.ratios}, '
repr_str += f'border={self.border}, '
repr_str += f'mean={self.input_mean}, '
repr_str += f'std={self.input_std}, '
repr_str += f'to_rgb={self.to_rgb}, '
repr_str += f'test_mode={self.test_mode}, '
repr_str += f'test_pad_mode={self.test_pad_mode}), '
repr_str += f'bbox_clip_border={self.bbox_clip_border})'
return repr_str
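# Illustrative sketch: train- and test-mode configurations in the style of
# CornerNet/CenterNet pipelines. The mean/std values are the usual [0, 1]
# ImageNet statistics and are assumptions here, as are the crop settings.
def _demo_random_center_crop_pad_cfg():
    img_norm = dict(mean=[0.40789654, 0.44719302, 0.47026115],
                    std=[0.28863828, 0.27408164, 0.27809835],
                    to_rgb=True)
    train_op = RandomCenterCropPad(
        crop_size=(511, 511),
        ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
        test_mode=False,
        test_pad_mode=None,
        **img_norm)
    test_op = RandomCenterCropPad(
        crop_size=None,
        ratios=None,
        border=None,
        test_mode=True,
        test_pad_mode=('logical_or', 127),
        **img_norm)
    return train_op, test_op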
@PIPELINES.register_module()
class CutOut(object):
"""CutOut operation.
Randomly drop some regions of image used in
`Cutout <https://arxiv.org/abs/1708.04552>`_.
Args:
n_holes (int | tuple[int, int]): Number of regions to be dropped.
If it is given as a list, number of holes will be randomly
selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
shape of dropped regions. It can be `tuple[int, int]` to use a
fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
shape from the list.
cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
candidate ratio of dropped regions. It can be `tuple[float, float]`
to use a fixed ratio or `list[tuple[float, float]]` to randomly
choose ratio from the list. Please note that `cutout_shape`
and `cutout_ratio` cannot be both given at the same time.
fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
of pixel to fill in the dropped regions. Default: (0, 0, 0).
"""
def __init__(self,
n_holes,
cutout_shape=None,
cutout_ratio=None,
fill_in=(0, 0, 0)):
assert (cutout_shape is None) ^ (cutout_ratio is None), \
'Either cutout_shape or cutout_ratio should be specified.'
assert (isinstance(cutout_shape, (list, tuple))
or isinstance(cutout_ratio, (list, tuple)))
if isinstance(n_holes, tuple):
assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
else:
n_holes = (n_holes, n_holes)
self.n_holes = n_holes
self.fill_in = fill_in
self.with_ratio = cutout_ratio is not None
self.candidates = cutout_ratio if self.with_ratio else cutout_shape
if not isinstance(self.candidates, list):
self.candidates = [self.candidates]
def __call__(self, results):
"""Call function to drop some regions of image."""
h, w, c = results['img'].shape
n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
for _ in range(n_holes):
x1 = np.random.randint(0, w)
y1 = np.random.randint(0, h)
index = np.random.randint(0, len(self.candidates))
if not self.with_ratio:
cutout_w, cutout_h = self.candidates[index]
else:
cutout_w = int(self.candidates[index][0] * w)
cutout_h = int(self.candidates[index][1] * h)
x2 = np.clip(x1 + cutout_w, 0, w)
y2 = np.clip(y1 + cutout_h, 0, h)
results['img'][y1:y2, x1:x2, :] = self.fill_in
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += f'(n_holes={self.n_holes}, '
repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
else f'cutout_shape={self.candidates}, ')
repr_str += f'fill_in={self.fill_in})'
return repr_str
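# Illustrative sketch: CutOut configured with relative hole ratios (only one
# of cutout_shape / cutout_ratio may be given); all values are arbitrary.
def _demo_cutout_usage():
    import numpy as np
    cutout = CutOut(n_holes=(2, 5),
                    cutout_ratio=[(0.1, 0.1), (0.2, 0.2)],
                    fill_in=(0, 0, 0))
    results = {'img': np.ones((64, 64, 3), dtype=np.float32)}
    return cutout(results)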
| insightface/detection/scrfd/mmdet/datasets/pipelines/transforms.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/datasets/pipelines/transforms.py",
"repo_id": "insightface",
"token_count": 39473
} | 118 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from mmdet.utils import get_root_logger
from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
constant_init, kaiming_init)
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
from ..builder import BACKBONES
@BACKBONES.register_module()
class MobileNetV1(nn.Module):
def __init__(self,
in_channels=3,
#base_channels=32,
block_cfg = None,
num_stages=4,
out_indices=(0, 1, 2, 3)):
super(MobileNetV1, self).__init__()
self.out_indices = out_indices
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True)
)
def conv_dw(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU(inplace=True),
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU(inplace=True),
)
if block_cfg is None:
stage_planes = [8, 16, 32, 64, 128, 256] #0.25 default
stage_blocks = [2,4,4,2]
else:
stage_planes = block_cfg['stage_planes']
stage_blocks = block_cfg['stage_blocks']
assert len(stage_planes)==6
assert len(stage_blocks)==4
self.stem = nn.Sequential(
conv_bn(3, stage_planes[0], 2),
conv_dw(stage_planes[0], stage_planes[1], 1),
)
self.stage_layers = []
for i, num_blocks in enumerate(stage_blocks):
_layers = []
for n in range(num_blocks):
if n==0:
_layer = conv_dw(stage_planes[i+1], stage_planes[i+2], 2)
else:
_layer = conv_dw(stage_planes[i+2], stage_planes[i+2], 1)
_layers.append(_layer)
_block = nn.Sequential(*_layers)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, _block)
self.stage_layers.append(layer_name)
#bc = base_channels
#self.stages = nn.ModuleDict()
#self.stage0 = nn.Sequential(
# conv_bn(3, bc, 2),
# conv_dw(bc, bc*2, 1),
# conv_dw(bc*2, bc*4, 2),
# conv_dw(bc*4, bc*4, 1),
#)
#self.stage1 = nn.Sequential(
# conv_dw(bc*4, bc*8, 2),
# conv_dw(bc*8, bc*8, 1),
# conv_dw(bc*8, bc*8, 1),
# conv_dw(bc*8, bc*8, 1),
#)
#self.stage2 = nn.Sequential(
# conv_dw(bc*8, bc*16, 2),
# conv_dw(bc*16, bc*16, 1),
# conv_dw(bc*16, bc*16, 1),
# conv_dw(bc*16, bc*16, 1),
# #conv_dw(bc*16, bc*16, 1),
# #conv_dw(bc*16, bc*16, 1),
#)
#self.stage3 = nn.Sequential(
# conv_dw(bc*16, bc*32, 2),
# conv_dw(bc*32, bc*32, 1),
#)
#self.stages = [self.stage0, self.stage1, self.stage2, self.stage3]
def forward(self, x):
output = []
x = self.stem(x)
for i, layer_name in enumerate(self.stage_layers):
stage_layer = getattr(self, layer_name)
x = stage_layer(x)
if i in self.out_indices:
output.append(x)
return tuple(output)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
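# Illustrative sketch: building the default 0.25-width backbone and running a
# dummy forward pass. With the default stage_planes the four outputs have
# (32, 64, 128, 256) channels at strides (4, 8, 16, 32).
def _demo_mobilenetv1_forward():
    net = MobileNetV1(block_cfg=None, out_indices=(0, 1, 2, 3))
    net.init_weights()
    x = torch.randn(1, 3, 224, 224)
    outs = net(x)
    return [o.shape for o in outs]  # spatial sizes 56, 28, 14, 7 for a 224 input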
| insightface/detection/scrfd/mmdet/models/backbones/mobilenet.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/backbones/mobilenet.py",
"repo_id": "insightface",
"token_count": 2478
} | 119 |
from inspect import signature
import torch
from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms
class BBoxTestMixin(object):
"""Mixin class for test time augmentation of bboxes."""
def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):
"""Merge augmented detection bboxes and scores.
Args:
aug_bboxes (list[Tensor]): shape (n, 4*#class)
aug_scores (list[Tensor] or None): shape (n, #class)
img_shapes (list[Tensor]): shape (3, ).
Returns:
tuple: (bboxes, scores)
"""
recovered_bboxes = []
for bboxes, img_info in zip(aug_bboxes, img_metas):
img_shape = img_info[0]['img_shape']
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
flip_direction = img_info[0]['flip_direction']
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
flip_direction)
recovered_bboxes.append(bboxes)
bboxes = torch.cat(recovered_bboxes, dim=0)
if aug_scores is None:
return bboxes
else:
scores = torch.cat(aug_scores, dim=0)
return bboxes, scores
def aug_test_bboxes(self, feats, img_metas, rescale=False):
"""Test det bboxes with test time augmentation.
Args:
feats (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains features for all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[ndarray]: bbox results of each class
"""
# check with_nms argument
gb_sig = signature(self.get_bboxes)
gb_args = [p.name for p in gb_sig.parameters.values()]
gbs_sig = signature(self._get_bboxes_single)
gbs_args = [p.name for p in gbs_sig.parameters.values()]
assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
f'{self.__class__.__name__}' \
' does not support test-time augmentation'
aug_bboxes = []
aug_scores = []
aug_factors = [] # score_factors for NMS
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
outs = self.forward(x)
bbox_inputs = outs + (img_meta, self.test_cfg, False, False)
bbox_outputs = self.get_bboxes(*bbox_inputs)[0]
aug_bboxes.append(bbox_outputs[0])
aug_scores.append(bbox_outputs[1])
# bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3)
# contains additional element to adjust scores before NMS
if len(bbox_outputs) >= 3:
aug_factors.append(bbox_outputs[2])
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = self.merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas)
merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None
det_bboxes, det_labels = multiclass_nms(
merged_bboxes,
merged_scores,
self.test_cfg.score_thr,
self.test_cfg.nms,
self.test_cfg.max_per_img,
score_factors=merged_factors)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= det_bboxes.new_tensor(
img_metas[0][0]['scale_factor'])
bbox_results = bbox2result(_det_bboxes, det_labels, self.num_classes)
return bbox_results
| insightface/detection/scrfd/mmdet/models/dense_heads/dense_test_mixins.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/dense_test_mixins.py",
"repo_id": "insightface",
"token_count": 1939
} | 120 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmcv.ops import batched_nms
from ..builder import HEADS
from .anchor_head import AnchorHead
from .rpn_test_mixin import RPNTestMixin
@HEADS.register_module()
class RPNHead(RPNTestMixin, AnchorHead):
"""RPN head.
Args:
in_channels (int): Number of channels in the input feature map.
""" # noqa: W605
def __init__(self, in_channels, **kwargs):
super(RPNHead, self).__init__(1, in_channels, **kwargs)
def _init_layers(self):
"""Initialize layers of the head."""
self.rpn_conv = nn.Conv2d(
self.in_channels, self.feat_channels, 3, padding=1)
self.rpn_cls = nn.Conv2d(self.feat_channels,
self.num_anchors * self.cls_out_channels, 1)
self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
def init_weights(self):
"""Initialize weights of the head."""
normal_init(self.rpn_conv, std=0.01)
normal_init(self.rpn_cls, std=0.01)
normal_init(self.rpn_reg, std=0.01)
def forward_single(self, x):
"""Forward feature map of a single scale level."""
x = self.rpn_conv(x)
x = F.relu(x, inplace=True)
rpn_cls_score = self.rpn_cls(x)
rpn_bbox_pred = self.rpn_reg(x)
return rpn_cls_score, rpn_bbox_pred
def loss(self,
cls_scores,
bbox_preds,
gt_bboxes,
img_metas,
gt_bboxes_ignore=None):
"""Compute losses of the head.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (N, num_anchors * num_classes, H, W)
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (N, num_anchors * 4, H, W)
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
img_metas (list[dict]): Meta information of each image, e.g.,
image size, scaling factor, etc.
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
losses = super(RPNHead, self).loss(
cls_scores,
bbox_preds,
gt_bboxes,
None,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
return dict(
loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])
def _get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
"""Transform outputs for a single batch item into bbox predictions.
Args:
cls_scores (list[Tensor]): Box scores for each scale level
Has shape (num_anchors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for each scale
level with shape (num_anchors * 4, H, W).
mlvl_anchors (list[Tensor]): Box reference for each scale level
with shape (num_total_anchors, 4).
img_shape (tuple[int]): Shape of the input image,
(height, width, 3).
scale_factor (ndarray): Scale factor of the image arange as
(w_scale, h_scale, w_scale, h_scale).
cfg (mmcv.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Returns:
Tensor: Labeled boxes in shape (n, 5), where the first 4 columns
are bounding box positions (tl_x, tl_y, br_x, br_y) and the
5-th column is a score between 0 and 1.
"""
cfg = self.test_cfg if cfg is None else cfg
# bboxes from different level should be independent during NMS,
# level_ids are used as labels for batched NMS to separate them
level_ids = []
mlvl_scores = []
mlvl_bbox_preds = []
mlvl_valid_anchors = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
# We set FG labels to [0, num_class-1] and BG label to
# num_class in RPN head since mmdet v2.5, which is unified to
# be consistent with other head since mmdet v2.0. In mmdet v2.0
# to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
scores = rpn_cls_score.softmax(dim=1)[:, 0]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
anchors = mlvl_anchors[idx]
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
# sort is faster than topk
# _, topk_inds = scores.topk(cfg.nms_pre)
ranked_scores, rank_inds = scores.sort(descending=True)
topk_inds = rank_inds[:cfg.nms_pre]
scores = ranked_scores[:cfg.nms_pre]
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
mlvl_scores.append(scores)
mlvl_bbox_preds.append(rpn_bbox_pred)
mlvl_valid_anchors.append(anchors)
level_ids.append(
scores.new_full((scores.size(0), ), idx, dtype=torch.long))
scores = torch.cat(mlvl_scores)
anchors = torch.cat(mlvl_valid_anchors)
rpn_bbox_pred = torch.cat(mlvl_bbox_preds)
proposals = self.bbox_coder.decode(
anchors, rpn_bbox_pred, max_shape=img_shape)
ids = torch.cat(level_ids)
if cfg.min_bbox_size > 0:
w = proposals[:, 2] - proposals[:, 0]
h = proposals[:, 3] - proposals[:, 1]
valid_inds = torch.nonzero(
(w >= cfg.min_bbox_size)
& (h >= cfg.min_bbox_size),
as_tuple=False).squeeze()
if valid_inds.sum().item() != len(proposals):
proposals = proposals[valid_inds, :]
scores = scores[valid_inds]
ids = ids[valid_inds]
# TODO: remove the hard coded nms type
nms_cfg = dict(type='nms', iou_threshold=cfg.nms_thr)
dets, keep = batched_nms(proposals, scores, ids, nms_cfg)
return dets[:cfg.nms_post]
| insightface/detection/scrfd/mmdet/models/dense_heads/rpn_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/dense_heads/rpn_head.py",
"repo_id": "insightface",
"token_count": 3742
} | 121 |
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
| insightface/detection/scrfd/mmdet/models/detectors/faster_rcnn.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/detectors/faster_rcnn.py",
"repo_id": "insightface",
"token_count": 391
} | 122 |
import torch
import torch.nn as nn
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SingleStageDetector, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(SingleStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.bbox_head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
            img (torch.Tensor): Batched input images of shape (N, C, H, W).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
#print(len(outs))
if torch.onnx.is_in_onnx_export():
print('single_stage.py in-onnx-export')
print(outs.__class__)
cls_score, bbox_pred = outs
for c in cls_score:
print(c.shape)
for c in bbox_pred:
print(c.shape)
#print(outs[0].shape, outs[1].shape)
return outs
bbox_list = self.bbox_head.get_bboxes(
*outs, img_metas, rescale=rescale)
# skip post-processing when exporting to ONNX
if torch.onnx.is_in_onnx_export():
return bbox_list
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
assert hasattr(self.bbox_head, 'aug_test'), \
f'{self.bbox_head.__class__.__name__}' \
' does not support test-time augmentation'
print('aug-test:', len(imgs))
feats = self.extract_feats(imgs)
return [self.bbox_head.aug_test(feats, img_metas, rescale=rescale)]
| insightface/detection/scrfd/mmdet/models/detectors/single_stage.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/detectors/single_stage.py",
"repo_id": "insightface",
"token_count": 2971
} | 123 |
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
"""Warpper of mse loss."""
return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self, pred, target, weight=None, avg_factor=None):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
loss = self.loss_weight * mse_loss(
pred,
target,
weight,
reduction=self.reduction,
avg_factor=avg_factor)
return loss
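
# Hedged usage sketch (added comment, not part of the original file); it
# assumes the class is reachable through the registered mmdet losses package
# and uses arbitrary tensor shapes:
#
#   import torch
#   from mmdet.models.losses import MSELoss
#
#   criterion = MSELoss(reduction='mean', loss_weight=0.5)
#   pred, target = torch.rand(4, 10), torch.rand(4, 10)
#   loss = criterion(pred, target)                  # 0.5 * mean squared error
#   loss = criterion(pred, target, weight=torch.ones(4, 10), avg_factor=40)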
| insightface/detection/scrfd/mmdet/models/losses/mse_loss.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/losses/mse_loss.py",
"repo_id": "insightface",
"token_count": 624
} | 124 |
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from ..builder import NECKS
class DetectionBlock(nn.Module):
"""Detection block in YOLO neck.
    Let out_channels = n, the DetectionBlock stacks five ConvModules:
        1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn.
    The remaining 3x3x2n conv and the 1x1 prediction conv of the original
    YOLOv3 design live in the detection head rather than in this block.
    Some blocks will have a branch after the fifth ConvModule.
The input channel is arbitrary (in_channels)
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
"""
def __init__(self,
in_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
super(DetectionBlock, self).__init__()
double_out_channels = out_channels * 2
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
self.conv2 = ConvModule(
out_channels, double_out_channels, 3, padding=1, **cfg)
self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
self.conv4 = ConvModule(
out_channels, double_out_channels, 3, padding=1, **cfg)
self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)
def forward(self, x):
tmp = self.conv1(x)
tmp = self.conv2(tmp)
tmp = self.conv3(tmp)
tmp = self.conv4(tmp)
out = self.conv5(tmp)
return out
@NECKS.register_module()
class YOLOV3Neck(nn.Module):
"""The neck of YOLOV3.
It can be treated as a simplified version of FPN. It
will take the result from Darknet backbone and do some upsampling and
concatenation. It will finally output the detection result.
    Note:
        The input feats are processed in reverse order: feats[-1] is fed to
        the first DetectionBlock (so it must match in_channels[0], the
        deepest level in the default YOLOv3 config), and the remaining
        levels are then handled from high level back down to low level.
Args:
num_scales (int): The number of scales / stages.
        in_channels (List[int]): The number of input channels per scale.
        out_channels (List[int]): The number of output channels per scale.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
"""
def __init__(self,
num_scales,
in_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
super(YOLOV3Neck, self).__init__()
assert (num_scales == len(in_channels) == len(out_channels))
self.num_scales = num_scales
self.in_channels = in_channels
self.out_channels = out_channels
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
# To support arbitrary scales, the code looks awful, but it works.
# Better solution is welcomed.
self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
for i in range(1, self.num_scales):
in_c, out_c = self.in_channels[i], self.out_channels[i]
self.add_module(f'conv{i}', ConvModule(in_c, out_c, 1, **cfg))
# in_c + out_c : High-lvl feats will be cat with low-lvl feats
self.add_module(f'detect{i+1}',
DetectionBlock(in_c + out_c, out_c, **cfg))
def forward(self, feats):
assert len(feats) == self.num_scales
# processed from bottom (high-lvl) to top (low-lvl)
outs = []
out = self.detect1(feats[-1])
outs.append(out)
for i, x in enumerate(reversed(feats[:-1])):
conv = getattr(self, f'conv{i+1}')
tmp = conv(out)
# Cat with low-lvl feats
tmp = F.interpolate(tmp, scale_factor=2)
tmp = torch.cat((tmp, x), 1)
detect = getattr(self, f'detect{i+2}')
out = detect(tmp)
outs.append(out)
return tuple(outs)
def init_weights(self):
"""Initialize the weights of module."""
# init is done in ConvModule
pass
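
# Hedged usage sketch (added comment, not part of the original file); the
# channel numbers follow the standard Darknet-53 YOLOv3 configuration and are
# only meant to show the expected low-to-high level ordering of `feats`:
#
#   neck = YOLOV3Neck(num_scales=3,
#                     in_channels=[1024, 512, 256],
#                     out_channels=[512, 256, 128])
#   feats = (torch.rand(1, 256, 52, 52),    # low level
#            torch.rand(1, 512, 26, 26),
#            torch.rand(1, 1024, 13, 13))   # high level, consumed first
#   outs = neck(feats)                      # 3 maps with 512/256/128 channels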
| insightface/detection/scrfd/mmdet/models/necks/yolo_neck.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/necks/yolo_neck.py",
"repo_id": "insightface",
"token_count": 2319
} | 125 |
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, kaiming_init
from mmcv.runner import auto_fp16, force_fp32
from mmdet.models.builder import HEADS
@HEADS.register_module()
class FusedSemanticHead(nn.Module):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
ignore_label=255,
loss_weight=0.2,
conv_cfg=None,
norm_cfg=None):
super(FusedSemanticHead, self).__init__()
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.ignore_label = ignore_label
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
def init_weights(self):
kaiming_init(self.conv_logits)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
x += self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
loss_semantic_seg *= self.loss_weight
return loss_semantic_seg
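
# Hedged usage sketch (added comment, not part of the original file); the five
# 256-channel FPN levels below are illustrative:
#
#   head = FusedSemanticHead(num_ins=5, fusion_level=1, num_classes=183)
#   feats = [torch.rand(2, 256, 128 // 2**i, 128 // 2**i) for i in range(5)]
#   mask_pred, sem_feat = head(feats)
#   # mask_pred: (2, 183, 64, 64) class logits at the fusion level's resolution
#   # sem_feat:  (2, 256, 64, 64) fused feature used by downstream RoI heads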
| insightface/detection/scrfd/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py",
"repo_id": "insightface",
"token_count": 2004
} | 126 |
import torch
from mmcv.ops import batched_nms
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
multiclass_nms)
from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead
from ..builder import HEADS
@HEADS.register_module()
class TridentRoIHead(StandardRoIHead):
"""Trident roi head.
Args:
num_branch (int): Number of branches in TridentNet.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
"""
def __init__(self, num_branch, test_branch_idx, **kwargs):
self.num_branch = num_branch
self.test_branch_idx = test_branch_idx
super(TridentRoIHead, self).__init__(**kwargs)
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation as follows:
1. Compute prediction bbox and label per branch.
2. Merge predictions of each branch according to scores of
bboxes, i.e., bboxes with higher score are kept to give
top-k prediction.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes_list, det_labels_list = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
for _ in range(len(det_bboxes_list)):
if det_bboxes_list[_].shape[0] == 0:
det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5))
trident_det_bboxes = torch.cat(det_bboxes_list, 0)
trident_det_labels = torch.cat(det_labels_list, 0)
if trident_det_bboxes.numel() == 0:
det_bboxes = trident_det_bboxes.new_zeros((0, 5))
det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long)
else:
nms_bboxes = trident_det_bboxes[:, :4]
nms_scores = trident_det_bboxes[:, 4].contiguous()
nms_inds = trident_det_labels
nms_cfg = self.test_cfg['nms']
det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds,
nms_cfg)
det_labels = trident_det_labels[keep]
if self.test_cfg['max_per_img'] > 0:
det_labels = det_labels[:self.test_cfg['max_per_img']]
det_bboxes = det_bboxes[:self.test_cfg['max_per_img']]
det_bboxes, det_labels = [det_bboxes], [det_labels]
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head.num_classes)
for i in range(len(det_bboxes))
]
return bbox_results
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
"""Test det bboxes with test time augmentation."""
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
trident_bboxes, trident_scores = [], []
for branch_idx in range(len(proposal_list)):
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
rois = bbox2roi([proposals])
bbox_results = self._bbox_forward(x, rois)
bboxes, scores = self.bbox_head.get_bboxes(
rois,
bbox_results['cls_score'],
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
trident_bboxes.append(bboxes)
trident_scores.append(scores)
aug_bboxes.append(torch.cat(trident_bboxes, 0))
aug_scores.append(torch.cat(trident_scores, 0))
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
| insightface/detection/scrfd/mmdet/models/roi_heads/trident_roi_head.py/0 | {
"file_path": "insightface/detection/scrfd/mmdet/models/roi_heads/trident_roi_head.py",
"repo_id": "insightface",
"token_count": 2521
} | 127 |
from __future__ import print_function
import cv2
import argparse
import os
import os.path as osp
import shutil
import numpy as np
import json
def parse_args():
parser = argparse.ArgumentParser(
description='convert crowdhuman dataset to scrfd format')
parser.add_argument('--raw', help='raw dataset dir')
parser.add_argument('--save', default='data/crowdhuman', help='save path')
args = parser.parse_args()
return args
def main():
args = parse_args()
raw_image_dir = osp.join(args.raw, 'Images')
for subset in ['train', 'val']:
save_image_dir = osp.join(args.save, subset, 'images')
if not osp.exists(save_image_dir):
os.makedirs(save_image_dir)
anno_file = osp.join(args.raw, 'annotation_%s.odgt'%subset)
        fullbody_anno_file = osp.join(args.save, subset, "label_fullbody.txt")
        head_anno_file = osp.join(args.save, subset, "label_head.txt")
fullbody_f = open(fullbody_anno_file, 'w')
head_f = open(head_anno_file, 'w')
for line in open(anno_file, 'r'):
data = json.loads(line)
img_id = data['ID']
img_name = "%s.jpg"%img_id
raw_image_file = osp.join(raw_image_dir, img_name)
target_image_file = osp.join(save_image_dir, img_name)
img = cv2.imread(raw_image_file)
print(raw_image_file, img.shape)
fullbody_f.write("# %s %d %d\n"%(img_name,img.shape[1],img.shape[0]))
head_f.write("# %s %d %d\n"%(img_name,img.shape[1],img.shape[0]))
shutil.copyfile(raw_image_file, target_image_file)
items = data['gtboxes']
for item in items:
fbox = item['fbox']
is_ignore = False
extra = item['extra']
if 'ignore' in extra:
is_ignore = extra['ignore']==1
bbox = np.array(fbox, dtype=np.float32)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
if is_ignore:
fullbody_f.write("%.5f %.5f %.5f %.5f %d\n"%(bbox[0], bbox[1], bbox[2], bbox[3], is_ignore))
else:
vbox = item['vbox']
vbox = np.array(vbox, dtype=np.float32)
vbox[2] += vbox[0]
vbox[3] += vbox[1]
x1, y1, x2, y2 = vbox[0], vbox[1], vbox[2], vbox[3]
cx = (x1+x2)/2
cy = (y1+y2)/2
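                # Use the visible-box corners and centre as five pseudo-keypoints
                # so the full-body label keeps the same 5-landmark layout as the
                # SCRFD-style face annotations.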
kps = np.ones( (5,3), dtype=np.float32)
kps[0,0] = x1
kps[0,1] = y1
kps[1,0] = x2
kps[1,1] = y1
kps[2,0] = cx
kps[2,1] = cy
kps[3,0] = x1
kps[3,1] = y2
kps[4,0] = x2
kps[4,1] = y2
kps_str = " ".join(["%.5f"%x for x in kps.flatten()])
fullbody_f.write("%.5f %.5f %.5f %.5f %s\n"%(bbox[0], bbox[1], bbox[2], bbox[3], kps_str))
hbox = item['hbox']
is_ignore = False
extra = item['head_attr']
if 'ignore' in extra:
is_ignore = extra['ignore']==1
bbox = np.array(hbox, dtype=np.float32)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
head_f.write("%.5f %.5f %.5f %.5f %d\n"%(bbox[0], bbox[1], bbox[2], bbox[3], is_ignore))
fullbody_f.close()
head_f.close()
if __name__ == '__main__':
main()
| insightface/detection/scrfd/tools/convert_crowdhuman.py/0 | {
"file_path": "insightface/detection/scrfd/tools/convert_crowdhuman.py",
"repo_id": "insightface",
"token_count": 2141
} | 128 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@Author : Qingping Zheng
@Contact : qingpingzheng2014@gmail.com
@File : ddgcn.py
@Time : 10/01/21 00:00 PM
@Desc :
@License : Licensed under the Apache License, Version 2.0 (the "License");
@Copyright : Copyright 2022 The Authors. All Rights Reserved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn.functional as F
import torch.nn as nn
from inplace_abn import InPlaceABNSync
class SpatialGCN(nn.Module):
def __init__(self, plane, abn=InPlaceABNSync):
super(SpatialGCN, self).__init__()
inter_plane = plane // 2
self.node_k = nn.Conv2d(plane, inter_plane, kernel_size=1)
self.node_v = nn.Conv2d(plane, inter_plane, kernel_size=1)
self.node_q = nn.Conv2d(plane, inter_plane, kernel_size=1)
self.conv_wg = nn.Conv1d(inter_plane, inter_plane, kernel_size=1, bias=False)
self.bn_wg = nn.BatchNorm1d(inter_plane)
self.softmax = nn.Softmax(dim=2)
self.out = nn.Sequential(nn.Conv2d(inter_plane, plane, kernel_size=1),
abn(plane))
self.gamma = nn.Parameter(torch.zeros(1))
def forward(self, x):
# b, c, h, w = x.size()
node_k = self.node_k(x)
node_v = self.node_v(x)
node_q = self.node_q(x)
b,c,h,w = node_k.size()
node_k = node_k.view(b, c, -1).permute(0, 2, 1)
node_q = node_q.view(b, c, -1)
node_v = node_v.view(b, c, -1).permute(0, 2, 1)
# A = k * q
# AV = k * q * v
# AVW = k *(q *v) * w
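        # Added shape note: node_k is (b, N, c), node_q is (b, c, N) and
        # node_v is (b, N, c) with N = h*w and c = plane // 2; bmm(node_q,
        # node_v) builds a (b, c, c) channel-affinity matrix that, after the
        # softmax, re-weights node_k back to shape (b, N, c).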
AV = torch.bmm(node_q,node_v)
AV = self.softmax(AV)
AV = torch.bmm(node_k, AV)
AV = AV.transpose(1, 2).contiguous()
AVW = self.conv_wg(AV)
AVW = self.bn_wg(AVW)
AVW = AVW.view(b, c, h, -1)
# out = F.relu_(self.out(AVW) + x)
out = self.gamma * self.out(AVW) + x
return out
class DDualGCN(nn.Module):
"""
Feature GCN with coordinate GCN
"""
def __init__(self, planes, abn=InPlaceABNSync, ratio=4):
super(DDualGCN, self).__init__()
self.phi = nn.Conv2d(planes, planes // ratio * 2, kernel_size=1, bias=False)
self.bn_phi = abn(planes // ratio * 2)
self.theta = nn.Conv2d(planes, planes // ratio, kernel_size=1, bias=False)
self.bn_theta = abn(planes // ratio)
# Interaction Space
# Adjacency Matrix: (-)A_g
self.conv_adj = nn.Conv1d(planes // ratio, planes // ratio, kernel_size=1, bias=False)
self.bn_adj = nn.BatchNorm1d(planes // ratio)
# State Update Function: W_g
self.conv_wg = nn.Conv1d(planes // ratio * 2, planes // ratio * 2, kernel_size=1, bias=False)
self.bn_wg = nn.BatchNorm1d(planes // ratio * 2)
# last fc
self.conv3 = nn.Conv2d(planes // ratio * 2, planes, kernel_size=1, bias=False)
self.bn3 = abn(planes)
self.local = nn.Sequential(
nn.Conv2d(planes, planes, 3, groups=planes, stride=2, padding=1, bias=False),
abn(planes),
nn.Conv2d(planes, planes, 3, groups=planes, stride=2, padding=1, bias=False),
abn(planes),
nn.Conv2d(planes, planes, 3, groups=planes, stride=2, padding=1, bias=False),
abn(planes))
self.gcn_local_attention = SpatialGCN(planes, abn)
self.final = nn.Sequential(nn.Conv2d(planes * 2, planes, kernel_size=1, bias=False),
abn(planes))
self.gamma1 = nn.Parameter(torch.zeros(1))
def to_matrix(self, x):
n, c, h, w = x.size()
x = x.view(n, c, -1)
return x
def forward(self, feat):
# # # # Local # # # #
x = feat
local = self.local(feat)
local = self.gcn_local_attention(local)
local = F.interpolate(local, size=x.size()[2:], mode='bilinear', align_corners=True)
spatial_local_feat = x * local + x
# # # # Projection Space # # # #
x_sqz, b = x, x
x_sqz = self.phi(x_sqz)
x_sqz = self.bn_phi(x_sqz)
x_sqz = self.to_matrix(x_sqz)
b = self.theta(b)
b = self.bn_theta(b)
b = self.to_matrix(b)
# Project
z_idt = torch.matmul(x_sqz, b.transpose(1, 2)) # channel
# # # # Interaction Space # # # #
z = z_idt.transpose(1, 2).contiguous()
z = self.conv_adj(z)
z = self.bn_adj(z)
z = z.transpose(1, 2).contiguous()
# Laplacian smoothing: (I - A_g)Z => Z - A_gZ
z += z_idt
z = self.conv_wg(z)
z = self.bn_wg(z)
# # # # Re-projection Space # # # #
# Re-project
y = torch.matmul(z, b)
n, _, h, w = x.size()
y = y.view(n, -1, h, w)
y = self.conv3(y)
y = self.bn3(y)
# g_out = x + y
# g_out = F.relu_(x+y)
g_out = self.gamma1*y + x
# cat or sum, nearly the same results
out = self.final(torch.cat((spatial_local_feat, g_out), 1))
return out
class DDualGCNHead(nn.Module):
def __init__(self, inplanes, interplanes, abn=InPlaceABNSync):
super(DDualGCNHead, self).__init__()
self.conva = nn.Sequential(nn.Conv2d(inplanes, interplanes, 3, padding=1, bias=False),
abn(interplanes))
self.dualgcn = DDualGCN(interplanes, abn)
self.convb = nn.Sequential(nn.Conv2d(interplanes, interplanes, 3, padding=1, bias=False),
abn(interplanes))
self.bottleneck = nn.Sequential(
nn.Conv2d(inplanes + interplanes, interplanes, kernel_size=3, padding=1, dilation=1, bias=False),
abn(interplanes)
)
def forward(self, x):
output = self.conva(x)
output = self.dualgcn(output)
output = self.convb(output)
output = self.bottleneck(torch.cat([x, output], 1))
return output
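
# Hedged usage sketch (added comment, not part of the original file); the
# 2048/512 channel sizes are illustrative and InPlaceABNSync normally needs a
# CUDA / distributed setup, so this only documents the expected shapes:
#
#   head = DDualGCNHead(inplanes=2048, interplanes=512).cuda()
#   feat = torch.rand(2, 2048, 28, 28).cuda()
#   out = head(feat)   # (2, 512, 28, 28), fed to a segmentation classifier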
| insightface/parsing/dml_csr/networks/modules/ddgcn.py/0 | {
"file_path": "insightface/parsing/dml_csr/networks/modules/ddgcn.py",
"repo_id": "insightface",
"token_count": 3149
} | 129 |
import numpy as np
from numpy.linalg import norm as l2norm
#from easydict import EasyDict
class Face(dict):
def __init__(self, d=None, **kwargs):
if d is None:
d = {}
if kwargs:
d.update(**kwargs)
for k, v in d.items():
setattr(self, k, v)
# Class attributes
#for k in self.__class__.__dict__.keys():
# if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
# setattr(self, k, getattr(self, k))
def __setattr__(self, name, value):
if isinstance(value, (list, tuple)):
value = [self.__class__(x)
if isinstance(x, dict) else x for x in value]
elif isinstance(value, dict) and not isinstance(value, self.__class__):
value = self.__class__(value)
super(Face, self).__setattr__(name, value)
super(Face, self).__setitem__(name, value)
__setitem__ = __setattr__
def __getattr__(self, name):
return None
@property
def embedding_norm(self):
if self.embedding is None:
return None
return l2norm(self.embedding)
@property
def normed_embedding(self):
if self.embedding is None:
return None
return self.embedding / self.embedding_norm
@property
def sex(self):
if self.gender is None:
return None
return 'M' if self.gender==1 else 'F'
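

if __name__ == '__main__':
    # Hedged usage sketch (added, not part of the original file): the embedding
    # and gender values below are made-up placeholders that only illustrate the
    # attribute-style access this dict subclass provides.
    face = Face(embedding=np.array([0.3, 0.4, 0.5]), gender=1)
    print(face.embedding_norm)    # L2 norm of the embedding
    print(face.normed_embedding)  # unit-length embedding
    print(face.sex)               # 'M' because gender == 1
    print(face.age)               # None: unknown attributes fall back to None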
| insightface/python-package/insightface/app/common.py/0 | {
"file_path": "insightface/python-package/insightface/app/common.py",
"repo_id": "insightface",
"token_count": 695
} | 130 |
import cv2
import os
import os.path as osp
from pathlib import Path
import pickle
def get_object(name):
objects_dir = osp.join(Path(__file__).parent.absolute(), 'objects')
if not name.endswith('.pkl'):
name = name+".pkl"
filepath = osp.join(objects_dir, name)
if not osp.exists(filepath):
return None
with open(filepath, 'rb') as f:
obj = pickle.load(f)
return obj
| insightface/python-package/insightface/data/pickle_object.py/0 | {
"file_path": "insightface/python-package/insightface/data/pickle_object.py",
"repo_id": "insightface",
"token_count": 176
} | 131 |
#ifndef MESH_CORE_HPP_
#define MESH_CORE_HPP_
#include <stdio.h>
#include <cmath>
#include <algorithm>
#include <string>
#include <iostream>
#include <fstream>
using namespace std;
class point
{
public:
float x;
float y;
float dot(point p)
{
return this->x * p.x + this->y * p.y;
}
point operator-(const point& p)
{
point np;
np.x = this->x - p.x;
np.y = this->y - p.y;
return np;
}
point operator+(const point& p)
{
point np;
np.x = this->x + p.x;
np.y = this->y + p.y;
return np;
}
point operator*(float s)
{
point np;
np.x = s * this->x;
np.y = s * this->y;
return np;
}
};
bool isPointInTri(point p, point p0, point p1, point p2, int h, int w);
void get_point_weight(float* weight, point p, point p0, point p1, point p2);
void _get_normal_core(
float* normal, float* tri_normal, int* triangles,
int ntri);
void _rasterize_triangles_core(
float* vertices, int* triangles,
float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
int nver, int ntri,
int h, int w);
void _render_colors_core(
float* image, float* vertices, int* triangles,
float* colors,
float* depth_buffer,
int nver, int ntri,
int h, int w, int c);
void _render_texture_core(
float* image, float* vertices, int* triangles,
float* texture, float* tex_coords, int* tex_triangles,
float* depth_buffer,
int nver, int tex_nver, int ntri,
int h, int w, int c,
int tex_h, int tex_w, int tex_c,
int mapping_type);
void _write_obj_with_colors_texture(string filename, string mtl_name,
float* vertices, int* triangles, float* colors, float* uv_coords,
int nver, int ntri, int ntexver);
#endif | insightface/python-package/insightface/thirdparty/face3d/mesh/cython/mesh_core.h/0 | {
"file_path": "insightface/python-package/insightface/thirdparty/face3d/mesh/cython/mesh_core.h",
"repo_id": "insightface",
"token_count": 828
} | 132 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from skimage import measure
from mpl_toolkits.mplot3d import Axes3D
def plot_mesh(vertices, triangles, subplot = [1,1,1], title = 'mesh', el = 90, az = -90, lwdt=.1, dist = 6, color = "grey"):
'''
plot the mesh
Args:
vertices: [nver, 3]
triangles: [ntri, 3]
'''
ax = plt.subplot(subplot[0], subplot[1], subplot[2], projection = '3d')
ax.plot_trisurf(vertices[:, 0], vertices[:, 1], vertices[:, 2], triangles = triangles, lw = lwdt, color = color, alpha = 1)
ax.axis("off")
ax.view_init(elev = el, azim = az)
ax.dist = dist
plt.title(title)
### -------------- Todo: use vtk to visualize mesh? or visvis? or VisPy?
| insightface/python-package/insightface/thirdparty/face3d/mesh_numpy/vis.py/0 | {
"file_path": "insightface/python-package/insightface/thirdparty/face3d/mesh_numpy/vis.py",
"repo_id": "insightface",
"token_count": 308
} | 133 |
To reproduce the figures and tables in the notebook, please download everything (model, code, data and meta info) from here:
[GDrive](https://drive.google.com/file/d/1aC4zf2Bn0xCVH_ZtEuQipR2JvRb1bf8o/view?usp=sharing)
or
[Baidu Cloud](https://pan.baidu.com/s/1oer0p4_mcOrs4cfdeWfbFg)
Updated Meta data (1:1 and 1:N):
[Baidu Cloud](https://pan.baidu.com/s/1x-ytzg4zkCTOTtklUgAhfg) (code:7g8o) ;
[GDrive](https://drive.google.com/file/d/1MXzrU_zUESSx_242pRUnVvW_wDzfU8Ky/view?usp=sharing)
Please apply for access to the IJB-B and IJB-C datasets yourself and strictly follow their distribution licenses.
## Acknowledgement
Many thanks to Weidi Xie for the instructions [2,3,4,5] on evaluating ArcFace [1] on IJB-B [6] and IJB-C [7] (1:1 protocol).
Many thanks to Yuge Huang for the code [8] used to evaluate ArcFace [1] on IJB-B [6] and IJB-C [7] (1:N protocol).
## Reference
[1] Jiankang Deng, Jia Guo, Niannan Xue, Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition[J]. arXiv:1801.07698, 2018.
[2] https://github.com/ox-vgg/vgg_face2.
[3] Qiong Cao, Li Shen, Weidi Xie, Omkar M Parkhi, Andrew Zisserman. VGGFace2: A dataset for recognising faces across pose and age. FG, 2018.
[4] Weidi Xie, Andrew Zisserman. Multicolumn Networks for Face Recognition. BMVC 2018.
[5] Weidi Xie, Li Shen, Andrew Zisserman. Comparator Networks. ECCV, 2018.
[6] Whitelam, Cameron, Emma Taborsky, Austin Blanton, Brianna Maze, Jocelyn C. Adams, Tim Miller, Nathan D. Kalka et al. IARPA Janus Benchmark-B Face Dataset. CVPR Workshops, 2017.
[7] Maze, Brianna, Jocelyn Adams, James A. Duncan, Nathan Kalka, Tim Miller, Charles Otto, Anil K. Jain et al. IARPA Janus Benchmark–C: Face Dataset and Protocol. ICB, 2018.
[8] Yuge Huang, Pengcheng Shen, Ying Tai, Shaoxin Li, Xiaoming Liu, Jilin Li, Feiyue Huang, Rongrong Ji. Distribution Distillation Loss: Generic Approach for Improving Face Recognition from Hard Samples. arXiv:2002.03662.
| insightface/recognition/_evaluation_/ijb/README.md/0 | {
"file_path": "insightface/recognition/_evaluation_/ijb/README.md",
"repo_id": "insightface",
"token_count": 732
} | 134 |
'''
@author: insightface
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import json
import argparse
import numpy as np
import mxnet as mx
def is_no_bias(attr):
    return 'no_bias' in attr and attr['no_bias'] in (True, 'True')
def count_fc_flops(input_filter, output_filter, attr):
#print(input_filter, output_filter ,attr)
ret = 2 * input_filter * output_filter
if is_no_bias(attr):
ret -= output_filter
return int(ret)
def count_conv_flops(input_shape, output_shape, attr):
kernel = attr['kernel'][1:-1].split(',')
kernel = [int(x) for x in kernel]
#print('kernel', kernel)
if is_no_bias(attr):
ret = (2 * input_shape[1] * kernel[0] * kernel[1] -
1) * output_shape[2] * output_shape[3] * output_shape[1]
else:
ret = 2 * input_shape[1] * kernel[0] * kernel[1] * output_shape[
2] * output_shape[3] * output_shape[1]
num_group = 1
if 'num_group' in attr:
num_group = int(attr['num_group'])
ret /= num_group
return int(ret)
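
# Worked example (added comment, illustrative numbers): a no-bias 3x3 conv
# mapping a (1, 64, 56, 56) input to a (1, 64, 56, 56) output with
# attr = {'kernel': '(3, 3)', 'no_bias': 'True'} gives
#   (2 * 64 * 3 * 3 - 1) * 56 * 56 * 64 = 231,010,304 FLOPs,
# which is what count_conv_flops([1, 64, 56, 56], [1, 64, 56, 56], attr) returns.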
def count_flops(sym, **data_shapes):
all_layers = sym.get_internals()
#print(all_layers)
arg_shapes, out_shapes, aux_shapes = all_layers.infer_shape(**data_shapes)
out_shape_dict = dict(zip(all_layers.list_outputs(), out_shapes))
nodes = json.loads(sym.tojson())['nodes']
nodeid_shape = {}
for nodeid, node in enumerate(nodes):
name = node['name']
layer_name = name + "_output"
if layer_name in out_shape_dict:
nodeid_shape[nodeid] = out_shape_dict[layer_name]
#print(nodeid_shape)
FLOPs = 0
for nodeid, node in enumerate(nodes):
flops = 0
if node['op'] == 'Convolution':
output_shape = nodeid_shape[nodeid]
name = node['name']
attr = node['attrs']
input_nodeid = node['inputs'][0][0]
input_shape = nodeid_shape[input_nodeid]
flops = count_conv_flops(input_shape, output_shape, attr)
elif node['op'] == 'FullyConnected':
attr = node['attrs']
output_shape = nodeid_shape[nodeid]
input_nodeid = node['inputs'][0][0]
input_shape = nodeid_shape[input_nodeid]
output_filter = output_shape[1]
input_filter = input_shape[1] * input_shape[2] * input_shape[3]
#assert len(input_shape)==4 and input_shape[2]==1 and input_shape[3]==1
flops = count_fc_flops(input_filter, output_filter, attr)
#print(node, flops)
FLOPs += flops
return FLOPs
def flops_str(FLOPs):
preset = [(1e12, 'T'), (1e9, 'G'), (1e6, 'M'), (1e3, 'K')]
for p in preset:
if FLOPs // p[0] > 0:
N = FLOPs / p[0]
ret = "%.1f%s" % (N, p[1])
return ret
ret = "%.1f" % (FLOPs)
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='flops counter')
# general
#parser.add_argument('--model', default='../models2/y2-arcface-retinat1/model,1', help='path to load model.')
#parser.add_argument('--model', default='../models2/r100fc-arcface-retinaa/model,1', help='path to load model.')
parser.add_argument('--model',
default='../models2/r50fc-arcface-emore/model,1',
help='path to load model.')
args = parser.parse_args()
_vec = args.model.split(',')
assert len(_vec) == 2
prefix = _vec[0]
epoch = int(_vec[1])
print('loading', prefix, epoch)
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
all_layers = sym.get_internals()
sym = all_layers['fc1_output']
FLOPs = count_flops(sym, data=(1, 3, 112, 112))
print('FLOPs:', FLOPs)
| insightface/recognition/arcface_mxnet/common/flops_counter.py/0 | {
"file_path": "insightface/recognition/arcface_mxnet/common/flops_counter.py",
"repo_id": "insightface",
"token_count": 1830
} | 135 |
import os
import sys
import struct
import argparse
import numbers
import random
from mxnet import recordio
import oneflow.core.record.record_pb2 as of_record
def parse_arguement(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir",
type=str,
default="insightface/datasets/faces_emore",
help="Root directory to mxnet dataset.",
)
parser.add_argument(
"--output_filepath",
type=str,
default="./ofrecord",
help="Path to output OFRecord.",
)
parser.add_argument(
"--num_part", type=int, default=96, help="num_part of OFRecord to generate.",
)
return parser.parse_args(argv)
def load_train_data(data_dir):
path_imgrec = os.path.join(data_dir, "train.rec")
path_imgidx = path_imgrec[0:-4] + ".idx"
print(
"Loading recordio {}\n\
Corresponding record idx is {}".format(
path_imgrec, path_imgidx
)
)
imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, "r", key_type=int)
# Read header0 to get some info.
identity_key_start = 0
identity_key_end = 0
imgidx_list = []
id2range = {}
rec0 = imgrec.read_idx(0)
header0, img_str = recordio.unpack(rec0)
if header0.flag > 0:
identity_key_start = int(header0.label[0])
identity_key_end = int(header0.label[1])
imgidx_list = range(1, identity_key_start)
# Read identity id range
for identity in range(identity_key_start, identity_key_end):
rec = imgrec.read_idx(identity)
header, s = recordio.unpack(rec)
a, b = int(header.label[0]), int(header.label[1])
id2range[identity] = (a, b)
else:
imgidx_list = imgrec.keys
return imgrec, imgidx_list
def convert_to_ofrecord(img_data):
""" Convert python dictionary formath data of one image to of.Example proto.
Args:
img_data: Python dict.
Returns:
example: The converted of.Exampl
"""
def _int32_feature(value):
"""Wrapper for inserting int32 features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(int32_list=of_record.Int32List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(float_list=of_record.FloatList(value=value))
def _double_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return of_record.Feature(double_list=of_record.DoubleList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
# if isinstance(value, six.string_types):
# value = six.binary_type(value, encoding='utf-8')
return of_record.Feature(bytes_list=of_record.BytesList(value=[value]))
example = of_record.OFRecord(
feature={
"label": _int32_feature(img_data["label"]),
"encoded": _bytes_feature(img_data["pixel_data"]),
}
)
return example
def main(args):
# Convert recordio to ofrecord
imgrec, imgidx_list = load_train_data(data_dir=args.data_dir)
imgidx_list = list(imgidx_list)
random.shuffle(imgidx_list)
output_dir = os.path.join(args.output_filepath, "train")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
num_images = len(imgidx_list)
num_images_per_part = (num_images + args.num_part) // args.num_part
print("num_images", num_images, "num_images_per_part", num_images_per_part)
for part_id in range(args.num_part):
part_name = "part-" + "{:0>5d}".format(part_id)
output_file = os.path.join(output_dir, part_name)
file_idx_start = part_id * num_images_per_part
file_idx_end = min((part_id + 1) * num_images_per_part, num_images)
print("part-" + str(part_id), "start", file_idx_start, "end", file_idx_end)
with open(output_file, "wb") as f:
for file_idx in range(file_idx_start, file_idx_end):
idx = imgidx_list[file_idx]
if idx % 10000 == 0:
print("Converting images: {} of {}".format(idx, len(imgidx_list)))
img_data = {}
rec = imgrec.read_idx(idx)
header, s = recordio.unpack(rec)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
img_data["label"] = int(label)
img_data["pixel_data"] = s
example = convert_to_ofrecord(img_data)
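                # Each example is written as an 8-byte native-endian size
                # prefix followed by the serialized OFRecord protobuf message.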
size = example.ByteSize()
f.write(struct.pack("q", size))
f.write(example.SerializeToString())
if __name__ == "__main__":
main(parse_arguement(sys.argv[1:]))
| insightface/recognition/arcface_oneflow/tools/mx_recordio_2_ofrecord_shuffled_npart.py/0 | {
"file_path": "insightface/recognition/arcface_oneflow/tools/mx_recordio_2_ofrecord_shuffled_npart.py",
"repo_id": "insightface",
"token_count": 2274
} | 136 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import requests
import json
import base64
import os
import argparse
parser = argparse.ArgumentParser(description="args for paddleserving")
parser.add_argument("--image_dir", type=str, default="./imgs")
args = parser.parse_args()
def cv2_to_base64(image):
return base64.b64encode(image).decode('utf8')
url = "http://127.0.0.1:9998/ArcFace/prediction"
test_img_dir = args.image_dir
for idx, img_file in enumerate(os.listdir(test_img_dir)):
with open(os.path.join(test_img_dir, img_file), 'rb') as file:
image_data1 = file.read()
image = cv2_to_base64(image_data1)
for i in range(1):
data = {"key": ["image"], "value": [image]}
r = requests.post(url=url, data=json.dumps(data))
print(r.json())
print("==> total number of test imgs: ", len(os.listdir(test_img_dir))) | insightface/recognition/arcface_paddle/deploy/pdserving/pipeline_http_client.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/deploy/pdserving/pipeline_http_client.py",
"repo_id": "insightface",
"token_count": 496
} | 137 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import paddle
from .utils.verification import CallBackVerification
from .utils.io import Checkpoint
from . import backbones
def validation(args):
checkpoint = Checkpoint(
rank=0,
world_size=1,
embedding_size=args.embedding_size,
num_classes=None,
checkpoint_dir=args.checkpoint_dir, )
backbone = eval("backbones.{}".format(args.backbone))(
num_features=args.embedding_size)
checkpoint.load(backbone, for_train=False, dtype='float32')
backbone.eval()
callback_verification = CallBackVerification(
1, 0, args.batch_size, args.val_targets, args.data_dir)
callback_verification(1, backbone)
| insightface/recognition/arcface_paddle/dynamic/validation.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/dynamic/validation.py",
"repo_id": "insightface",
"token_count": 427
} | 138 |
#!/bin/bash
function func_parser_key(){
strs=$1
IFS=":"
array=(${strs})
tmp=${array[0]}
echo ${tmp}
}
function func_parser_value(){
strs=$1
IFS=":"
array=(${strs})
tmp=${array[1]}
echo ${tmp}
}
function func_set_params(){
key=$1
value=$2
if [ ${key}x = "null"x ];then
echo " "
elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then
echo " "
else
echo "${key}=${value}"
fi
}
function func_parser_params(){
strs=$1
IFS=":"
array=(${strs})
key=${array[0]}
tmp=${array[1]}
IFS="|"
res=""
for _params in ${tmp[*]}; do
IFS="="
array=(${_params})
mode=${array[0]}
value=${array[1]}
if [[ ${mode} = ${MODE} ]]; then
IFS="|"
#echo $(func_set_params "${mode}" "${value}")
echo $value
break
fi
IFS="|"
done
echo ${res}
}
function status_check(){
last_status=$1 # the exit code
run_command=$2
run_log=$3
if [ $last_status -eq 0 ]; then
echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
else
echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
fi
}
| insightface/recognition/arcface_paddle/test_tipc/common_func.sh/0 | {
"file_path": "insightface/recognition/arcface_paddle/test_tipc/common_func.sh",
"repo_id": "insightface",
"token_count": 674
} | 139 |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import time
import argparse
from paddle.inference import Config
from paddle.inference import create_predictor
def parse_args():
def str2bool(v):
return v.lower() in ("true", "t", "1")
# general params
parser = argparse.ArgumentParser()
parser.add_argument("--use_gpu", type=str2bool, default=False)
parser.add_argument("--gpu_mem", type=int, default=1000)
# params for predict
parser.add_argument("--model_file", type=str)
parser.add_argument("--params_file", type=str)
parser.add_argument("-b", "--batch_size", type=int, default=1)
parser.add_argument("--ir_optim", type=str2bool, default=True)
parser.add_argument("--use_mkldnn", type=str2bool, default=True)
parser.add_argument("--cpu_num_threads", type=int, default=10)
parser.add_argument("--model", type=str)
return parser.parse_args()
def create_paddle_predictor(args):
config = Config(args.model_file, args.params_file)
if args.use_gpu:
config.enable_use_gpu(args.gpu_mem, 0)
else:
config.disable_gpu()
if args.use_mkldnn:
config.enable_mkldnn()
config.set_cpu_math_library_num_threads(args.cpu_num_threads)
config.set_mkldnn_cache_capacity(100)
config.disable_glog_info()
config.switch_ir_optim(args.ir_optim) # default true
config.enable_memory_optim()
# use zero copy
config.switch_use_feed_fetch_ops(False)
predictor = create_predictor(config)
return predictor
class Predictor(object):
def __init__(self, args):
self.args = args
self.paddle_predictor = create_paddle_predictor(args)
input_names = self.paddle_predictor.get_input_names()
self.input_tensor = self.paddle_predictor.get_input_handle(input_names[
0])
output_names = self.paddle_predictor.get_output_names()
self.output_tensor = self.paddle_predictor.get_output_handle(
output_names[0])
def predict(self, batch_input):
self.input_tensor.copy_from_cpu(batch_input)
self.paddle_predictor.run()
batch_output = self.output_tensor.copy_to_cpu()
return batch_output
def benchmark_predict(self):
test_num = 500
test_time = 0.0
for i in range(0, test_num + 10):
inputs = np.random.rand(args.batch_size, 3, 112,
112).astype(np.float32)
start_time = time.time()
batch_output = self.predict(inputs).flatten()
if i >= 10:
test_time += time.time() - start_time
# time.sleep(0.01) # sleep for T4 GPU
print("{0}\tbatch size: {1}\ttime(ms): {2}".format(
args.model, args.batch_size, 1000 * test_time / test_num))
if __name__ == "__main__":
args = parse_args()
assert os.path.exists(
args.model_file), "The path of 'model_file' does not exist: {}".format(
args.model_file)
assert os.path.exists(
args.params_file
), "The path of 'params_file' does not exist: {}".format(args.params_file)
predictor = Predictor(args)
assert args.model is not None
predictor.benchmark_predict()
| insightface/recognition/arcface_paddle/tools/benchmark_speed.py/0 | {
"file_path": "insightface/recognition/arcface_paddle/tools/benchmark_speed.py",
"repo_id": "insightface",
"token_count": 1546
} | 140 |
import torch
from torch import nn
from torch.utils.checkpoint import checkpoint
__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100', 'iresnet200']
using_ckpt = False
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class IBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
self.prelu = nn.PReLU(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
def forward_impl(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
def forward(self, x):
if self.training and using_ckpt:
return checkpoint(self.forward_impl, x)
else:
return self.forward_impl(x)
class IResNet(nn.Module):
fc_scale = 7 * 7
def __init__(self,
block, layers, dropout=0, num_features=512, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None, fp16=False):
super(IResNet, self).__init__()
self.extra_gflops = 0.0
self.fp16 = fp16
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
self.prelu = nn.PReLU(self.inplanes)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2)
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block,
512,
layers[3],
stride=2,
dilate=replace_stride_with_dilation[2])
self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
self.dropout = nn.Dropout(p=dropout, inplace=True)
self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
self.features = nn.BatchNorm1d(num_features, eps=1e-05)
nn.init.constant_(self.features.weight, 1.0)
self.features.weight.requires_grad = False
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, IBasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation))
return nn.Sequential(*layers)
def forward(self, x):
with torch.cuda.amp.autocast(self.fp16):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x.float() if self.fp16 else x)
x = self.features(x)
return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet18(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
progress, **kwargs)
def iresnet34(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
progress, **kwargs)
def iresnet50(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
progress, **kwargs)
def iresnet100(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
progress, **kwargs)
def iresnet200(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet200', IBasicBlock, [6, 26, 60, 6], pretrained,
progress, **kwargs)
| insightface/recognition/arcface_torch/backbones/iresnet.py/0 | {
"file_path": "insightface/recognition/arcface_torch/backbones/iresnet.py",
"repo_id": "insightface",
"token_count": 3963
} | 141 |
import numbers
import os
import queue as Queue
import threading
from typing import Iterable
import mxnet as mx
import numpy as np
import torch
from functools import partial
from torch import distributed
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.datasets import ImageFolder
from utils.utils_distributed_sampler import DistributedSampler
from utils.utils_distributed_sampler import get_dist_info, worker_init_fn
def get_dataloader(
root_dir,
local_rank,
batch_size,
dali = False,
dali_aug = False,
seed = 2048,
num_workers = 2,
) -> Iterable:
rec = os.path.join(root_dir, 'train.rec')
idx = os.path.join(root_dir, 'train.idx')
train_set = None
# Synthetic
if root_dir == "synthetic":
train_set = SyntheticDataset()
dali = False
# Mxnet RecordIO
elif os.path.exists(rec) and os.path.exists(idx):
train_set = MXFaceDataset(root_dir=root_dir, local_rank=local_rank)
# Image Folder
else:
transform = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
train_set = ImageFolder(root_dir, transform)
# DALI
if dali:
return dali_data_iter(
batch_size=batch_size, rec_file=rec, idx_file=idx,
num_threads=2, local_rank=local_rank, dali_aug=dali_aug)
rank, world_size = get_dist_info()
train_sampler = DistributedSampler(
train_set, num_replicas=world_size, rank=rank, shuffle=True, seed=seed)
if seed is None:
init_fn = None
else:
init_fn = partial(worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
train_loader = DataLoaderX(
local_rank=local_rank,
dataset=train_set,
batch_size=batch_size,
sampler=train_sampler,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
worker_init_fn=init_fn,
)
return train_loader
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, local_rank, max_prefetch=6):
super(BackgroundGenerator, self).__init__()
self.queue = Queue.Queue(max_prefetch)
self.generator = generator
self.local_rank = local_rank
self.daemon = True
self.start()
def run(self):
torch.cuda.set_device(self.local_rank)
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
def __next__(self):
return self.next()
def __iter__(self):
return self
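# DataLoaderX adds asynchronous prefetching on top of the standard DataLoader: batches produced by
# the background thread are copied to the GPU on a dedicated CUDA stream, so the next batch is
# usually already resident on the device when __next__ is called.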
class DataLoaderX(DataLoader):
def __init__(self, local_rank, **kwargs):
super(DataLoaderX, self).__init__(**kwargs)
self.stream = torch.cuda.Stream(local_rank)
self.local_rank = local_rank
def __iter__(self):
self.iter = super(DataLoaderX, self).__iter__()
self.iter = BackgroundGenerator(self.iter, self.local_rank)
self.preload()
return self
def preload(self):
self.batch = next(self.iter, None)
if self.batch is None:
return None
with torch.cuda.stream(self.stream):
for k in range(len(self.batch)):
self.batch[k] = self.batch[k].to(device=self.local_rank, non_blocking=True)
def __next__(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is None:
raise StopIteration
self.preload()
return batch
class MXFaceDataset(Dataset):
def __init__(self, root_dir, local_rank):
super(MXFaceDataset, self).__init__()
self.transform = transforms.Compose(
[transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
self.root_dir = root_dir
self.local_rank = local_rank
path_imgrec = os.path.join(root_dir, 'train.rec')
path_imgidx = os.path.join(root_dir, 'train.idx')
self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
s = self.imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
if header.flag > 0:
self.header0 = (int(header.label[0]), int(header.label[1]))
self.imgidx = np.array(range(1, int(header.label[0])))
else:
self.imgidx = np.array(list(self.imgrec.keys))
def __getitem__(self, index):
idx = self.imgidx[index]
s = self.imgrec.read_idx(idx)
header, img = mx.recordio.unpack(s)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
label = torch.tensor(label, dtype=torch.long)
sample = mx.image.imdecode(img).asnumpy()
if self.transform is not None:
sample = self.transform(sample)
return sample, label
def __len__(self):
return len(self.imgidx)
class SyntheticDataset(Dataset):
def __init__(self):
super(SyntheticDataset, self).__init__()
img = np.random.randint(0, 255, size=(112, 112, 3), dtype=np.int32)
img = np.transpose(img, (2, 0, 1))
img = torch.from_numpy(img).squeeze(0).float()
img = ((img / 255) - 0.5) / 0.5
self.img = img
self.label = 1
def __getitem__(self, index):
return self.img, self.label
def __len__(self):
return 1000000
def dali_data_iter(
batch_size: int, rec_file: str, idx_file: str, num_threads: int,
initial_fill=32768, random_shuffle=True,
prefetch_queue_depth=1, local_rank=0, name="reader",
mean=(127.5, 127.5, 127.5),
std=(127.5, 127.5, 127.5),
dali_aug=False
):
"""
Parameters:
----------
initial_fill: int
Size of the buffer that is used for shuffling. If random_shuffle is False, this parameter is ignored.
"""
rank: int = distributed.get_rank()
world_size: int = distributed.get_world_size()
import nvidia.dali.fn as fn
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
def dali_random_resize(img, resize_size, image_size=112):
img = fn.resize(img, resize_x=resize_size, resize_y=resize_size)
img = fn.resize(img, size=(image_size, image_size))
return img
def dali_random_gaussian_blur(img, window_size):
img = fn.gaussian_blur(img, window_size=window_size * 2 + 1)
return img
def dali_random_gray(img, prob_gray):
saturate = fn.random.coin_flip(probability=1 - prob_gray)
saturate = fn.cast(saturate, dtype=types.FLOAT)
img = fn.hsv(img, saturation=saturate)
return img
def dali_random_hsv(img, hue, saturation):
img = fn.hsv(img, hue=hue, saturation=saturation)
return img
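    # Per-sample branch selection: condition is a 0/1 flag, so the arithmetic blend below returns
    # true_case where the flag is 1 and false_case where it is 0.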
def multiplexing(condition, true_case, false_case):
neg_condition = condition ^ True
return condition * true_case + neg_condition * false_case
condition_resize = fn.random.coin_flip(probability=0.1)
size_resize = fn.random.uniform(range=(int(112 * 0.5), int(112 * 0.8)), dtype=types.FLOAT)
condition_blur = fn.random.coin_flip(probability=0.2)
window_size_blur = fn.random.uniform(range=(1, 2), dtype=types.INT32)
condition_flip = fn.random.coin_flip(probability=0.5)
condition_hsv = fn.random.coin_flip(probability=0.2)
hsv_hue = fn.random.uniform(range=(0., 20.), dtype=types.FLOAT)
hsv_saturation = fn.random.uniform(range=(1., 1.2), dtype=types.FLOAT)
pipe = Pipeline(
batch_size=batch_size, num_threads=num_threads,
device_id=local_rank, prefetch_queue_depth=prefetch_queue_depth, )
condition_flip = fn.random.coin_flip(probability=0.5)
with pipe:
jpegs, labels = fn.readers.mxnet(
path=rec_file, index_path=idx_file, initial_fill=initial_fill,
num_shards=world_size, shard_id=rank,
random_shuffle=random_shuffle, pad_last_batch=False, name=name)
images = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB)
if dali_aug:
images = fn.cast(images, dtype=types.UINT8)
images = multiplexing(condition_resize, dali_random_resize(images, size_resize, image_size=112), images)
images = multiplexing(condition_blur, dali_random_gaussian_blur(images, window_size_blur), images)
images = multiplexing(condition_hsv, dali_random_hsv(images, hsv_hue, hsv_saturation), images)
images = dali_random_gray(images, 0.1)
images = fn.crop_mirror_normalize(
images, dtype=types.FLOAT, mean=mean, std=std, mirror=condition_flip)
pipe.set_outputs(images, labels)
pipe.build()
return DALIWarper(DALIClassificationIterator(pipelines=[pipe], reader_name=name, ))
@torch.no_grad()
class DALIWarper(object):
def __init__(self, dali_iter):
self.iter = dali_iter
def __next__(self):
data_dict = self.iter.__next__()[0]
tensor_data = data_dict['data'].cuda()
tensor_label: torch.Tensor = data_dict['label'].cuda().long()
tensor_label.squeeze_()
return tensor_data, tensor_label
def __iter__(self):
return self
def reset(self):
self.iter.reset()
| insightface/recognition/arcface_torch/dataset.py/0 | {
"file_path": "insightface/recognition/arcface_torch/dataset.py",
"repo_id": "insightface",
"token_count": 4374
} | 142 |
from __future__ import division
import datetime
import os
import os.path as osp
import glob
import numpy as np
import cv2
import sys
import onnxruntime
import onnx
import argparse
from onnx import numpy_helper
from insightface.data import get_image
class ArcFaceORT:
def __init__(self, model_path, cpu=False):
self.model_path = model_path
# providers = None will use available provider, for onnxruntime-gpu it will be "CUDAExecutionProvider"
self.providers = ['CPUExecutionProvider'] if cpu else None
    # check(): input_size is (w, h); returns an error message string on failure, or None on success
def check(self, track='cfat', test_img = None):
#default is cfat
max_model_size_mb=1024
max_feat_dim=512
max_time_cost=15
if track.startswith('ms1m'):
max_model_size_mb=1024
max_feat_dim=512
max_time_cost=10
elif track.startswith('glint'):
max_model_size_mb=1024
max_feat_dim=1024
max_time_cost=20
elif track.startswith('cfat'):
max_model_size_mb = 1024
max_feat_dim = 512
max_time_cost = 15
elif track.startswith('unconstrained'):
max_model_size_mb=1024
max_feat_dim=1024
max_time_cost=30
else:
return "track not found"
if not os.path.exists(self.model_path):
return "model_path not exists"
if not os.path.isdir(self.model_path):
return "model_path should be directory"
onnx_files = []
for _file in os.listdir(self.model_path):
if _file.endswith('.onnx'):
onnx_files.append(osp.join(self.model_path, _file))
if len(onnx_files)==0:
return "do not have onnx files"
self.model_file = sorted(onnx_files)[-1]
print('use onnx-model:', self.model_file)
try:
session = onnxruntime.InferenceSession(self.model_file, providers=self.providers)
except:
return "load onnx failed"
input_cfg = session.get_inputs()[0]
input_shape = input_cfg.shape
print('input-shape:', input_shape)
if len(input_shape)!=4:
return "length of input_shape should be 4"
if not isinstance(input_shape[0], str):
#return "input_shape[0] should be str to support batch-inference"
print('reset input-shape[0] to None')
model = onnx.load(self.model_file)
model.graph.input[0].type.tensor_type.shape.dim[0].dim_param = 'None'
new_model_file = osp.join(self.model_path, 'zzzzrefined.onnx')
onnx.save(model, new_model_file)
self.model_file = new_model_file
print('use new onnx-model:', self.model_file)
try:
session = onnxruntime.InferenceSession(self.model_file, providers=self.providers)
except:
return "load onnx failed"
input_cfg = session.get_inputs()[0]
input_shape = input_cfg.shape
print('new-input-shape:', input_shape)
self.image_size = tuple(input_shape[2:4][::-1])
#print('image_size:', self.image_size)
input_name = input_cfg.name
outputs = session.get_outputs()
output_names = []
for o in outputs:
output_names.append(o.name)
#print(o.name, o.shape)
if len(output_names)!=1:
return "number of output nodes should be 1"
self.session = session
self.input_name = input_name
self.output_names = output_names
#print(self.output_names)
model = onnx.load(self.model_file)
graph = model.graph
if len(graph.node)<8:
return "too small onnx graph"
input_size = (112,112)
self.crop = None
if track=='cfat':
crop_file = osp.join(self.model_path, 'crop.txt')
if osp.exists(crop_file):
lines = open(crop_file,'r').readlines()
if len(lines)!=6:
return "crop.txt should contain 6 lines"
lines = [int(x) for x in lines]
self.crop = lines[:4]
input_size = tuple(lines[4:6])
if input_size!=self.image_size:
return "input-size is inconsistant with onnx model input, %s vs %s"%(input_size, self.image_size)
self.model_size_mb = os.path.getsize(self.model_file) / float(1024*1024)
if self.model_size_mb > max_model_size_mb:
return "max model size exceed, given %.3f-MB"%self.model_size_mb
input_mean = None
input_std = None
if track=='cfat':
pn_file = osp.join(self.model_path, 'pixel_norm.txt')
if osp.exists(pn_file):
lines = open(pn_file,'r').readlines()
if len(lines)!=2:
return "pixel_norm.txt should contain 2 lines"
input_mean = float(lines[0])
input_std = float(lines[1])
if input_mean is not None or input_std is not None:
if input_mean is None or input_std is None:
return "please set input_mean and input_std simultaneously"
else:
find_sub = False
find_mul = False
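            # Heuristic: if the first few graph nodes already contain Sub and Mul/Div, the model
            # normalizes pixels internally (typical of converted mxnet ArcFace exports), so raw
            # pixels are fed (mean 0, std 1); otherwise fall back to mean/std of 127.5.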
for nid, node in enumerate(graph.node[:8]):
print(nid, node.name)
if node.name.startswith('Sub') or node.name.startswith('_minus'):
find_sub = True
if node.name.startswith('Mul') or node.name.startswith('_mul') or node.name.startswith('Div'):
find_mul = True
if find_sub and find_mul:
print("find sub and mul")
#mxnet arcface model
input_mean = 0.0
input_std = 1.0
else:
input_mean = 127.5
input_std = 127.5
self.input_mean = input_mean
self.input_std = input_std
for initn in graph.initializer:
weight_array = numpy_helper.to_array(initn)
dt = weight_array.dtype
if dt.itemsize<4:
return 'invalid weight type - (%s:%s)' % (initn.name, dt.name)
if test_img is None:
test_img = get_image('Tom_Hanks_54745')
test_img = cv2.resize(test_img, self.image_size)
else:
test_img = cv2.resize(test_img, self.image_size)
feat, cost = self.benchmark(test_img)
batch_result = self.check_batch(test_img)
batch_result_sum = float(np.sum(batch_result))
if batch_result_sum in [float('inf'), -float('inf')] or batch_result_sum != batch_result_sum:
print(batch_result)
print(batch_result_sum)
return "batch result output contains NaN!"
if len(feat.shape) < 2:
return "the shape of the feature must be two, but get {}".format(str(feat.shape))
if feat.shape[1] > max_feat_dim:
return "max feat dim exceed, given %d"%feat.shape[1]
self.feat_dim = feat.shape[1]
cost_ms = cost*1000
if cost_ms>max_time_cost:
return "max time cost exceed, given %.4f"%cost_ms
self.cost_ms = cost_ms
print('check stat:, model-size-mb: %.4f, feat-dim: %d, time-cost-ms: %.4f, input-mean: %.3f, input-std: %.3f'%(self.model_size_mb, self.feat_dim, self.cost_ms, self.input_mean, self.input_std))
return None
def check_batch(self, img):
if not isinstance(img, list):
imgs = [img, ] * 32
if self.crop is not None:
nimgs = []
for img in imgs:
nimg = img[self.crop[1]:self.crop[3], self.crop[0]:self.crop[2], :]
if nimg.shape[0] != self.image_size[1] or nimg.shape[1] != self.image_size[0]:
nimg = cv2.resize(nimg, self.image_size)
nimgs.append(nimg)
imgs = nimgs
blob = cv2.dnn.blobFromImages(
images=imgs, scalefactor=1.0 / self.input_std, size=self.image_size,
mean=(self.input_mean, self.input_mean, self.input_mean), swapRB=True)
net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
return net_out
def meta_info(self):
return {'model-size-mb':self.model_size_mb, 'feature-dim':self.feat_dim, 'infer': self.cost_ms}
def forward(self, imgs):
if not isinstance(imgs, list):
imgs = [imgs]
input_size = self.image_size
if self.crop is not None:
nimgs = []
for img in imgs:
nimg = img[self.crop[1]:self.crop[3],self.crop[0]:self.crop[2],:]
if nimg.shape[0]!=input_size[1] or nimg.shape[1]!=input_size[0]:
nimg = cv2.resize(nimg, input_size)
nimgs.append(nimg)
imgs = nimgs
blob = cv2.dnn.blobFromImages(imgs, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
net_out = self.session.run(self.output_names, {self.input_name : blob})[0]
return net_out
def benchmark(self, img):
input_size = self.image_size
if self.crop is not None:
nimg = img[self.crop[1]:self.crop[3],self.crop[0]:self.crop[2],:]
if nimg.shape[0]!=input_size[1] or nimg.shape[1]!=input_size[0]:
nimg = cv2.resize(nimg, input_size)
img = nimg
blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
costs = []
for _ in range(50):
ta = datetime.datetime.now()
net_out = self.session.run(self.output_names, {self.input_name : blob})[0]
tb = datetime.datetime.now()
cost = (tb-ta).total_seconds()
costs.append(cost)
costs = sorted(costs)
cost = costs[5]
return net_out, cost
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
# general
parser.add_argument('workdir', help='submitted work dir', type=str)
parser.add_argument('--track', help='track name, for different challenge', type=str, default='cfat')
args = parser.parse_args()
handler = ArcFaceORT(args.workdir)
err = handler.check(args.track)
print('err:', err)
| insightface/recognition/arcface_torch/onnx_helper.py/0 | {
"file_path": "insightface/recognition/arcface_torch/onnx_helper.py",
"repo_id": "insightface",
"token_count": 5210
} | 143 |
#!/bin/bash
#### Parameters
# datasets options: "casia", "lamp", "buaa", "oulu"
# test_fold_id = -1 if testing 10 fold (casia & lamp) else test_fold_id = i (fold id)
# test_mode: "pretrain" or "finetune"
dataset='oulu'
# img_root="path to data folder"
img_root="/storage/local/local/Oulu_CASIA_NIR_VIS/crops112_3/"
input_mode='grey'
model_mode='29'
test_mode='pretrain'
test_fold_id=-1
model_name='L29.pth.tar' # pretrain model
# model_name=$dataset'_fold'$test_fold_id'_final.pth.tar' # finetune: 'casia_fold1_final.pth.tar'
CUDA_VISIBLE_DEVICES=6 python ./evaluate/eval_${dataset}_112.py --test_fold_id $test_fold_id --input_mode $input_mode --model_mode $model_mode --model_name $model_name --img_root $img_root --test_mode $test_mode | tee test.log | insightface/recognition/idmmd/eval.sh/0 | {
"file_path": "insightface/recognition/idmmd/eval.sh",
"repo_id": "insightface",
"token_count": 307
} | 144 |
## Partial-FC
Partial FC is a distributed deep learning training framework for face recognition. The goal of Partial FC is to facilitate large-scale classification tasks (e.g. 10 or 100 million identities). It is much faster than the model-parallel solution, with no drop in accuracy.

## Contents
[Partial FC](https://arxiv.org/abs/2203.15565)
- [Largest Face Recognition Dataset: **Glint360k**](#Glint360K)
- [Docker](#Docker)
- [Performance On Million Identities](#Benchmark)
- [FAQ](#FAQ)
- [Citation](#Citation)
## Glint360K
We clean, merge, and release the largest and cleanest face recognition dataset Glint360K,
which contains **`17091657`** images of **`360232`** individuals.
By employing the Partial FC training strategy, baseline models trained on Glint360K can easily achieve state-of-the-art performance.
Detailed evaluation results on the large-scale test sets (e.g. IFRT, IJB-C and Megaface) are as follows:
### 1. Evaluation on IFRT
**`r`** denotes the sampling rate of negative class centers.
| Backbone | Dataset | African | Caucasian | Indian | Asian | ALL |
| ------------ | ----------- | ----- | ----- | ------ | ----- | ----- |
| R50 | MS1M-V3 | 76.24 | 86.21 | 84.44 | 37.43 | 71.02 |
| R124 | MS1M-V3 | 81.08 | 89.06 | 87.53 | 38.40 | 74.76 |
| R100 | **Glint360k**(r=1.0) | 89.50 | 94.23 | 93.54 | **65.07** | **88.67** |
| R100 | **Glint360k**(r=0.1) | **90.45** | **94.60** | **93.96** | 63.91 | 88.23 |
### 2. Evaluation on IJB-C and Megaface
We employ ResNet100 as the backbone and CosFace (m=0.4) as the loss function.
TAR@FAR=1e-4 is reported on the IJB-C dataset, and TAR@FAR=1e-6 is reported on the Megaface dataset.
|Test Dataset | IJB-C | Megaface_Id | Megaface_Ver |
| :--- | :---: | :---: | :---: |
| MS1MV2 | 96.4 | 98.3 | 98.6 |
|**Glint360k** | **97.3** | **99.1** | **99.1** |
### 3. License
The Glint360K dataset (and the models trained with this dataset) are available for non-commercial research purposes only.
### 4. Download
- [x] [**Baidu Drive**](https://pan.baidu.com/s/1GsYqTTt7_Dn8BfxxsLFN0w) (code:o3az)
- [x] **Magnet URI**: `magnet:?xt=urn:btih:E5F46EE502B9E76DA8CC3A0E4F7C17E4000C7B1E&dn=glint360k`
Refer to the following command to unzip.
```
cat glint360k_* | tar -xzvf -
# Don't forget the last '-'!
# cf7433cbb915ac422230ba33176f4625 glint360k_00
# 589a5ea3ab59f283d2b5dd3242bc027a glint360k_01
# 8d54fdd5b1e4cd55e1b9a714d76d1075 glint360k_02
# cd7f008579dbed9c5af4d1275915d95e glint360k_03
# 64666b324911b47334cc824f5f836d4c glint360k_04
# a318e4d32493dd5be6b94dd48f9943ac glint360k_05
# c3ae1dcbecea360d2ec2a43a7b6f1d94 glint360k_06
# md5sum:
# 5d9cd9f262ec87a5ca2eac5e703f7cdf train.idx
# 8483be5af6f9906e19f85dee49132f8e train.rec
```
Use [unpack_glint360k.py](./unpack_glint360k.py) to unpack.
### 5. Pretrain models
- [x] [**Baidu Drive**](https://pan.baidu.com/s/1sd9ZRsV2c_dWHW84kz1P1Q) (code:befi)
- [x] [**Google Drive**](https://drive.google.com/drive/folders/1WLjDzEs1wC1K1jxDHNJ7dhEmQ3rOOILl?usp=sharing)
| Framework | backbone | negative class centers sample_rate | IJBC@e4 | IFRT@e6 |
| :--- | :--- | :--- | :--- | :--- |
| mxnet | [R100](https://drive.google.com/drive/folders/1YPqIkOZWrmbli4GWfMJO2b0yiiZ7UCsP?usp=sharing) |1.0|97.3|-|
| mxnet | [R100](https://drive.google.com/drive/folders/1-gF5sDwNoRcjwmpPSTNLpaZJi5N91BvL?usp=sharing) |0.1|97.3|-|
| pytorch | [R50](https://drive.google.com/drive/folders/16hjOGRJpwsJCRjIBbO13z3SrSgvPTaMV?usp=sharing) |1.0|97.0|-|
| pytorch | [R100](https://drive.google.com/drive/folders/19EHffHN0Yn8DjYm5ofrgVOf_xfkrVgqc?usp=sharing) |1.0|97.4|-|
## Docker
Make sure you have installed the NVIDIA driver and the Docker engine for your Linux distribution. Note that you do not need to
install the CUDA Toolkit or other dependencies on the host system; only the NVIDIA driver is required.
Because the CUDA version used in the image is 10.1,
the graphics driver version on the host machine must be greater than 418.
### 1. Docker Getting Started
You can pull the image from Docker Hub or use the offline docker.tar to get the Partial FC image.
1. dockerhub
```shell
docker pull insightface/partial_fc:v1
```
2. offline images
coming soon!
### 2. Getting Started
```shell
sudo docker run -it -v /train_tmp:/train_tmp --net=host --privileged --gpus 8 --shm-size=1g insightface/partial_fc:v1 /bin/bash
```
`/train_tmp` is where you put your training set (if you have enough RAM,
you can mount it as `tmpfs` first).
## Benchmark
### 1. Train Glint360K Using MXNET
| Backbone | GPU | FP16 | BatchSize / it | Throughput img / sec |
| :--- | :--- | :--- | :--- | :--- |
| R100 | 8 * Tesla V100-SXM2-32GB | False | 64 | 1748 |
| R100 | 8 * Tesla V100-SXM2-32GB | True | 64 | 3357 |
| R100 | 8 * Tesla V100-SXM2-32GB | False | 128 | 1847 |
| R100 | 8 * Tesla V100-SXM2-32GB | True | 128 | 3867 |
| R50 | 8 * Tesla V100-SXM2-32GB | False | 64 | 2921 |
| R50 | 8 * Tesla V100-SXM2-32GB | True | 64 | 5428 |
| R50 | 8 * Tesla V100-SXM2-32GB | False | 128 | 3045 |
| R50 | 8 * Tesla V100-SXM2-32GB | True | 128 | 6112 |
### 2. Performance On Million Identities
We neglect the influence of IO. All experiments use mixed-precision training, and the backbone is ResNet50.
#### 1 Million Identities On 8 RTX2080Ti
|Method | GPUs | BatchSize | Memory/M | Throughput img/sec | W |
| :--- | :---: | :---: | :---: | :---: | :---: |
| Model Parallel | 8 | 1024 | 10408 | 2390 | GPU |
| **Partial FC(Ours)** | **8** | **1024** | **8100** | **2780** | GPU |
#### 10 Million Identities On 64 RTX2080Ti
|Method | GPUs | BatchSize | Memory/M | Throughput img/sec | W |
| :--- | :---: | :---: | :---: | :---: | :---: |
| Model Parallel | 64 | 2048 | 9684 | 4483 | GPU |
| **Partial FC(Ours)** | **64** | **4096** | **6722** | **12600** | GPU |
## FAQ
#### Glint360K's Face Alignment Settings?
We use the same alignment setting as MS1MV2; the code is [here](https://github.com/deepinsight/insightface/issues/1286).
#### Why update Glint360K, is there a bug in the previous version?
In the previous version of Glint360K there was no bug in softmax training, but there was a bug in triplet training.
In the latest Glint360K, this bug has been fixed.
#### Dataset in Google Drive or Dropbox?
The torrent has been released.
## Citation
If you find Partial-FC or Glint360K useful in your research, please consider citing the following related paper:
[Partial FC](https://arxiv.org/abs/2203.15565)
```
@inproceedings{an2022pfc,
title={Killing Two Birds with One Stone: Efficient and Robust Training of Face Recognition CNNs by Partial FC},
author={An, Xiang and Deng, Jiankang and Guo, Jia and Feng, Ziyong and Zhu, Xuhan and Yang, Jing and Liu, Tongliang},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
year={2022}
}
```
| insightface/recognition/partial_fc/README.md/0 | {
"file_path": "insightface/recognition/partial_fc/README.md",
"repo_id": "insightface",
"token_count": 3723
} | 145 |
import os
import numpy as np
from mxnet import nd
import mxnet as mx
from memory_samplers import WeightIndexSampler
class MemoryBank(object):
def __init__(self,
num_sample,
num_local,
rank,
local_rank,
embedding_size,
prefix,
gpu=True):
"""
Parameters
----------
        num_sample: int
            The number of sampled class centers.
        num_local: int
            The number of class centers stored on this rank (CPU/GPU).
        rank: int
            Unique process (GPU) ID, from 0 to size - 1.
        local_rank: int
            Unique process (GPU) ID within the server, from 0 to 7.
        embedding_size: int
            The feature dimension.
        prefix: str
            Path prefix of the model directory.
        gpu: bool
            If True, the class centers and their momentum are stored on GPU.
"""
self.num_sample = num_sample
self.num_local = num_local
self.rank = rank
self.embedding_size = embedding_size
self.gpu = gpu
self.prefix = prefix
if self.gpu:
context = mx.gpu(local_rank)
else:
context = mx.cpu()
        # In order to apply updates, both the weight and its momentum must be stored.
self.weight = nd.random_normal(loc=0,
scale=0.01,
shape=(self.num_local,
self.embedding_size),
ctx=context)
self.weight_mom = nd.zeros_like(self.weight)
# Sampler object
self.weight_index_sampler = WeightIndexSampler(num_sample, num_local,
rank)
def sample(self, global_label):
"""
Parameters
----------
global_label: NDArray
Global label (after gathers label from all rank)
Returns
-------
index: ndarray(numpy)
Local index for memory bank to sample, start from 0 to num_local, length is num_sample.
global_label: ndarray(numpy)
Global label after sort and unique.
"""
assert isinstance(global_label, nd.NDArray)
global_label = global_label.asnumpy()
global_label = np.unique(global_label)
global_label.sort()
index = self.weight_index_sampler(global_label)
index.sort()
return index, global_label
def get(self, index):
"""
Get sampled class centers and their momentum.
Parameters
----------
index: NDArray
Local index for memory bank to sample, start from 0 to num_local.
"""
return self.weight[index], self.weight_mom[index]
def set(self, index, updated_weight, updated_weight_mom=None):
"""
        Write the sampled class centers back to the memory bank so that the
        stored centers stay up to date.
Parameters
----------
index: NDArray
Local index for memory bank to sample, start from 0 to num_local.
updated_weight: NDArray
Class center which has been applied gradients.
updated_weight_mom: NDArray
Class center momentum which has been moved average.
"""
self.weight[index] = updated_weight
self.weight_mom[index] = updated_weight_mom
def save(self):
nd.save(fname=os.path.join(self.prefix,
"%d_centers.param" % self.rank),
data=self.weight)
nd.save(fname=os.path.join(self.prefix,
"%d_centers_mom.param" % self.rank),
data=self.weight_mom)
| insightface/recognition/partial_fc/mxnet/memory_bank.py/0 | {
"file_path": "insightface/recognition/partial_fc/mxnet/memory_bank.py",
"repo_id": "insightface",
"token_count": 1899
} | 146 |
# partial fc
PartialFC-Pytorch has been merged into [arcface_torch](https://github.com/deepinsight/insightface/tree/master/recognition/arcface_torch).
| insightface/recognition/partial_fc/pytorch/README.md/0 | {
"file_path": "insightface/recognition/partial_fc/pytorch/README.md",
"repo_id": "insightface",
"token_count": 52
} | 147 |
import sys
import os
import mxnet as mx
import symbol_utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from config import config
def Act(data, act_type, name):
#ignore param act_type, set it in this function
if act_type == 'prelu':
body = mx.sym.LeakyReLU(data=data, act_type='prelu', name=name)
else:
body = mx.sym.Activation(data=data, act_type=act_type, name=name)
return body
def Conv(data,
num_filter=1,
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
num_group=1,
name=None,
suffix=''):
conv = mx.sym.Convolution(data=data,
num_filter=num_filter,
kernel=kernel,
num_group=num_group,
stride=stride,
pad=pad,
no_bias=True,
name='%s%s_conv2d' % (name, suffix))
bn = mx.sym.BatchNorm(data=conv,
name='%s%s_batchnorm' % (name, suffix),
fix_gamma=False,
momentum=config.bn_mom)
act = Act(data=bn,
act_type=config.net_act,
name='%s%s_relu' % (name, suffix))
return act
def Linear(data,
num_filter=1,
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
num_group=1,
name=None,
suffix=''):
conv = mx.sym.Convolution(data=data,
num_filter=num_filter,
kernel=kernel,
num_group=num_group,
stride=stride,
pad=pad,
no_bias=True,
name='%s%s_conv2d' % (name, suffix))
bn = mx.sym.BatchNorm(data=conv,
name='%s%s_batchnorm' % (name, suffix),
fix_gamma=False,
momentum=config.bn_mom)
return bn
def ConvOnly(data,
num_filter=1,
kernel=(1, 1),
stride=(1, 1),
pad=(0, 0),
num_group=1,
name=None,
suffix=''):
conv = mx.sym.Convolution(data=data,
num_filter=num_filter,
kernel=kernel,
num_group=num_group,
stride=stride,
pad=pad,
no_bias=True,
name='%s%s_conv2d' % (name, suffix))
return conv
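# DResidual is a MobileNet-style bottleneck: a 1x1 pointwise expansion, a k x k depthwise
# convolution, then a 1x1 linear projection; Residual below stacks such blocks with identity shortcuts.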
def DResidual(data,
num_out=1,
kernel=(3, 3),
stride=(2, 2),
pad=(1, 1),
num_group=1,
name=None,
suffix=''):
conv = Conv(data=data,
num_filter=num_group,
kernel=(1, 1),
pad=(0, 0),
stride=(1, 1),
name='%s%s_conv_sep' % (name, suffix))
conv_dw = Conv(data=conv,
num_filter=num_group,
num_group=num_group,
kernel=kernel,
pad=pad,
stride=stride,
name='%s%s_conv_dw' % (name, suffix))
proj = Linear(data=conv_dw,
num_filter=num_out,
kernel=(1, 1),
pad=(0, 0),
stride=(1, 1),
name='%s%s_conv_proj' % (name, suffix))
return proj
def Residual(data,
num_block=1,
num_out=1,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
num_group=1,
name=None,
suffix=''):
identity = data
for i in range(num_block):
shortcut = identity
conv = DResidual(data=identity,
num_out=num_out,
kernel=kernel,
stride=stride,
pad=pad,
num_group=num_group,
name='%s%s_block' % (name, suffix),
suffix='%d' % i)
identity = conv + shortcut
return identity
def get_symbol():
num_classes = config.emb_size
print('in_network', config)
fc_type = config.net_output
data = mx.symbol.Variable(name="data")
data = data - 127.5
data = data * 0.0078125
blocks = config.net_blocks
conv_1 = Conv(data,
num_filter=64,
kernel=(3, 3),
pad=(1, 1),
stride=(2, 2),
name="conv_1")
if blocks[0] == 1:
conv_2_dw = Conv(conv_1,
num_group=64,
num_filter=64,
kernel=(3, 3),
pad=(1, 1),
stride=(1, 1),
name="conv_2_dw")
else:
conv_2_dw = Residual(conv_1,
num_block=blocks[0],
num_out=64,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
num_group=64,
name="res_2")
conv_23 = DResidual(conv_2_dw,
num_out=64,
kernel=(3, 3),
stride=(2, 2),
pad=(1, 1),
num_group=128,
name="dconv_23")
conv_3 = Residual(conv_23,
num_block=blocks[1],
num_out=64,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
num_group=128,
name="res_3")
conv_34 = DResidual(conv_3,
num_out=128,
kernel=(3, 3),
stride=(2, 2),
pad=(1, 1),
num_group=256,
name="dconv_34")
conv_4 = Residual(conv_34,
num_block=blocks[2],
num_out=128,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
num_group=256,
name="res_4")
conv_45 = DResidual(conv_4,
num_out=128,
kernel=(3, 3),
stride=(2, 2),
pad=(1, 1),
num_group=512,
name="dconv_45")
conv_5 = Residual(conv_45,
num_block=blocks[3],
num_out=128,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
num_group=256,
name="res_5")
conv_6_sep = Conv(conv_5,
num_filter=512,
kernel=(1, 1),
pad=(0, 0),
stride=(1, 1),
name="conv_6sep")
fc1 = symbol_utils.get_fc1(conv_6_sep, num_classes, fc_type)
return fc1
| insightface/recognition/subcenter_arcface/symbol/fmobilefacenet.py/0 | {
"file_path": "insightface/recognition/subcenter_arcface/symbol/fmobilefacenet.py",
"repo_id": "insightface",
"token_count": 4969
} | 148 |
set -ex
GPU=0
python evaluation/eval.py --conf ./confs/test.conf --scan_id 0 --gpu $GPU --checkpoint 400 --eval_rendering | insightface/reconstruction/PBIDR/code/script/fast_eval.sh/0 | {
"file_path": "insightface/reconstruction/PBIDR/code/script/fast_eval.sh",
"repo_id": "insightface",
"token_count": 42
} | 149 |
## Introduction
JMLR is an efficient, high-accuracy face reconstruction approach that ranked [1st](https://tianchi.aliyun.com/competition/entrance/531961/rankingList) in the
[Perspective Projection Based Monocular 3D Face Reconstruction Challenge](https://tianchi.aliyun.com/competition/entrance/531961/introduction)
of the [ECCV-2022 WCPA Workshop](https://sites.google.com/view/wcpa2022).
The paper is available on [arXiv](https://arxiv.org/abs/2208.07142).
## Method Pipeline
<img src="https://github.com/nttstar/insightface-resources/blob/master/images/jmlr_pipeline.jpg?raw=true" width="800" alt="jmlr-pipeline"/>
## Data preparation
1. Download the dataset from the WCPA organiser and put it somewhere convenient.
2. Create a `cache_align/` dir and put the `flip_index.npy` file under it.
3. Check `configs/s1.py` and update the dataset paths to match your local setup (see the sketch after this list).
4. Use ``python rec_builder.py`` to generate the cached dataset, which will be used in the following steps.
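The exact field names in `configs/s1.py` depend on the version you are using; the block below is only a hypothetical sketch of the kind of paths that typically have to be edited (apart from `cache_dir`, which `rec_builder.py` reads, every name and value is a placeholder):
```python
# Hypothetical excerpt of configs/s1.py; adjust the values to your machine.
config.cache_dir = '/data/wcpa_cache'   # rec_builder.py writes train.rec / val.rec here
config.dataset_root = '/data/wcpa'      # wherever the WCPA dataset was extracted (field name may differ)
```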
## Training
```
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 python -u -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=13334 train.py configs/s1.py
```
## Inference Example
```
python inference_simple.py
```
## Resources
[flip_index.npy](https://drive.google.com/file/d/1fZ4cRyvQeehwKoMKKSmXUmTx5GEJwyrT/view?usp=sharing)
[pretrained-model](https://drive.google.com/file/d/1qSpqDDLQfcPeFr2b82IZrK8QC_3lci3l/view?usp=sharing)
[projection_matrix.txt](https://drive.google.com/file/d/1joiu-V0qEZxil_AHcg_W726nRxE8Q4dm/view?usp=sharing)
## Results
<img src="https://github.com/nttstar/insightface-resources/blob/master/images/jmlr_id.jpg?raw=true" width="800" alt="jmlr-id"/>
| insightface/reconstruction/jmlr/README.md/0 | {
"file_path": "insightface/reconstruction/jmlr/README.md",
"repo_id": "insightface",
"token_count": 647
} | 150 |
import pickle
import numpy as np
import os
import os.path as osp
import glob
import argparse
import cv2
import time
import datetime
import pickle
import sklearn
import mxnet as mx
from utils.utils_config import get_config
from dataset import FaceDataset, Rt26dof
class RecBuilder():
def __init__(self, path, image_size=(112, 112), is_train=True):
self.path = path
self.image_size = image_size
self.widx = 0
self.wlabel = 0
self.max_label = -1
#assert not osp.exists(path), '%s exists' % path
if is_train:
rec_file = osp.join(path, 'train.rec')
idx_file = osp.join(path, 'train.idx')
else:
rec_file = osp.join(path, 'val.rec')
idx_file = osp.join(path, 'val.idx')
#assert not osp.exists(rec_file), '%s exists' % rec_file
if not osp.exists(path):
os.makedirs(path)
self.writer = mx.recordio.MXIndexedRecordIO(idx_file,
rec_file,
'w')
self.meta = []
def add(self, imgs):
#!!! img should be BGR!!!!
#assert label >= 0
#assert label > self.last_label
assert len(imgs) > 0
label = self.wlabel
for img in imgs:
idx = self.widx
image_meta = {'image_index': idx, 'image_classes': [label]}
header = mx.recordio.IRHeader(0, label, idx, 0)
if isinstance(img, np.ndarray):
s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
else:
s = mx.recordio.pack(header, img)
self.writer.write_idx(idx, s)
self.meta.append(image_meta)
self.widx += 1
self.max_label = label
self.wlabel += 1
return label
def add_image(self, img, label):
#!!! img should be BGR!!!!
#assert label >= 0
#assert label > self.last_label
idx = self.widx
header = mx.recordio.IRHeader(0, label, idx, 0)
if isinstance(img, np.ndarray):
s = mx.recordio.pack_img(header,img,quality=100,img_fmt='.jpg')
else:
s = mx.recordio.pack(header, img)
self.writer.write_idx(idx, s)
self.widx += 1
def close(self):
print('stat:', self.widx, self.wlabel)
if __name__ == "__main__":
#cfg = get_config('configs/s1.py')
cfg = get_config('configs/s2.py')
cfg.task = 0
cfg.input_size = 512
for is_train in [True, False]:
dataset = FaceDataset(cfg, is_train=is_train, local_rank=0)
dataset.transform = None
writer = RecBuilder(cfg.cache_dir, is_train=is_train)
#writer = RecBuilder("temp", is_train=is_train)
print('total:', len(dataset))
#meta = np.zeros( (len(dataset), 3), dtype=np.float32 )
meta = []
subset_name = 'train' if is_train else 'val'
meta_path = osp.join(cfg.cache_dir, '%s.meta'%subset_name)
eye_missing = 0
for idx in range(len(dataset)):
#img_local, img_global, label_verts, label_Rt, tform = dataset[idx]
#img_local, label_verts, label_Rt, tform = dataset[idx]
data = dataset[idx]
img_local = data['img_local']
label_verts = data['verts']
label_Rt = data['rt']
tform = data['tform']
label_verts = label_verts.numpy()
label_Rt = label_Rt.numpy()
tform = tform.numpy()
label_6dof = Rt26dof(label_Rt, True)
pose = label_6dof[:3]
#print(image.shape, label_verts.shape, label_6dof.shape)
#print(image.__class__, label_verts.__class__)
img_local = img_local[:,:,::-1]
#img_global = img_global[:,:,::-1]
#image = np.concatenate( (img_local, img_global), axis=1 )
image = img_local
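            # Label layout: 1220 vertices x 3 coords, the flattened 4x4 Rt matrix (16 values), the
            # 6-value crop transform, plus 481 x 3 eye coords per eye when eye annotations exist.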
label = list(label_verts.flatten()) + list(label_Rt.flatten()) + list(tform.flatten())
expect_len = 1220*3+16+6
if 'eye_world_left' in data:
if idx==0:
print('find eye')
eyel = data['eye_world_left'].numpy()
eyer = data['eye_world_right'].numpy()
label += list(eyel.flatten())
label += list(eyer.flatten())
expect_len += 481*6
else:
eye_missing += 1
continue
meta.append(pose)
assert len(label)==expect_len
writer.add_image(image, label)
if idx%100==0:
print('processing:', idx, image.shape, len(label))
if idx<10:
cv2.imwrite("temp/%d.jpg"%idx, image)
writer.close()
meta = np.array(meta, dtype=np.float32)
np.save(meta_path, meta)
print('Eye missing:', eye_missing, is_train)
| insightface/reconstruction/jmlr/rec_builder.py/0 | {
"file_path": "insightface/reconstruction/jmlr/rec_builder.py",
"repo_id": "insightface",
"token_count": 2615
} | 151 |
# Copyright (c) 2020, Baris Gecer. All rights reserved.
#
# This work is made available under the CC BY-NC-SA 4.0.
# To view a copy of this license, see LICENSE
import tensorflow as tf
from external.landmark_detector import networks
from external.landmark_detector.flags import FLAGS
def tf_heatmap_to_lms(heatmap):
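    # Decode (row, col) landmark coordinates from the per-channel heatmaps by taking the argmax of
    # the column-wise and row-wise maxima; the result has shape (batch, n_landmarks, 2).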
hs = tf.argmax(tf.reduce_max(heatmap, 2), 1)
ws = tf.argmax(tf.reduce_max(heatmap, 1), 1)
lms = tf.transpose(tf.to_float(tf.stack([hs, ws])), perm=[1, 2, 0])
return lms
class Landmark_Handler():
def __init__(self, args, sess, generated_image):
self.sess = sess
self.model_path = args.landmark_model
n_landmarks = 84
FLAGS.n_landmarks = 84
net_model = networks.DNFaceMultiView('')
with tf.variable_scope('net'):
self.lms_heatmap_prediction, states = net_model._build_network(generated_image, datas=None, is_training=False,
n_channels=n_landmarks)
self.pts_predictions = tf_heatmap_to_lms(self.lms_heatmap_prediction)
variables = tf.all_variables()
variables_to_restore = [v for v in variables if v.name.split('/')[0] == 'net']
self.saver = tf.train.Saver(variables_to_restore)
def load_model(self):
self.saver.restore(self.sess, self.model_path)
| insightface/reconstruction/ostec/core/landmark_handler.py/0 | {
"file_path": "insightface/reconstruction/ostec/core/landmark_handler.py",
"repo_id": "insightface",
"token_count": 632
} | 152 |
import torch
import numpy as np
import networkx as nx
import scipy.sparse as sp
pascal_graph = {
0: [0],
1: [1, 2],
2: [1, 2, 3, 5],
3: [2, 3, 4],
4: [3, 4],
5: [2, 5, 6],
6: [5, 6],
}
cihp_graph = {
0: [],
1: [2, 13],
2: [1, 13],
3: [14, 15],
4: [13],
5: [6, 7, 9, 10, 11, 12, 14, 15],
6: [5, 7, 10, 11, 14, 15, 16, 17],
7: [5, 6, 9, 10, 11, 12, 14, 15],
8: [16, 17, 18, 19],
9: [5, 7, 10, 16, 17, 18, 19],
10: [5, 6, 7, 9, 11, 12, 13, 14, 15, 16, 17],
11: [5, 6, 7, 10, 13],
12: [5, 7, 10, 16, 17],
13: [1, 2, 4, 10, 11],
14: [3, 5, 6, 7, 10],
15: [3, 5, 6, 7, 10],
16: [6, 8, 9, 10, 12, 18],
17: [6, 8, 9, 10, 12, 19],
18: [8, 9, 16],
19: [8, 9, 17],
}
atr_graph = {
0: [],
1: [2, 11],
2: [1, 11],
3: [11],
4: [5, 6, 7, 11, 14, 15, 17],
5: [4, 6, 7, 8, 12, 13],
6: [4, 5, 7, 8, 9, 10, 12, 13],
7: [4, 11, 12, 13, 14, 15],
8: [5, 6],
9: [6, 12],
10: [6, 13],
11: [1, 2, 3, 4, 7, 14, 15, 17],
12: [5, 6, 7, 9],
13: [5, 6, 7, 10],
14: [4, 7, 11, 16],
15: [4, 7, 11, 16],
16: [14, 15],
17: [4, 11],
}
cihp2pascal_adj = np.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
]
)
cihp2pascal_nlp_adj = np.array(
[
[
1.0,
0.35333052,
0.32727194,
0.17418084,
0.18757584,
0.40608522,
0.37503981,
0.35448462,
0.22598555,
0.23893579,
0.33064262,
0.28923404,
0.27986573,
0.4211553,
0.36915778,
0.41377746,
0.32485771,
0.37248222,
0.36865639,
0.41500332,
],
[
0.39615879,
0.46201529,
0.52321467,
0.30826114,
0.25669527,
0.54747773,
0.3670523,
0.3901983,
0.27519473,
0.3433325,
0.52728509,
0.32771333,
0.34819325,
0.63882953,
0.68042925,
0.69368576,
0.63395791,
0.65344337,
0.59538781,
0.6071375,
],
[
0.16373166,
0.21663339,
0.3053872,
0.28377612,
0.1372435,
0.4448808,
0.29479995,
0.31092595,
0.22703953,
0.33983576,
0.75778818,
0.2619818,
0.37069392,
0.35184867,
0.49877512,
0.49979437,
0.51853277,
0.52517541,
0.32517741,
0.32377309,
],
[
0.32687232,
0.38482461,
0.37693463,
0.41610834,
0.20415749,
0.76749079,
0.35139853,
0.3787411,
0.28411737,
0.35155421,
0.58792618,
0.31141718,
0.40585111,
0.51189218,
0.82042737,
0.8342413,
0.70732188,
0.72752501,
0.60327325,
0.61431337,
],
[
0.34069369,
0.34817292,
0.37525998,
0.36497069,
0.17841617,
0.69746208,
0.31731463,
0.34628951,
0.25167277,
0.32072379,
0.56711286,
0.24894776,
0.37000453,
0.52600859,
0.82483993,
0.84966274,
0.7033991,
0.73449378,
0.56649608,
0.58888791,
],
[
0.28477487,
0.35139564,
0.42742352,
0.41664321,
0.20004676,
0.78566833,
0.42237487,
0.41048549,
0.37933812,
0.46542516,
0.62444759,
0.3274493,
0.49466009,
0.49314658,
0.71244233,
0.71497003,
0.8234787,
0.83566589,
0.62597135,
0.62626812,
],
[
0.3011378,
0.31775977,
0.42922647,
0.36896257,
0.17597556,
0.72214655,
0.39162804,
0.38137872,
0.34980296,
0.43818419,
0.60879174,
0.26762545,
0.46271161,
0.51150476,
0.72318109,
0.73678399,
0.82620388,
0.84942166,
0.5943811,
0.60607602,
],
]
)
pascal2atr_nlp_adj = np.array(
[
[
1.0,
0.35333052,
0.32727194,
0.18757584,
0.40608522,
0.27986573,
0.23893579,
0.27600672,
0.30964391,
0.36865639,
0.41500332,
0.4211553,
0.32485771,
0.37248222,
0.36915778,
0.41377746,
0.32006291,
0.28923404,
],
[
0.39615879,
0.46201529,
0.52321467,
0.25669527,
0.54747773,
0.34819325,
0.3433325,
0.26603942,
0.45162929,
0.59538781,
0.6071375,
0.63882953,
0.63395791,
0.65344337,
0.68042925,
0.69368576,
0.44354613,
0.32771333,
],
[
0.16373166,
0.21663339,
0.3053872,
0.1372435,
0.4448808,
0.37069392,
0.33983576,
0.26563416,
0.35443504,
0.32517741,
0.32377309,
0.35184867,
0.51853277,
0.52517541,
0.49877512,
0.49979437,
0.21750868,
0.2619818,
],
[
0.32687232,
0.38482461,
0.37693463,
0.20415749,
0.76749079,
0.40585111,
0.35155421,
0.28271333,
0.52684576,
0.60327325,
0.61431337,
0.51189218,
0.70732188,
0.72752501,
0.82042737,
0.8342413,
0.40137029,
0.31141718,
],
[
0.34069369,
0.34817292,
0.37525998,
0.17841617,
0.69746208,
0.37000453,
0.32072379,
0.27268885,
0.47426719,
0.56649608,
0.58888791,
0.52600859,
0.7033991,
0.73449378,
0.82483993,
0.84966274,
0.37830796,
0.24894776,
],
[
0.28477487,
0.35139564,
0.42742352,
0.20004676,
0.78566833,
0.49466009,
0.46542516,
0.32662614,
0.55780359,
0.62597135,
0.62626812,
0.49314658,
0.8234787,
0.83566589,
0.71244233,
0.71497003,
0.41223219,
0.3274493,
],
[
0.3011378,
0.31775977,
0.42922647,
0.17597556,
0.72214655,
0.46271161,
0.43818419,
0.3192333,
0.50979216,
0.5943811,
0.60607602,
0.51150476,
0.82620388,
0.84942166,
0.72318109,
0.73678399,
0.39259827,
0.26762545,
],
]
)
cihp2atr_nlp_adj = np.array(
[
[
1.0,
0.35333052,
0.32727194,
0.18757584,
0.40608522,
0.27986573,
0.23893579,
0.27600672,
0.30964391,
0.36865639,
0.41500332,
0.4211553,
0.32485771,
0.37248222,
0.36915778,
0.41377746,
0.32006291,
0.28923404,
],
[
0.35333052,
1.0,
0.39206695,
0.42143438,
0.4736689,
0.47139544,
0.51999208,
0.38354847,
0.45628529,
0.46514124,
0.50083501,
0.4310595,
0.39371443,
0.4319752,
0.42938598,
0.46384034,
0.44833757,
0.6153155,
],
[
0.32727194,
0.39206695,
1.0,
0.32836702,
0.52603065,
0.39543695,
0.3622627,
0.43575346,
0.33866223,
0.45202552,
0.48421,
0.53669903,
0.47266611,
0.50925436,
0.42286557,
0.45403656,
0.37221304,
0.40999322,
],
[
0.17418084,
0.46892601,
0.25774838,
0.31816231,
0.39330317,
0.34218382,
0.48253904,
0.22084125,
0.41335728,
0.52437572,
0.5191713,
0.33576117,
0.44230914,
0.44250678,
0.44330833,
0.43887264,
0.50693611,
0.39278795,
],
[
0.18757584,
0.42143438,
0.32836702,
1.0,
0.35030067,
0.30110947,
0.41055555,
0.34338879,
0.34336307,
0.37704433,
0.38810141,
0.34702081,
0.24171562,
0.25433078,
0.24696241,
0.2570884,
0.4465962,
0.45263213,
],
[
0.40608522,
0.4736689,
0.52603065,
0.35030067,
1.0,
0.54372584,
0.58300258,
0.56674191,
0.555266,
0.66599594,
0.68567555,
0.55716359,
0.62997328,
0.65638548,
0.61219615,
0.63183318,
0.54464151,
0.44293752,
],
[
0.37503981,
0.50675565,
0.4761106,
0.37561813,
0.60419403,
0.77912403,
0.64595517,
0.85939662,
0.46037144,
0.52348817,
0.55875094,
0.37741886,
0.455671,
0.49434392,
0.38479954,
0.41804074,
0.47285709,
0.57236283,
],
[
0.35448462,
0.50576632,
0.51030446,
0.35841033,
0.55106903,
0.50257274,
0.52591451,
0.4283053,
0.39991808,
0.42327211,
0.42853819,
0.42071825,
0.41240559,
0.42259136,
0.38125352,
0.3868255,
0.47604934,
0.51811717,
],
[
0.22598555,
0.5053299,
0.36301185,
0.38002282,
0.49700941,
0.45625243,
0.62876479,
0.4112051,
0.33944371,
0.48322639,
0.50318714,
0.29207815,
0.38801966,
0.41119094,
0.29199072,
0.31021029,
0.41594871,
0.54961962,
],
[
0.23893579,
0.51999208,
0.3622627,
0.41055555,
0.58300258,
0.68874251,
1.0,
0.56977937,
0.49918447,
0.48484363,
0.51615925,
0.41222306,
0.49535971,
0.53134951,
0.3807616,
0.41050298,
0.48675801,
0.51112664,
],
[
0.33064262,
0.306412,
0.60679935,
0.25592294,
0.58738706,
0.40379627,
0.39679161,
0.33618385,
0.39235148,
0.45474013,
0.4648476,
0.59306762,
0.58976007,
0.60778661,
0.55400397,
0.56551297,
0.3698029,
0.33860535,
],
[
0.28923404,
0.6153155,
0.40999322,
0.45263213,
0.44293752,
0.60359359,
0.51112664,
0.46578181,
0.45656936,
0.38142307,
0.38525582,
0.33327223,
0.35360175,
0.36156453,
0.3384992,
0.34261229,
0.49297863,
1.0,
],
[
0.27986573,
0.47139544,
0.39543695,
0.30110947,
0.54372584,
1.0,
0.68874251,
0.67765588,
0.48690078,
0.44010641,
0.44921156,
0.32321099,
0.48311542,
0.4982002,
0.39378102,
0.40297733,
0.45309735,
0.60359359,
],
[
0.4211553,
0.4310595,
0.53669903,
0.34702081,
0.55716359,
0.32321099,
0.41222306,
0.25721705,
0.36633509,
0.5397475,
0.56429928,
1.0,
0.55796926,
0.58842844,
0.57930828,
0.60410597,
0.41615326,
0.33327223,
],
[
0.36915778,
0.42938598,
0.42286557,
0.24696241,
0.61219615,
0.39378102,
0.3807616,
0.28089866,
0.48450394,
0.77400821,
0.68813814,
0.57930828,
0.8856886,
0.81673412,
1.0,
0.92279623,
0.46969152,
0.3384992,
],
[
0.41377746,
0.46384034,
0.45403656,
0.2570884,
0.63183318,
0.40297733,
0.41050298,
0.332879,
0.48799542,
0.69231828,
0.77015091,
0.60410597,
0.79788484,
0.88232104,
0.92279623,
1.0,
0.45685017,
0.34261229,
],
[
0.32485771,
0.39371443,
0.47266611,
0.24171562,
0.62997328,
0.48311542,
0.49535971,
0.32477932,
0.51486622,
0.79353556,
0.69768738,
0.55796926,
1.0,
0.92373745,
0.8856886,
0.79788484,
0.47883134,
0.35360175,
],
[
0.37248222,
0.4319752,
0.50925436,
0.25433078,
0.65638548,
0.4982002,
0.53134951,
0.38057074,
0.52403969,
0.72035243,
0.78711147,
0.58842844,
0.92373745,
1.0,
0.81673412,
0.88232104,
0.47109935,
0.36156453,
],
[
0.36865639,
0.46514124,
0.45202552,
0.37704433,
0.66599594,
0.44010641,
0.48484363,
0.39636574,
0.50175258,
1.0,
0.91320249,
0.5397475,
0.79353556,
0.72035243,
0.77400821,
0.69231828,
0.59087008,
0.38142307,
],
[
0.41500332,
0.50083501,
0.48421,
0.38810141,
0.68567555,
0.44921156,
0.51615925,
0.45156472,
0.50438158,
0.91320249,
1.0,
0.56429928,
0.69768738,
0.78711147,
0.68813814,
0.77015091,
0.57698754,
0.38525582,
],
]
)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj = nx.adjacency_matrix(
nx.from_dict_of_lists(adj)
) # return a adjacency matrix of adj ( type is numpy)
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0])) #
# return sparse_to_tuple(adj_normalized)
return adj_normalized.todense()
def row_norm(inputs):
outputs = []
for x in inputs:
xsum = x.sum()
x = x / xsum
outputs.append(x)
return outputs
def normalize_adj_torch(adj):
# print(adj.size())
if len(adj.size()) == 4:
new_r = torch.zeros(adj.size()).type_as(adj)
for i in range(adj.size(1)):
adj_item = adj[0, i]
rowsum = adj_item.sum(1)
d_inv_sqrt = rowsum.pow_(-0.5)
d_inv_sqrt[torch.isnan(d_inv_sqrt)] = 0
d_mat_inv_sqrt = torch.diag(d_inv_sqrt)
r = torch.matmul(torch.matmul(d_mat_inv_sqrt, adj_item), d_mat_inv_sqrt)
new_r[0, i, ...] = r
return new_r
rowsum = adj.sum(1)
d_inv_sqrt = rowsum.pow_(-0.5)
d_inv_sqrt[torch.isnan(d_inv_sqrt)] = 0
d_mat_inv_sqrt = torch.diag(d_inv_sqrt)
r = torch.matmul(torch.matmul(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
return r
# def row_norm(adj):
if __name__ == "__main__":
a = row_norm(cihp2pascal_adj)
print(a)
print(cihp2pascal_adj)
# print(a.shape)
| insightface/reconstruction/ostec/external/graphonomy/FaceHairMask/graph.py/0 | {
"file_path": "insightface/reconstruction/ostec/external/graphonomy/FaceHairMask/graph.py",
"repo_id": "insightface",
"token_count": 14206
} | 153 |