Spaces: zhengchong/CatVTON (Running on Zero)

zhengchong committed
Commit 21c6c10 · Parent(s): 3f59270

chore: Remove unused files and dependencies

This view is limited to 50 files because it contains too many changes. See raw diff.
- __pycache__/utils.cpython-39.pyc +0 -0
- app.py +13 -3
- model/DensePose/__pycache__/__init__.cpython-39.pyc +0 -0
- model/SCHP/LICENSE +0 -21
- model/SCHP/README.md +0 -129
- model/SCHP/__init__.py +0 -163
- model/SCHP/__pycache__/__init__.cpython-310.pyc +0 -0
- model/SCHP/__pycache__/__init__.cpython-39.pyc +0 -0
- model/SCHP/datasets/__init__.py +0 -0
- model/SCHP/datasets/__pycache__/__init__.cpython-39.pyc +0 -0
- model/SCHP/datasets/__pycache__/simple_extractor_dataset.cpython-39.pyc +0 -0
- model/SCHP/datasets/datasets.py +0 -205
- model/SCHP/datasets/simple_extractor_dataset.py +0 -92
- model/SCHP/datasets/target_generation.py +0 -40
- model/SCHP/environment.yaml +0 -49
- model/SCHP/evaluate.py +0 -210
- model/SCHP/file_list.txt +0 -0
- model/SCHP/mhp_extension/.ipynb_checkpoints/demo-checkpoint.ipynb +0 -0
- model/SCHP/mhp_extension/README.md +0 -38
- model/SCHP/mhp_extension/coco_style_annotation_creator/__pycache__/pycococreatortools.cpython-37.pyc +0 -0
- model/SCHP/mhp_extension/coco_style_annotation_creator/human_to_coco.py +0 -166
- model/SCHP/mhp_extension/coco_style_annotation_creator/pycococreatortools.py +0 -114
- model/SCHP/mhp_extension/coco_style_annotation_creator/test_human2coco_format.py +0 -74
- model/SCHP/mhp_extension/data/DemoDataset/global_pic/demo.jpg +0 -0
- model/SCHP/mhp_extension/demo.ipynb +0 -0
- model/SCHP/mhp_extension/demo/demo.jpg +0 -0
- model/SCHP/mhp_extension/demo/demo_global_human_parsing.png +0 -0
- model/SCHP/mhp_extension/demo/demo_instance_human_mask.png +0 -0
- model/SCHP/mhp_extension/demo/demo_multiple_human_parsing.png +0 -0
- model/SCHP/mhp_extension/detectron2/.circleci/config.yml +0 -179
- model/SCHP/mhp_extension/detectron2/.clang-format +0 -85
- model/SCHP/mhp_extension/detectron2/.flake8 +0 -9
- model/SCHP/mhp_extension/detectron2/.github/CODE_OF_CONDUCT.md +0 -5
- model/SCHP/mhp_extension/detectron2/.github/CONTRIBUTING.md +0 -49
- model/SCHP/mhp_extension/detectron2/.github/Detectron2-Logo-Horz.svg +0 -1
- model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md +0 -5
- model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md +0 -36
- model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/config.yml +0 -9
- model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/feature-request.md +0 -31
- model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/questions-help-support.md +0 -26
- model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md +0 -45
- model/SCHP/mhp_extension/detectron2/.github/pull_request_template.md +0 -9
- model/SCHP/mhp_extension/detectron2/.gitignore +0 -46
- model/SCHP/mhp_extension/detectron2/GETTING_STARTED.md +0 -79
- model/SCHP/mhp_extension/detectron2/INSTALL.md +0 -184
- model/SCHP/mhp_extension/detectron2/LICENSE +0 -201
- model/SCHP/mhp_extension/detectron2/MODEL_ZOO.md +0 -903
- model/SCHP/mhp_extension/detectron2/README.md +0 -56
- model/SCHP/mhp_extension/detectron2/configs/Base-RCNN-C4.yaml +0 -18
- model/SCHP/mhp_extension/detectron2/configs/Base-RCNN-DilatedC5.yaml +0 -31
__pycache__/utils.cpython-39.pyc
ADDED
Binary file (20.3 kB)
app.py
CHANGED
@@ -12,7 +12,7 @@ from diffusers.image_processor import VaeImageProcessor
 from huggingface_hub import snapshot_download
 from PIL import Image
 
-from model.cloth_masker import …
+from model.cloth_masker import AutoMaskerSeg, vis_mask
 from model.pipeline import CatVTONPipeline
 from utils import init_weight_dtype, resize_and_crop, resize_and_padding
 
@@ -123,9 +123,9 @@ pipeline = CatVTONPipeline(
 )
 # AutoMasker
 mask_processor = VaeImageProcessor(vae_scale_factor=8, do_normalize=False, do_binarize=True, do_convert_grayscale=True)
-automasker = …
+automasker = AutoMaskerSeg(
     densepose_ckpt=os.path.join(repo_path, "DensePose"),
-    …
+    segformer_ckpt="/home/chongzheng_p23/data/Projects/CatVTON-main/Models/segformer_b3_clothes",
     device='cuda',
 )
 
@@ -227,6 +227,9 @@ HEADER = """
     <a href="http://120.76.142.206:8888" style="margin: 0 2px;">
         <img src='https://img.shields.io/badge/Demo-Gradio-gold?style=flat&logo=Gradio&logoColor=red' alt='Demo'>
     </a>
+    <a href="https://huggingface.co/spaces/zhengchong/CatVTON" style="margin: 0 2px;">
+        <img src='https://img.shields.io/badge/Space-ZeroGPU-orange?style=flat&logo=Gradio&logoColor=red' alt='Demo'>
+    </a>
     <a href='https://zheng-chong.github.io/CatVTON/' style="margin: 0 2px;">
         <img src='https://img.shields.io/badge/Webpage-Project-silver?style=flat&logo=&logoColor=orange' alt='webpage'>
     </a>
@@ -234,6 +237,13 @@ HEADER = """
     <img src='https://img.shields.io/badge/License-CC BY--NC--SA--4.0-lightgreen?style=flat&logo=Lisence' alt='License'>
     </a>
 </div>
+<br>
+
+· Thanks to <a href="https://huggingface.co/zero-gpu-explorers">ZeroGPU</a> for providing A100 for this demo. <br>
+· To adapt to ZeroGPU, we replace SCHP with <a href="https://huggingface.co/mattmdjaga/segformer_b2_clothes">SegFormer</a>, which may result in differences from <a href="http://120.76.142.206:8888">our own demo</a>. <br>
+· This demo and our weights are only open for **Non-commercial Use**. <br>
+· SafetyChecker is set to filter NSFW content, but it may block normal results too. Please adjust the <span>`seed`</span> for normal outcomes.
+
 """
 
 def app_gradio():
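The substantive change above swaps the SCHP-based `AutoMasker` for an `AutoMaskerSeg` that parses clothes with SegFormer, as the new HEADER notes explain. Below is a minimal sketch of what the SegFormer half of such a masker plausibly does, using the `mattmdjaga/segformer_b2_clothes` checkpoint the HEADER links to. The real `AutoMaskerSeg` lives in `model/cloth_masker`, which this diff does not show; the input path and the upper-clothes label id are assumptions.

```python
import torch
from PIL import Image
from transformers import AutoModelForSemanticSegmentation, SegformerImageProcessor

ckpt = "mattmdjaga/segformer_b2_clothes"  # checkpoint linked from the new HEADER
processor = SegformerImageProcessor.from_pretrained(ckpt)
model = AutoModelForSemanticSegmentation.from_pretrained(ckpt).eval()

image = Image.open("person.jpg").convert("RGB")  # hypothetical input image
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits

# SegFormer emits logits at reduced resolution; upsample to the image size, then argmax.
seg = torch.nn.functional.interpolate(
    logits, size=image.size[::-1], mode="bilinear", align_corners=False
).argmax(dim=1)[0]

# Assumed label map: id 4 is 'Upper-clothes' in this checkpoint's ATR-style labels.
cloth_mask = (seg == 4).to(torch.uint8) * 255
```

Unlike SCHP, this runs as a plain `transformers` forward pass, which is what makes it practical to host on a ZeroGPU Space.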
model/DensePose/__pycache__/__init__.cpython-39.pyc
CHANGED
Binary files a/model/DensePose/__pycache__/__init__.cpython-39.pyc and b/model/DensePose/__pycache__/__init__.cpython-39.pyc differ
model/SCHP/LICENSE
DELETED
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2020 Peike Li
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
model/SCHP/README.md
DELETED
@@ -1,129 +0,0 @@
-# Self Correction for Human Parsing
-
-![Python 3.6](https://img.shields.io/badge/python-3.6-green.svg)
-[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
-
-An out-of-box human parsing representation extractor.
-
-Our solution ranks 1st for all human parsing tracks (including single, multiple and video) in the third LIP challenge!
-
-![lip-visualization](./demo/lip-visualization.jpg)
-
-Features:
-- [x] Out-of-box human parsing extractor for other downstream applications.
-- [x] Pretrained models on three popular single-person human parsing datasets.
-- [x] Training and inference code.
-- [x] Simple yet effective extension on multi-person and video human parsing tasks.
-
-## Requirements
-
-```
-conda env create -f environment.yaml
-conda activate schp
-pip install -r requirements.txt
-```
-
-## Simple Out-of-Box Extractor
-
-The easiest way to get started is to use our trained SCHP models on your own images to extract human parsing representations. Here we provide state-of-the-art [trained models](https://drive.google.com/drive/folders/1uOaQCpNtosIjEL2phQKEdiYd0Td18jNo?usp=sharing) on three popular datasets. These three datasets have different label systems; you can choose the one that best fits your own task.
-
-**LIP** ([exp-schp-201908261155-lip.pth](https://drive.google.com/file/d/1k4dllHpu0bdx38J7H28rVVLpU-kOHmnH/view?usp=sharing))
-
-* mIoU on LIP validation: **59.36%**.
-
-* LIP is the largest single-person human parsing dataset, with 50,000+ images. This dataset focuses on complicated real-world scenarios. LIP has 20 labels, including 'Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat', 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm', 'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe'.
-
-**ATR** ([exp-schp-201908301523-atr.pth](https://drive.google.com/file/d/1ruJg4lqR_jgQPj-9K0PP-L2vJERYOxLP/view?usp=sharing))
-
-* mIoU on ATR test: **82.29%**.
-
-* ATR is a large single-person human parsing dataset, with 17,000+ images. This dataset focuses on fashion AI. ATR has 18 labels, including 'Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt', 'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf'.
-
-**Pascal-Person-Part** ([exp-schp-201908270938-pascal-person-part.pth](https://drive.google.com/file/d/1E5YwNKW2VOEayK9mWCS3Kpsxf-3z04ZE/view?usp=sharing))
-
-* mIoU on Pascal-Person-Part validation: **71.46%**.
-
-* Pascal Person Part is a tiny single-person human parsing dataset, with 3,000+ images. This dataset focuses on body-part segmentation. Pascal Person Part has 7 labels, including 'Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'.
-
-Choose one and have fun on your own task!
-
-To extract the human parsing representation, simply put your own images in the `INPUT_PATH` folder, then download a pretrained model and run the following command. The output images with the same file names will be saved in `OUTPUT_PATH`.
-
-```
-python simple_extractor.py --dataset [DATASET] --model-restore [CHECKPOINT_PATH] --input-dir [INPUT_PATH] --output-dir [OUTPUT_PATH]
-```
-
-**[Updated]** Here is also a [colab demo example](https://colab.research.google.com/drive/1JOwOPaChoc9GzyBi5FUEYTSaP2qxJl10?usp=sharing) for quick inference provided by [@levindabhi](https://github.com/levindabhi).
-
-The `DATASET` option has three choices: 'lip', 'atr' and 'pascal'. Note that each pixel in the output images denotes the predicted label number. The output images have the same size as the input ones. For better visualization, we attach a palette to the output images. We suggest reading the images with `PIL`.
-
-If you need not only the final parsing images but also the feature-map representations, add the `--logits` flag to save the output feature maps. These feature maps are the logits before the softmax layer.
-
-## Dataset Preparation
-
-Please download the [LIP](http://sysu-hcp.net/lip/) dataset following the structure below.
-
-```commandline
-data/LIP
-|--- train_images           # 30462 training single person images
-|--- val_images             # 10000 validation single person images
-|--- train_segmentations    # 30462 training annotations
-|--- val_segmentations      # 10000 validation annotations
-|--- train_id.txt           # training image list
-|--- val_id.txt             # validation image list
-```
-
-## Training
-
-```
-python train.py
-```
-
-By default, the trained model will be saved in the `./log` directory. Please read the arguments for more details.
-
-## Evaluation
-
-```
-python evaluate.py --model-restore [CHECKPOINT_PATH]
-```
-
-`CHECKPOINT_PATH` should be the path of the trained model.
-
-## Extension on Multiple Human Parsing
-
-Please read [MultipleHumanParsing.md](./mhp_extension/README.md) for more details.
-
-## Citation
-
-Please cite our work if you find this repo useful in your research.
-
-```latex
-@article{li2020self,
-  title={Self-Correction for Human Parsing},
-  author={Li, Peike and Xu, Yunqiu and Wei, Yunchao and Yang, Yi},
-  journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
-  year={2020},
-  doi={10.1109/TPAMI.2020.3048039}}
-```
-
-## Visualization
-
-* Source Image.
-![demo](./demo/demo.jpg)
-* LIP Parsing Result.
-![demo-lip](./demo/demo_lip.png)
-* ATR Parsing Result.
-![demo-atr](./demo/demo_atr.png)
-* Pascal-Person-Part Parsing Result.
-![demo-pascal](./demo/demo_pascal.png)
-* Source Image.
-![demo](./mhp_extension/demo/demo.jpg)
-* Instance Human Mask.
-![demo-lip](./mhp_extension/demo/demo_instance_human_mask.png)
-* Global Human Parsing Result.
-![demo-lip](./mhp_extension/demo/demo_global_human_parsing.png)
-* Multiple Human Parsing Result.
-![demo-lip](./mhp_extension/demo/demo_multiple_human_parsing.png)
-
-## Related
-
-Our code adopts [InplaceSyncBN](https://github.com/mapillary/inplace_abn) to save GPU memory.
-
-There is also a [PaddlePaddle](https://github.com/PaddlePaddle/PaddleSeg/tree/develop/contrib/ACE2P) implementation of this project.
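The deleted README above recommends reading the palettized parsing outputs with PIL; a short sketch of what that looks like, with a hypothetical output path:

```python
import numpy as np
from PIL import Image

# The extractor saves mode-'P' PNGs: each pixel stores a label index, and the
# attached palette only affects display, not the stored values.
parsing = Image.open("OUTPUT_PATH/demo.png")  # hypothetical output file
labels = np.array(parsing)                    # (H, W) array of label indices
print(labels.shape, np.unique(labels))        # which classes appear in the image
```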
model/SCHP/__init__.py
DELETED
@@ -1,163 +0,0 @@
-from model.SCHP import networks
-from model.SCHP.utils.transforms import get_affine_transform, transform_logits
-
-from collections import OrderedDict
-import torch
-import numpy as np
-import cv2
-from PIL import Image
-from torchvision import transforms
-
-def get_palette(num_cls):
-    """ Returns the color map for visualizing the segmentation mask.
-    Args:
-        num_cls: Number of classes
-    Returns:
-        The color map
-    """
-    n = num_cls
-    palette = [0] * (n * 3)
-    for j in range(0, n):
-        lab = j
-        palette[j * 3 + 0] = 0
-        palette[j * 3 + 1] = 0
-        palette[j * 3 + 2] = 0
-        i = 0
-        while lab:
-            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
-            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
-            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
-            i += 1
-            lab >>= 3
-    return palette
-
-dataset_settings = {
-    'lip': {
-        'input_size': [473, 473],
-        'num_classes': 20,
-        'label': ['Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat',
-                  'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm',
-                  'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe']
-    },
-    'atr': {
-        'input_size': [512, 512],
-        'num_classes': 18,
-        'label': ['Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt',
-                  'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf']
-    },
-    'pascal': {
-        'input_size': [512, 512],
-        'num_classes': 7,
-        'label': ['Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'],
-    }
-}
-
-class SCHP:
-    def __init__(self, ckpt_path, device):
-        dataset_type = None
-        if 'lip' in ckpt_path:
-            dataset_type = 'lip'
-        elif 'atr' in ckpt_path:
-            dataset_type = 'atr'
-        elif 'pascal' in ckpt_path:
-            dataset_type = 'pascal'
-        assert dataset_type is not None, 'Dataset type not found in checkpoint path'
-        self.device = device
-        self.num_classes = dataset_settings[dataset_type]['num_classes']
-        self.input_size = dataset_settings[dataset_type]['input_size']
-        self.aspect_ratio = self.input_size[1] * 1.0 / self.input_size[0]
-        self.palette = get_palette(self.num_classes)
-
-        self.label = dataset_settings[dataset_type]['label']
-        self.model = networks.init_model('resnet101', num_classes=self.num_classes, pretrained=None).to(device)
-        self.load_ckpt(ckpt_path)
-        self.model.eval()
-
-        self.transform = transforms.Compose([
-            transforms.ToTensor(),
-            transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229])
-        ])
-        self.upsample = torch.nn.Upsample(size=self.input_size, mode='bilinear', align_corners=True)
-
-    def load_ckpt(self, ckpt_path):
-        state_dict = torch.load(ckpt_path, map_location='cpu')['state_dict']
-        new_state_dict = OrderedDict()
-        for k, v in state_dict.items():
-            name = k[7:]  # remove `module.`
-            new_state_dict[name] = v
-        self.model.load_state_dict(new_state_dict)
-
-    def _box2cs(self, box):
-        x, y, w, h = box[:4]
-        return self._xywh2cs(x, y, w, h)
-
-    def _xywh2cs(self, x, y, w, h):
-        center = np.zeros((2), dtype=np.float32)
-        center[0] = x + w * 0.5
-        center[1] = y + h * 0.5
-        if w > self.aspect_ratio * h:
-            h = w * 1.0 / self.aspect_ratio
-        elif w < self.aspect_ratio * h:
-            w = h * self.aspect_ratio
-        scale = np.array([w, h], dtype=np.float32)
-        return center, scale
-
-    def preprocess(self, image):
-        if isinstance(image, str):
-            img = cv2.imread(image, cv2.IMREAD_COLOR)
-        elif isinstance(image, Image.Image):
-            # to cv2 format
-            img = np.array(image)
-
-        h, w, _ = img.shape
-        # Get person center and scale
-        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
-        r = 0
-        trans = get_affine_transform(person_center, s, r, self.input_size)
-        input = cv2.warpAffine(
-            img,
-            trans,
-            (int(self.input_size[1]), int(self.input_size[0])),
-            flags=cv2.INTER_LINEAR,
-            borderMode=cv2.BORDER_CONSTANT,
-            borderValue=(0, 0, 0))
-
-        input = self.transform(input).to(self.device).unsqueeze(0)
-        meta = {
-            'center': person_center,
-            'height': h,
-            'width': w,
-            'scale': s,
-            'rotation': r
-        }
-        return input, meta
-
-    def __call__(self, image_or_path):
-        if isinstance(image_or_path, list):
-            image_list = []
-            meta_list = []
-            for image in image_or_path:
-                image, meta = self.preprocess(image)
-                image_list.append(image)
-                meta_list.append(meta)
-            image = torch.cat(image_list, dim=0)
-        else:
-            image, meta = self.preprocess(image_or_path)
-            meta_list = [meta]
-
-        output = self.model(image)
-        upsample_outputs = self.upsample(output[0][-1])
-        upsample_outputs = upsample_outputs.permute(0, 2, 3, 1)  # BCHW -> BHWC
-
-        output_img_list = []
-        for upsample_output, meta in zip(upsample_outputs, meta_list):
-            c, s, w, h = meta['center'], meta['scale'], meta['width'], meta['height']
-            logits_result = transform_logits(upsample_output.data.cpu().numpy(), c, s, w, h, input_size=self.input_size)
-            parsing_result = np.argmax(logits_result, axis=2)
-            output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8))
-            output_img.putpalette(self.palette)
-            output_img_list.append(output_img)
-
-        return output_img_list[0] if len(output_img_list) == 1 else output_img_list
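For reference, the wrapper deleted above was the SCHP entry point that `AutoMaskerSeg` now replaces; a minimal usage sketch, assuming an ATR checkpoint is available locally (paths are hypothetical):

```python
from PIL import Image
from model.SCHP import SCHP  # the class removed by this commit

# The dataset type ('lip'/'atr'/'pascal') is inferred from the checkpoint filename.
schp = SCHP(ckpt_path="ckpts/exp-schp-201908301523-atr.pth", device="cuda")

person = Image.open("person.jpg").convert("RGB")
parsing = schp(person)              # palettized PIL image of per-pixel ATR labels
parsing.save("person_parsing.png")
```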
model/SCHP/__pycache__/__init__.cpython-310.pyc
DELETED
Binary file (4.95 kB)

model/SCHP/__pycache__/__init__.cpython-39.pyc
DELETED
Binary file (4.96 kB)

model/SCHP/datasets/__init__.py
DELETED
File without changes

model/SCHP/datasets/__pycache__/__init__.cpython-39.pyc
DELETED
Binary file (170 Bytes)

model/SCHP/datasets/__pycache__/simple_extractor_dataset.cpython-39.pyc
DELETED
Binary file (2.79 kB)
model/SCHP/datasets/datasets.py
DELETED
@@ -1,205 +0,0 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-
-"""
-@Author  : Peike Li
-@Contact : peike.li@yahoo.com
-@File    : datasets.py
-@Time    : 8/4/19 3:35 PM
-@Desc    :
-@License : This source code is licensed under the license found in the
-           LICENSE file in the root directory of this source tree.
-"""
-
-import os
-import numpy as np
-import random
-import torch
-import cv2
-from torch.utils import data
-from utils.transforms import get_affine_transform
-
-
-class LIPDataSet(data.Dataset):
-    def __init__(self, root, dataset, crop_size=[473, 473], scale_factor=0.25,
-                 rotation_factor=30, ignore_label=255, transform=None):
-        self.root = root
-        self.aspect_ratio = crop_size[1] * 1.0 / crop_size[0]
-        self.crop_size = np.asarray(crop_size)
-        self.ignore_label = ignore_label
-        self.scale_factor = scale_factor
-        self.rotation_factor = rotation_factor
-        self.flip_prob = 0.5
-        self.transform = transform
-        self.dataset = dataset
-
-        list_path = os.path.join(self.root, self.dataset + '_id.txt')
-        train_list = [i_id.strip() for i_id in open(list_path)]
-
-        self.train_list = train_list
-        self.number_samples = len(self.train_list)
-
-    def __len__(self):
-        return self.number_samples
-
-    def _box2cs(self, box):
-        x, y, w, h = box[:4]
-        return self._xywh2cs(x, y, w, h)
-
-    def _xywh2cs(self, x, y, w, h):
-        center = np.zeros((2), dtype=np.float32)
-        center[0] = x + w * 0.5
-        center[1] = y + h * 0.5
-        if w > self.aspect_ratio * h:
-            h = w * 1.0 / self.aspect_ratio
-        elif w < self.aspect_ratio * h:
-            w = h * self.aspect_ratio
-        scale = np.array([w * 1.0, h * 1.0], dtype=np.float32)
-        return center, scale
-
-    def __getitem__(self, index):
-        train_item = self.train_list[index]
-
-        im_path = os.path.join(self.root, self.dataset + '_images', train_item + '.jpg')
-        parsing_anno_path = os.path.join(self.root, self.dataset + '_segmentations', train_item + '.png')
-
-        im = cv2.imread(im_path, cv2.IMREAD_COLOR)
-        h, w, _ = im.shape
-        parsing_anno = np.zeros((h, w), dtype=np.long)
-
-        # Get person center and scale
-        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
-        r = 0
-
-        if self.dataset != 'test':
-            # Get pose annotation
-            parsing_anno = cv2.imread(parsing_anno_path, cv2.IMREAD_GRAYSCALE)
-            if self.dataset == 'train' or self.dataset == 'trainval':
-                sf = self.scale_factor
-                rf = self.rotation_factor
-                s = s * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
-                r = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) if random.random() <= 0.6 else 0
-
-                if random.random() <= self.flip_prob:
-                    im = im[:, ::-1, :]
-                    parsing_anno = parsing_anno[:, ::-1]
-                    person_center[0] = im.shape[1] - person_center[0] - 1
-                    right_idx = [15, 17, 19]
-                    left_idx = [14, 16, 18]
-                    for i in range(0, 3):
-                        right_pos = np.where(parsing_anno == right_idx[i])
-                        left_pos = np.where(parsing_anno == left_idx[i])
-                        parsing_anno[right_pos[0], right_pos[1]] = left_idx[i]
-                        parsing_anno[left_pos[0], left_pos[1]] = right_idx[i]
-
-        trans = get_affine_transform(person_center, s, r, self.crop_size)
-        input = cv2.warpAffine(
-            im,
-            trans,
-            (int(self.crop_size[1]), int(self.crop_size[0])),
-            flags=cv2.INTER_LINEAR,
-            borderMode=cv2.BORDER_CONSTANT,
-            borderValue=(0, 0, 0))
-
-        if self.transform:
-            input = self.transform(input)
-
-        meta = {
-            'name': train_item,
-            'center': person_center,
-            'height': h,
-            'width': w,
-            'scale': s,
-            'rotation': r
-        }
-
-        if self.dataset == 'val' or self.dataset == 'test':
-            return input, meta
-        else:
-            label_parsing = cv2.warpAffine(
-                parsing_anno,
-                trans,
-                (int(self.crop_size[1]), int(self.crop_size[0])),
-                flags=cv2.INTER_NEAREST,
-                borderMode=cv2.BORDER_CONSTANT,
-                borderValue=(255))
-
-            label_parsing = torch.from_numpy(label_parsing)
-
-            return input, label_parsing, meta
-
-
-class LIPDataValSet(data.Dataset):
-    def __init__(self, root, dataset='val', crop_size=[473, 473], transform=None, flip=False):
-        self.root = root
-        self.crop_size = crop_size
-        self.transform = transform
-        self.flip = flip
-        self.dataset = dataset
-        self.root = root
-        self.aspect_ratio = crop_size[1] * 1.0 / crop_size[0]
-        self.crop_size = np.asarray(crop_size)
-
-        val_list = []
-        for root, dirs, files in os.walk("/home/chongzheng_p23/data/Datasets/UniFashion/YOOX/YOOX-Images"):
-            for file in files:
-                if file.endswith(".jpg"):
-                    source_file_path = os.path.join(root, file)
-                    val_list.append(source_file_path)
-
-        self.val_list = val_list
-        self.number_samples = len(self.val_list)
-
-    def __len__(self):
-        return len(self.val_list)
-
-    def _box2cs(self, box):
-        x, y, w, h = box[:4]
-        return self._xywh2cs(x, y, w, h)
-
-    def _xywh2cs(self, x, y, w, h):
-        center = np.zeros((2), dtype=np.float32)
-        center[0] = x + w * 0.5
-        center[1] = y + h * 0.5
-        if w > self.aspect_ratio * h:
-            h = w * 1.0 / self.aspect_ratio
-        elif w < self.aspect_ratio * h:
-            w = h * self.aspect_ratio
-        scale = np.array([w * 1.0, h * 1.0], dtype=np.float32)
-
-        return center, scale
-
-    def __getitem__(self, index):
-        val_item = self.val_list[index]
-        # Load training image
-        im_path = val_item
-        im = cv2.imread(im_path, cv2.IMREAD_COLOR)
-        h, w, _ = im.shape
-        # Get person center and scale
-        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
-        r = 0
-        trans = get_affine_transform(person_center, s, r, self.crop_size)
-        input = cv2.warpAffine(
-            im,
-            trans,
-            (int(self.crop_size[1]), int(self.crop_size[0])),
-            flags=cv2.INTER_LINEAR,
-            borderMode=cv2.BORDER_CONSTANT,
-            borderValue=(0, 0, 0))
-        input = self.transform(input)
-        flip_input = input.flip(dims=[-1])
-        if self.flip:
-            batch_input_im = torch.stack([input, flip_input])
-        else:
-            batch_input_im = input
-
-        meta = {
-            'name': val_item,  # root
-            'center': person_center,
-            'height': h,
-            'width': w,
-            'scale': s,
-            'rotation': r
-        }
-
-        return batch_input_im, meta
model/SCHP/datasets/simple_extractor_dataset.py
DELETED
@@ -1,92 +0,0 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-
-"""
-@Author  : Peike Li
-@Contact : peike.li@yahoo.com
-@File    : dataset.py
-@Time    : 8/30/19 9:12 PM
-@Desc    : Dataset Definition
-@License : This source code is licensed under the license found in the
-           LICENSE file in the root directory of this source tree.
-"""
-
-import os
-import cv2
-import numpy as np
-
-from torch.utils import data
-from utils.transforms import get_affine_transform
-
-
-class SimpleFolderDataset(data.Dataset):
-    def __init__(self, root, input_size=[512, 512], transform=None):
-        self.root = root
-        self.input_size = input_size
-        self.transform = transform
-        self.aspect_ratio = input_size[1] * 1.0 / input_size[0]
-        self.input_size = np.asarray(input_size)
-
-        self.file_list = []
-        self.root_list = []
-        for root, dirs, files in os.walk(root):
-            for file in files:
-                if file.endswith(".jpg"):
-                    source_file_path = os.path.join(root, file)
-                    self.file_list.append(source_file_path)
-                    self.root_list.append(root)
-
-    def __len__(self):
-        return len(self.file_list)
-
-    def _box2cs(self, box):
-        x, y, w, h = box[:4]
-        return self._xywh2cs(x, y, w, h)
-
-    def _xywh2cs(self, x, y, w, h):
-        center = np.zeros((2), dtype=np.float32)
-        center[0] = x + w * 0.5
-        center[1] = y + h * 0.5
-        if w > self.aspect_ratio * h:
-            h = w * 1.0 / self.aspect_ratio
-        elif w < self.aspect_ratio * h:
-            w = h * self.aspect_ratio
-        scale = np.array([w, h], dtype=np.float32)
-        return center, scale
-
-    def __getitem__(self, index):
-        img_path = self.file_list[index]
-        root = self.root_list[index]
-        img_name = img_path.split("/")[-1].split(".")[0]
-        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
-
-        if img is None:
-            return self.__getitem__(index + 1)
-        else:
-            h, w, _ = img.shape
-
-        # Get person center and scale
-        person_center, s = self._box2cs([0, 0, w - 1, h - 1])
-        r = 0
-        trans = get_affine_transform(person_center, s, r, self.input_size)
-        input = cv2.warpAffine(
-            img,
-            trans,
-            (int(self.input_size[1]), int(self.input_size[0])),
-            flags=cv2.INTER_LINEAR,
-            borderMode=cv2.BORDER_CONSTANT,
-            borderValue=(0, 0, 0))
-
-        input = self.transform(input)
-        meta = {
-            'img_path': img_path,
-            'name': img_name,
-            'root': root,
-            'center': person_center,
-            'height': h,
-            'width': w,
-            'scale': s,
-            'rotation': r
-        }
-
-        return input, meta
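A sketch of how `SimpleFolderDataset` was typically driven before deletion; the normalization matches the one in `model/SCHP/__init__.py`, while the input directory is hypothetical:

```python
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from datasets.simple_extractor_dataset import SimpleFolderDataset  # removed by this commit

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]),
])
dataset = SimpleFolderDataset(root="INPUT_PATH", input_size=[512, 512], transform=transform)
loader = DataLoader(dataset, batch_size=1, shuffle=False, pin_memory=True)

for image, meta in loader:
    # 'meta' carries center/scale/rotation so predictions can later be warped
    # back to the original image geometry (via transform_logits/transform_parsing).
    ...
```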
model/SCHP/datasets/target_generation.py
DELETED
@@ -1,40 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-
-def generate_edge_tensor(label, edge_width=3):
-    # label = label.type(torch.cuda.FloatTensor)
-    if len(label.shape) == 2:
-        label = label.unsqueeze(0)
-    n, h, w = label.shape
-    edge = torch.zeros(label.shape, dtype=torch.float)  # .cuda()
-    # right
-    edge_right = edge[:, 1:h, :]
-    edge_right[(label[:, 1:h, :] != label[:, :h - 1, :]) & (label[:, 1:h, :] != 255)
-               & (label[:, :h - 1, :] != 255)] = 1
-
-    # up
-    edge_up = edge[:, :, :w - 1]
-    edge_up[(label[:, :, :w - 1] != label[:, :, 1:w])
-            & (label[:, :, :w - 1] != 255)
-            & (label[:, :, 1:w] != 255)] = 1
-
-    # upright
-    edge_upright = edge[:, :h - 1, :w - 1]
-    edge_upright[(label[:, :h - 1, :w - 1] != label[:, 1:h, 1:w])
-                 & (label[:, :h - 1, :w - 1] != 255)
-                 & (label[:, 1:h, 1:w] != 255)] = 1
-
-    # bottomright
-    edge_bottomright = edge[:, :h - 1, 1:w]
-    edge_bottomright[(label[:, :h - 1, 1:w] != label[:, 1:h, :w - 1])
-                     & (label[:, :h - 1, 1:w] != 255)
-                     & (label[:, 1:h, :w - 1] != 255)] = 1
-
-    kernel = torch.ones((1, 1, edge_width, edge_width), dtype=torch.float)  # .cuda()
-    with torch.no_grad():
-        edge = edge.unsqueeze(1)
-        edge = F.conv2d(edge, kernel, stride=1, padding=1)
-        edge[edge != 0] = 1
-        edge = edge.squeeze()
-    return edge
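`generate_edge_tensor` marks pixels where neighbouring labels differ (treating 255 as ignore) and thickens the result with an `edge_width` box kernel; a toy invocation, for illustration:

```python
import torch
from datasets.target_generation import generate_edge_tensor  # removed by this commit

# Two-region toy label map; the value 255 would be treated as 'ignore'.
label = torch.zeros(8, 8, dtype=torch.long)
label[:, 4:] = 1

edge = generate_edge_tensor(label, edge_width=3)  # 1s along the dilated boundary
print(edge.shape, int(edge.sum()))                # (8, 8) and the edge pixel count
```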
model/SCHP/environment.yaml
DELETED
@@ -1,49 +0,0 @@
-name: schp
-channels:
-  - pytorch
-  - defaults
-dependencies:
-  - _libgcc_mutex=0.1=main
-  - blas=1.0=mkl
-  - ca-certificates=2020.12.8=h06a4308_0
-  - certifi=2020.12.5=py38h06a4308_0
-  - cudatoolkit=10.1.243=h6bb024c_0
-  - freetype=2.10.4=h5ab3b9f_0
-  - intel-openmp=2020.2=254
-  - jpeg=9b=h024ee3a_2
-  - lcms2=2.11=h396b838_0
-  - ld_impl_linux-64=2.33.1=h53a641e_7
-  - libedit=3.1.20191231=h14c3975_1
-  - libffi=3.3=he6710b0_2
-  - libgcc-ng=9.1.0=hdf63c60_0
-  - libpng=1.6.37=hbc83047_0
-  - libstdcxx-ng=9.1.0=hdf63c60_0
-  - libtiff=4.1.0=h2733197_1
-  - lz4-c=1.9.2=heb0550a_3
-  - mkl=2020.2=256
-  - mkl-service=2.3.0=py38he904b0f_0
-  - mkl_fft=1.2.0=py38h23d657b_0
-  - mkl_random=1.1.1=py38h0573a6f_0
-  - ncurses=6.2=he6710b0_1
-  - ninja=1.10.2=py38hff7bd54_0
-  - numpy=1.19.2=py38h54aff64_0
-  - numpy-base=1.19.2=py38hfa32c7d_0
-  - olefile=0.46=py_0
-  - openssl=1.1.1i=h27cfd23_0
-  - pillow=8.0.1=py38he98fc37_0
-  - pip=20.3.3=py38h06a4308_0
-  - python=3.8.5=h7579374_1
-  - readline=8.0=h7b6447c_0
-  - setuptools=51.0.0=py38h06a4308_2
-  - six=1.15.0=py38h06a4308_0
-  - sqlite=3.33.0=h62c20be_0
-  - tk=8.6.10=hbc83047_0
-  - tqdm=4.55.0=pyhd3eb1b0_0
-  - wheel=0.36.2=pyhd3eb1b0_0
-  - xz=5.2.5=h7b6447c_0
-  - zlib=1.2.11=h7b6447c_3
-  - zstd=1.4.5=h9ceee32_0
-  - pytorch=1.5.1=py3.8_cuda10.1.243_cudnn7.6.3_0
-  - torchvision=0.6.1=py38_cu101
-prefix: /home/peike/opt/anaconda3/envs/schp
-
model/SCHP/evaluate.py
DELETED
@@ -1,210 +0,0 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-
-"""
-@Author  : Peike Li
-@Contact : peike.li@yahoo.com
-@File    : evaluate.py
-@Time    : 8/4/19 3:36 PM
-@Desc    :
-@License : This source code is licensed under the license found in the
-           LICENSE file in the root directory of this source tree.
-"""
-
-import os
-import argparse
-import numpy as np
-import torch
-
-from torch.utils import data
-from tqdm import tqdm
-from PIL import Image as PILImage
-import torchvision.transforms as transforms
-import torch.backends.cudnn as cudnn
-
-import networks
-from datasets.datasets import LIPDataValSet
-from utils.miou import compute_mean_ioU
-from utils.transforms import BGR2RGB_transform
-from utils.transforms import transform_parsing
-
-
-def get_arguments():
-    """Parse all the arguments provided from the CLI.
-
-    Returns:
-      A list of parsed arguments.
-    """
-    parser = argparse.ArgumentParser(description="Self Correction for Human Parsing")
-
-    # Network Structure
-    parser.add_argument("--arch", type=str, default='resnet101')
-    # Data Preference
-    parser.add_argument("--data-dir", type=str, default='./data/LIP')
-    parser.add_argument("--batch-size", type=int, default=1)
-    parser.add_argument("--input-size", type=str, default='473,473')
-    parser.add_argument("--num-classes", type=int, default=20)
-    parser.add_argument("--ignore-label", type=int, default=255)
-    parser.add_argument("--random-mirror", action="store_true")
-    parser.add_argument("--random-scale", action="store_true")
-    # Evaluation Preference
-    parser.add_argument("--log-dir", type=str, default='./log')
-    parser.add_argument("--model-restore", type=str,
-                        default='/data1/chongzheng/zhangwq/Self-Correction-Human-Parsing-master/exp-schp-201908301523-atr.pth')
-    parser.add_argument("--gpu", type=str, default='0', help="choose gpu device.")
-    parser.add_argument("--save-results", action="store_true", help="whether to save the results.")
-    parser.add_argument("--flip", action="store_true", help="random flip during the test.")
-    parser.add_argument("--multi-scales", type=str, default='1', help="multiple scales during the test")
-    return parser.parse_args()
-
-
-def get_palette(num_cls):
-    """ Returns the color map for visualizing the segmentation mask.
-    Args:
-        num_cls: Number of classes
-    Returns:
-        The color map
-    """
-    n = num_cls
-    palette = [0] * (n * 3)
-    for j in range(0, n):
-        lab = j
-        palette[j * 3 + 0] = 0
-        palette[j * 3 + 1] = 0
-        palette[j * 3 + 2] = 0
-        i = 0
-        while lab:
-            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
-            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
-            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
-            i += 1
-            lab >>= 3
-    return palette
-
-
-def multi_scale_testing(model, batch_input_im, crop_size=[473, 473], flip=True, multi_scales=[1]):
-    flipped_idx = (15, 14, 17, 16, 19, 18)
-    if len(batch_input_im.shape) > 4:
-        batch_input_im = batch_input_im.squeeze()
-    if len(batch_input_im.shape) == 3:
-        batch_input_im = batch_input_im.unsqueeze(0)
-
-    interp = torch.nn.Upsample(size=crop_size, mode='bilinear', align_corners=True)
-    ms_outputs = []
-    for s in multi_scales:
-        interp_im = torch.nn.Upsample(scale_factor=s, mode='bilinear', align_corners=True)
-        scaled_im = interp_im(batch_input_im)
-        parsing_output = model(scaled_im)
-        parsing_output = parsing_output[0][-1]
-        output = parsing_output[0]
-        if flip:
-            flipped_output = parsing_output[1]
-            flipped_output[14:20, :, :] = flipped_output[flipped_idx, :, :]
-            output += flipped_output.flip(dims=[-1])
-            output *= 0.5
-        output = interp(output.unsqueeze(0))
-        ms_outputs.append(output[0])
-    ms_fused_parsing_output = torch.stack(ms_outputs)
-    ms_fused_parsing_output = ms_fused_parsing_output.mean(0)
-    ms_fused_parsing_output = ms_fused_parsing_output.permute(1, 2, 0)  # HWC
-    parsing = torch.argmax(ms_fused_parsing_output, dim=2)
-    parsing = parsing.data.cpu().numpy()
-    ms_fused_parsing_output = ms_fused_parsing_output.data.cpu().numpy()
-    return parsing, ms_fused_parsing_output
-
-
-def main():
-    """Create the model and start the evaluation process."""
-    args = get_arguments()
-    multi_scales = [float(i) for i in args.multi_scales.split(',')]
-    gpus = [int(i) for i in args.gpu.split(',')]
-    assert len(gpus) == 1
-    if not args.gpu == 'None':
-        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
-
-    cudnn.benchmark = True
-    cudnn.enabled = True
-
-    h, w = map(int, args.input_size.split(','))
-    input_size = [h, w]
-
-    model = networks.init_model(args.arch, num_classes=args.num_classes, pretrained=None)
-
-    IMAGE_MEAN = model.mean
-    IMAGE_STD = model.std
-    INPUT_SPACE = model.input_space
-    print('image mean: {}'.format(IMAGE_MEAN))
-    print('image std: {}'.format(IMAGE_STD))
-    print('input space: {}'.format(INPUT_SPACE))
-    if INPUT_SPACE == 'BGR':
-        print('BGR Transformation')
-        transform = transforms.Compose([
-            transforms.ToTensor(),
-            transforms.Normalize(mean=IMAGE_MEAN,
-                                 std=IMAGE_STD),
-        ])
-    if INPUT_SPACE == 'RGB':
-        print('RGB Transformation')
-        transform = transforms.Compose([
-            transforms.ToTensor(),
-            BGR2RGB_transform(),
-            transforms.Normalize(mean=IMAGE_MEAN,
-                                 std=IMAGE_STD),
-        ])
-
-    # Data loader
-    lip_test_dataset = LIPDataValSet(args.data_dir, 'val', crop_size=input_size, transform=transform, flip=args.flip)
-    num_samples = len(lip_test_dataset)
-    print('Total testing sample numbers: {}'.format(num_samples))
-    testloader = data.DataLoader(lip_test_dataset, batch_size=args.batch_size, shuffle=False, pin_memory=True)
-
-    # Load model weight
-    state_dict = torch.load(args.model_restore)['state_dict']
-    from collections import OrderedDict
-    new_state_dict = OrderedDict()
-    for k, v in state_dict.items():
-        name = k[7:]  # remove `module.`
-        new_state_dict[name] = v
-    model.load_state_dict(new_state_dict)
-    model.cuda()
-    model.eval()
-
-    sp_results_dir = os.path.join(args.log_dir, 'sp_results')
-    if not os.path.exists(sp_results_dir):
-        os.makedirs(sp_results_dir)
-
-    palette = get_palette(20)
-    parsing_preds = []
-    scales = np.zeros((num_samples, 2), dtype=np.float32)
-    centers = np.zeros((num_samples, 2), dtype=np.int32)
-    with torch.no_grad():
-        for idx, batch in enumerate(tqdm(testloader)):
-            image, meta = batch
-            if (len(image.shape) > 4):
-                image = image.squeeze()
-            im_name = meta['name'][0]
-            c = meta['center'].numpy()[0]
-            s = meta['scale'].numpy()[0]
-            w = meta['width'].numpy()[0]
-            h = meta['height'].numpy()[0]
-            scales[idx, :] = s
-            centers[idx, :] = c
-            parsing, logits = multi_scale_testing(model, image.cuda(), crop_size=input_size, flip=args.flip,
-                                                  multi_scales=multi_scales)
-            if args.save_results:
-                parsing_result = transform_parsing(parsing, c, s, w, h, input_size)
-                parsing_result_path = os.path.join(sp_results_dir, im_name + '.png')
-                output_im = PILImage.fromarray(np.asarray(parsing_result, dtype=np.uint8))
-                output_im.putpalette(palette)
-                output_im.save(parsing_result_path)
-
-            parsing_preds.append(parsing)
-    assert len(parsing_preds) == num_samples
-    mIoU = compute_mean_ioU(parsing_preds, scales, centers, args.num_classes, args.data_dir, input_size)
-    print(mIoU)
-    return
-
-
-if __name__ == '__main__':
-    main()
model/SCHP/file_list.txt
DELETED
The diff for this file is too large to render. See raw diff.

model/SCHP/mhp_extension/.ipynb_checkpoints/demo-checkpoint.ipynb
DELETED
The diff for this file is too large to render. See raw diff.
model/SCHP/mhp_extension/README.md
DELETED
@@ -1,38 +0,0 @@
-# Self Correction for Human Parsing
-
-We propose a simple yet effective multiple human parsing framework by extending our self-correction network.
-
-Here we show an example-usage Jupyter notebook in [demo.ipynb](./demo.ipynb).
-
-## Requirements
-
-Please see [INSTALL.md](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md) for further requirements.
-
-## Citation
-
-Please cite our work if you find this repo useful in your research.
-
-```latex
-@article{li2019self,
-  title={Self-Correction for Human Parsing},
-  author={Li, Peike and Xu, Yunqiu and Wei, Yunchao and Yang, Yi},
-  journal={arXiv preprint arXiv:1910.09777},
-  year={2019}
-}
-```
-
-## Visualization
-
-* Source Image.
-![demo](./demo/demo.jpg)
-* Instance Human Mask.
-![demo-lip](./demo/demo_instance_human_mask.png)
-* Global Human Parsing Result.
-![demo-lip](./demo/demo_global_human_parsing.png)
-* Multiple Human Parsing Result.
-![demo-lip](./demo/demo_multiple_human_parsing.png)
-
-## Related
-
-Our implementation is based on [Detectron2](https://github.com/facebookresearch/detectron2).
model/SCHP/mhp_extension/coco_style_annotation_creator/__pycache__/pycococreatortools.cpython-37.pyc
DELETED
Binary file (3.6 kB)
model/SCHP/mhp_extension/coco_style_annotation_creator/human_to_coco.py
DELETED
@@ -1,166 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import datetime
|
3 |
-
import json
|
4 |
-
import os
|
5 |
-
from PIL import Image
|
6 |
-
import numpy as np
|
7 |
-
|
8 |
-
import pycococreatortools
|
9 |
-
|
10 |
-
|
11 |
-
def get_arguments():
|
12 |
-
parser = argparse.ArgumentParser(description="transform mask annotation to coco annotation")
|
13 |
-
parser.add_argument("--dataset", type=str, default='CIHP', help="name of dataset (CIHP, MHPv2 or VIP)")
|
14 |
-
parser.add_argument("--json_save_dir", type=str, default='../data/msrcnn_finetune_annotations',
|
15 |
-
help="path to save coco-style annotation json file")
|
16 |
-
parser.add_argument("--use_val", type=bool, default=False,
|
17 |
-
help="use train+val set for finetuning or not")
|
18 |
-
parser.add_argument("--train_img_dir", type=str, default='../data/instance-level_human_parsing/Training/Images',
|
19 |
-
help="train image path")
|
20 |
-
parser.add_argument("--train_anno_dir", type=str,
|
21 |
-
default='../data/instance-level_human_parsing/Training/Human_ids',
|
22 |
-
help="train human mask path")
|
23 |
-
parser.add_argument("--val_img_dir", type=str, default='../data/instance-level_human_parsing/Validation/Images',
|
24 |
-
help="val image path")
|
25 |
-
parser.add_argument("--val_anno_dir", type=str,
|
26 |
-
default='../data/instance-level_human_parsing/Validation/Human_ids',
|
27 |
-
help="val human mask path")
|
28 |
-
return parser.parse_args()
|
29 |
-
|
30 |
-
|
31 |
-
def main(args):
|
32 |
-
INFO = {
|
33 |
-
"description": args.split_name + " Dataset",
|
34 |
-
"url": "",
|
35 |
-
"version": "",
|
36 |
-
"year": 2019,
|
37 |
-
"contributor": "xyq",
|
38 |
-
"date_created": datetime.datetime.utcnow().isoformat(' ')
|
39 |
-
}
|
40 |
-
|
41 |
-
LICENSES = [
|
42 |
-
{
|
43 |
-
"id": 1,
|
44 |
-
"name": "",
|
45 |
-
"url": ""
|
46 |
-
}
|
47 |
-
]
|
48 |
-
|
49 |
-
CATEGORIES = [
|
50 |
-
{
|
51 |
-
'id': 1,
|
52 |
-
'name': 'person',
|
53 |
-
'supercategory': 'person',
|
54 |
-
},
|
55 |
-
]
|
56 |
-
|
57 |
-
coco_output = {
|
58 |
-
"info": INFO,
|
59 |
-
"licenses": LICENSES,
|
60 |
-
"categories": CATEGORIES,
|
61 |
-
"images": [],
|
62 |
-
"annotations": []
|
63 |
-
}
|
64 |
-
|
65 |
-
image_id = 1
|
66 |
-
segmentation_id = 1
|
67 |
-
|
68 |
-
for image_name in os.listdir(args.train_img_dir):
|
69 |
-
image = Image.open(os.path.join(args.train_img_dir, image_name))
|
70 |
-
image_info = pycococreatortools.create_image_info(
|
71 |
-
image_id, image_name, image.size
|
72 |
-
)
|
73 |
-
coco_output["images"].append(image_info)
|
74 |
-
|
75 |
-
human_mask_name = os.path.splitext(image_name)[0] + '.png'
|
76 |
-
-        human_mask = np.asarray(Image.open(os.path.join(args.train_anno_dir, human_mask_name)))
-        human_gt_labels = np.unique(human_mask)
-
-        for i in range(1, len(human_gt_labels)):
-            category_info = {'id': 1, 'is_crowd': 0}
-            binary_mask = np.uint8(human_mask == i)
-            annotation_info = pycococreatortools.create_annotation_info(
-                segmentation_id, image_id, category_info, binary_mask,
-                image.size, tolerance=10
-            )
-            if annotation_info is not None:
-                coco_output["annotations"].append(annotation_info)
-
-            segmentation_id += 1
-        image_id += 1
-
-    if not os.path.exists(args.json_save_dir):
-        os.makedirs(args.json_save_dir)
-    if not args.use_val:
-        with open('{}/{}_train.json'.format(args.json_save_dir, args.split_name), 'w') as output_json_file:
-            json.dump(coco_output, output_json_file)
-    else:
-        for image_name in os.listdir(args.val_img_dir):
-            image = Image.open(os.path.join(args.val_img_dir, image_name))
-            image_info = pycococreatortools.create_image_info(
-                image_id, image_name, image.size
-            )
-            coco_output["images"].append(image_info)
-
-            human_mask_name = os.path.splitext(image_name)[0] + '.png'
-            human_mask = np.asarray(Image.open(os.path.join(args.val_anno_dir, human_mask_name)))
-            human_gt_labels = np.unique(human_mask)
-
-            for i in range(1, len(human_gt_labels)):
-                category_info = {'id': 1, 'is_crowd': 0}
-                binary_mask = np.uint8(human_mask == i)
-                annotation_info = pycococreatortools.create_annotation_info(
-                    segmentation_id, image_id, category_info, binary_mask,
-                    image.size, tolerance=10
-                )
-                if annotation_info is not None:
-                    coco_output["annotations"].append(annotation_info)
-
-                segmentation_id += 1
-            image_id += 1
-
-        with open('{}/{}_trainval.json'.format(args.json_save_dir, args.split_name), 'w') as output_json_file:
-            json.dump(coco_output, output_json_file)
-
-        coco_output_val = {
-            "info": INFO,
-            "licenses": LICENSES,
-            "categories": CATEGORIES,
-            "images": [],
-            "annotations": []
-        }
-
-        image_id_val = 1
-        segmentation_id_val = 1
-
-        for image_name in os.listdir(args.val_img_dir):
-            image = Image.open(os.path.join(args.val_img_dir, image_name))
-            image_info = pycococreatortools.create_image_info(
-                image_id_val, image_name, image.size
-            )
-            coco_output_val["images"].append(image_info)
-
-            human_mask_name = os.path.splitext(image_name)[0] + '.png'
-            human_mask = np.asarray(Image.open(os.path.join(args.val_anno_dir, human_mask_name)))
-            human_gt_labels = np.unique(human_mask)
-
-            for i in range(1, len(human_gt_labels)):
-                category_info = {'id': 1, 'is_crowd': 0}
-                binary_mask = np.uint8(human_mask == i)
-                annotation_info = pycococreatortools.create_annotation_info(
-                    segmentation_id_val, image_id_val, category_info, binary_mask,
-                    image.size, tolerance=10
-                )
-                if annotation_info is not None:
-                    coco_output_val["annotations"].append(annotation_info)
-
-                segmentation_id_val += 1
-            image_id_val += 1
-
-        with open('{}/{}_val.json'.format(args.json_save_dir, args.split_name), 'w') as output_json_file_val:
-            json.dump(coco_output_val, output_json_file_val)
-
-
-if __name__ == "__main__":
-    args = get_arguments()
-    main(args)
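A minimal sketch (not part of this commit) of the per-instance loop in the deleted script above: every non-background value in a human-ID mask becomes one binary mask and one COCO annotation. Note the script iterates label *indices*, which matches label *values* only when the mask labels are the consecutive integers 1..N; the array contents below are invented for illustration.

```python
import numpy as np

# Toy human-ID mask: 0 = background, 1..N = individual humans (illustrative values).
human_mask = np.array([[0, 1, 1],
                       [0, 2, 2],
                       [0, 0, 2]], dtype=np.uint8)

human_gt_labels = np.unique(human_mask)        # -> [0, 1, 2]
for label in human_gt_labels[1:]:              # skip the background label
    binary_mask = np.uint8(human_mask == label)
    # binary_mask is what create_annotation_info() encodes as polygon/RLE
    print(label, binary_mask.sum())
```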
model/SCHP/mhp_extension/coco_style_annotation_creator/pycococreatortools.py
DELETED
@@ -1,114 +0,0 @@
-import re
-import datetime
-import numpy as np
-from itertools import groupby
-from skimage import measure
-from PIL import Image
-from pycocotools import mask
-
-convert = lambda text: int(text) if text.isdigit() else text.lower()
-natrual_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
-
-
-def resize_binary_mask(array, new_size):
-    image = Image.fromarray(array.astype(np.uint8) * 255)
-    image = image.resize(new_size)
-    return np.asarray(image).astype(np.bool_)
-
-
-def close_contour(contour):
-    if not np.array_equal(contour[0], contour[-1]):
-        contour = np.vstack((contour, contour[0]))
-    return contour
-
-
-def binary_mask_to_rle(binary_mask):
-    rle = {'counts': [], 'size': list(binary_mask.shape)}
-    counts = rle.get('counts')
-    for i, (value, elements) in enumerate(groupby(binary_mask.ravel(order='F'))):
-        if i == 0 and value == 1:
-            counts.append(0)
-        counts.append(len(list(elements)))
-
-    return rle
-
-
-def binary_mask_to_polygon(binary_mask, tolerance=0):
-    """Converts a binary mask to COCO polygon representation
-    Args:
-        binary_mask: a 2D binary numpy array where '1's represent the object
-        tolerance: Maximum distance from original points of polygon to approximated
-            polygonal chain. If tolerance is 0, the original coordinate array is returned.
-    """
-    polygons = []
-    # pad mask to close contours of shapes which start and end at an edge
-    padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)
-    contours = measure.find_contours(padded_binary_mask, 0.5)
-    contours = np.subtract(contours, 1)
-    for contour in contours:
-        contour = close_contour(contour)
-        contour = measure.approximate_polygon(contour, tolerance)
-        if len(contour) < 3:
-            continue
-        contour = np.flip(contour, axis=1)
-        segmentation = contour.ravel().tolist()
-        # after padding and subtracting 1 we may get -0.5 points in our segmentation
-        segmentation = [0 if i < 0 else i for i in segmentation]
-        polygons.append(segmentation)
-
-    return polygons
-
-
-def create_image_info(image_id, file_name, image_size,
-                      date_captured=datetime.datetime.utcnow().isoformat(' '),
-                      license_id=1, coco_url="", flickr_url=""):
-    image_info = {
-        "id": image_id,
-        "file_name": file_name,
-        "width": image_size[0],
-        "height": image_size[1],
-        "date_captured": date_captured,
-        "license": license_id,
-        "coco_url": coco_url,
-        "flickr_url": flickr_url
-    }
-
-    return image_info
-
-
-def create_annotation_info(annotation_id, image_id, category_info, binary_mask,
-                           image_size=None, tolerance=2, bounding_box=None):
-    if image_size is not None:
-        binary_mask = resize_binary_mask(binary_mask, image_size)
-
-    binary_mask_encoded = mask.encode(np.asfortranarray(binary_mask.astype(np.uint8)))
-
-    area = mask.area(binary_mask_encoded)
-    if area < 1:
-        return None
-
-    if bounding_box is None:
-        bounding_box = mask.toBbox(binary_mask_encoded)
-
-    if category_info["is_crowd"]:
-        is_crowd = 1
-        segmentation = binary_mask_to_rle(binary_mask)
-    else:
-        is_crowd = 0
-        segmentation = binary_mask_to_polygon(binary_mask, tolerance)
-        if not segmentation:
-            return None
-
-    annotation_info = {
-        "id": annotation_id,
-        "image_id": image_id,
-        "category_id": category_info["id"],
-        "iscrowd": is_crowd,
-        "area": area.tolist(),
-        "bbox": bounding_box.tolist(),
-        "segmentation": segmentation,
-        "width": binary_mask.shape[1],
-        "height": binary_mask.shape[0],
-    }
-
-    return annotation_info
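For reference, a hedged usage sketch (not from the repository) of how the two main helpers above combine: `create_image_info` registers an image record, and `create_annotation_info` encodes one binary mask as a COCO polygon (or RLE when `is_crowd` is set).

```python
import numpy as np
import pycococreatortools  # the module shown in the diff above

# Illustrative 64x64 mask containing a single 32x32 square "object".
binary_mask = np.zeros((64, 64), dtype=np.uint8)
binary_mask[16:48, 16:48] = 1

image_info = pycococreatortools.create_image_info(
    image_id=1, file_name="demo.jpg", image_size=(64, 64))

annotation_info = pycococreatortools.create_annotation_info(
    annotation_id=1, image_id=1,
    category_info={"id": 1, "is_crowd": 0},
    binary_mask=binary_mask, image_size=(64, 64), tolerance=2)

# "segmentation" holds polygon coordinate lists; "bbox" is [x, y, w, h].
print(annotation_info["bbox"], annotation_info["area"])
```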
model/SCHP/mhp_extension/coco_style_annotation_creator/test_human2coco_format.py
DELETED
@@ -1,74 +0,0 @@
-import argparse
-import datetime
-import json
-import os
-from PIL import Image
-
-import pycococreatortools
-
-
-def get_arguments():
-    parser = argparse.ArgumentParser(description="transform mask annotation to coco annotation")
-    parser.add_argument("--dataset", type=str, default='CIHP', help="name of dataset (CIHP, MHPv2 or VIP)")
-    parser.add_argument("--json_save_dir", type=str, default='../data/CIHP/annotations',
-                        help="path to save coco-style annotation json file")
-    parser.add_argument("--test_img_dir", type=str, default='../data/CIHP/Testing/Images',
-                        help="test image path")
-    return parser.parse_args()
-
-args = get_arguments()
-
-INFO = {
-    "description": args.dataset + "Dataset",
-    "url": "",
-    "version": "",
-    "year": 2020,
-    "contributor": "yunqiuxu",
-    "date_created": datetime.datetime.utcnow().isoformat(' ')
-}
-
-LICENSES = [
-    {
-        "id": 1,
-        "name": "",
-        "url": ""
-    }
-]
-
-CATEGORIES = [
-    {
-        'id': 1,
-        'name': 'person',
-        'supercategory': 'person',
-    },
-]
-
-
-def main(args):
-    coco_output = {
-        "info": INFO,
-        "licenses": LICENSES,
-        "categories": CATEGORIES,
-        "images": [],
-        "annotations": []
-    }
-
-    image_id = 1
-
-    for image_name in os.listdir(args.test_img_dir):
-        image = Image.open(os.path.join(args.test_img_dir, image_name))
-        image_info = pycococreatortools.create_image_info(
-            image_id, image_name, image.size
-        )
-        coco_output["images"].append(image_info)
-        image_id += 1
-
-    if not os.path.exists(os.path.join(args.json_save_dir)):
-        os.mkdir(os.path.join(args.json_save_dir))
-
-    with open('{}/{}.json'.format(args.json_save_dir, args.dataset), 'w') as output_json_file:
-        json.dump(coco_output, output_json_file)
-
-
-if __name__ == "__main__":
-    main(args)
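A small follow-up sketch (paths assumed from the script defaults above; not part of the commit): `pycocotools.coco.COCO` can load the images-only annotation file this script writes, since the `annotations` list is present but empty.

```python
from pycocotools.coco import COCO

# Default location: '{json_save_dir}/{dataset}.json' from the argument parser above.
coco = COCO("../data/CIHP/annotations/CIHP.json")
print(len(coco.dataset["images"]), "images registered")
print(coco.dataset["categories"])  # [{'id': 1, 'name': 'person', ...}]
```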
model/SCHP/mhp_extension/data/DemoDataset/global_pic/demo.jpg
DELETED
Binary file (139 kB)

model/SCHP/mhp_extension/demo.ipynb
DELETED
The diff for this file is too large to render. See raw diff.

model/SCHP/mhp_extension/demo/demo.jpg
DELETED
Binary file (139 kB)

model/SCHP/mhp_extension/demo/demo_global_human_parsing.png
DELETED
Binary file (16.8 kB)

model/SCHP/mhp_extension/demo/demo_instance_human_mask.png
DELETED
Binary file (15.5 kB)

model/SCHP/mhp_extension/demo/demo_multiple_human_parsing.png
DELETED
Binary file (17.1 kB)
model/SCHP/mhp_extension/detectron2/.circleci/config.yml
DELETED
@@ -1,179 +0,0 @@
-# Python CircleCI 2.0 configuration file
-#
-# Check https://circleci.com/docs/2.0/language-python/ for more details
-#
-version: 2
-
-# -------------------------------------------------------------------------------------
-# Environments to run the jobs in
-# -------------------------------------------------------------------------------------
-cpu: &cpu
-  docker:
-    - image: circleci/python:3.6.8-stretch
-  resource_class: medium
-
-gpu: &gpu
-  machine:
-    image: ubuntu-1604:201903-01
-    docker_layer_caching: true
-  resource_class: gpu.small
-
-# -------------------------------------------------------------------------------------
-# Re-usable commands
-# -------------------------------------------------------------------------------------
-install_python: &install_python
-  - run:
-      name: Install Python
-      working_directory: ~/
-      command: |
-        pyenv install 3.6.1
-        pyenv global 3.6.1
-
-setup_venv: &setup_venv
-  - run:
-      name: Setup Virtual Env
-      working_directory: ~/
-      command: |
-        python -m venv ~/venv
-        echo ". ~/venv/bin/activate" >> $BASH_ENV
-        . ~/venv/bin/activate
-        python --version
-        which python
-        which pip
-        pip install --upgrade pip
-
-install_dep: &install_dep
-  - run:
-      name: Install Dependencies
-      command: |
-        pip install --progress-bar off -U 'git+https://github.com/facebookresearch/fvcore'
-        pip install --progress-bar off cython opencv-python
-        pip install --progress-bar off 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
-        pip install --progress-bar off torch torchvision
-
-install_detectron2: &install_detectron2
-  - run:
-      name: Install Detectron2
-      command: |
-        gcc --version
-        pip install -U --progress-bar off -e .[dev]
-        python -m detectron2.utils.collect_env
-
-install_nvidia_driver: &install_nvidia_driver
-  - run:
-      name: Install nvidia driver
-      working_directory: ~/
-      command: |
-        wget -q 'https://s3.amazonaws.com/ossci-linux/nvidia_driver/NVIDIA-Linux-x86_64-430.40.run'
-        sudo /bin/bash ./NVIDIA-Linux-x86_64-430.40.run -s --no-drm
-        nvidia-smi
-
-run_unittests: &run_unittests
-  - run:
-      name: Run Unit Tests
-      command: |
-        python -m unittest discover -v -s tests
-
-# -------------------------------------------------------------------------------------
-# Jobs to run
-# -------------------------------------------------------------------------------------
-jobs:
-  cpu_tests:
-    <<: *cpu
-
-    working_directory: ~/detectron2
-
-    steps:
-      - checkout
-      - <<: *setup_venv
-
-      # Cache the venv directory that contains dependencies
-      - restore_cache:
-          keys:
-            - cache-key-{{ .Branch }}-ID-20200425
-
-      - <<: *install_dep
-
-      - save_cache:
-          paths:
-            - ~/venv
-          key: cache-key-{{ .Branch }}-ID-20200425
-
-      - <<: *install_detectron2
-
-      - run:
-          name: isort
-          command: |
-            isort -c -sp .
-      - run:
-          name: black
-          command: |
-            black --check -l 100 .
-      - run:
-          name: flake8
-          command: |
-            flake8 .
-
-      - <<: *run_unittests
-
-  gpu_tests:
-    <<: *gpu
-
-    working_directory: ~/detectron2
-
-    steps:
-      - checkout
-      - <<: *install_nvidia_driver
-
-      - run:
-          name: Install nvidia-docker
-          working_directory: ~/
-          command: |
-            curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add -
-            distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
-            curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | \
-              sudo tee /etc/apt/sources.list.d/nvidia-docker.list
-            sudo apt-get update && sudo apt-get install -y nvidia-docker2
-            # reload the docker daemon configuration
-            sudo pkill -SIGHUP dockerd
-
-      - run:
-          name: Launch docker
-          working_directory: ~/detectron2/docker
-          command: |
-            nvidia-docker build -t detectron2:v0 -f Dockerfile-circleci .
-            nvidia-docker run -itd --name d2 detectron2:v0
-            docker exec -it d2 nvidia-smi
-
-      - run:
-          name: Build Detectron2
-          command: |
-            docker exec -it d2 pip install 'git+https://github.com/facebookresearch/fvcore'
-            docker cp ~/detectron2 d2:/detectron2
-            # This will build d2 for the target GPU arch only
-            docker exec -it d2 pip install -e /detectron2
-            docker exec -it d2 python3 -m detectron2.utils.collect_env
-            docker exec -it d2 python3 -c 'import torch; assert(torch.cuda.is_available())'
-
-      - run:
-          name: Run Unit Tests
-          command: |
-            docker exec -e CIRCLECI=true -it d2 python3 -m unittest discover -v -s /detectron2/tests
-
-workflows:
-  version: 2
-  regular_test:
-    jobs:
-      - cpu_tests
-      - gpu_tests
-
-  #nightly_test:
-    #jobs:
-      #- gpu_tests
-    #triggers:
-      #- schedule:
-          #cron: "0 0 * * *"
-          #filters:
-            #branches:
-              #only:
-                #- master
model/SCHP/mhp_extension/detectron2/.clang-format
DELETED
@@ -1,85 +0,0 @@
-AccessModifierOffset: -1
-AlignAfterOpenBracket: AlwaysBreak
-AlignConsecutiveAssignments: false
-AlignConsecutiveDeclarations: false
-AlignEscapedNewlinesLeft: true
-AlignOperands: false
-AlignTrailingComments: false
-AllowAllParametersOfDeclarationOnNextLine: false
-AllowShortBlocksOnASingleLine: false
-AllowShortCaseLabelsOnASingleLine: false
-AllowShortFunctionsOnASingleLine: Empty
-AllowShortIfStatementsOnASingleLine: false
-AllowShortLoopsOnASingleLine: false
-AlwaysBreakAfterReturnType: None
-AlwaysBreakBeforeMultilineStrings: true
-AlwaysBreakTemplateDeclarations: true
-BinPackArguments: false
-BinPackParameters: false
-BraceWrapping:
-  AfterClass: false
-  AfterControlStatement: false
-  AfterEnum: false
-  AfterFunction: false
-  AfterNamespace: false
-  AfterObjCDeclaration: false
-  AfterStruct: false
-  AfterUnion: false
-  BeforeCatch: false
-  BeforeElse: false
-  IndentBraces: false
-BreakBeforeBinaryOperators: None
-BreakBeforeBraces: Attach
-BreakBeforeTernaryOperators: true
-BreakConstructorInitializersBeforeComma: false
-BreakAfterJavaFieldAnnotations: false
-BreakStringLiterals: false
-ColumnLimit: 80
-CommentPragmas: '^ IWYU pragma:'
-ConstructorInitializerAllOnOneLineOrOnePerLine: true
-ConstructorInitializerIndentWidth: 4
-ContinuationIndentWidth: 4
-Cpp11BracedListStyle: true
-DerivePointerAlignment: false
-DisableFormat: false
-ForEachMacros: [ FOR_EACH, FOR_EACH_ENUMERATE, FOR_EACH_KV, FOR_EACH_R, FOR_EACH_RANGE, ]
-IncludeCategories:
-  - Regex: '^<.*\.h(pp)?>'
-    Priority: 1
-  - Regex: '^<.*'
-    Priority: 2
-  - Regex: '.*'
-    Priority: 3
-IndentCaseLabels: true
-IndentWidth: 2
-IndentWrappedFunctionNames: false
-KeepEmptyLinesAtTheStartOfBlocks: false
-MacroBlockBegin: ''
-MacroBlockEnd: ''
-MaxEmptyLinesToKeep: 1
-NamespaceIndentation: None
-ObjCBlockIndentWidth: 2
-ObjCSpaceAfterProperty: false
-ObjCSpaceBeforeProtocolList: false
-PenaltyBreakBeforeFirstCallParameter: 1
-PenaltyBreakComment: 300
-PenaltyBreakFirstLessLess: 120
-PenaltyBreakString: 1000
-PenaltyExcessCharacter: 1000000
-PenaltyReturnTypeOnItsOwnLine: 200
-PointerAlignment: Left
-ReflowComments: true
-SortIncludes: true
-SpaceAfterCStyleCast: false
-SpaceBeforeAssignmentOperators: true
-SpaceBeforeParens: ControlStatements
-SpaceInEmptyParentheses: false
-SpacesBeforeTrailingComments: 1
-SpacesInAngles: false
-SpacesInContainerLiterals: true
-SpacesInCStyleCastParentheses: false
-SpacesInParentheses: false
-SpacesInSquareBrackets: false
-Standard: Cpp11
-TabWidth: 8
-UseTab: Never
model/SCHP/mhp_extension/detectron2/.flake8
DELETED
@@ -1,9 +0,0 @@
-# This is an example .flake8 config, used when developing *Black* itself.
-# Keep in sync with setup.cfg which is used for source packages.
-
-[flake8]
-ignore = W503, E203, E221, C901, C408, E741
-max-line-length = 100
-max-complexity = 18
-select = B,C,E,F,W,T4,B9
-exclude = build,__init__.py
model/SCHP/mhp_extension/detectron2/.github/CODE_OF_CONDUCT.md
DELETED
@@ -1,5 +0,0 @@
-# Code of Conduct
-
-Facebook has adopted a Code of Conduct that we expect project participants to adhere to.
-Please read the [full text](https://code.fb.com/codeofconduct/)
-so that you can understand what actions will and will not be tolerated.
model/SCHP/mhp_extension/detectron2/.github/CONTRIBUTING.md
DELETED
@@ -1,49 +0,0 @@
-# Contributing to detectron2
-
-## Issues
-We use GitHub issues to track public bugs and questions.
-Please make sure to follow one of the
-[issue templates](https://github.com/facebookresearch/detectron2/issues/new/choose)
-when reporting any issues.
-
-Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe
-disclosure of security bugs. In those cases, please go through the process
-outlined on that page and do not file a public issue.
-
-## Pull Requests
-We actively welcome your pull requests.
-
-However, if you're adding any significant features (e.g. > 50 lines), please
-make sure to have a corresponding issue to discuss your motivation and proposals,
-before sending a PR. We do not always accept new features, and we take the following
-factors into consideration:
-
-1. Whether the same feature can be achieved without modifying detectron2.
-   Detectron2 is designed so that you can implement many extensions from the outside, e.g.
-   those in [projects](https://github.com/facebookresearch/detectron2/tree/master/projects).
-   If some part is not as extensible, you can also bring up the issue to make it more extensible.
-2. Whether the feature is potentially useful to a large audience, or only to a small portion of users.
-3. Whether the proposed solution has a good design / interface.
-4. Whether the proposed solution adds extra mental/practical overhead to users who don't
-   need such feature.
-5. Whether the proposed solution breaks existing APIs.
-
-When sending a PR, please do:
-
-1. If a PR contains multiple orthogonal changes, split it to several PRs.
-2. If you've added code that should be tested, add tests.
-3. For PRs that need experiments (e.g. adding a new model or new methods),
-   you don't need to update model zoo, but do provide experiment results in the description of the PR.
-4. If APIs are changed, update the documentation.
-5. Make sure your code lints with `./dev/linter.sh`.
-
-
-## Contributor License Agreement ("CLA")
-In order to accept your pull request, we need you to submit a CLA. You only need
-to do this once to work on any of Facebook's open source projects.
-
-Complete your CLA here: <https://code.facebook.com/cla>
-
-## License
-By contributing to detectron2, you agree that your contributions will be licensed
-under the LICENSE file in the root directory of this source tree.
model/SCHP/mhp_extension/detectron2/.github/Detectron2-Logo-Horz.svg
DELETED
model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE.md
DELETED
@@ -1,5 +0,0 @@
-
-Please select an issue template from
-https://github.com/facebookresearch/detectron2/issues/new/choose .
-
-Otherwise your issue will be closed.
model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/bugs.md
DELETED
@@ -1,36 +0,0 @@
----
-name: "🐛 Bugs"
-about: Report bugs in detectron2
-title: Please read & provide the following
-
----
-
-## Instructions To Reproduce the 🐛 Bug:
-
-1. what changes you made (`git diff`) or what code you wrote
-```
-<put diff or code here>
-```
-2. what exact command you run:
-3. what you observed (including __full logs__):
-```
-<put logs here>
-```
-4. please simplify the steps as much as possible so they do not require additional resources to
-   run, such as a private dataset.
-
-## Expected behavior:
-
-If there are no obvious error in "what you observed" provided above,
-please tell us the expected behavior.
-
-## Environment:
-
-Provide your environment information using the following command:
-```
-wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py
-```
-
-If your issue looks like an installation issue / environment issue,
-please first try to solve it yourself with the instructions in
-https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues
model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/config.yml
DELETED
@@ -1,9 +0,0 @@
-# require an issue template to be chosen
-blank_issues_enabled: false
-
-# Unexpected behaviors & bugs are split to two templates.
-# When they are one template, users think "it's not a bug" and don't choose the template.
-#
-# But the file name is still "unexpected-problems-bugs.md" so that old references
-# to this issue template still works.
-# It's ok since this template should be a superset of "bugs.md" (unexpected behaviors is a superset of bugs)
model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/feature-request.md
DELETED
@@ -1,31 +0,0 @@
----
-name: "\U0001F680Feature Request"
-about: Submit a proposal/request for a new detectron2 feature
-
----
-
-## 🚀 Feature
-A clear and concise description of the feature proposal.
-
-
-## Motivation & Examples
-
-Tell us why the feature is useful.
-
-Describe what the feature would look like, if it is implemented.
-Best demonstrated using **code examples** in addition to words.
-
-## Note
-
-We only consider adding new features if they are relevant to many users.
-
-If you request implementation of research papers --
-we only consider papers that have enough significance and prevalance in the object detection field.
-
-We do not take requests for most projects in the `projects/` directory,
-because they are research code release that is mainly for other researchers to reproduce results.
-
-Instead of adding features inside detectron2,
-you can implement many features by [extending detectron2](https://detectron2.readthedocs.io/tutorials/extend.html).
-The [projects/](https://github.com/facebookresearch/detectron2/tree/master/projects/) directory contains many of such examples.
-
model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/questions-help-support.md
DELETED
@@ -1,26 +0,0 @@
----
-name: "❓How to do something?"
-about: How to do something using detectron2? What does an API do?
-
----
-
-## ❓ How to do something using detectron2
-
-Describe what you want to do, including:
-1. what inputs you will provide, if any:
-2. what outputs you are expecting:
-
-## ❓ What does an API do and how to use it?
-Please link to which API or documentation you're asking about from
-https://detectron2.readthedocs.io/
-
-
-NOTE:
-
-1. Only general answers are provided.
-   If you want to ask about "why X did not work", please use the
-   [Unexpected behaviors](https://github.com/facebookresearch/detectron2/issues/new/choose) issue template.
-
-2. About how to implement new models / new dataloader / new training logic, etc., check documentation first.
-
-3. We do not answer general machine learning / computer vision questions that are not specific to detectron2, such as how a model works, how to improve your training/make it converge, or what algorithm/methods can be used to achieve X.
model/SCHP/mhp_extension/detectron2/.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md
DELETED
@@ -1,45 +0,0 @@
----
-name: "Unexpected behaviors"
-about: Run into unexpected behaviors when using detectron2
-title: Please read & provide the following
-
----
-
-If you do not know the root cause of the problem, and wish someone to help you, please
-post according to this template:
-
-## Instructions To Reproduce the Issue:
-
-1. what changes you made (`git diff`) or what code you wrote
-```
-<put diff or code here>
-```
-2. what exact command you run:
-3. what you observed (including __full logs__):
-```
-<put logs here>
-```
-4. please simplify the steps as much as possible so they do not require additional resources to
-   run, such as a private dataset.
-
-## Expected behavior:
-
-If there are no obvious error in "what you observed" provided above,
-please tell us the expected behavior.
-
-If you expect the model to converge / work better, note that we do not give suggestions
-on how to train a new model.
-Only in one of the two conditions we will help with it:
-(1) You're unable to reproduce the results in detectron2 model zoo.
-(2) It indicates a detectron2 bug.
-
-## Environment:
-
-Provide your environment information using the following command:
-```
-wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py
-```
-
-If your issue looks like an installation issue / environment issue,
-please first try to solve it yourself with the instructions in
-https://detectron2.readthedocs.io/tutorials/install.html#common-installation-issues
model/SCHP/mhp_extension/detectron2/.github/pull_request_template.md
DELETED
@@ -1,9 +0,0 @@
-Thanks for your contribution!
-
-If you're sending a large PR (e.g., >50 lines),
-please open an issue first about the feature / bug, and indicate how you want to contribute.
-
-Before submitting a PR, please run `dev/linter.sh` to lint the code.
-
-See https://detectron2.readthedocs.io/notes/contributing.html#pull-requests
-about how we handle PRs.
model/SCHP/mhp_extension/detectron2/.gitignore
DELETED
@@ -1,46 +0,0 @@
-# output dir
-output
-instant_test_output
-inference_test_output
-
-
-*.jpg
-*.png
-*.txt
-*.json
-*.diff
-
-# compilation and distribution
-__pycache__
-_ext
-*.pyc
-*.so
-detectron2.egg-info/
-build/
-dist/
-wheels/
-
-# pytorch/python/numpy formats
-*.pth
-*.pkl
-*.npy
-
-# ipython/jupyter notebooks
-*.ipynb
-**/.ipynb_checkpoints/
-
-# Editor temporaries
-*.swn
-*.swo
-*.swp
-*~
-
-# editor settings
-.idea
-.vscode
-
-# project dirs
-/detectron2/model_zoo/configs
-/datasets
-/projects/*/datasets
-/models
model/SCHP/mhp_extension/detectron2/GETTING_STARTED.md
DELETED
@@ -1,79 +0,0 @@
-## Getting Started with Detectron2
-
-This document provides a brief intro of the usage of builtin command-line tools in detectron2.
-
-For a tutorial that involves actual coding with the API,
-see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-which covers how to run inference with an
-existing model, and how to train a builtin model on a custom dataset.
-
-For more advanced tutorials, refer to our [documentation](https://detectron2.readthedocs.io/tutorials/extend.html).
-
-
-### Inference Demo with Pre-trained Models
-
-1. Pick a model and its config file from
-  [model zoo](MODEL_ZOO.md),
-  for example, `mask_rcnn_R_50_FPN_3x.yaml`.
-2. We provide `demo.py` that is able to run builtin standard models. Run it with:
-```
-cd demo/
-python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
-  --input input1.jpg input2.jpg \
-  [--other-options]
-  --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl
-```
-The configs are made for training, therefore we need to specify `MODEL.WEIGHTS` to a model from model zoo for evaluation.
-This command will run the inference and show visualizations in an OpenCV window.
-
-For details of the command line arguments, see `demo.py -h` or look at its source code
-to understand its behavior. Some common arguments are:
-* To run __on your webcam__, replace `--input files` with `--webcam`.
-* To run __on a video__, replace `--input files` with `--video-input video.mp4`.
-* To run __on cpu__, add `MODEL.DEVICE cpu` after `--opts`.
-* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`.
-
-
-### Training & Evaluation in Command Line
-
-We provide a script in "tools/{,plain_}train_net.py", that is made to train
-all the configs provided in detectron2.
-You may want to use it as a reference to write your own training script.
-
-To train a model with "train_net.py", first
-setup the corresponding datasets following
-[datasets/README.md](./datasets/README.md),
-then run:
-```
-cd tools/
-./train_net.py --num-gpus 8 \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
-```
-
-The configs are made for 8-GPU training.
-To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.:
-```
-./train_net.py \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
-  --num-gpus 1 SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025
-```
-
-For most models, CPU training is not supported.
-
-To evaluate a model's performance, use
-```
-./train_net.py \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
-  --eval-only MODEL.WEIGHTS /path/to/checkpoint_file
-```
-For more options, see `./train_net.py -h`.
-
-### Use Detectron2 APIs in Your Code
-
-See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-to learn how to use detectron2 APIs to:
-1. run inference with an existing model
-2. train a builtin model on a custom dataset
-
-See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/master/projects)
-for more ways to build your project on detectron2.
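To complement the command-line examples in the deleted GETTING_STARTED.md above, here is a minimal API-level inference sketch using standard detectron2 calls of that era (the input image path is a placeholder):

```python
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

# Build a config for a model-zoo Mask R-CNN and fetch its trained weights.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.DEVICE = "cpu"  # inference (not training) works without a GPU

predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input1.jpg"))  # placeholder image path
print(outputs["instances"].pred_classes)
```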
model/SCHP/mhp_extension/detectron2/INSTALL.md
DELETED
@@ -1,184 +0,0 @@
-## Installation
-
-Our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-has step-by-step instructions that install detectron2.
-The [Dockerfile](docker)
-also installs detectron2 with a few simple commands.
-
-### Requirements
-- Linux or macOS with Python ≥ 3.6
-- PyTorch ≥ 1.4
-- [torchvision](https://github.com/pytorch/vision/) that matches the PyTorch installation.
-  You can install them together at [pytorch.org](https://pytorch.org) to make sure of this.
-- OpenCV, optional, needed by demo and visualization
-- pycocotools: `pip install cython; pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'`
-
-
-### Build Detectron2 from Source
-
-gcc & g++ ≥ 5 are required. [ninja](https://ninja-build.org/) is recommended for faster build.
-After having them, run:
-```
-python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
-# (add --user if you don't have permission)
-
-# Or, to install it from a local clone:
-git clone https://github.com/facebookresearch/detectron2.git
-python -m pip install -e detectron2
-
-# Or if you are on macOS
-# CC=clang CXX=clang++ python -m pip install -e .
-```
-
-To __rebuild__ detectron2 that's built from a local clone, use `rm -rf build/ **/*.so` to clean the
-old build first. You often need to rebuild detectron2 after reinstalling PyTorch.
-
-### Install Pre-Built Detectron2 (Linux only)
-```
-# for CUDA 10.1:
-python -m pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/index.html
-```
-You can replace cu101 with "cu{100,92}" or "cpu".
-
-Note that:
-1. Such installation has to be used with certain version of official PyTorch release.
-   See [releases](https://github.com/facebookresearch/detectron2/releases) for requirements.
-   It will not work with a different version of PyTorch or a non-official build of PyTorch.
-2. Such installation is out-of-date w.r.t. master branch of detectron2. It may not be
-   compatible with the master branch of a research project that uses detectron2 (e.g. those in
-   [projects](projects) or [meshrcnn](https://github.com/facebookresearch/meshrcnn/)).
-
-### Common Installation Issues
-
-If you met issues using the pre-built detectron2, please uninstall it and try building it from source.
-
-Click each issue for its solutions:
-
-<details>
-<summary>
-Undefined torch/aten/caffe2 symbols, or segmentation fault immediately when running the library.
-</summary>
-<br/>
-
-This usually happens when detectron2 or torchvision is not
-compiled with the version of PyTorch you're running.
-
-Pre-built torchvision or detectron2 has to work with the corresponding official release of pytorch.
-If the error comes from a pre-built torchvision, uninstall torchvision and pytorch and reinstall them
-following [pytorch.org](http://pytorch.org). So the versions will match.
-
-If the error comes from a pre-built detectron2, check [release notes](https://github.com/facebookresearch/detectron2/releases)
-to see the corresponding pytorch version required for each pre-built detectron2.
-
-If the error comes from detectron2 or torchvision that you built manually from source,
-remove files you built (`build/`, `**/*.so`) and rebuild it so it can pick up the version of pytorch currently in your environment.
-
-If you cannot resolve this problem, please include the output of `gdb -ex "r" -ex "bt" -ex "quit" --args python -m detectron2.utils.collect_env`
-in your issue.
-</details>
-
-<details>
-<summary>
-Undefined C++ symbols (e.g. `GLIBCXX`) or C++ symbols not found.
-</summary>
-<br/>
-Usually it's because the library is compiled with a newer C++ compiler but run with an old C++ runtime.
-
-This often happens with old anaconda.
-Try `conda update libgcc`. Then rebuild detectron2.
-
-The fundamental solution is to run the code with proper C++ runtime.
-One way is to use `LD_PRELOAD=/path/to/libstdc++.so`.
-
-</details>
-
-<details>
-<summary>
-"Not compiled with GPU support" or "Detectron2 CUDA Compiler: not available".
-</summary>
-<br/>
-CUDA is not found when building detectron2.
-You should make sure
-
-```
-python -c 'import torch; from torch.utils.cpp_extension import CUDA_HOME; print(torch.cuda.is_available(), CUDA_HOME)'
-```
-
-print valid outputs at the time you build detectron2.
-
-Most models can run inference (but not training) without GPU support. To use CPUs, set `MODEL.DEVICE='cpu'` in the config.
-</details>
-
-<details>
-<summary>
-"invalid device function" or "no kernel image is available for execution".
-</summary>
-<br/>
-Two possibilities:
-
-* You build detectron2 with one version of CUDA but run it with a different version.
-
-  To check whether it is the case,
-  use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
-  In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
-  to contain cuda libraries of the same version.
-
-  When they are inconsistent,
-  you need to either install a different build of PyTorch (or build by yourself)
-  to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
-
-* Detectron2 or PyTorch/torchvision is not built for the correct GPU architecture (compute compatibility).
-
-  The GPU architecture for PyTorch/detectron2/torchvision is available in the "architecture flags" in
-  `python -m detectron2.utils.collect_env`.
-
-  The GPU architecture flags of detectron2/torchvision by default matches the GPU model detected
-  during compilation. This means the compiled code may not work on a different GPU model.
-  To overwrite the GPU architecture for detectron2/torchvision, use `TORCH_CUDA_ARCH_LIST` environment variable during compilation.
-
-  For example, `export TORCH_CUDA_ARCH_LIST=6.0,7.0` makes it compile for both P100s and V100s.
-  Visit [developer.nvidia.com/cuda-gpus](https://developer.nvidia.com/cuda-gpus) to find out
-  the correct compute compatibility number for your device.
-
-</details>
-
-<details>
-<summary>
-Undefined CUDA symbols; cannot open libcudart.so; other nvcc failures.
-</summary>
-<br/>
-The version of NVCC you use to build detectron2 or torchvision does
-not match the version of CUDA you are running with.
-This often happens when using anaconda's CUDA runtime.
-
-Use `python -m detectron2.utils.collect_env` to find out inconsistent CUDA versions.
-In the output of this command, you should expect "Detectron2 CUDA Compiler", "CUDA_HOME", "PyTorch built with - CUDA"
-to contain cuda libraries of the same version.
-
-When they are inconsistent,
-you need to either install a different build of PyTorch (or build by yourself)
-to match your local CUDA installation, or install a different version of CUDA to match PyTorch.
-</details>
-
-
-<details>
-<summary>
-"ImportError: cannot import name '_C'".
-</summary>
-<br/>
-Please build and install detectron2 following the instructions above.
-
-If you are running code from detectron2's root directory, `cd` to a different one.
-Otherwise you may not import the code that you installed.
-</details>
-
-<details>
-<summary>
-ONNX conversion segfault after some "TraceWarning".
-</summary>
-<br/>
-The ONNX package is compiled with too old compiler.
-
-Please build and install ONNX from its source code using a compiler
-whose version is closer to what's used by PyTorch (available in `torch.__config__.show()`).
-</details>
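Several of the troubleshooting entries above hinge on matching CUDA versions between PyTorch and the local toolkit; a quick sanity-check sketch using standard torch APIs:

```python
import torch
from torch.utils.cpp_extension import CUDA_HOME

print("torch:", torch.__version__)
print("CUDA available:", torch.cuda.is_available())
print("torch built with CUDA:", torch.version.cuda)
print("CUDA_HOME:", CUDA_HOME)  # toolkit that will compile detectron2's extensions
```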
model/SCHP/mhp_extension/detectron2/LICENSE
DELETED
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright 2019 - present, Facebook, Inc
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
model/SCHP/mhp_extension/detectron2/MODEL_ZOO.md
DELETED
@@ -1,903 +0,0 @@
# Detectron2 Model Zoo and Baselines

## Introduction

This file documents a large collection of baselines trained
with detectron2 in Sep-Oct, 2019.
All numbers were obtained on [Big Basin](https://engineering.fb.com/data-center-engineering/introducing-big-basin-our-next-generation-ai-hardware/)
servers with 8 NVIDIA V100 GPUs & NVLink. The software in use were PyTorch 1.3, CUDA 9.2, cuDNN 7.4.2 or 7.6.3.
You can access these models from code using [detectron2.model_zoo](https://detectron2.readthedocs.io/modules/model_zoo.html) APIs.

In addition to these official baseline models, you can find more models in [projects/](projects/).
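[Editor's note] As a hedged illustration of the `model_zoo` access mentioned above, a minimal sketch using the documented `detectron2.model_zoo` entry points; the config path is one of the rows in the tables below:

```python
# Fetch a baseline from the model zoo by its config path.
from detectron2 import model_zoo

CONFIG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"  # a row from the tables below
print(model_zoo.get_config_file(CONFIG))     # local path of the packaged yaml
print(model_zoo.get_checkpoint_url(CONFIG))  # URL of the trained checkpoint
model = model_zoo.get(CONFIG, trained=True)  # build the model and load its weights
```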
#### How to Read the Tables
* The "Name" column contains a link to the config file. Running `tools/train_net.py` with this config file
  and 8 GPUs will reproduce the model.
* Training speed is averaged across the entire training.
  We keep updating the speed with latest version of detectron2/pytorch/etc.,
  so they might be different from the `metrics` file.
  Training speed for multi-machine jobs is not provided.
* Inference speed is measured by `tools/train_net.py --eval-only`, or [inference_on_dataset()](https://detectron2.readthedocs.io/modules/evaluation.html#detectron2.evaluation.inference_on_dataset),
  with batch size 1 in detectron2 directly (see the sketch after this list).
  Measuring it with your own code will likely introduce other overhead.
  Actual deployment in production should in general be faster than the given inference
  speed due to more optimizations.
* The *model id* column is provided for ease of reference.
  To check downloaded file integrity, any model on this page contains its md5 prefix in its file name.
* Training curves and other statistics can be found in `metrics` for each model.
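[Editor's note] A minimal sketch of the `inference_on_dataset()` measurement described in the list above, assuming detectron2's builtin `coco_2017_val` dataset is available on disk and current `COCOEvaluator` / `build_detection_test_loader` signatures:

```python
# inference_on_dataset runs the model over the loader (batch size 1)
# and logs pure inference time alongside the COCO evaluation results.
import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

CONFIG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(CONFIG))

model = model_zoo.get(CONFIG, trained=True)  # model with zoo weights loaded
model.eval()

loader = build_detection_test_loader(cfg, "coco_2017_val")
evaluator = COCOEvaluator("coco_2017_val", output_dir="./eval_output")
with torch.no_grad():
    print(inference_on_dataset(model, loader, evaluator))
```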
#### Common Settings for COCO Models
* All COCO models were trained on `train2017` and evaluated on `val2017`.
* The default settings are __not directly comparable__ with Detectron's standard settings.
  For example, our default training data augmentation uses scale jittering in addition to horizontal flipping.

  To make fair comparisons with Detectron's settings, see
  [Detectron1-Comparisons](configs/Detectron1-Comparisons/) for accuracy comparison,
  and [benchmarks](https://detectron2.readthedocs.io/notes/benchmarks.html)
  for speed comparison.
* For Faster/Mask R-CNN, we provide baselines based on __3 different backbone combinations__:
  * __FPN__: Use a ResNet+FPN backbone with standard conv and FC heads for mask and box prediction,
    respectively. It obtains the best
    speed/accuracy tradeoff, but the other two are still useful for research.
  * __C4__: Use a ResNet conv4 backbone with conv5 head. The original baseline in the Faster R-CNN paper.
  * __DC5__ (Dilated-C5): Use a ResNet conv5 backbone with dilations in conv5, and standard conv and FC heads
    for mask and box prediction, respectively.
    This is used by the Deformable ConvNet paper.
* Most models are trained with the 3x schedule (~37 COCO epochs).
  Although 1x models are heavily under-trained, we provide some ResNet-50 models with the 1x (~12 COCO epochs)
  training schedule for comparison when doing quick research iteration.

#### ImageNet Pretrained Models

We provide backbone models pretrained on ImageNet-1k dataset.
These models have __different__ format from those provided in Detectron: we do not fuse BatchNorm into an affine layer.
* [R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl): converted copy of [MSRA's original ResNet-50](https://github.com/KaimingHe/deep-residual-networks) model.
* [R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl): converted copy of [MSRA's original ResNet-101](https://github.com/KaimingHe/deep-residual-networks) model.
* [X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl): ResNeXt-101-32x8d model trained with Caffe2 at FB.

Pretrained models in Detectron's format can still be used. For example:
* [X-152-32x8d-IN5k.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl):
  ResNeXt-152-32x8d model trained on ImageNet-5k with Caffe2 at FB (see ResNeXt paper for details on ImageNet-5k).
* [R-50-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47261647/R-50-GN.pkl):
  ResNet-50 with Group Normalization.
* [R-101-GN.pkl](https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/47592356/R-101-GN.pkl):
  ResNet-101 with Group Normalization.

Torchvision's ResNet models can be used after converted by [this script](tools/convert-torchvision-to-d2.py).
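[Editor's note] A minimal sketch of starting from one of these backbones: point `MODEL.WEIGHTS` at the `.pkl` URL (the R-50 link above) and detectron2's checkpointer loads the Detectron-format pickle; the config choice here is illustrative.

```python
# Initialize a detection config from the ImageNet-pretrained R-50 backbone
# listed above; training itself (e.g. DefaultTrainer) is omitted.
from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"))
cfg.MODEL.WEIGHTS = "https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl"
# from detectron2.engine import DefaultTrainer
# DefaultTrainer(cfg).train()
```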
#### License

All models available for download through this document are licensed under the
[Creative Commons Attribution-ShareAlike 3.0 license](https://creativecommons.org/licenses/by-sa/3.0/).
### COCO Object Detection Baselines

#### Faster R-CNN:
<!--
(fb only) To update the table in vim:
1. Remove the old table: d}
2. Copy the below command to the place of the table
3. :.!bash

./gen_html_table.py --config 'COCO-Detection/faster*50*'{1x,3x}'*' 'COCO-Detection/faster*101*' --name R50-C4 R50-DC5 R50-FPN R50-C4 R50-DC5 R50-FPN R101-C4 R101-DC5 R101-FPN X101-FPN --fields lr_sched train_speed inference_speed mem box_AP
-->

| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id | download |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---|
| [R50-C4](configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml) | 1x | 0.551 | 0.102 | 4.8 | 35.7 | 137257644 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_C4_1x/137257644/model_final_721ade.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_C4_1x/137257644/metrics.json) |
| [R50-DC5](configs/COCO-Detection/faster_rcnn_R_50_DC5_1x.yaml) | 1x | 0.380 | 0.068 | 5.0 | 37.3 | 137847829 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_DC5_1x/137847829/model_final_51d356.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_DC5_1x/137847829/metrics.json) |
| [R50-FPN](configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml) | 1x | 0.210 | 0.038 | 3.0 | 37.9 | 137257794 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_FPN_1x/137257794/model_final_b275ba.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_FPN_1x/137257794/metrics.json) |
| [R50-C4](configs/COCO-Detection/faster_rcnn_R_50_C4_3x.yaml) | 3x | 0.543 | 0.104 | 4.8 | 38.4 | 137849393 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_C4_3x/137849393/model_final_f97cb7.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_C4_3x/137849393/metrics.json) |
| [R50-DC5](configs/COCO-Detection/faster_rcnn_R_50_DC5_3x.yaml) | 3x | 0.378 | 0.070 | 5.0 | 39.0 | 137849425 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_DC5_3x/137849425/model_final_68d202.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_DC5_3x/137849425/metrics.json) |
| [R50-FPN](configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml) | 3x | 0.209 | 0.038 | 3.0 | 40.2 | 137849458 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/metrics.json) |
| [R101-C4](configs/COCO-Detection/faster_rcnn_R_101_C4_3x.yaml) | 3x | 0.619 | 0.139 | 5.9 | 41.1 | 138204752 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_C4_3x/138204752/model_final_298dad.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_C4_3x/138204752/metrics.json) |
| [R101-DC5](configs/COCO-Detection/faster_rcnn_R_101_DC5_3x.yaml) | 3x | 0.452 | 0.086 | 6.1 | 40.6 | 138204841 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_DC5_3x/138204841/model_final_3e0943.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_DC5_3x/138204841/metrics.json) |
| [R101-FPN](configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml) | 3x | 0.286 | 0.051 | 4.1 | 42.0 | 137851257 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/metrics.json) |
| [X101-FPN](configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml) | 3x | 0.638 | 0.098 | 6.7 | 43.0 | 139173657 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/model_final_68b088.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/metrics.json) |
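[Editor's note] Any row in this table can be run on a single image with `DefaultPredictor`; a sketch using the R50-FPN 3x entry, where `input.jpg` is a placeholder path and the 0.5 score threshold is an arbitrary demo choice:

```python
# Build a predictor from a table row's config + checkpoint and run one image.
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

CONFIG = "COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(CONFIG))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(CONFIG)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # arbitrary demo threshold

predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input.jpg"))  # BGR ndarray, per DefaultPredictor
print(outputs["instances"].pred_boxes)
```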
#### RetinaNet:
<!--
./gen_html_table.py --config 'COCO-Detection/retina*50*' 'COCO-Detection/retina*101*' --name R50 R50 R101 --fields lr_sched train_speed inference_speed mem box_AP
-->

| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | model id | download |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---|
| [R50](configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml) | 1x | 0.200 | 0.055 | 3.9 | 36.5 | 137593951 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/retinanet_R_50_FPN_1x/137593951/model_final_b796dc.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/retinanet_R_50_FPN_1x/137593951/metrics.json) |
| [R50](configs/COCO-Detection/retinanet_R_50_FPN_3x.yaml) | 3x | 0.201 | 0.055 | 3.9 | 37.9 | 137849486 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/retinanet_R_50_FPN_3x/137849486/model_final_4cafe0.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/retinanet_R_50_FPN_3x/137849486/metrics.json) |
| [R101](configs/COCO-Detection/retinanet_R_101_FPN_3x.yaml) | 3x | 0.280 | 0.068 | 5.1 | 39.9 | 138363263 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/retinanet_R_101_FPN_3x/138363263/model_final_59f53c.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/retinanet_R_101_FPN_3x/138363263/metrics.json) |
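[Editor's note] Per the md5 note in "How to Read the Tables", each checkpoint's file name ends in its md5 prefix (e.g. `model_final_b796dc.pkl`); a stdlib-only integrity-check sketch, where the 6-hex-digit pattern is an assumption inferred from the file names on this page:

```python
# Verify a downloaded checkpoint against the md5 prefix embedded in its name.
import hashlib
import re
from pathlib import Path

def md5_prefix_ok(path: str) -> bool:
    """True if the file's md5 digest starts with the hex prefix in its name."""
    name = Path(path).name
    m = re.search(r"_([0-9a-f]{6})\.pkl$", name)
    if m is None:
        raise ValueError(f"no md5 prefix in {name!r}")
    digest = hashlib.md5(Path(path).read_bytes()).hexdigest()
    return digest.startswith(m.group(1))

print(md5_prefix_ok("model_final_b796dc.pkl"))  # expects the file locally
```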
#### RPN & Fast R-CNN:
|
251 |
-
<!--
|
252 |
-
./gen_html_table.py --config 'COCO-Detection/rpn*' 'COCO-Detection/fast_rcnn*' --name "RPN R50-C4" "RPN R50-FPN" "Fast R-CNN R50-FPN" --fields lr_sched train_speed inference_speed mem box_AP prop_AR
|
253 |
-
-->
|
254 |
-
|
255 |
-
<table><tbody>
|
256 |
-
<!-- START TABLE -->
|
257 |
-
<!-- TABLE HEADER -->
|
258 |
-
<th valign="bottom">Name</th>
|
259 |
-
<th valign="bottom">lr<br/>sched</th>
|
260 |
-
<th valign="bottom">train<br/>time<br/>(s/iter)</th>
|
261 |
-
<th valign="bottom">inference<br/>time<br/>(s/im)</th>
|
262 |
-
<th valign="bottom">train<br/>mem<br/>(GB)</th>
|
263 |
-
<th valign="bottom">box<br/>AP</th>
|
264 |
-
<th valign="bottom">prop.<br/>AR</th>
|
265 |
-
<th valign="bottom">model id</th>
|
266 |
-
<th valign="bottom">download</th>
|
267 |
-
<!-- TABLE BODY -->
|
268 |
-
<!-- ROW: rpn_R_50_C4_1x -->
|
269 |
-
<tr><td align="left"><a href="configs/COCO-Detection/rpn_R_50_C4_1x.yaml">RPN R50-C4</a></td>
|
270 |
-
<td align="center">1x</td>
|
271 |
-
<td align="center">0.130</td>
|
272 |
-
<td align="center">0.034</td>
|
273 |
-
<td align="center">1.5</td>
|
274 |
-
<td align="center"></td>
|
275 |
-
<td align="center">51.6</td>
|
276 |
-
<td align="center">137258005</td>
|
277 |
-
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/rpn_R_50_C4_1x/137258005/model_final_450694.pkl">model</a> | <a href="https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/rpn_R_50_C4_1x/137258005/metrics.json">metrics</a></td>
|
278 |
-
</tr>
|
279 |
-
<!-- ROW: rpn_R_50_FPN_1x -->
|
280 |
-
<tr><td align="left"><a href="configs/COCO-Detection/rpn_R_50_FPN_1x.yaml">RPN R50-FPN</a></td>
|
281 |
-
<td align="center">1x</td>
|
282 |
-
<td align="center">0.186</td>
|
283 |
-
<td align="center">0.032</td>
|
284 |
-
<td align="center">2.7</td>
|
285 |
-
<td align="center"></td>
|
286 |
-
<td align="center">58.0</td>
|
287 |
-
<td align="center">137258492</td>
|
288 |
-
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/rpn_R_50_FPN_1x/137258492/model_final_02ce48.pkl">model</a> | <a href="https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/rpn_R_50_FPN_1x/137258492/metrics.json">metrics</a></td>
|
289 |
-
</tr>
|
290 |
-
<!-- ROW: fast_rcnn_R_50_FPN_1x -->
|
291 |
-
<tr><td align="left"><a href="configs/COCO-Detection/fast_rcnn_R_50_FPN_1x.yaml">Fast R-CNN R50-FPN</a></td>
|
292 |
-
<td align="center">1x</td>
|
293 |
-
<td align="center">0.140</td>
|
294 |
-
<td align="center">0.029</td>
|
295 |
-
<td align="center">2.6</td>
|
296 |
-
<td align="center">37.8</td>
|
297 |
-
<td align="center"></td>
|
298 |
-
<td align="center">137635226</td>
|
299 |
-
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/fast_rcnn_R_50_FPN_1x/137635226/model_final_e5f7ce.pkl">model</a> | <a href="https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/fast_rcnn_R_50_FPN_1x/137635226/metrics.json">metrics</a></td>
|
300 |
-
</tr>
|
301 |
-
</tbody></table>
|
302 |
-
|
303 |
-
### COCO Instance Segmentation Baselines with Mask R-CNN
<!--
./gen_html_table.py --config 'COCO-InstanceSegmentation/mask*50*'{1x,3x}'*' 'COCO-InstanceSegmentation/mask*101*' --name R50-C4 R50-DC5 R50-FPN R50-C4 R50-DC5 R50-FPN R101-C4 R101-DC5 R101-FPN X101-FPN --fields lr_sched train_speed inference_speed mem box_AP mask_AP
-->

| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---|
| [R50-C4](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x.yaml) | 1x | 0.584 | 0.110 | 5.2 | 36.8 | 32.2 | 137259246 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x/137259246/model_final_9243eb.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x/137259246/metrics.json) |
| [R50-DC5](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x.yaml) | 1x | 0.471 | 0.076 | 6.5 | 38.3 | 34.2 | 137260150 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x/137260150/model_final_4f86c3.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x/137260150/metrics.json) |
| [R50-FPN](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml) | 1x | 0.261 | 0.043 | 3.4 | 38.6 | 35.2 | 137260431 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/metrics.json) |
| [R50-C4](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x.yaml) | 3x | 0.575 | 0.111 | 5.2 | 39.8 | 34.4 | 137849525 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x/137849525/model_final_4ce675.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x/137849525/metrics.json) |
| [R50-DC5](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x.yaml) | 3x | 0.470 | 0.076 | 6.5 | 40.0 | 35.9 | 137849551 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/model_final_84107b.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x/137849551/metrics.json) |
| [R50-FPN](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml) | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/metrics.json) |
| [R101-C4](configs/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x.yaml) | 3x | 0.652 | 0.145 | 6.3 | 42.6 | 36.7 | 138363239 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x/138363239/model_final_a2914c.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x/138363239/metrics.json) |
| [R101-DC5](configs/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x.yaml) | 3x | 0.545 | 0.092 | 7.6 | 41.9 | 37.3 | 138363294 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x/138363294/model_final_0464b7.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x/138363294/metrics.json) |
| [R101-FPN](configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml) | 3x | 0.340 | 0.056 | 4.6 | 42.9 | 38.6 | 138205316 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/model_final_a3ec72.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/metrics.json) |
| [X101-FPN](configs/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml) | 3x | 0.690 | 0.103 | 7.2 | 44.3 | 39.5 | 139653917 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x/139653917/model_final_2d9806.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x/139653917/metrics.json) |
### COCO Person Keypoint Detection Baselines with Keypoint R-CNN
<!--
./gen_html_table.py --config 'COCO-Keypoints/*50*' 'COCO-Keypoints/*101*' --name R50-FPN R50-FPN R101-FPN X101-FPN --fields lr_sched train_speed inference_speed mem box_AP keypoint_AP
-->

| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | kp. AP | model id | download |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---|
| [R50-FPN](configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml) | 1x | 0.315 | 0.072 | 5.0 | 53.6 | 64.0 | 137261548 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x/137261548/model_final_04e291.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x/137261548/metrics.json) |
| [R50-FPN](configs/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml) | 3x | 0.316 | 0.066 | 5.0 | 55.4 | 65.5 | 137849621 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/model_final_a6e10b.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x/137849621/metrics.json) |
| [R101-FPN](configs/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml) | 3x | 0.390 | 0.076 | 6.1 | 56.4 | 66.1 | 138363331 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x/138363331/model_final_997cc7.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x/138363331/metrics.json) |
| [X101-FPN](configs/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x.yaml) | 3x | 0.738 | 0.121 | 8.7 | 57.3 | 66.0 | 139686956 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x/139686956/model_final_5ad38f.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x/139686956/metrics.json) |
### COCO Panoptic Segmentation Baselines with Panoptic FPN
<!--
./gen_html_table.py --config 'COCO-PanopticSegmentation/*50*' 'COCO-PanopticSegmentation/*101*' --name R50-FPN R50-FPN R101-FPN --fields lr_sched train_speed inference_speed mem box_AP mask_AP PQ
-->

| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | PQ | model id | download |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---|
| [R50-FPN](configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml) | 1x | 0.304 | 0.053 | 4.8 | 37.6 | 34.7 | 39.4 | 139514544 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x/139514544/model_final_dbfeb4.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x/139514544/metrics.json) |
| [R50-FPN](configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml) | 3x | 0.302 | 0.053 | 4.8 | 40.0 | 36.5 | 41.5 | 139514569 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/model_final_c10459.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-PanopticSegmentation/panoptic_fpn_R_50_3x/139514569/metrics.json) |
| [R101-FPN](configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml) | 3x | 0.392 | 0.066 | 6.0 | 42.4 | 38.5 | 43.0 | 139514519 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x/139514519/model_final_cafdb1.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x/139514519/metrics.json) |
### LVIS Instance Segmentation Baselines with Mask R-CNN

Mask R-CNN baselines on the [LVIS dataset](https://lvisdataset.org), v0.5.
These baselines are described in Table 3(c) of the [LVIS paper](https://arxiv.org/abs/1908.03195).

NOTE: the 1x schedule here has the same amount of __iterations__ as the COCO 1x baselines.
They are roughly 24 epochs of LVISv0.5 data.
The final results of these configs have large variance across different runs.

<!--
./gen_html_table.py --config 'LVIS-InstanceSegmentation/mask*50*' 'LVIS-InstanceSegmentation/mask*101*' --name R50-FPN R101-FPN X101-FPN --fields lr_sched train_speed inference_speed mem box_AP mask_AP
-->

| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---|
| [R50-FPN](configs/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml) | 1x | 0.292 | 0.107 | 7.1 | 23.6 | 24.4 | 144219072 | [model](https://dl.fbaipublicfiles.com/detectron2/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/144219072/model_final_571f7c.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/144219072/metrics.json) |
| [R101-FPN](configs/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml) | 1x | 0.371 | 0.114 | 7.8 | 25.6 | 25.9 | 144219035 | [model](https://dl.fbaipublicfiles.com/detectron2/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x/144219035/model_final_824ab5.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x/144219035/metrics.json) |
| [X101-FPN](configs/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x.yaml) | 1x | 0.712 | 0.151 | 10.2 | 26.7 | 27.1 | 144219108 | [model](https://dl.fbaipublicfiles.com/detectron2/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x/144219108/model_final_5e3439.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/LVIS-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x/144219108/metrics.json) |
### Cityscapes & Pascal VOC Baselines
|
624 |
-
|
625 |
-
Simple baselines for
|
626 |
-
* Mask R-CNN on Cityscapes instance segmentation (initialized from COCO pre-training, then trained on Cityscapes fine annotations only)
|
627 |
-
* Faster R-CNN on PASCAL VOC object detection (trained on VOC 2007 train+val + VOC 2012 train+val, tested on VOC 2007 using 11-point interpolated AP)
|
628 |
-
|
629 |
-
<!--
|
630 |
-
./gen_html_table.py --config 'Cityscapes/*' 'PascalVOC-Detection/*' --name "R50-FPN, Cityscapes" "R50-C4, VOC" --fields train_speed inference_speed mem box_AP box_AP50 mask_AP
|
631 |
-
-->
|
632 |
-
|
633 |
-
|
634 |
-
<table><tbody>
|
635 |
-
<!-- START TABLE -->
|
636 |
-
<!-- TABLE HEADER -->
|
637 |
-
<th valign="bottom">Name</th>
|
638 |
-
<th valign="bottom">train<br/>time<br/>(s/iter)</th>
|
639 |
-
<th valign="bottom">inference<br/>time<br/>(s/im)</th>
|
640 |
-
<th valign="bottom">train<br/>mem<br/>(GB)</th>
|
641 |
-
<th valign="bottom">box<br/>AP</th>
|
642 |
-
<th valign="bottom">box<br/>AP50</th>
|
643 |
-
<th valign="bottom">mask<br/>AP</th>
|
644 |
-
<th valign="bottom">model id</th>
|
645 |
-
<th valign="bottom">download</th>
|
646 |
-
<!-- TABLE BODY -->
|
647 |
-
<!-- ROW: mask_rcnn_R_50_FPN -->
|
648 |
-
<tr><td align="left"><a href="configs/Cityscapes/mask_rcnn_R_50_FPN.yaml">R50-FPN, Cityscapes</a></td>
|
649 |
-
<td align="center">0.240</td>
|
650 |
-
<td align="center">0.078</td>
|
651 |
-
<td align="center">4.4</td>
|
652 |
-
<td align="center"></td>
|
653 |
-
<td align="center"></td>
|
654 |
-
<td align="center">36.5</td>
|
655 |
-
<td align="center">142423278</td>
|
656 |
-
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Cityscapes/mask_rcnn_R_50_FPN/142423278/model_final_af9cf5.pkl">model</a> | <a href="https://dl.fbaipublicfiles.com/detectron2/Cityscapes/mask_rcnn_R_50_FPN/142423278/metrics.json">metrics</a></td>
|
657 |
-
</tr>
|
658 |
-
<!-- ROW: faster_rcnn_R_50_C4 -->
|
659 |
-
<tr><td align="left"><a href="configs/PascalVOC-Detection/faster_rcnn_R_50_C4.yaml">R50-C4, VOC</a></td>
|
660 |
-
<td align="center">0.537</td>
|
661 |
-
<td align="center">0.081</td>
|
662 |
-
<td align="center">4.8</td>
|
663 |
-
<td align="center">51.9</td>
|
664 |
-
<td align="center">80.3</td>
|
665 |
-
<td align="center"></td>
|
666 |
-
<td align="center">142202221</td>
|
667 |
-
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/PascalVOC-Detection/faster_rcnn_R_50_C4/142202221/model_final_b1acc2.pkl">model</a> | <a href="https://dl.fbaipublicfiles.com/detectron2/PascalVOC-Detection/faster_rcnn_R_50_C4/142202221/metrics.json">metrics</a></td>
|
668 |
-
</tr>
|
669 |
-
</tbody></table>
|
670 |
-
|
671 |
-
|
672 |
-
|
673 |
-
### Other Settings

Ablations for Deformable Conv and Cascade R-CNN:

<!--
./gen_html_table.py --config 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml' 'Misc/*R_50_FPN_1x_dconv*' 'Misc/cascade*1x.yaml' 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml' 'Misc/*R_50_FPN_3x_dconv*' 'Misc/cascade*3x.yaml' --name "Baseline R50-FPN" "Deformable Conv" "Cascade R-CNN" "Baseline R50-FPN" "Deformable Conv" "Cascade R-CNN" --fields lr_sched train_speed inference_speed mem box_AP mask_AP
-->

| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---|
| [Baseline R50-FPN](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml) | 1x | 0.261 | 0.043 | 3.4 | 38.6 | 35.2 | 137260431 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/metrics.json) |
| [Deformable Conv](configs/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5.yaml) | 1x | 0.342 | 0.048 | 3.5 | 41.5 | 37.5 | 138602867 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5/138602867/model_final_65c703.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5/138602867/metrics.json) |
| [Cascade R-CNN](configs/Misc/cascade_mask_rcnn_R_50_FPN_1x.yaml) | 1x | 0.317 | 0.052 | 4.0 | 42.1 | 36.4 | 138602847 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/cascade_mask_rcnn_R_50_FPN_1x/138602847/model_final_e9d89b.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/cascade_mask_rcnn_R_50_FPN_1x/138602847/metrics.json) |
| [Baseline R50-FPN](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml) | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/metrics.json) |
| [Deformable Conv](configs/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5.yaml) | 3x | 0.349 | 0.047 | 3.5 | 42.7 | 38.5 | 144998336 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5/144998336/model_final_821d0b.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5/144998336/metrics.json) |
| [Cascade R-CNN](configs/Misc/cascade_mask_rcnn_R_50_FPN_3x.yaml) | 3x | 0.328 | 0.053 | 4.0 | 44.3 | 38.5 | 144998488 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/cascade_mask_rcnn_R_50_FPN_3x/144998488/model_final_480dd8.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/cascade_mask_rcnn_R_50_FPN_3x/144998488/metrics.json) |
Ablations for normalization methods, and a few models trained from scratch following [Rethinking ImageNet Pre-training](https://arxiv.org/abs/1811.08883).
(Note: The baseline uses `2fc` head while the others use [`4conv1fc` head](https://arxiv.org/abs/1803.08494))
<!--
./gen_html_table.py --config 'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml' 'Misc/mask*50_FPN_3x_gn.yaml' 'Misc/mask*50_FPN_3x_syncbn.yaml' 'Misc/scratch*' --name "Baseline R50-FPN" "GN" "SyncBN" "GN (from scratch)" "GN (from scratch)" "SyncBN (from scratch)" --fields lr_sched train_speed inference_speed mem box_AP mask_AP
-->

| Name | lr sched | train time (s/iter) | inference time (s/im) | train mem (GB) | box AP | mask AP | model id | download |
|:---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---|
| [Baseline R50-FPN](configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml) | 3x | 0.261 | 0.043 | 3.4 | 41.0 | 37.2 | 137849600 | [model](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/metrics.json) |
| [GN](configs/Misc/mask_rcnn_R_50_FPN_3x_gn.yaml) | 3x | 0.356 | 0.069 | 7.3 | 42.6 | 38.6 | 138602888 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/mask_rcnn_R_50_FPN_3x_gn/138602888/model_final_dc5d9e.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/mask_rcnn_R_50_FPN_3x_gn/138602888/metrics.json) |
| [SyncBN](configs/Misc/mask_rcnn_R_50_FPN_3x_syncbn.yaml) | 3x | 0.371 | 0.053 | 5.5 | 41.9 | 37.8 | 169527823 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/mask_rcnn_R_50_FPN_3x_syncbn/169527823/model_final_3b3c51.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/mask_rcnn_R_50_FPN_3x_syncbn/169527823/metrics.json) |
| [GN (from scratch)](configs/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn.yaml) | 3x | 0.400 | 0.069 | 9.8 | 39.9 | 36.6 | 138602908 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/model_final_01ca85.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_3x_gn/138602908/metrics.json) |
| [GN (from scratch)](configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn.yaml) | 9x | N/A | 0.070 | 9.8 | 43.7 | 39.6 | 183808979 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn/183808979/model_final_da7b4c.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_9x_gn/183808979/metrics.json) |
| [SyncBN (from scratch)](configs/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn.yaml) | 9x | N/A | 0.055 | 7.2 | 43.6 | 39.3 | 184226666 | [model](https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn/184226666/model_final_5ce33e.pkl) \| [metrics](https://dl.fbaipublicfiles.com/detectron2/Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn/184226666/metrics.json) |
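[Editor's note] The `2fc` vs `4conv1fc` head difference noted above is a config-level change; a hedged sketch of the relevant detectron2 config keys, where the values mirror the head names rather than the exact contents of the `Misc/*.yaml` files:

```python
# Switch the baseline's 2fc box head to a 4conv1fc head with GroupNorm;
# ROI_BOX_HEAD.* are standard detectron2 config keys.
from detectron2 import model_zoo
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_BOX_HEAD.NUM_CONV = 4  # 4 conv layers ...
cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 1    # ... plus 1 fc layer
cfg.MODEL.ROI_BOX_HEAD.NORM = "GN"   # group normalization in the head
```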
A few very large models trained for a long time, for demo purposes. They are trained using multiple machines:
|
854 |
-
|
855 |
-
<!--
|
856 |
-
./gen_html_table.py --config 'Misc/panoptic_*dconv*' 'Misc/cascade_*152*' --name "Panoptic FPN R101" "Mask R-CNN X152" --fields inference_speed mem box_AP mask_AP PQ
|
857 |
-
# manually add TTA results
|
858 |
-
-->
|
859 |
-
|
860 |
-
|
861 |
-
<table><tbody>
|
862 |
-
<!-- START TABLE -->
|
863 |
-
<!-- TABLE HEADER -->
|
864 |
-
<th valign="bottom">Name</th>
|
865 |
-
<th valign="bottom">inference<br/>time<br/>(s/im)</th>
|
866 |
-
<th valign="bottom">train<br/>mem<br/>(GB)</th>
|
867 |
-
<th valign="bottom">box<br/>AP</th>
|
868 |
-
<th valign="bottom">mask<br/>AP</th>
|
869 |
-
<th valign="bottom">PQ</th>
|
870 |
-
<th valign="bottom">model id</th>
|
871 |
-
<th valign="bottom">download</th>
|
872 |
-
<!-- TABLE BODY -->
|
873 |
-
<!-- ROW: panoptic_fpn_R_101_dconv_cascade_gn_3x -->
|
874 |
-
<tr><td align="left"><a href="configs/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml">Panoptic FPN R101</a></td>
|
875 |
-
<td align="center">0.107</td>
|
876 |
-
<td align="center">11.4</td>
|
877 |
-
<td align="center">47.4</td>
|
878 |
-
<td align="center">41.3</td>
|
879 |
-
<td align="center">46.1</td>
|
880 |
-
<td align="center">139797668</td>
|
881 |
-
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x/139797668/model_final_be35db.pkl">model</a> | <a href="https://dl.fbaipublicfiles.com/detectron2/Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x/139797668/metrics.json">metrics</a></td>
|
882 |
-
</tr>
|
883 |
-
<!-- ROW: cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv -->
|
884 |
-
<tr><td align="left"><a href="configs/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv.yaml">Mask R-CNN X152</a></td>
|
885 |
-
<td align="center">0.242</td>
|
886 |
-
<td align="center">15.1</td>
|
887 |
-
<td align="center">50.2</td>
|
888 |
-
<td align="center">44.0</td>
|
889 |
-
<td align="center"></td>
|
890 |
-
<td align="center">18131413</td>
|
891 |
-
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv/18131413/model_0039999_e76410.pkl">model</a> | <a href="https://dl.fbaipublicfiles.com/detectron2/Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv/18131413/metrics.json">metrics</a></td>
|
892 |
-
</tr>
|
893 |
-
<!-- ROW: TTA cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv -->
|
894 |
-
<tr><td align="left">above + test-time aug.</td>
|
895 |
-
<td align="center"></td>
|
896 |
-
<td align="center"></td>
|
897 |
-
<td align="center">51.9</td>
|
898 |
-
<td align="center">45.9</td>
|
899 |
-
<td align="center"></td>
|
900 |
-
<td align="center"></td>
|
901 |
-
<td align="center"></td>
|
902 |
-
</tr>
|
903 |
-
</tbody></table>
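The deleted table rows above link to raw checkpoint files, but these model-zoo entries are normally consumed through detectron2's `model_zoo` helper, which resolves the same config path and checkpoint URL. A minimal inference sketch, assuming detectron2 and opencv-python are installed; `input.jpg` is a placeholder path, not a file from this repo:

```python
# Minimal sketch: run one of the Misc model-zoo entries above for inference.
# Assumes detectron2 + opencv-python are installed; "input.jpg" is a placeholder.
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
# Config path matches the row's link target under configs/.
cfg.merge_from_file(model_zoo.get_config_file("Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml"))
# Resolves to the dl.fbaipublicfiles.com checkpoint listed in the table.
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x.yaml")

predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input.jpg"))  # dict with "panoptic_seg", "instances", ...
```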
model/SCHP/mhp_extension/detectron2/README.md DELETED
@@ -1,56 +0,0 @@
-<img src=".github/Detectron2-Logo-Horz.svg" width="300" >
-
-Detectron2 is Facebook AI Research's next generation software system
-that implements state-of-the-art object detection algorithms.
-It is a ground-up rewrite of the previous version,
-[Detectron](https://github.com/facebookresearch/Detectron/),
-and it originates from [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/).
-
-<div align="center">
-  <img src="https://user-images.githubusercontent.com/1381301/66535560-d3422200-eace-11e9-9123-5535d469db19.png"/>
-</div>
-
-### What's New
-* It is powered by the [PyTorch](https://pytorch.org) deep learning framework.
-* Includes more features such as panoptic segmentation, densepose, Cascade R-CNN, rotated bounding boxes, etc.
-* Can be used as a library to support [different projects](projects/) on top of it.
-  We'll open source more research projects in this way.
-* It [trains much faster](https://detectron2.readthedocs.io/notes/benchmarks.html).
-
-See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-/)
-to see more demos and learn about detectron2.
-
-## Installation
-
-See [INSTALL.md](INSTALL.md).
-
-## Quick Start
-
-See [GETTING_STARTED.md](GETTING_STARTED.md),
-or the [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5).
-
-Learn more at our [documentation](https://detectron2.readthedocs.org).
-And see [projects/](projects/) for some projects that are built on top of detectron2.
-
-## Model Zoo and Baselines
-
-We provide a large set of baseline results and trained models available for download in the [Detectron2 Model Zoo](MODEL_ZOO.md).
-
-
-## License
-
-Detectron2 is released under the [Apache 2.0 license](LICENSE).
-
-## Citing Detectron2
-
-If you use Detectron2 in your research or wish to refer to the baseline results published in the [Model Zoo](MODEL_ZOO.md), please use the following BibTeX entry.
-
-```BibTeX
-@misc{wu2019detectron2,
-  author =       {Yuxin Wu and Alexander Kirillov and Francisco Massa and
-                  Wan-Yen Lo and Ross Girshick},
-  title =        {Detectron2},
-  howpublished = {\url{https://github.com/facebookresearch/detectron2}},
-  year =         {2019}
-}
-```
model/SCHP/mhp_extension/detectron2/configs/Base-RCNN-C4.yaml DELETED
@@ -1,18 +0,0 @@
-MODEL:
-  META_ARCHITECTURE: "GeneralizedRCNN"
-  RPN:
-    PRE_NMS_TOPK_TEST: 6000
-    POST_NMS_TOPK_TEST: 1000
-  ROI_HEADS:
-    NAME: "Res5ROIHeads"
-DATASETS:
-  TRAIN: ("coco_2017_train",)
-  TEST: ("coco_2017_val",)
-SOLVER:
-  IMS_PER_BATCH: 16
-  BASE_LR: 0.02
-  STEPS: (60000, 80000)
-  MAX_ITER: 90000
-INPUT:
-  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
-VERSION: 2
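For context on how this deleted base file was consumed: detectron2 configs inherit through a `_BASE_` key, so task configs only state their deltas against a base like the one above. A sketch of a child config; the file name is illustrative, but the pattern mirrors how the `configs/COCO-Detection/*_C4_*.yaml` files in upstream detectron2 extend this base:

```yaml
# Hypothetical child config (e.g. configs/COCO-Detection/faster_rcnn_R_50_C4_1x.yaml)
# extending the Base-RCNN-C4.yaml shown above; only the deltas are stated.
_BASE_: "../Base-RCNN-C4.yaml"
MODEL:
  WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
  MASK_ON: False
  RESNETS:
    DEPTH: 50
```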
model/SCHP/mhp_extension/detectron2/configs/Base-RCNN-DilatedC5.yaml DELETED
@@ -1,31 +0,0 @@
-MODEL:
-  META_ARCHITECTURE: "GeneralizedRCNN"
-  RESNETS:
-    OUT_FEATURES: ["res5"]
-    RES5_DILATION: 2
-  RPN:
-    IN_FEATURES: ["res5"]
-    PRE_NMS_TOPK_TEST: 6000
-    POST_NMS_TOPK_TEST: 1000
-  ROI_HEADS:
-    NAME: "StandardROIHeads"
-    IN_FEATURES: ["res5"]
-  ROI_BOX_HEAD:
-    NAME: "FastRCNNConvFCHead"
-    NUM_FC: 2
-    POOLER_RESOLUTION: 7
-  ROI_MASK_HEAD:
-    NAME: "MaskRCNNConvUpsampleHead"
-    NUM_CONV: 4
-    POOLER_RESOLUTION: 14
-DATASETS:
-  TRAIN: ("coco_2017_train",)
-  TEST: ("coco_2017_val",)
-SOLVER:
-  IMS_PER_BATCH: 16
-  BASE_LR: 0.02
-  STEPS: (60000, 80000)
-  MAX_ITER: 90000
-INPUT:
-  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
-VERSION: 2
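To inspect one of these base configs programmatically, detectron2's config API merges a YAML file over its compiled-in defaults. A short sketch, assuming detectron2 is installed and the file is read from its pre-removal path in this repo:

```python
# Sketch: load the (now removed) base config over detectron2's defaults
# and inspect a couple of the keys defined above.
from detectron2.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file("model/SCHP/mhp_extension/detectron2/configs/Base-RCNN-DilatedC5.yaml")
print(cfg.MODEL.RESNETS.RES5_DILATION)  # -> 2
print(cfg.MODEL.ROI_HEADS.NAME)         # -> "StandardROIHeads"
```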